qemu-devel.nongnu.org archive mirror
From: "Dr. David Alan Gilbert" <dgilbert@redhat.com>
To: Juan Quintela <quintela@redhat.com>
Cc: qemu-devel@nongnu.org
Subject: Re: [Qemu-devel] [PATCH 25/31] ram: Use the RAMState bytes_transferred parameter
Date: Fri, 17 Mar 2017 09:57:43 +0000
Message-ID: <20170317095743.GB2396@work-vm>
In-Reply-To: <20170315135021.6978-26-quintela@redhat.com>

* Juan Quintela (quintela@redhat.com) wrote:
> In some places it was still being passed around by reference; just use it directly from RAMState.
> 
> Signed-off-by: Juan Quintela <quintela@redhat.com>

Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
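
For illustration, a minimal standalone C sketch of the pattern this patch applies (RAMStateSketch, save_page_old and save_page_new are hypothetical names, not the actual QEMU code): a counter that used to be threaded through every call as a uint64_t * out-parameter now lives in the state struct that the helpers already receive, so the extra parameter can be dropped from each signature.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for RAMState; only the field relevant here. */
typedef struct {
    uint64_t bytes_transferred;
} RAMStateSketch;

/* Before: the counter is threaded through as an out-parameter. */
static int save_page_old(uint64_t *bytes_transferred, uint64_t page_size)
{
    *bytes_transferred += page_size;   /* caller-owned accumulator */
    return 1;
}

/* After: the counter lives in the state struct the helper already
 * receives, so the extra parameter disappears from the signature. */
static int save_page_new(RAMStateSketch *rs, uint64_t page_size)
{
    rs->bytes_transferred += page_size;
    return 1;
}

int main(void)
{
    RAMStateSketch rs = { 0 };
    uint64_t old_counter = 0;

    save_page_old(&old_counter, 4096);
    save_page_new(&rs, 4096);

    printf("old: %llu, new: %llu\n",
           (unsigned long long)old_counter,
           (unsigned long long)rs.bytes_transferred);
    return 0;
}

The same mechanical change is repeated across save_xbzrle_page(), save_zero_page(), ram_save_page(), compress_page_with_multi_thread(), ram_save_compressed_page() and their callers in the diff below.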

> ---
>  migration/ram.c | 77 ++++++++++++++++++++-------------------------------------
>  1 file changed, 27 insertions(+), 50 deletions(-)
> 
> diff --git a/migration/ram.c b/migration/ram.c
> index f9933b2..9c9533d 100644
> --- a/migration/ram.c
> +++ b/migration/ram.c
> @@ -477,12 +477,10 @@ static void xbzrle_cache_zero_page(RAMState *rs, ram_addr_t current_addr)
>   * @block: block that contains the page we want to send
>   * @offset: offset inside the block for the page
>   * @last_stage: if we are at the completion stage
> - * @bytes_transferred: increase it with the number of transferred bytes
>   */
>  static int save_xbzrle_page(QEMUFile *f, RAMState *rs, uint8_t **current_data,
>                              ram_addr_t current_addr, RAMBlock *block,
> -                            ram_addr_t offset, bool last_stage,
> -                            uint64_t *bytes_transferred)
> +                            ram_addr_t offset, bool last_stage)
>  {
>      int encoded_len = 0, bytes_xbzrle;
>      uint8_t *prev_cached_page;
> @@ -538,7 +536,7 @@ static int save_xbzrle_page(QEMUFile *f, RAMState *rs, uint8_t **current_data,
>      bytes_xbzrle += encoded_len + 1 + 2;
>      rs->xbzrle_pages++;
>      rs->xbzrle_bytes += bytes_xbzrle;
> -    *bytes_transferred += bytes_xbzrle;
> +    rs->bytes_transferred += bytes_xbzrle;
>  
>      return 1;
>  }
> @@ -701,20 +699,18 @@ static void migration_bitmap_sync(RAMState *rs)
>   * @block: block that contains the page we want to send
>   * @offset: offset inside the block for the page
>   * @p: pointer to the page
> - * @bytes_transferred: increase it with the number of transferred bytes
>   */
>  static int save_zero_page(RAMState *rs, QEMUFile *f, RAMBlock *block,
> -                          ram_addr_t offset,
> -                          uint8_t *p, uint64_t *bytes_transferred)
> +                          ram_addr_t offset, uint8_t *p)
>  {
>      int pages = -1;
>  
>      if (is_zero_range(p, TARGET_PAGE_SIZE)) {
>          rs->zero_pages++;
> -        *bytes_transferred += save_page_header(f, block,
> -                                               offset | RAM_SAVE_FLAG_COMPRESS);
> +        rs->bytes_transferred += save_page_header(f, block,
> +                                                  offset | RAM_SAVE_FLAG_COMPRESS);
>          qemu_put_byte(f, 0);
> -        *bytes_transferred += 1;
> +        rs->bytes_transferred += 1;
>          pages = 1;
>      }
>  
> @@ -745,11 +741,9 @@ static void ram_release_pages(MigrationState *ms, const char *block_name,
>   * @block: block that contains the page we want to send
>   * @offset: offset inside the block for the page
>   * @last_stage: if we are at the completion stage
> - * @bytes_transferred: increase it with the number of transferred bytes
>   */
>  static int ram_save_page(RAMState *rs, MigrationState *ms, QEMUFile *f,
> -                         PageSearchStatus *pss, bool last_stage,
> -                         uint64_t *bytes_transferred)
> +                         PageSearchStatus *pss, bool last_stage)
>  {
>      int pages = -1;
>      uint64_t bytes_xmit;
> @@ -767,7 +761,7 @@ static int ram_save_page(RAMState *rs, MigrationState *ms, QEMUFile *f,
>      ret = ram_control_save_page(f, block->offset,
>                             offset, TARGET_PAGE_SIZE, &bytes_xmit);
>      if (bytes_xmit) {
> -        *bytes_transferred += bytes_xmit;
> +        rs->bytes_transferred += bytes_xmit;
>          pages = 1;
>      }
>  
> @@ -787,7 +781,7 @@ static int ram_save_page(RAMState *rs, MigrationState *ms, QEMUFile *f,
>              }
>          }
>      } else {
> -        pages = save_zero_page(rs, f, block, offset, p, bytes_transferred);
> +        pages = save_zero_page(rs, f, block, offset, p);
>          if (pages > 0) {
>              /* Must let xbzrle know, otherwise a previous (now 0'd) cached
>               * page would be stale
> @@ -797,7 +791,7 @@ static int ram_save_page(RAMState *rs, MigrationState *ms, QEMUFile *f,
>          } else if (!rs->ram_bulk_stage &&
>                     !migration_in_postcopy(ms) && migrate_use_xbzrle()) {
>              pages = save_xbzrle_page(f, rs, &p, current_addr, block,
> -                                     offset, last_stage, bytes_transferred);
> +                                     offset, last_stage);
>              if (!last_stage) {
>                  /* Can't send this cached data async, since the cache page
>                   * might get updated before it gets to the wire
> @@ -809,7 +803,7 @@ static int ram_save_page(RAMState *rs, MigrationState *ms, QEMUFile *f,
>  
>      /* XBZRLE overflow or normal page */
>      if (pages == -1) {
> -        *bytes_transferred += save_page_header(f, block,
> +        rs->bytes_transferred += save_page_header(f, block,
>                                                 offset | RAM_SAVE_FLAG_PAGE);
>          if (send_async) {
>              qemu_put_buffer_async(f, p, TARGET_PAGE_SIZE,
> @@ -818,7 +812,7 @@ static int ram_save_page(RAMState *rs, MigrationState *ms, QEMUFile *f,
>          } else {
>              qemu_put_buffer(f, p, TARGET_PAGE_SIZE);
>          }
> -        *bytes_transferred += TARGET_PAGE_SIZE;
> +        rs->bytes_transferred += TARGET_PAGE_SIZE;
>          pages = 1;
>          rs->norm_pages++;
>      }
> @@ -886,8 +880,7 @@ static inline void set_compress_params(CompressParam *param, RAMBlock *block,
>  }
>  
>  static int compress_page_with_multi_thread(RAMState *rs, QEMUFile *f,
> -                                           RAMBlock *block, ram_addr_t offset,
> -                                           uint64_t *bytes_transferred)
> +                                           RAMBlock *block, ram_addr_t offset)
>  {
>      int idx, thread_count, bytes_xmit = -1, pages = -1;
>  
> @@ -904,7 +897,7 @@ static int compress_page_with_multi_thread(RAMState *rs, QEMUFile *f,
>                  qemu_mutex_unlock(&comp_param[idx].mutex);
>                  pages = 1;
>                  rs->norm_pages++;
> -                *bytes_transferred += bytes_xmit;
> +                rs->bytes_transferred += bytes_xmit;
>                  break;
>              }
>          }
> @@ -930,12 +923,10 @@ static int compress_page_with_multi_thread(RAMState *rs, QEMUFile *f,
>   * @block: block that contains the page we want to send
>   * @offset: offset inside the block for the page
>   * @last_stage: if we are at the completion stage
> - * @bytes_transferred: increase it with the number of transferred bytes
>   */
>  static int ram_save_compressed_page(RAMState *rs, MigrationState *ms,
>                                      QEMUFile *f,
> -                                    PageSearchStatus *pss, bool last_stage,
> -                                    uint64_t *bytes_transferred)
> +                                    PageSearchStatus *pss, bool last_stage)
>  {
>      int pages = -1;
>      uint64_t bytes_xmit = 0;
> @@ -949,7 +940,7 @@ static int ram_save_compressed_page(RAMState *rs, MigrationState *ms,
>      ret = ram_control_save_page(f, block->offset,
>                                  offset, TARGET_PAGE_SIZE, &bytes_xmit);
>      if (bytes_xmit) {
> -        *bytes_transferred += bytes_xmit;
> +        rs->bytes_transferred += bytes_xmit;
>          pages = 1;
>      }
>      if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
> @@ -969,7 +960,7 @@ static int ram_save_compressed_page(RAMState *rs, MigrationState *ms,
>           */
>          if (block != rs->last_sent_block) {
>              flush_compressed_data(rs, f);
> -            pages = save_zero_page(rs, f, block, offset, p, bytes_transferred);
> +            pages = save_zero_page(rs, f, block, offset, p);
>              if (pages == -1) {
>                  /* Make sure the first page is sent out before other pages */
>                  bytes_xmit = save_page_header(f, block, offset |
> @@ -977,7 +968,7 @@ static int ram_save_compressed_page(RAMState *rs, MigrationState *ms,
>                  blen = qemu_put_compression_data(f, p, TARGET_PAGE_SIZE,
>                                                   migrate_compress_level());
>                  if (blen > 0) {
> -                    *bytes_transferred += bytes_xmit + blen;
> +                    rs->bytes_transferred += bytes_xmit + blen;
>                      rs->norm_pages++;
>                      pages = 1;
>                  } else {
> @@ -990,10 +981,9 @@ static int ram_save_compressed_page(RAMState *rs, MigrationState *ms,
>              }
>          } else {
>              offset |= RAM_SAVE_FLAG_CONTINUE;
> -            pages = save_zero_page(rs, f, block, offset, p, bytes_transferred);
> +            pages = save_zero_page(rs, f, block, offset, p);
>              if (pages == -1) {
> -                pages = compress_page_with_multi_thread(rs, f, block, offset,
> -                                                        bytes_transferred);
> +                pages = compress_page_with_multi_thread(rs, f, block, offset);
>              } else {
>                  ram_release_pages(ms, block->idstr, pss->offset, pages);
>              }
> @@ -1256,7 +1246,6 @@ err:
>   * @block: pointer to block that contains the page we want to send
>   * @offset: offset inside the block for the page;
>   * @last_stage: if we are at the completion stage
> - * @bytes_transferred: increase it with the number of transferred bytes
>   * @dirty_ram_abs: Address of the start of the dirty page in ram_addr_t space
>   *
>   * Returns: Number of pages written.
> @@ -1264,7 +1253,6 @@ err:
>  static int ram_save_target_page(RAMState *rs, MigrationState *ms, QEMUFile *f,
>                                  PageSearchStatus *pss,
>                                  bool last_stage,
> -                                uint64_t *bytes_transferred,
>                                  ram_addr_t dirty_ram_abs)
>  {
>      int res = 0;
> @@ -1273,12 +1261,9 @@ static int ram_save_target_page(RAMState *rs, MigrationState *ms, QEMUFile *f,
>      if (migration_bitmap_clear_dirty(rs, dirty_ram_abs)) {
>          unsigned long *unsentmap;
>          if (compression_switch && migrate_use_compression()) {
> -            res = ram_save_compressed_page(rs, ms, f, pss,
> -                                           last_stage,
> -                                           bytes_transferred);
> +            res = ram_save_compressed_page(rs, ms, f, pss, last_stage);
>          } else {
> -            res = ram_save_page(rs, ms, f, pss, last_stage,
> -                                bytes_transferred);
> +            res = ram_save_page(rs, ms, f, pss, last_stage);
>          }
>  
>          if (res < 0) {
> @@ -1317,21 +1302,18 @@ static int ram_save_target_page(RAMState *rs, MigrationState *ms, QEMUFile *f,
>   * @offset: offset inside the block for the page; updated to last target page
>   *          sent
>   * @last_stage: if we are at the completion stage
> - * @bytes_transferred: increase it with the number of transferred bytes
>   * @dirty_ram_abs: Address of the start of the dirty page in ram_addr_t space
>   */
>  static int ram_save_host_page(RAMState *rs, MigrationState *ms, QEMUFile *f,
>                                PageSearchStatus *pss,
>                                bool last_stage,
> -                              uint64_t *bytes_transferred,
>                                ram_addr_t dirty_ram_abs)
>  {
>      int tmppages, pages = 0;
>      size_t pagesize = qemu_ram_pagesize(pss->block);
>  
>      do {
> -        tmppages = ram_save_target_page(rs, ms, f, pss, last_stage,
> -                                        bytes_transferred, dirty_ram_abs);
> +        tmppages = ram_save_target_page(rs, ms, f, pss, last_stage, dirty_ram_abs);
>          if (tmppages < 0) {
>              return tmppages;
>          }
> @@ -1357,14 +1339,12 @@ static int ram_save_host_page(RAMState *rs, MigrationState *ms, QEMUFile *f,
>   * @rs: The RAM state
>   * @f: QEMUFile where to send the data
>   * @last_stage: if we are at the completion stage
> - * @bytes_transferred: increase it with the number of transferred bytes
>   *
>   * On systems where host-page-size > target-page-size it will send all the
>   * pages in a host page that are dirty.
>   */
>  
> -static int ram_find_and_save_block(RAMState *rs, QEMUFile *f, bool last_stage,
> -                                   uint64_t *bytes_transferred)
> +static int ram_find_and_save_block(RAMState *rs, QEMUFile *f, bool last_stage)
>  {
>      PageSearchStatus pss;
>      MigrationState *ms = migrate_get_current();
> @@ -1396,9 +1376,7 @@ static int ram_find_and_save_block(RAMState *rs, QEMUFile *f, bool last_stage,
>          }
>  
>          if (found) {
> -            pages = ram_save_host_page(rs, ms, f, &pss,
> -                                       last_stage, bytes_transferred,
> -                                       dirty_ram_abs);
> +            pages = ram_save_host_page(rs, ms, f, &pss, last_stage, dirty_ram_abs);
>          }
>      } while (!pages && again);
>  
> @@ -2046,7 +2024,7 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
>      while ((ret = qemu_file_rate_limit(f)) == 0) {
>          int pages;
>  
> -        pages = ram_find_and_save_block(rs, f, false, &rs->bytes_transferred);
> +        pages = ram_find_and_save_block(rs, f, false);
>          /* no more pages to sent */
>          if (pages == 0) {
>              done = 1;
> @@ -2107,8 +2085,7 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
>      while (true) {
>          int pages;
>  
> -        pages = ram_find_and_save_block(rs, f, !migration_in_colo_state(),
> -                                        &rs->bytes_transferred);
> +        pages = ram_find_and_save_block(rs, f, !migration_in_colo_state());
>          /* no more blocks to sent */
>          if (pages == 0) {
>              break;
> -- 
> 2.9.3
> 
--
Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK

Thread overview: 68+ messages
2017-03-15 13:49 [Qemu-devel] [PATCH 00/31] Creating RAMState for migration Juan Quintela
2017-03-15 13:49 ` [Qemu-devel] [PATCH 01/31] ram: move more fields into RAMState Juan Quintela
2017-03-16 12:09   ` Dr. David Alan Gilbert
2017-03-16 21:32     ` Philippe Mathieu-Daudé
2017-03-20 19:36     ` Juan Quintela
2017-03-15 13:49 ` [Qemu-devel] [PATCH 02/31] ram: Add dirty_rate_high_cnt to RAMState Juan Quintela
2017-03-16 12:20   ` Dr. David Alan Gilbert
2017-03-16 21:32     ` Philippe Mathieu-Daudé
2017-03-20 19:39     ` Juan Quintela
2017-03-15 13:49 ` [Qemu-devel] [PATCH 03/31] ram: move bitmap_sync_count into RAMState Juan Quintela
2017-03-16 12:21   ` Dr. David Alan Gilbert
2017-03-16 21:33     ` Philippe Mathieu-Daudé
2017-03-15 13:49 ` [Qemu-devel] [PATCH 04/31] ram: Move start time " Juan Quintela
2017-03-16 12:21   ` Dr. David Alan Gilbert
2017-03-16 21:33     ` Philippe Mathieu-Daudé
2017-03-15 13:49 ` [Qemu-devel] [PATCH 05/31] ram: Move bytes_xfer_prev " Juan Quintela
2017-03-16 12:22   ` Dr. David Alan Gilbert
2017-03-16 21:34     ` Philippe Mathieu-Daudé
2017-03-15 13:49 ` [Qemu-devel] [PATCH 06/31] ram: Move num_dirty_pages_period " Juan Quintela
2017-03-16 12:23   ` Dr. David Alan Gilbert
2017-03-16 21:35     ` Philippe Mathieu-Daudé
2017-03-15 13:49 ` [Qemu-devel] [PATCH 07/31] ram: Move xbzrle_cache_miss_prev " Juan Quintela
2017-03-16 12:24   ` Dr. David Alan Gilbert
2017-03-16 21:35     ` Philippe Mathieu-Daudé
2017-03-15 13:49 ` [Qemu-devel] [PATCH 08/31] ram: Move iterations_prev " Juan Quintela
2017-03-16 12:26   ` Dr. David Alan Gilbert
2017-03-16 21:36     ` Philippe Mathieu-Daudé
2017-03-15 13:49 ` [Qemu-devel] [PATCH 09/31] ram: Move dup_pages " Juan Quintela
2017-03-16 12:27   ` Dr. David Alan Gilbert
2017-03-15 13:50 ` [Qemu-devel] [PATCH 10/31] ram: Remove unused dump_mig_dbytes_transferred() Juan Quintela
2017-03-16 15:48   ` Dr. David Alan Gilbert
2017-03-15 13:50 ` [Qemu-devel] [PATCH 11/31] ram: Remove unused pages_skiped variable Juan Quintela
2017-03-16 15:52   ` Dr. David Alan Gilbert
2017-03-15 13:50 ` [Qemu-devel] [PATCH 12/31] ram: Move norm_pages to RAMState Juan Quintela
2017-03-16 16:09   ` Dr. David Alan Gilbert
2017-03-15 13:50 ` [Qemu-devel] [PATCH 13/31] ram: Remove norm_mig_bytes_transferred Juan Quintela
2017-03-16 16:14   ` Dr. David Alan Gilbert
2017-03-15 13:50 ` [Qemu-devel] [PATCH 14/31] ram: Move iterations into RAMState Juan Quintela
2017-03-16 20:04   ` Dr. David Alan Gilbert
2017-03-16 21:40     ` Philippe Mathieu-Daudé
2017-03-15 13:50 ` [Qemu-devel] [PATCH 15/31] ram: Move xbzrle_bytes " Juan Quintela
2017-03-15 13:50 ` [Qemu-devel] [PATCH 16/31] ram: Move xbzrle_pages " Juan Quintela
2017-03-15 13:50 ` [Qemu-devel] [PATCH 17/31] ram: Move xbzrle_cache_miss " Juan Quintela
2017-03-15 13:50 ` [Qemu-devel] [PATCH 18/31] ram: move xbzrle_cache_miss_rate " Juan Quintela
2017-03-15 13:50 ` [Qemu-devel] [PATCH 19/31] ram: move xbzrle_overflows " Juan Quintela
2017-03-16 20:07   ` Dr. David Alan Gilbert
2017-03-15 13:50 ` [Qemu-devel] [PATCH 20/31] ram: move migration_dirty_pages to RAMState Juan Quintela
2017-03-15 13:50 ` [Qemu-devel] [PATCH 21/31] ram: Everything was init to zero, so use memset Juan Quintela
2017-03-16 20:15   ` Dr. David Alan Gilbert
2017-03-15 13:50 ` [Qemu-devel] [PATCH 22/31] ram: move migration_bitmap_mutex into RAMState Juan Quintela
2017-03-16 20:21   ` Dr. David Alan Gilbert
2017-03-15 13:50 ` [Qemu-devel] [PATCH 23/31] ram: Move migration_bitmap_rcu " Juan Quintela
2017-03-17  9:51   ` Dr. David Alan Gilbert
2017-03-20 20:10     ` Juan Quintela
2017-03-15 13:50 ` [Qemu-devel] [PATCH 24/31] ram: Move bytes_transferred " Juan Quintela
2017-03-15 13:50 ` [Qemu-devel] [PATCH 25/31] ram: Use the RAMState bytes_transferred parameter Juan Quintela
2017-03-17  9:57   ` Dr. David Alan Gilbert [this message]
2017-03-15 13:50 ` [Qemu-devel] [PATCH 26/31] ram: Remove ram_save_remaining Juan Quintela
2017-03-15 13:50 ` [Qemu-devel] [PATCH 27/31] ram: Move last_req_rb to RAMState Juan Quintela
2017-03-17 10:14   ` Dr. David Alan Gilbert
2017-03-20 20:13     ` Juan Quintela
2017-03-15 13:50 ` [Qemu-devel] [PATCH 28/31] ram: Create ram_dirty_sync_count() Juan Quintela
2017-03-15 13:50 ` [Qemu-devel] [PATCH 29/31] ram: Remove dirty_bytes_rate Juan Quintela
2017-03-17 10:21   ` Dr. David Alan Gilbert
2017-03-15 13:50 ` [Qemu-devel] [PATCH 30/31] ram: move dirty_pages_rate to RAMState Juan Quintela
2017-03-17 10:45   ` Dr. David Alan Gilbert
2017-03-15 13:50 ` [Qemu-devel] [PATCH 31/31] ram: move postcopy_requests into RAMState Juan Quintela
2017-03-15 14:25 ` [Qemu-devel] [PATCH 00/31] Creating RAMState for migration no-reply
