From: Peter Xu <peterx@redhat.com>
To: Denis Plotnikov <dplotnikov@virtuozzo.com>
Cc: dgilbert@redhat.com, quintela@redhat.com, pbonzini@redhat.com,
	qemu-devel@nongnu.org
Subject: Re: [Qemu-devel] [PATCH v1 10/17] background snapshots: adapt the page queueing code for using page copies
Date: Fri, 20 Jul 2018 16:39:16 +0800
Message-ID: <20180720083915.GS4071@xz-mi>
In-Reply-To: <20180718154200.26777-11-dplotnikov@virtuozzo.com>

On Wed, Jul 18, 2018 at 06:41:53PM +0300, Denis Plotnikov wrote:
> The background snapshot uses memory page copying to seal the page
> memory content. The patch adapts the migration infrastructure to save
> copies of the pages.

Again, since the previous patch only defined some fields that are first
used in this patch, I think you can squash it into this one.

> 
> Signed-off-by: Denis Plotnikov <dplotnikov@virtuozzo.com>
> ---
>  migration/migration.c |  2 +-
>  migration/ram.c       | 59 ++++++++++++++++++++++++++++++++-----------
>  migration/ram.h       |  3 ++-
>  3 files changed, 47 insertions(+), 17 deletions(-)
> 
> diff --git a/migration/migration.c b/migration/migration.c
> index 87096d23ef..131d0904e4 100644
> --- a/migration/migration.c
> +++ b/migration/migration.c
> @@ -1716,7 +1716,7 @@ static void migrate_handle_rp_req_pages(MigrationState *ms, const char* rbname,
>          return;
>      }
>  
> -    if (ram_save_queue_pages(rbname, start, len)) {
> +    if (ram_save_queue_pages(NULL, rbname, start, len, NULL)) {
>          mark_source_rp_bad(ms);
>      }
>  }
> diff --git a/migration/ram.c b/migration/ram.c
> index dc7dfe0726..3c4ccd85b4 100644
> --- a/migration/ram.c
> +++ b/migration/ram.c
> @@ -976,7 +976,12 @@ static int ram_save_page(RAMState *rs, PageSearchStatus *pss, bool last_stage)
>      RAMBlock *block = pss->block;
>      ram_addr_t offset = pss->page << TARGET_PAGE_BITS;
>  
> -    p = block->host + offset;
> +    if (pss->page_copy) {
> +        p = pss->page_copy;
> +    } else {
> +        p = block->host + offset;
> +    }
> +
>      trace_ram_save_page(block->idstr, (uint64_t)offset, p);
>  
>      /* In doubt sent page as normal */
> @@ -1003,13 +1008,18 @@ static int ram_save_page(RAMState *rs, PageSearchStatus *pss, bool last_stage)
>      } else {
>          pages = save_zero_page(rs, block, offset, p);
>          if (pages > 0) {
> -            /* Must let xbzrle know, otherwise a previous (now 0'd) cached
> -             * page would be stale
> -             */
> -            xbzrle_cache_zero_page(rs, current_addr);
> -            ram_release_pages(block->idstr, offset, pages);
> +            if (pss->page_copy) {
> +                qemu_madvise(p, TARGET_PAGE_SIZE, MADV_DONTNEED);
> +            } else {
> +                /* Must let xbzrle know, otherwise a previous (now 0'd) cached
> +                 * page would be stale
> +                 */
> +                xbzrle_cache_zero_page(rs, current_addr);
> +                ram_release_pages(block->idstr, offset, pages);
> +            }
>          } else if (!rs->ram_bulk_stage &&
> -                   !migration_in_postcopy() && migrate_use_xbzrle()) {
> +                   !migration_in_postcopy() && migrate_use_xbzrle() &&
> +                   !migrate_background_snapshot()) {
>              pages = save_xbzrle_page(rs, &p, current_addr, block,
>                                       offset, last_stage);
>              if (!last_stage) {
> @@ -1026,9 +1036,10 @@ static int ram_save_page(RAMState *rs, PageSearchStatus *pss, bool last_stage)
>          ram_counters.transferred +=
>              save_page_header(rs, rs->f, block, offset | RAM_SAVE_FLAG_PAGE);
>          if (send_async) {
> -            qemu_put_buffer_async(rs->f, p, TARGET_PAGE_SIZE,
> -                                  migrate_release_ram() &
> -                                  migration_in_postcopy());
> +            bool may_free = migrate_background_snapshot() ||
> +                            (migrate_release_ram() &&
> +                             migration_in_postcopy());
> +            qemu_put_buffer_async(rs->f, p, TARGET_PAGE_SIZE, may_free);
>          } else {
>              qemu_put_buffer(rs->f, p, TARGET_PAGE_SIZE);
>          }
> @@ -1269,7 +1280,8 @@ static bool find_dirty_block(RAMState *rs, PageSearchStatus *pss, bool *again)
>   * @rs: current RAM state
>   * @offset: used to return the offset within the RAMBlock
>   */
> -static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset)
> +static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset,
> +                              void **page_copy)
>  {
>      RAMBlock *block = NULL;
>  
> @@ -1279,10 +1291,14 @@ static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset)
>                                  QSIMPLEQ_FIRST(&rs->src_page_requests);
>          block = entry->rb;
>          *offset = entry->offset;
> +        *page_copy = entry->page_copy;
>  
>          if (entry->len > TARGET_PAGE_SIZE) {
>              entry->len -= TARGET_PAGE_SIZE;
>              entry->offset += TARGET_PAGE_SIZE;
> +            if (entry->page_copy) {
> +                entry->page_copy += TARGET_PAGE_SIZE / sizeof(void *);
> +            }
>          } else {
>              memory_region_unref(block->mr);
>              QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req);
> @@ -1309,9 +1325,10 @@ static bool get_queued_page(RAMState *rs, PageSearchStatus *pss)
>      RAMBlock  *block;
>      ram_addr_t offset;
>      bool dirty;
> +    void *page_copy;
>  
>      do {
> -        block = unqueue_page(rs, &offset);
> +        block = unqueue_page(rs, &offset, &page_copy);
>          /*
>           * We're sending this page, and since it's postcopy nothing else
>           * will dirty it, and we must make sure it doesn't get sent again
> @@ -1349,6 +1366,7 @@ static bool get_queued_page(RAMState *rs, PageSearchStatus *pss)
>           */
>          pss->block = block;
>          pss->page = offset >> TARGET_PAGE_BITS;
> +        pss->page_copy = page_copy;
>      }
>  
>      return !!block;
> @@ -1386,17 +1404,25 @@ static void migration_page_queue_free(RAMState *rs)
>   *
>   * @rbname: Name of the RAMBLock of the request. NULL means the
>   *          same that last one.
> + * @block: RAMBlock to use. block and rbname have mutualy exclusive
> + *         semantic with higher priority of the block.

If "mutual exclusive" then no priority at all?  Maybe simply:

  @block: RAMBlock to use.  When @block is provided, then @rbname is ignored.
 
>   * @start: starting address from the start of the RAMBlock
>   * @len: length (in bytes) to send
> + * @page_copy: the address the page should be written from

Maybe:

  @page_copy: the page to copy to destination.  If not specified, will
              use the page data specified by @start and @len.
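
Putting the two suggestions together, the parameter block could read
roughly like below (just a sketch of the lines quoted above, keeping the
rest of the comment as it is; adjust the wording as you like):

   * @rbname: Name of the RAMBlock of the request.  NULL means the
   *          same that last one.
   * @block: RAMBlock to use.  When @block is provided, @rbname is ignored.
   * @start: starting address from the start of the RAMBlock
   * @len: length (in bytes) to send
   * @page_copy: the page to copy to destination.  If not specified, will
   *             use the page data specified by @start and @len.

Then a background snapshot caller could queue a prepared copy directly,
something like the below (hypothetical caller, only to show the intended
usage; page_offset/page_copy are made-up names):

  /* page_offset/page_copy: made-up names, for illustration only */
  ram_save_queue_pages(block, NULL, page_offset, TARGET_PAGE_SIZE, page_copy);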

>   */
> -int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len)
> +int ram_save_queue_pages(RAMBlock *block, const char *rbname,
> +                         ram_addr_t start, ram_addr_t len, void *page_copy)
>  {
>      RAMBlock *ramblock;
>      RAMState *rs = ram_state;
>  
>      ram_counters.postcopy_requests++;
> +
>      rcu_read_lock();
> -    if (!rbname) {
> +
> +    if (block) {
> +        ramblock = block;
> +    } else if (!rbname) {
>          /* Reuse last RAMBlock */
>          ramblock = rs->last_req_rb;
>  
> @@ -1431,6 +1457,7 @@ int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len)
>      new_entry->rb = ramblock;
>      new_entry->offset = start;
>      new_entry->len = len;
> +    new_entry->page_copy = page_copy;
>  
>      memory_region_ref(ramblock->mr);
>      qemu_mutex_lock(&rs->src_page_req_mutex);
> @@ -1468,7 +1495,8 @@ static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss,
>           * xbzrle can do better than compression.
>           */
>          if (migrate_use_compression() &&
> -            (rs->ram_bulk_stage || !migrate_use_xbzrle())) {
> +            (rs->ram_bulk_stage || !migrate_use_xbzrle()) &&
> +            !migrate_background_snapshot()) {

This seems unnecessary - in the first patch you have already made sure
that compression is not enabled during a live snapshot, so we should
be fine since we check migrate_use_compression() first.
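
Just for reference, I would expect the first patch to add something like
the below to migrate_caps_check() (the capability name is only my guess,
so take this as a sketch rather than the actual code):

  /* Sketch only: MIGRATION_CAPABILITY_BACKGROUND_SNAPSHOT is my guess at
   * the capability name defined in patch 01; the point is to reject the
   * incompatible combination up front when capabilities are set. */
  if (cap_list[MIGRATION_CAPABILITY_BACKGROUND_SNAPSHOT] &&
      cap_list[MIGRATION_CAPABILITY_COMPRESS]) {
      error_setg(errp, "Background snapshot is not compatible "
                 "with compression");
      return false;
  }

With that in place migrate_use_compression() can never return true while
a background snapshot is running, so the extra check here is redundant.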

>              res = ram_save_compressed_page(rs, pss, last_stage);
>          } else {
>              res = ram_save_page(rs, pss, last_stage);
> @@ -1706,6 +1734,7 @@ static int ram_find_and_save_block(RAMState *rs, bool last_stage)
>      pss.block = rs->last_seen_block;
>      pss.page = rs->last_page;
>      pss.complete_round = false;
> +    pss.page_copy = NULL;
>  
>      if (!pss.block) {
>          pss.block = QLIST_FIRST_RCU(&ram_list.blocks);
> diff --git a/migration/ram.h b/migration/ram.h
> index 4c463597a5..c3679ba65e 100644
> --- a/migration/ram.h
> +++ b/migration/ram.h
> @@ -46,7 +46,8 @@ int multifd_load_setup(void);
>  int multifd_load_cleanup(Error **errp);
>  
>  uint64_t ram_pagesize_summary(void);
> -int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len);
> +int ram_save_queue_pages(RAMBlock *block, const char *rbname,
> +                         ram_addr_t start, ram_addr_t len, void *page_copy);
>  void acct_update_position(QEMUFile *f, size_t size, bool zero);
>  void ram_debug_dump_bitmap(unsigned long *todump, bool expected,
>                             unsigned long pages);
> -- 
> 2.17.0
> 
> 

Regards,

-- 
Peter Xu

Thread overview: 34+ messages
2018-07-18 15:41 [Qemu-devel] [PATCH v1 00/17] Background snapshots Denis Plotnikov
2018-07-18 15:41 ` [Qemu-devel] [PATCH v1 01/17] migration: add background snapshot capability Denis Plotnikov
2018-07-20  5:14   ` Peter Xu
2018-07-18 15:41 ` [Qemu-devel] [PATCH v1 02/17] bitops: add some atomic versions of bitmap operations Denis Plotnikov
2018-07-20  5:09   ` Peter Xu
2018-07-18 15:41 ` [Qemu-devel] [PATCH v1 03/17] threads: add infrastructure to process sigsegv Denis Plotnikov
2018-07-20  4:58   ` Peter Xu
2018-07-18 15:41 ` [Qemu-devel] [PATCH v1 04/17] background snapshot: make a dedicated type for ram block list Denis Plotnikov
2018-07-18 15:41 ` [Qemu-devel] [PATCH v1 05/17] ram: extend the data structures for background snapshotting Denis Plotnikov
2018-07-20  7:59   ` Peter Xu
2018-07-18 15:41 ` [Qemu-devel] [PATCH v1 06/17] background snapshot: add helpers to manage a copy of ram block list Denis Plotnikov
2018-07-20  7:57   ` Peter Xu
2018-07-18 15:41 ` [Qemu-devel] [PATCH v1 07/17] background snapshot: introduce page buffer Denis Plotnikov
2018-07-20  9:22   ` Peter Xu
2018-07-20 10:34   ` Paolo Bonzini
2018-07-18 15:41 ` [Qemu-devel] [PATCH v1 08/17] migration: add helpers to change VM memory protection rights Denis Plotnikov
2018-07-20 11:28   ` Dr. David Alan Gilbert
2018-07-18 15:41 ` [Qemu-devel] [PATCH v1 09/17] background snapshot: extend RAM request for holding a page copy pointer Denis Plotnikov
2018-07-18 15:41 ` [Qemu-devel] [PATCH v1 10/17] background snapshots: adapt the page queueing code for using page copies Denis Plotnikov
2018-07-20  8:39   ` Peter Xu [this message]
2018-07-20 11:46   ` Peter Xu
2018-07-18 15:41 ` [Qemu-devel] [PATCH v1 11/17] background snapshot: add a memory page copying function Denis Plotnikov
2018-07-18 15:41 ` [Qemu-devel] [PATCH v1 12/17] ram: add background snapshot support in ram page saving part of migration Denis Plotnikov
2018-07-18 15:41 ` [Qemu-devel] [PATCH v1 13/17] background snapshot: add write-protected page access handler function Denis Plotnikov
2018-07-18 15:41 ` [Qemu-devel] [PATCH v1 14/17] kvm: add failed memeory access exit reason Denis Plotnikov
2018-07-18 15:41 ` [Qemu-devel] [PATCH v1 15/17] kvm: add vCPU failed memeory access processing Denis Plotnikov
2018-07-20  9:44   ` Peter Xu
2018-07-18 15:41 ` [Qemu-devel] [PATCH v1 16/17] migration: move the device state saving logic to a separate function Denis Plotnikov
2018-07-18 15:42 ` [Qemu-devel] [PATCH v1 17/17] background snapshot: enable background snapshot Denis Plotnikov
2018-07-20  9:27 ` [Qemu-devel] [PATCH v1 00/17] Background snapshots Peter Xu
2018-09-04 13:00   ` Denis Plotnikov
2018-09-05  3:32     ` Peter Xu
2018-09-05  9:18       ` Denis Plotnikov
2018-09-05  9:30         ` Peter Xu
