qemu-devel.nongnu.org archive mirror
From: "Michael S. Tsirkin" <mst@redhat.com>
To: Wei Wang <wei.w.wang@intel.com>
Cc: peterx@redhat.com, david@redhat.com, qemu-devel@nongnu.org,
	dgilbert@redhat.com, quintela@redhat.com
Subject: Re: [PATCH v1] migration: clear the memory region dirty bitmap when skipping free pages
Date: Wed, 14 Jul 2021 06:27:50 -0400
Message-ID: <20210714062715-mutt-send-email-mst@kernel.org>
In-Reply-To: <20210714075104.397484-1-wei.w.wang@intel.com>

On Wed, Jul 14, 2021 at 03:51:04AM -0400, Wei Wang wrote:
> When skipping free pages, their corresponding dirty bits in the memory
> region dirty bitmap need to be cleared. Otherwise the skipped pages will
> be sent in the next round after the migration thread syncs dirty bits
> from the memory region dirty bitmap.
> 
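
For context, dirty tracking on this path is two-level; as I read the
existing design (nothing this patch introduces), it is roughly:

    /*
     * KVM / memory region dirty bitmap   <- what this patch now also clears
     *     |  migration_bitmap_sync()        for skipped free pages
     *     v
     * RAMBlock->bmap (per-page)          <- already cleared by
     *     |                                 qemu_guest_free_page_hint()
     *     v
     * pages selected for sending
     */
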
> migration_clear_memory_region_dirty_bitmap_range is put outside the
> bitmap_mutex, becasue

because?

> memory_region_clear_dirty_bitmap may block
> on the KVM slot mutex (we don't want to hold bitmap_mutex while blocked on
> another mutex), and clear_bmap_test_and_clear uses an atomic operation.
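
The resulting ordering in qemu_guest_free_page_hint() is then roughly the
following (restating the hunk further down, using the helper added by this
patch):

    /*
     * Clear the memory region dirty bitmap first, without holding
     * bitmap_mutex: memory_region_clear_dirty_bitmap() may block on the
     * KVM slot mutex, and clear_bmap_test_and_clear() is atomic, so no
     * extra locking is needed here.
     */
    migration_clear_memory_region_dirty_bitmap_range(ram_state, block,
                                                     start, npages);

    /* Only the RAMBlock bitmap update needs bitmap_mutex. */
    qemu_mutex_lock(&ram_state->bitmap_mutex);
    /* ... update block->bmap and migration_dirty_pages ... */
    qemu_mutex_unlock(&ram_state->bitmap_mutex);
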
> 
> Cc: David Hildenbrand <david@redhat.com>
> Cc: Peter Xu <peterx@redhat.com>
> Reported-by: David Hildenbrand <david@redhat.com>
> Signed-off-by: Wei Wang <wei.w.wang@intel.com>
> ---
>  capstone        |  2 +-

Seems unnecessary.

>  migration/ram.c | 73 +++++++++++++++++++++++++++++++++++++------------
>  slirp           |  2 +-
>  ui/keycodemapdb |  2 +-

These too.

>  4 files changed, 58 insertions(+), 21 deletions(-)
> 
> diff --git a/capstone b/capstone
> index f8b1b83301..22ead3e0bf 160000
> --- a/capstone
> +++ b/capstone
> @@ -1 +1 @@
> -Subproject commit f8b1b833015a4ae47110ed068e0deb7106ced66d
> +Subproject commit 22ead3e0bfdb87516656453336160e0a37b066bf
> diff --git a/migration/ram.c b/migration/ram.c
> index 88ff34f574..c44c6e2fed 100644
> --- a/migration/ram.c
> +++ b/migration/ram.c
> @@ -789,6 +789,51 @@ unsigned long migration_bitmap_find_dirty(RAMState *rs, RAMBlock *rb,
>      return find_next_bit(bitmap, size, start);
>  }
>  
> +static void migration_clear_memory_region_dirty_bitmap(RAMState *rs,
> +                                                       RAMBlock *rb,
> +                                                       unsigned long page)
> +{
> +    uint8_t shift;
> +    hwaddr size, start;
> +
> +    if (!rb->clear_bmap || !clear_bmap_test_and_clear(rb, page)) {
> +        return;
> +    }
> +
> +    shift = rb->clear_bmap_shift;
> +    /*
> +     * CLEAR_BITMAP_SHIFT_MIN should always guarantee this... this
> +     * can make things easier sometimes since then start address
> +     * of the small chunk will always be 64 pages aligned so the
> +     * bitmap will always be aligned to unsigned long. We should
> +     * even be able to remove this restriction but I'm simply
> +     * keeping it.
> +     */
> +    assert(shift >= 6);
> +
> +    size = 1ULL << (TARGET_PAGE_BITS + shift);
> +    start = (((ram_addr_t)page) << TARGET_PAGE_BITS) & (-size);
> +    trace_migration_bitmap_clear_dirty(rb->idstr, start, size, page);
> +    memory_region_clear_dirty_bitmap(rb->mr, start, size);
> +}
> +
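
To make the chunk arithmetic concrete, a worked example (assuming
TARGET_PAGE_BITS == 12 and clear_bmap_shift == 18, the usual default;
both values are configuration-dependent):

    hwaddr size = 1ULL << (12 + 18);   /* 1 GiB covered per clear_bmap bit */
    /* page 0x60000 is guest address 0x60000000; align down to the chunk: */
    hwaddr start = ((ram_addr_t)0x60000 << 12) & (-size);   /* 0x40000000 */
    /* so the whole [0x40000000, 0x80000000) range is handed to
     * memory_region_clear_dirty_bitmap() in one call. */
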
> +static void
> +migration_clear_memory_region_dirty_bitmap_range(RAMState *rs,
> +                                                 RAMBlock *rb,
> +                                                 unsigned long start,
> +                                                 unsigned long npages)
> +{
> +    unsigned long page_to_clear, i, nchunks;
> +    unsigned long chunk_pages = 1UL << rb->clear_bmap_shift;
> +
> +    nchunks = (start + npages) / chunk_pages - start / chunk_pages + 1;
> +
> +    for (i = 0; i < nchunks; i++) {
> +        page_to_clear = start + i * chunk_pages;
> +        migration_clear_memory_region_dirty_bitmap(rs, rb, page_to_clear);
> +    }
> +}
> +
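
One note on the nchunks computation: when start + npages is exactly a
multiple of chunk_pages, it counts one chunk past the hinted range, so the
loop clears one chunk too many. That appears benign (clearing early is
explicitly allowed, per the comment in migration_bitmap_clear_dirty()), but
it is avoidable; a sketch using QEMU's existing QEMU_ALIGN_DOWN and
QEMU_ALIGN_UP macros, not part of this patch:

    unsigned long chunk_pages = 1UL << rb->clear_bmap_shift;
    unsigned long chunk_start = QEMU_ALIGN_DOWN(start, chunk_pages);
    unsigned long chunk_end = QEMU_ALIGN_UP(start + npages, chunk_pages);
    unsigned long i;

    /* Visit each chunk overlapping [start, start + npages) exactly once. */
    for (i = chunk_start; i < chunk_end; i += chunk_pages) {
        migration_clear_memory_region_dirty_bitmap(rs, rb, i);
    }
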
>  static inline bool migration_bitmap_clear_dirty(RAMState *rs,
>                                                  RAMBlock *rb,
>                                                  unsigned long page)
> @@ -805,26 +850,9 @@ static inline bool migration_bitmap_clear_dirty(RAMState *rs,
>       * the page in the chunk we clear the remote dirty bitmap for all.
>       * Clearing it earlier won't be a problem, but too late will.
>       */
> -    if (rb->clear_bmap && clear_bmap_test_and_clear(rb, page)) {
> -        uint8_t shift = rb->clear_bmap_shift;
> -        hwaddr size = 1ULL << (TARGET_PAGE_BITS + shift);
> -        hwaddr start = (((ram_addr_t)page) << TARGET_PAGE_BITS) & (-size);
> -
> -        /*
> -         * CLEAR_BITMAP_SHIFT_MIN should always guarantee this... this
> -         * can make things easier sometimes since then start address
> -         * of the small chunk will always be 64 pages aligned so the
> -         * bitmap will always be aligned to unsigned long.  We should
> -         * even be able to remove this restriction but I'm simply
> -         * keeping it.
> -         */
> -        assert(shift >= 6);
> -        trace_migration_bitmap_clear_dirty(rb->idstr, start, size, page);
> -        memory_region_clear_dirty_bitmap(rb->mr, start, size);
> -    }
> +    migration_clear_memory_region_dirty_bitmap(rs, rb, page);
>  
>      ret = test_and_clear_bit(page, rb->bmap);
> -
>      if (ret) {
>          rs->migration_dirty_pages--;
>      }
> @@ -2742,6 +2770,15 @@ void qemu_guest_free_page_hint(void *addr, size_t len)
>          start = offset >> TARGET_PAGE_BITS;
>          npages = used_len >> TARGET_PAGE_BITS;
>  
> +        /*
> +         * The skipped free pages are equivalent to having been sent, from
> +         * clear_bmap's perspective, so clear their initially-set bits in the
> +         * memory region bitmap. Otherwise those skipped pages will be sent
> +         * in the next round after syncing from the memory region bitmap.
> +         */
> +        migration_clear_memory_region_dirty_bitmap_range(ram_state, block,
> +                                                         start, npages);
> +
>          qemu_mutex_lock(&ram_state->bitmap_mutex);
>          ram_state->migration_dirty_pages -=
>                        bitmap_count_one_with_offset(block->bmap, start, npages);
> diff --git a/slirp b/slirp
> index 8f43a99191..2faae0f778 160000
> --- a/slirp
> +++ b/slirp
> @@ -1 +1 @@
> -Subproject commit 8f43a99191afb47ca3f3c6972f6306209f367ece
> +Subproject commit 2faae0f778f818fadc873308f983289df697eb93
> diff --git a/ui/keycodemapdb b/ui/keycodemapdb
> index 6119e6e19a..320f92c36a 160000
> --- a/ui/keycodemapdb
> +++ b/ui/keycodemapdb
> @@ -1 +1 @@
> -Subproject commit 6119e6e19a050df847418de7babe5166779955e4
> +Subproject commit 320f92c36a80bfafc5d57834592a7be5fd79f104
> -- 
> 2.25.1



Thread overview: 5+ messages
2021-07-14  7:51 [PATCH v1] migration: clear the memory region dirty bitmap when skipping free pages Wei Wang
2021-07-14 10:27 ` Michael S. Tsirkin [this message]
2021-07-14 10:30   ` David Hildenbrand
2021-07-14 14:58     ` Wang, Wei W
2021-07-14 15:24       ` Peter Xu
