From: Wei Wang <wei.w.wang@intel.com>
To: "Dr. David Alan Gilbert" <dgilbert@redhat.com>
Cc: Peter Xu <peterx@redhat.com>,
	qemu-devel@nongnu.org, virtio-dev@lists.oasis-open.org,
	mst@redhat.com, quintela@redhat.com, pbonzini@redhat.com,
	liliang.opensource@gmail.com, yang.zhang.wz@gmail.com,
	quan.xu0@gmail.com, nilal@redhat.com, riel@redhat.com
Subject: Re: [Qemu-devel] [PATCH v8 4/6] migration/ram.c: add ram save state notifiers
Date: Wed, 20 Jun 2018 19:15:38 +0800
Message-ID: <5B2A375A.4030504@intel.com>
In-Reply-To: <20180619173109.GM2368@work-vm>

On 06/20/2018 01:31 AM, Dr. David Alan Gilbert wrote:
> * Wei Wang (wei.w.wang@intel.com) wrote:
>> On 06/12/2018 03:50 PM, Peter Xu wrote:
>>> On Fri, Jun 08, 2018 at 04:10:41PM +0800, Wei Wang wrote:
>>>> This patch adds a ram save state notifier list, and exposes RAMState for
>>>> the notifier callbacks to use.
>>>>
>>>> Signed-off-by: Wei Wang <wei.w.wang@intel.com>
>>>> CC: Dr. David Alan Gilbert <dgilbert@redhat.com>
>>>> CC: Juan Quintela <quintela@redhat.com>
>>>> CC: Michael S. Tsirkin <mst@redhat.com>
>>>> CC: Peter Xu <peterx@redhat.com>
>>>> ---
>>>>    include/migration/misc.h | 52 +++++++++++++++++++++++++++++++++++++++
>>>>    migration/ram.c          | 64 +++++++++++++++++-------------------------------
>>>>    2 files changed, 75 insertions(+), 41 deletions(-)
>>>>
>>>> diff --git a/include/migration/misc.h b/include/migration/misc.h
>>>> index 113320e..b970d7d 100644
>>>> --- a/include/migration/misc.h
>>>> +++ b/include/migration/misc.h
>>>> @@ -16,9 +16,61 @@
>>>>    #include "exec/cpu-common.h"
>>>>    #include "qemu/notify.h"
>>>> +#include "qemu/thread.h"
>>>>    /* migration/ram.c */
>>>> +typedef enum RamSaveState {
>>>> +    RAM_SAVE_ERR = 0,
>>>> +    RAM_SAVE_RESET = 1,
>>>> +    RAM_SAVE_BEFORE_SYNC_BITMAP = 2,
>>>> +    RAM_SAVE_AFTER_SYNC_BITMAP = 3,
>>>> +    RAM_SAVE_MAX = 4,
>>>> +} RamSaveState;
>>>> +
>>>> +/* State of RAM for migration */
>>>> +typedef struct RAMState {
>>>> +    /* QEMUFile used for this migration */
>>>> +    QEMUFile *f;
>>>> +    /* Last block that we have visited searching for dirty pages */
>>>> +    RAMBlock *last_seen_block;
>>>> +    /* Last block from where we have sent data */
>>>> +    RAMBlock *last_sent_block;
>>>> +    /* Last dirty target page we have sent */
>>>> +    ram_addr_t last_page;
>>>> +    /* last ram version we have seen */
>>>> +    uint32_t last_version;
>>>> +    /* We are in the first round */
>>>> +    bool ram_bulk_stage;
>>>> +    /* How many times we have dirty too many pages */
>>>> +    int dirty_rate_high_cnt;
>>>> +    /* ram save states used for notifiers */
>>>> +    int ram_save_state;
>>>> +    /* these variables are used for bitmap sync */
>>>> +    /* last time we did a full bitmap_sync */
>>>> +    int64_t time_last_bitmap_sync;
>>>> +    /* bytes transferred at start_time */
>>>> +    uint64_t bytes_xfer_prev;
>>>> +    /* number of dirty pages since start_time */
>>>> +    uint64_t num_dirty_pages_period;
>>>> +    /* xbzrle misses since the beginning of the period */
>>>> +    uint64_t xbzrle_cache_miss_prev;
>>>> +    /* number of iterations at the beginning of period */
>>>> +    uint64_t iterations_prev;
>>>> +    /* Iterations since start */
>>>> +    uint64_t iterations;
>>>> +    /* number of dirty bits in the bitmap */
>>>> +    uint64_t migration_dirty_pages;
>>>> +    /* protects modification of the bitmap */
>>>> +    QemuMutex bitmap_mutex;
>>>> +    /* The RAMBlock used in the last src_page_requests */
>>>> +    RAMBlock *last_req_rb;
>>>> +    /* Queue of outstanding page requests from the destination */
>>>> +    QemuMutex src_page_req_mutex;
>>>> +    QSIMPLEQ_HEAD(src_page_requests, RAMSrcPageRequest) src_page_requests;
>>>> +} RAMState;
>>> Why move this chunk?  Can it be avoided?
>>>
>>>> +
>>>> +void add_ram_save_state_change_notifier(Notifier *notify);
>>>>    void ram_mig_init(void);
>>>>    void qemu_guest_free_page_hint(void *addr, size_t len);
>>>> diff --git a/migration/ram.c b/migration/ram.c
>>>> index 237f11e..d45b5a4 100644
>>>> --- a/migration/ram.c
>>>> +++ b/migration/ram.c
>>>> @@ -56,6 +56,9 @@
>>>>    #include "qemu/uuid.h"
>>>>    #include "savevm.h"
>>>> +static NotifierList ram_save_state_notifiers =
>>>> +    NOTIFIER_LIST_INITIALIZER(ram_save_state_notifiers);
>>>> +
>>>>    /***********************************************************/
>>>>    /* ram save/restore */
>>>> @@ -267,49 +270,18 @@ struct RAMSrcPageRequest {
>>>>        QSIMPLEQ_ENTRY(RAMSrcPageRequest) next_req;
>>>>    };
>>>> -/* State of RAM for migration */
>>>> -struct RAMState {
>>>> -    /* QEMUFile used for this migration */
>>>> -    QEMUFile *f;
>>>> -    /* Last block that we have visited searching for dirty pages */
>>>> -    RAMBlock *last_seen_block;
>>>> -    /* Last block from where we have sent data */
>>>> -    RAMBlock *last_sent_block;
>>>> -    /* Last dirty target page we have sent */
>>>> -    ram_addr_t last_page;
>>>> -    /* last ram version we have seen */
>>>> -    uint32_t last_version;
>>>> -    /* We are in the first round */
>>>> -    bool ram_bulk_stage;
>>>> -    /* How many times we have dirty too many pages */
>>>> -    int dirty_rate_high_cnt;
>>>> -    /* these variables are used for bitmap sync */
>>>> -    /* last time we did a full bitmap_sync */
>>>> -    int64_t time_last_bitmap_sync;
>>>> -    /* bytes transferred at start_time */
>>>> -    uint64_t bytes_xfer_prev;
>>>> -    /* number of dirty pages since start_time */
>>>> -    uint64_t num_dirty_pages_period;
>>>> -    /* xbzrle misses since the beginning of the period */
>>>> -    uint64_t xbzrle_cache_miss_prev;
>>>> -    /* number of iterations at the beginning of period */
>>>> -    uint64_t iterations_prev;
>>>> -    /* Iterations since start */
>>>> -    uint64_t iterations;
>>>> -    /* number of dirty bits in the bitmap */
>>>> -    uint64_t migration_dirty_pages;
>>>> -    /* protects modification of the bitmap */
>>>> -    QemuMutex bitmap_mutex;
>>>> -    /* The RAMBlock used in the last src_page_requests */
>>>> -    RAMBlock *last_req_rb;
>>>> -    /* Queue of outstanding page requests from the destination */
>>>> -    QemuMutex src_page_req_mutex;
>>>> -    QSIMPLEQ_HEAD(src_page_requests, RAMSrcPageRequest) src_page_requests;
>>>> -};
>>>> -typedef struct RAMState RAMState;
>>>> -
>>>>    static RAMState *ram_state;
>>>> +void add_ram_save_state_change_notifier(Notifier *notify)
>>>> +{
>>>> +    notifier_list_add(&ram_save_state_notifiers, notify);
>>>> +}
>>>> +
>>>> +static void notify_ram_save_state_change_notifier(void)
>>>> +{
>>>> +    notifier_list_notify(&ram_save_state_notifiers, ram_state);
>>>> +}
>>>> +
>>>>    uint64_t ram_bytes_remaining(void)
>>>>    {
>>>>        return ram_state ? (ram_state->migration_dirty_pages * TARGET_PAGE_SIZE) :
>>>> @@ -1139,6 +1111,9 @@ static void migration_bitmap_sync(RAMState *rs)
>>>>        int64_t end_time;
>>>>        uint64_t bytes_xfer_now;
>>>> +    rs->ram_save_state = RAM_SAVE_BEFORE_SYNC_BITMAP;
>>> What's the point of keeping this state after all?...
>>>
>>>> +    notify_ram_save_state_change_notifier();
>>>> +
>>>>        ram_counters.dirty_sync_count++;
>>>>        if (!rs->time_last_bitmap_sync) {
>>>> @@ -1205,6 +1180,9 @@ static void migration_bitmap_sync(RAMState *rs)
>>>>        if (migrate_use_events()) {
>>>>            qapi_event_send_migration_pass(ram_counters.dirty_sync_count, NULL);
>>>>        }
>>>> +
>>>> +    rs->ram_save_state = RAM_SAVE_AFTER_SYNC_BITMAP;
>>>> +    notify_ram_save_state_change_notifier();
>>>>    }
>>>>    /**
>>>> @@ -1961,6 +1939,8 @@ static void ram_state_reset(RAMState *rs)
>>>>        rs->last_page = 0;
>>>>        rs->last_version = ram_list.version;
>>>>        rs->ram_bulk_stage = true;
>>>> +    rs->ram_save_state = RAM_SAVE_RESET;
>>> ... and this state is even more meaningless afaict ...
>>>
>>>> +    notify_ram_save_state_change_notifier();
>>>>    }
>>>>    #define MAX_WAIT 50 /* ms, half buffered_file limit */
>>>> @@ -2709,6 +2689,8 @@ out:
>>>>        ret = qemu_file_get_error(f);
>>>>        if (ret < 0) {
>>>> +        rs->ram_save_state = RAM_SAVE_ERR;
>>>> +        notify_ram_save_state_change_notifier();
>>>>            return ret;
>>>>        }
>>> Also, I would prefer to add a general event framework for migration
>>> rather than a "ram save"-specific one.  Then we'd add SYNC_START/SYNC_END
>>> events.  There aren't many of these notifiers, and it'd be a bit awkward
>>> to introduce a separate framework each time for a similar purpose (after
>>> we have a common notifier we can merge the postcopy notifiers AFAIU).
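
A minimal sketch of the kind of shared framework being suggested, assuming a
single generic event type with per-event payloads (all names below are
hypothetical, not from any posted patch):

    /* Hypothetical shared migration event framework -- a sketch only.
     * It reuses QEMU's existing NotifierList machinery, just as the
     * posted patch does for the ram-save-specific list. */
    typedef enum MigrationEventType {
        MIG_EVENT_SYNC_START,  /* before a dirty bitmap sync */
        MIG_EVENT_SYNC_END,    /* after a dirty bitmap sync */
        MIG_EVENT_MAX,
    } MigrationEventType;

    typedef struct MigrationEvent {
        MigrationEventType type;
        void *opaque;          /* event-specific payload */
    } MigrationEvent;

    static NotifierList migration_event_notifiers =
        NOTIFIER_LIST_INITIALIZER(migration_event_notifiers);

    void migration_add_event_notifier(Notifier *notifier)
    {
        notifier_list_add(&migration_event_notifiers, notifier);
    }

    static void migration_notify_event(MigrationEventType type, void *opaque)
    {
        MigrationEvent e = { .type = type, .opaque = opaque };
        notifier_list_notify(&migration_event_notifiers, &e);
    }

migration_bitmap_sync() would then emit MIG_EVENT_SYNC_START/MIG_EVENT_SYNC_END
directly instead of first recording a state field in RAMState, and the postcopy
notifiers could later be folded into the same list.
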
>> Hi Peter,
>>
>> Thanks for the review. We just hit a small issue in the kernel part,
>> which may require some modifications to the QEMU-side interfaces. So I
>> will implement a new version for you to review in the v9 patches.
> Yes, but please do try and keep RAMState private to ram.c.
>

OK, will use functions to expose the related fields (e.g. ram_bulk_stage).
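
For instance, a minimal sketch of such an accessor, keeping RAMState private
to ram.c (the function name is hypothetical):

    /* migration/ram.c -- sketch only; only the prototype would be
     * exported through include/migration/misc.h, so callers never
     * see the RAMState layout. */
    bool ram_save_in_bulk_stage(void)
    {
        return ram_state && ram_state->ram_bulk_stage;
    }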

Best,
Wei


Thread overview: 13+ messages
2018-06-08  8:10 [Qemu-devel] [PATCH v8 0/6] virtio-balloon: free page hint reporting support Wei Wang
2018-06-08  8:10 ` [Qemu-devel] [PATCH v8 1/6] bitmap: bitmap_count_one_with_offset Wei Wang
2018-06-08  8:10 ` [Qemu-devel] [PATCH v8 2/6] migration: use bitmap_mutex in migration_bitmap_clear_dirty Wei Wang
2018-06-08  8:10 ` [Qemu-devel] [PATCH v8 3/6] migration: API to clear bits of guest free pages from the dirty bitmap Wei Wang
2018-06-08  8:10 ` [Qemu-devel] [PATCH v8 4/6] migration/ram.c: add ram save state notifiers Wei Wang
2018-06-12  7:50   ` Peter Xu
2018-06-12 11:12     ` Wei Wang
2018-06-13  2:01       ` Peter Xu
2018-06-19 17:31       ` Dr. David Alan Gilbert
2018-06-20 11:15         ` Wei Wang [this message]
2018-06-08  8:10 ` [Qemu-devel] [PATCH v8 5/6] migration: move migrate_postcopy() to include/migration/misc.h Wei Wang
2018-06-29 10:35   ` Dr. David Alan Gilbert
2018-06-08  8:10 ` [Qemu-devel] [PATCH v8 6/6] virtio-balloon: VIRTIO_BALLOON_F_FREE_PAGE_HINT Wei Wang
