* [Qemu-devel] [PATCH] Change the method to calculate dirty-pages-rate
From: Chao Fan @ 2017-03-14 1:26 UTC
To: pbonzini, quintela, dgilbert, qemu-devel, berrange
Cc: caoj.fnst, douly.fnst, maozy.fnst, Chao Fan, Li Zhijian
In cpu_physical_memory_sync_dirty_bitmap() in include/exec/ram_addr.h:

    if (src[idx][offset]) {
        unsigned long bits = atomic_xchg(&src[idx][offset], 0);
        unsigned long new_dirty;
        new_dirty = ~dest[k];
        dest[k] |= bits;
        new_dirty &= bits;
        num_dirty += ctpopl(new_dirty);
    }

After this code executes, only the pages that are dirty in
dirty_memory[DIRTY_MEMORY_MIGRATION] but not yet set in the destination
bitmap (dest) are counted.
For example:
When ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION] = 0b00001111,
and atomic_rcu_read(&migration_bitmap_rcu)->bmap = 0b00000011,
new_dirty will be 0b00001100, so the function returns 2 instead of the
expected 4.
All the pages dirtied in dirty_memory[DIRTY_MEMORY_MIGRATION] during this
sync period are newly dirtied, so they should all be counted.
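To illustrate the difference with the numbers above, here is a minimal
standalone sketch (not QEMU code: single-word toy bitmaps, and the
GCC/Clang popcount builtin standing in for ctpopl()):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* Values from the example above. */
        unsigned long src  = 0x0f;  /* dirty_memory[DIRTY_MEMORY_MIGRATION] */
        unsigned long dest = 0x03;  /* migration_bitmap_rcu->bmap           */

        uint64_t num_dirty = 0;        /* what the function returns today     */
        int64_t real_dirty_pages = 0;  /* what this patch additionally counts */

        unsigned long bits = src;      /* atomic_xchg(&src[idx][offset], 0)   */
        src = 0;

        real_dirty_pages += __builtin_popcountl(bits);  /* all 4 dirty pages  */

        unsigned long new_dirty = ~dest;
        dest |= bits;
        new_dirty &= bits;
        num_dirty += __builtin_popcountl(new_dirty);    /* only the 2 new ones */

        printf("num_dirty = %lu, real_dirty_pages = %ld\n",
               (unsigned long)num_dirty, (long)real_dirty_pages);
        /* prints: num_dirty = 2, real_dirty_pages = 4 */
        return 0;
    }

The existing return value (num_dirty, 2 here) is still what
migration_dirty_pages needs, while real_dirty_pages (4 here) is what the
dirty-pages-rate calculation needs.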
Signed-off-by: Chao Fan <fanc.fnst@cn.fujitsu.com>
Signed-off-by: Li Zhijian <lizhijian@cn.fujitsu.com>
---
include/exec/ram_addr.h | 5 ++++-
migration/ram.c | 7 +++----
2 files changed, 7 insertions(+), 5 deletions(-)
diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h
index cd432e7..b05dc84 100644
--- a/include/exec/ram_addr.h
+++ b/include/exec/ram_addr.h
@@ -355,7 +355,8 @@ static inline void cpu_physical_memory_clear_dirty_range(ram_addr_t start,
static inline
uint64_t cpu_physical_memory_sync_dirty_bitmap(unsigned long *dest,
ram_addr_t start,
- ram_addr_t length)
+ ram_addr_t length,
+ int64_t *real_dirty_pages)
{
ram_addr_t addr;
unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);
@@ -379,6 +380,7 @@ uint64_t cpu_physical_memory_sync_dirty_bitmap(unsigned long *dest,
if (src[idx][offset]) {
unsigned long bits = atomic_xchg(&src[idx][offset], 0);
unsigned long new_dirty;
+ *real_dirty_pages += ctpopl(bits);
new_dirty = ~dest[k];
dest[k] |= bits;
new_dirty &= bits;
@@ -398,6 +400,7 @@ uint64_t cpu_physical_memory_sync_dirty_bitmap(unsigned long *dest,
start + addr,
TARGET_PAGE_SIZE,
DIRTY_MEMORY_MIGRATION)) {
+ *real_dirty_pages += 1;
long k = (start + addr) >> TARGET_PAGE_BITS;
if (!test_and_set_bit(k, dest)) {
num_dirty++;
diff --git a/migration/ram.c b/migration/ram.c
index 719425b..fdf9a85 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -576,18 +576,18 @@ static inline bool migration_bitmap_clear_dirty(ram_addr_t addr)
return ret;
}
+static int64_t num_dirty_pages_period;
static void migration_bitmap_sync_range(ram_addr_t start, ram_addr_t length)
{
unsigned long *bitmap;
bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
- migration_dirty_pages +=
- cpu_physical_memory_sync_dirty_bitmap(bitmap, start, length);
+ migration_dirty_pages += cpu_physical_memory_sync_dirty_bitmap(bitmap,
+ start, length, &num_dirty_pages_period);
}
/* Fix me: there are too many global variables used in migration process. */
static int64_t start_time;
static int64_t bytes_xfer_prev;
-static int64_t num_dirty_pages_period;
static uint64_t xbzrle_cache_miss_prev;
static uint64_t iterations_prev;
@@ -648,7 +648,6 @@ static void migration_bitmap_sync(void)
trace_migration_bitmap_sync_end(migration_dirty_pages
- num_dirty_pages_init);
- num_dirty_pages_period += migration_dirty_pages - num_dirty_pages_init;
end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
/* more than 1 second = 1000 millisecons */
--
2.9.3
* Re: [Qemu-devel] [PATCH] Change the method to calculate dirty-pages-rate
From: Li Zhijian @ 2017-03-14 1:41 UTC
To: Chao Fan, pbonzini, quintela, dgilbert, qemu-devel, berrange
Cc: caoj.fnst, douly.fnst, maozy.fnst
On 03/14/2017 09:26 AM, Chao Fan wrote:
> In cpu_physical_memory_sync_dirty_bitmap() in include/exec/ram_addr.h:
>
>     if (src[idx][offset]) {
>         unsigned long bits = atomic_xchg(&src[idx][offset], 0);
>         unsigned long new_dirty;
>         new_dirty = ~dest[k];
>         dest[k] |= bits;
>         new_dirty &= bits;
>         num_dirty += ctpopl(new_dirty);
>     }
>
> After this code executes, only the pages that are dirty in
> dirty_memory[DIRTY_MEMORY_MIGRATION] but not yet set in the destination
> bitmap (dest) are counted.
> For example:
> When ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION] = 0b00001111,
> and atomic_rcu_read(&migration_bitmap_rcu)->bmap = 0b00000011,
> new_dirty will be 0b00001100, so the function returns 2 instead of the
> expected 4.
> All the pages dirtied in dirty_memory[DIRTY_MEMORY_MIGRATION] during this
> sync period are newly dirtied, so they should all be counted.
>
> Signed-off-by: Chao Fan <fanc.fnst@cn.fujitsu.com>
> Signed-off-by: Li Zhijian <lizhijian@cn.fujitsu.com>
> ---
> include/exec/ram_addr.h | 5 ++++-
> migration/ram.c | 7 +++----
> 2 files changed, 7 insertions(+), 5 deletions(-)
>
> diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h
> index cd432e7..b05dc84 100644
> --- a/include/exec/ram_addr.h
> +++ b/include/exec/ram_addr.h
> @@ -355,7 +355,8 @@ static inline void cpu_physical_memory_clear_dirty_range(ram_addr_t start,
> static inline
> uint64_t cpu_physical_memory_sync_dirty_bitmap(unsigned long *dest,
> ram_addr_t start,
> - ram_addr_t length)
> + ram_addr_t length,
> + int64_t *real_dirty_pages)
> {
> ram_addr_t addr;
> unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);
> @@ -379,6 +380,7 @@ uint64_t cpu_physical_memory_sync_dirty_bitmap(unsigned long *dest,
> if (src[idx][offset]) {
> unsigned long bits = atomic_xchg(&src[idx][offset], 0);
> unsigned long new_dirty;
> + *real_dirty_pages += ctpopl(bits);
> new_dirty = ~dest[k];
> dest[k] |= bits;
> new_dirty &= bits;
> @@ -398,6 +400,7 @@ uint64_t cpu_physical_memory_sync_dirty_bitmap(unsigned long *dest,
> start + addr,
> TARGET_PAGE_SIZE,
> DIRTY_MEMORY_MIGRATION)) {
> + *real_dirty_pages += 1;
> long k = (start + addr) >> TARGET_PAGE_BITS;
> if (!test_and_set_bit(k, dest)) {
> num_dirty++;
> diff --git a/migration/ram.c b/migration/ram.c
> index 719425b..fdf9a85 100644
> --- a/migration/ram.c
> +++ b/migration/ram.c
> @@ -576,18 +576,18 @@ static inline bool migration_bitmap_clear_dirty(ram_addr_t addr)
> return ret;
> }
>
> +static int64_t num_dirty_pages_period;
> static void migration_bitmap_sync_range(ram_addr_t start, ram_addr_t length)
> {
> unsigned long *bitmap;
> bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
> - migration_dirty_pages +=
> - cpu_physical_memory_sync_dirty_bitmap(bitmap, start, length);
> + migration_dirty_pages += cpu_physical_memory_sync_dirty_bitmap(bitmap,
> + start, length, &num_dirty_pages_period);
> }
>
> /* Fix me: there are too many global variables used in migration process. */
> static int64_t start_time;
> static int64_t bytes_xfer_prev;
> -static int64_t num_dirty_pages_period;
> static uint64_t xbzrle_cache_miss_prev;
> static uint64_t iterations_prev;
>
> @@ -648,7 +648,6 @@ static void migration_bitmap_sync(void)
>
> trace_migration_bitmap_sync_end(migration_dirty_pages
> - num_dirty_pages_init);
> - num_dirty_pages_period += migration_dirty_pages - num_dirty_pages_init;
It seems num_dirty_pages_init should also be removed, and
trace_migration_bitmap_sync_end()'s parameter updated accordingly.
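For illustration, a standalone toy model of that bookkeeping (the names
mirror migration/ram.c, but every function body here is a stand-in, not
the real code):

    #include <stdio.h>
    #include <stdint.h>

    static int64_t num_dirty_pages_period;
    static uint64_t migration_dirty_pages;

    /* Stand-in for cpu_physical_memory_sync_dirty_bitmap(): pretend the
     * guest dirtied 4 pages this round, 2 of which were not yet set in
     * the migration bitmap. */
    static uint64_t sync_dirty_bitmap(int64_t *real_dirty_pages)
    {
        *real_dirty_pages += 4;
        return 2;
    }

    static void migration_bitmap_sync_range(void)
    {
        migration_dirty_pages += sync_dirty_bitmap(&num_dirty_pages_period);
    }

    int main(void)
    {
        /* Snapshot of the period counter, taking over the role that
         * num_dirty_pages_init played for migration_dirty_pages. */
        int64_t period_prev = num_dirty_pages_period;

        migration_bitmap_sync_range();

        /* One option for trace_migration_bitmap_sync_end()'s argument;
         * note it would then count all pages dirtied in the round, not
         * only the ones newly added to the bitmap. */
        printf("dirtied this round: %ld\n",
               (long)(num_dirty_pages_period - period_prev));
        return 0;
    }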
Thanks
Zhijian
> end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
>
> /* more than 1 second = 1000 millisecons */
>
* Re: [Qemu-devel] [PATCH] Change the method to calculate dirty-pages-rate
From: Chao Fan @ 2017-03-14 1:45 UTC
To: Li Zhijian
Cc: pbonzini, quintela, dgilbert, qemu-devel, berrange, caoj.fnst,
douly.fnst, maozy.fnst
On Tue, Mar 14, 2017 at 09:41:30AM +0800, Li Zhijian wrote:
>
>
>On 03/14/2017 09:26 AM, Chao Fan wrote:
>> In cpu_physical_memory_sync_dirty_bitmap() in include/exec/ram_addr.h:
>>
>>     if (src[idx][offset]) {
>>         unsigned long bits = atomic_xchg(&src[idx][offset], 0);
>>         unsigned long new_dirty;
>>         new_dirty = ~dest[k];
>>         dest[k] |= bits;
>>         new_dirty &= bits;
>>         num_dirty += ctpopl(new_dirty);
>>     }
>>
>> After this code executes, only the pages that are dirty in
>> dirty_memory[DIRTY_MEMORY_MIGRATION] but not yet set in the destination
>> bitmap (dest) are counted.
>> For example:
>> When ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION] = 0b00001111,
>> and atomic_rcu_read(&migration_bitmap_rcu)->bmap = 0b00000011,
>> new_dirty will be 0b00001100, so the function returns 2 instead of the
>> expected 4.
>> All the pages dirtied in dirty_memory[DIRTY_MEMORY_MIGRATION] during this
>> sync period are newly dirtied, so they should all be counted.
>>
>> Signed-off-by: Chao Fan <fanc.fnst@cn.fujitsu.com>
>> Signed-off-by: Li Zhijian <lizhijian@cn.fujitsu.com>
>> ---
>> include/exec/ram_addr.h | 5 ++++-
>> migration/ram.c | 7 +++----
>> 2 files changed, 7 insertions(+), 5 deletions(-)
>>
>> diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h
>> index cd432e7..b05dc84 100644
>> --- a/include/exec/ram_addr.h
>> +++ b/include/exec/ram_addr.h
>> @@ -355,7 +355,8 @@ static inline void cpu_physical_memory_clear_dirty_range(ram_addr_t start,
>> static inline
>> uint64_t cpu_physical_memory_sync_dirty_bitmap(unsigned long *dest,
>> ram_addr_t start,
>> - ram_addr_t length)
>> + ram_addr_t length,
>> + int64_t *real_dirty_pages)
>> {
>> ram_addr_t addr;
>> unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);
>> @@ -379,6 +380,7 @@ uint64_t cpu_physical_memory_sync_dirty_bitmap(unsigned long *dest,
>> if (src[idx][offset]) {
>> unsigned long bits = atomic_xchg(&src[idx][offset], 0);
>> unsigned long new_dirty;
>> + *real_dirty_pages += ctpopl(bits);
>> new_dirty = ~dest[k];
>> dest[k] |= bits;
>> new_dirty &= bits;
>> @@ -398,6 +400,7 @@ uint64_t cpu_physical_memory_sync_dirty_bitmap(unsigned long *dest,
>> start + addr,
>> TARGET_PAGE_SIZE,
>> DIRTY_MEMORY_MIGRATION)) {
>> + *real_dirty_pages += 1;
>> long k = (start + addr) >> TARGET_PAGE_BITS;
>> if (!test_and_set_bit(k, dest)) {
>> num_dirty++;
>> diff --git a/migration/ram.c b/migration/ram.c
>> index 719425b..fdf9a85 100644
>> --- a/migration/ram.c
>> +++ b/migration/ram.c
>> @@ -576,18 +576,18 @@ static inline bool migration_bitmap_clear_dirty(ram_addr_t addr)
>> return ret;
>> }
>>
>> +static int64_t num_dirty_pages_period;
>> static void migration_bitmap_sync_range(ram_addr_t start, ram_addr_t length)
>> {
>> unsigned long *bitmap;
>> bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
>> - migration_dirty_pages +=
>> - cpu_physical_memory_sync_dirty_bitmap(bitmap, start, length);
>> + migration_dirty_pages += cpu_physical_memory_sync_dirty_bitmap(bitmap,
>> + start, length, &num_dirty_pages_period);
>> }
>>
>> /* Fix me: there are too many global variables used in migration process. */
>> static int64_t start_time;
>> static int64_t bytes_xfer_prev;
>> -static int64_t num_dirty_pages_period;
>> static uint64_t xbzrle_cache_miss_prev;
>> static uint64_t iterations_prev;
>>
>> @@ -648,7 +648,6 @@ static void migration_bitmap_sync(void)
>>
>> trace_migration_bitmap_sync_end(migration_dirty_pages
>> - num_dirty_pages_init);
>> - num_dirty_pages_period += migration_dirty_pages - num_dirty_pages_init;
>
>It seems num_dirty_pages_init should also be removed, and
>trace_migration_bitmap_sync_end()'s parameter updated accordingly.
Hi Li Zhijian,
OK, I will update the patch and send a v2.
Thanks,
Chao Fan
>
>
>Thanks
>Zhijian
>
>> end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
>>
>> /* more than 1 second = 1000 millisecons */
>>
>