From: David Gibson <david@gibson.dropbear.id.au>
To: "Philippe Mathieu-Daudé" <philmd@redhat.com>
Cc: Kevin Wolf <kwolf@redhat.com>,
Peter Maydell <peter.maydell@linaro.org>,
Cornelia Huck <cohuck@redhat.com>,
Alex Williamson <alex.williamson@redhat.com>,
"Michael S. Tsirkin" <mst@redhat.com>,
qemu-block@nongnu.org, David Hildenbrand <david@redhat.com>,
Juan Quintela <quintela@redhat.com>,
qemu-devel@nongnu.org, Max Reitz <mreitz@redhat.com>,
Halil Pasic <pasic@linux.ibm.com>,
Christian Borntraeger <borntraeger@de.ibm.com>,
qemu-s390x@nongnu.org, qemu-ppc@nongnu.org,
Keith Busch <kbusch@kernel.org>,
Paolo Bonzini <pbonzini@redhat.com>,
"Dr. David Alan Gilbert" <dgilbert@redhat.com>,
Richard Henderson <rth@twiddle.net>
Subject: Re: [PATCH 10/10] exec: Move cpu_physical_memory_* functions to 'exec/memory-internal.h'
Date: Mon, 11 May 2020 11:35:22 +1000
Message-ID: <20200511013522.GL2183@umbus.fritz.box>
In-Reply-To: <20200507173958.25894-11-philmd@redhat.com>
On Thu, May 07, 2020 at 07:39:58PM +0200, Philippe Mathieu-Daudé wrote:
> Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
For the ppc parts:
Acked-by: David Gibson <david@gibson.dropbear.id.au>
> ---
> include/exec/memory-internal.h | 305 ++++++++++++++++++++++++++++++++-
> include/exec/ram_addr.h | 303 +-------------------------------
> accel/tcg/cputlb.c | 1 -
> hw/ppc/spapr.c | 1 -
> hw/ppc/spapr_pci.c | 1 -
> memory.c | 1 -
> 6 files changed, 305 insertions(+), 307 deletions(-)
>
> diff --git a/include/exec/memory-internal.h b/include/exec/memory-internal.h
> index b2b7c1e78a..4abb3bbd85 100644
> --- a/include/exec/memory-internal.h
> +++ b/include/exec/memory-internal.h
> @@ -21,8 +21,13 @@
> #define MEMORY_INTERNAL_H
>
> #include "cpu.h"
> +#include "sysemu/tcg.h"
> +#include "sysemu/xen.h"
> +#include "exec/ramlist.h"
> +#include "exec/ramblock.h"
>
> #ifdef CONFIG_SOFTMMU
> +
> static inline AddressSpaceDispatch *flatview_to_dispatch(FlatView *fv)
> {
> return fv->dispatch;
> @@ -49,5 +54,303 @@ void address_space_dispatch_free(AddressSpaceDispatch *d);
>
> void mtree_print_dispatch(struct AddressSpaceDispatch *d,
> MemoryRegion *root);
> -#endif
> +
> +#define DIRTY_CLIENTS_ALL ((1 << DIRTY_MEMORY_NUM) - 1)
> +#define DIRTY_CLIENTS_NOCODE (DIRTY_CLIENTS_ALL & ~(1 << DIRTY_MEMORY_CODE))
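
For readers keeping score on the masks: with the usual three clients
(VGA, CODE, MIGRATION) these evaluate to 0b111 and 0b101.  A tiny
standalone self-check -- the enum values below are my assumption,
mirroring what the real DIRTY_MEMORY_* definitions look like:

#include <assert.h>

enum { DIRTY_MEMORY_VGA, DIRTY_MEMORY_CODE, DIRTY_MEMORY_MIGRATION,
       DIRTY_MEMORY_NUM };

int main(void)
{
    unsigned all    = (1 << DIRTY_MEMORY_NUM) - 1;      /* 0b111 == 7 */
    unsigned nocode = all & ~(1 << DIRTY_MEMORY_CODE);  /* 0b101 == 5 */

    assert(all == 7 && nocode == 5);
    return 0;
}
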
> +
> +static inline bool cpu_physical_memory_get_dirty(ram_addr_t start,
> + ram_addr_t length,
> + unsigned client)
> +{
> + DirtyMemoryBlocks *blocks;
> + unsigned long end, page;
> + unsigned long idx, offset, base;
> + bool dirty = false;
> +
> + assert(client < DIRTY_MEMORY_NUM);
> +
> + end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
> + page = start >> TARGET_PAGE_BITS;
> +
> + WITH_RCU_READ_LOCK_GUARD() {
> + blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
> +
> + idx = page / DIRTY_MEMORY_BLOCK_SIZE;
> + offset = page % DIRTY_MEMORY_BLOCK_SIZE;
> + base = page - offset;
> + while (page < end) {
> + unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
> + unsigned long num = next - base;
> + unsigned long found = find_next_bit(blocks->blocks[idx],
> + num, offset);
> + if (found < num) {
> + dirty = true;
> + break;
> + }
> +
> + page = next;
> + idx++;
> + offset = 0;
> + base += DIRTY_MEMORY_BLOCK_SIZE;
> + }
> + }
> +
> + return dirty;
> +}
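
A side note for readers new to this code: each client's dirty bitmap is
sharded into chunks of DIRTY_MEMORY_BLOCK_SIZE pages, and the
idx/offset/base triple locates a page within that sharding.  A
standalone toy sketch of just the indexing, where BLOCK_PAGES is a
stand-in value rather than the real constant:

#include <stdio.h>

#define BLOCK_PAGES 64UL   /* stand-in for DIRTY_MEMORY_BLOCK_SIZE */

static void locate(unsigned long page, unsigned long *idx,
                   unsigned long *offset, unsigned long *base)
{
    *idx    = page / BLOCK_PAGES;  /* which chunk holds this page    */
    *offset = page % BLOCK_PAGES;  /* bit position inside that chunk */
    *base   = page - *offset;      /* first page covered by chunk    */
}

int main(void)
{
    unsigned long idx, offset, base;

    locate(200, &idx, &offset, &base);
    printf("idx=%lu offset=%lu base=%lu\n", idx, offset, base);
    /* prints: idx=3 offset=8 base=192 */
    return 0;
}
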
> +
> +static inline bool cpu_physical_memory_all_dirty(ram_addr_t start,
> + ram_addr_t length,
> + unsigned client)
> +{
> + DirtyMemoryBlocks *blocks;
> + unsigned long end, page;
> + unsigned long idx, offset, base;
> + bool dirty = true;
> +
> + assert(client < DIRTY_MEMORY_NUM);
> +
> + end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
> + page = start >> TARGET_PAGE_BITS;
> +
> + RCU_READ_LOCK_GUARD();
> +
> + blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
> +
> + idx = page / DIRTY_MEMORY_BLOCK_SIZE;
> + offset = page % DIRTY_MEMORY_BLOCK_SIZE;
> + base = page - offset;
> + while (page < end) {
> + unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
> + unsigned long num = next - base;
> + unsigned long found = find_next_zero_bit(blocks->blocks[idx],
> + num, offset);
> + if (found < num) {
> + dirty = false;
> + break;
> + }
> +
> + page = next;
> + idx++;
> + offset = 0;
> + base += DIRTY_MEMORY_BLOCK_SIZE;
> + }
> +
> + return dirty;
> +}
> +
> +static inline bool cpu_physical_memory_get_dirty_flag(ram_addr_t addr,
> + unsigned client)
> +{
> + return cpu_physical_memory_get_dirty(addr, 1, client);
> +}
> +
> +static inline bool cpu_physical_memory_is_clean(ram_addr_t addr)
> +{
> + bool vga = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_VGA);
> + bool code = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_CODE);
> + bool migration = cpu_physical_memory_get_dirty_flag(addr,
> + DIRTY_MEMORY_MIGRATION);
> + return !(vga && code && migration);
> +}
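
One subtlety worth calling out, since the name can mislead: this
returns true while the page is still clean for *at least one* client,
and only returns false once every client has marked it dirty.  A
minimal sketch of just the predicate:

#include <assert.h>
#include <stdbool.h>

static bool is_clean(bool vga, bool code, bool migration)
{
    /* clean for at least one client <=> not dirty for all of them */
    return !(vga && code && migration);
}

int main(void)
{
    assert(is_clean(true, true, false));  /* migration still clean */
    assert(!is_clean(true, true, true));  /* dirty for everyone    */
    return 0;
}
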
> +
> +static inline uint8_t cpu_physical_memory_range_includes_clean(ram_addr_t start,
> + ram_addr_t length,
> + uint8_t mask)
> +{
> + uint8_t ret = 0;
> +
> + if (mask & (1 << DIRTY_MEMORY_VGA) &&
> + !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_VGA)) {
> + ret |= (1 << DIRTY_MEMORY_VGA);
> + }
> + if (mask & (1 << DIRTY_MEMORY_CODE) &&
> + !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_CODE)) {
> + ret |= (1 << DIRTY_MEMORY_CODE);
> + }
> + if (mask & (1 << DIRTY_MEMORY_MIGRATION) &&
> + !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_MIGRATION)) {
> + ret |= (1 << DIRTY_MEMORY_MIGRATION);
> + }
> + return ret;
> +}
> +
> +static inline void cpu_physical_memory_set_dirty_flag(ram_addr_t addr,
> + unsigned client)
> +{
> + unsigned long page, idx, offset;
> + DirtyMemoryBlocks *blocks;
> +
> + assert(client < DIRTY_MEMORY_NUM);
> +
> + page = addr >> TARGET_PAGE_BITS;
> + idx = page / DIRTY_MEMORY_BLOCK_SIZE;
> + offset = page % DIRTY_MEMORY_BLOCK_SIZE;
> +
> + RCU_READ_LOCK_GUARD();
> +
> + blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
> +
> + set_bit_atomic(offset, blocks->blocks[idx]);
> +}
> +
> +static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
> + ram_addr_t length,
> + uint8_t mask)
> +{
> + DirtyMemoryBlocks *blocks[DIRTY_MEMORY_NUM];
> + unsigned long end, page;
> + unsigned long idx, offset, base;
> + int i;
> +
> + if (!mask && !xen_enabled()) {
> + return;
> + }
> +
> + end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
> + page = start >> TARGET_PAGE_BITS;
> +
> + WITH_RCU_READ_LOCK_GUARD() {
> + for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
> + blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i]);
> + }
> +
> + idx = page / DIRTY_MEMORY_BLOCK_SIZE;
> + offset = page % DIRTY_MEMORY_BLOCK_SIZE;
> + base = page - offset;
> + while (page < end) {
> + unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
> +
> + if (likely(mask & (1 << DIRTY_MEMORY_MIGRATION))) {
> + bitmap_set_atomic(blocks[DIRTY_MEMORY_MIGRATION]->blocks[idx],
> + offset, next - page);
> + }
> + if (unlikely(mask & (1 << DIRTY_MEMORY_VGA))) {
> + bitmap_set_atomic(blocks[DIRTY_MEMORY_VGA]->blocks[idx],
> + offset, next - page);
> + }
> + if (unlikely(mask & (1 << DIRTY_MEMORY_CODE))) {
> + bitmap_set_atomic(blocks[DIRTY_MEMORY_CODE]->blocks[idx],
> + offset, next - page);
> + }
> +
> + page = next;
> + idx++;
> + offset = 0;
> + base += DIRTY_MEMORY_BLOCK_SIZE;
> + }
> + }
> +
> + xen_hvm_modified_memory(start, length);
> +}
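
The range variant reuses the same sharded walk: only the first chunk
can start at a non-zero offset, after which offset resets to zero and
whole chunks are covered until `end`.  A runnable toy sketch of just
the loop bounds, again with a stand-in block size:

#include <stdio.h>

#define BLOCK_PAGES 64UL   /* stand-in for DIRTY_MEMORY_BLOCK_SIZE */
#define MIN(a, b)   ((a) < (b) ? (a) : (b))

int main(void)
{
    unsigned long page = 200, end = 400;
    unsigned long idx    = page / BLOCK_PAGES;
    unsigned long offset = page % BLOCK_PAGES;
    unsigned long base   = page - offset;

    while (page < end) {
        unsigned long next = MIN(end, base + BLOCK_PAGES);

        /* the real code sets bits [offset, offset + next - page)
         * of chunk `idx` for each client present in `mask` */
        printf("chunk %lu: bits [%lu, %lu)\n",
               idx, offset, offset + (next - page));

        page = next;
        idx++;
        offset = 0;
        base += BLOCK_PAGES;
    }
    return 0;
}
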
> +
> +#if !defined(_WIN32)
> +static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
> + ram_addr_t start,
> + ram_addr_t pages)
> +{
> + unsigned long i, j;
> + unsigned long page_number, c;
> + hwaddr addr;
> + ram_addr_t ram_addr;
> + unsigned long len = (pages + HOST_LONG_BITS - 1) / HOST_LONG_BITS;
> + unsigned long hpratio = qemu_real_host_page_size / TARGET_PAGE_SIZE;
> + unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);
> +
> + /* start address is aligned at the start of a word? */
> + if ((((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) &&
> + (hpratio == 1)) {
> + unsigned long **blocks[DIRTY_MEMORY_NUM];
> + unsigned long idx;
> + unsigned long offset;
> + long k;
> + long nr = BITS_TO_LONGS(pages);
> +
> + idx = (start >> TARGET_PAGE_BITS) / DIRTY_MEMORY_BLOCK_SIZE;
> + offset = BIT_WORD((start >> TARGET_PAGE_BITS) %
> + DIRTY_MEMORY_BLOCK_SIZE);
> +
> + WITH_RCU_READ_LOCK_GUARD() {
> + for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
> + blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i])->blocks;
> + }
> +
> + for (k = 0; k < nr; k++) {
> + if (bitmap[k]) {
> + unsigned long temp = leul_to_cpu(bitmap[k]);
> +
> + atomic_or(&blocks[DIRTY_MEMORY_VGA][idx][offset], temp);
> +
> + if (global_dirty_log) {
> + atomic_or(&blocks[DIRTY_MEMORY_MIGRATION][idx][offset],
> + temp);
> + }
> +
> + if (tcg_enabled()) {
> + atomic_or(&blocks[DIRTY_MEMORY_CODE][idx][offset],
> + temp);
> + }
> + }
> +
> + if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
> + offset = 0;
> + idx++;
> + }
> + }
> + }
> +
> + xen_hvm_modified_memory(start, pages << TARGET_PAGE_BITS);
> + } else {
> + uint8_t clients = tcg_enabled() ? DIRTY_CLIENTS_ALL
> + : DIRTY_CLIENTS_NOCODE;
> +
> + if (!global_dirty_log) {
> + clients &= ~(1 << DIRTY_MEMORY_MIGRATION);
> + }
> +
> + /*
> + * bitmap-traveling is faster than memory-traveling (for addr...)
> + * especially when most of the memory is not dirty.
> + */
> + for (i = 0; i < len; i++) {
> + if (bitmap[i] != 0) {
> + c = leul_to_cpu(bitmap[i]);
> + do {
> + j = ctzl(c);
> + c &= ~(1ul << j);
> + page_number = (i * HOST_LONG_BITS + j) * hpratio;
> + addr = page_number * TARGET_PAGE_SIZE;
> + ram_addr = start + addr;
> + cpu_physical_memory_set_dirty_range(ram_addr,
> + TARGET_PAGE_SIZE * hpratio, clients);
> + } while (c != 0);
> + }
> + }
> + }
> +}
> +#endif /* not _WIN32 */
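
Worth noting how the slow path avoids a page-at-a-time scan: it only
visits the set bits of each bitmap word, clearing the lowest one per
iteration.  A standalone sketch of that inner loop -- I'm using the
GCC/Clang builtin directly here where the code above would go through
QEMU's ctzl() wrapper:

#include <stdio.h>

int main(void)
{
    unsigned long c = 0x5000000000000009UL;  /* bits 0, 3, 60, 62 set */

    while (c != 0) {
        int j = __builtin_ctzl(c);  /* index of the lowest set bit */
        c &= ~(1UL << j);           /* clear it and keep going     */
        printf("dirty page at bit %d\n", j);
    }
    return 0;
}
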
> +
> +bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
> + ram_addr_t length,
> + unsigned client);
> +
> +DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty(
> + MemoryRegion *mr,
> + hwaddr offset,
> + hwaddr length,
> + unsigned client);
> +
> +bool cpu_physical_memory_snapshot_get_dirty(DirtyBitmapSnapshot *snap,
> + ram_addr_t start,
> + ram_addr_t length);
> +
> +static inline void cpu_physical_memory_clear_dirty_range(ram_addr_t start,
> + ram_addr_t length)
> +{
> + cpu_physical_memory_test_and_clear_dirty(start, length,
> + DIRTY_MEMORY_MIGRATION);
> + cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_VGA);
> + cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_CODE);
> +}
> +
> +#endif /* CONFIG_SOFTMMU */
> #endif
> diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h
> index 6acde47a0f..64bf28a332 100644
> --- a/include/exec/ram_addr.h
> +++ b/include/exec/ram_addr.h
> @@ -21,310 +21,9 @@
>
> #ifndef CONFIG_USER_ONLY
> #include "cpu.h"
> -#include "sysemu/xen.h"
> -#include "sysemu/tcg.h"
> #include "exec/ramlist.h"
> #include "exec/ramblock.h"
> -
> -
> -
> -#define DIRTY_CLIENTS_ALL ((1 << DIRTY_MEMORY_NUM) - 1)
> -#define DIRTY_CLIENTS_NOCODE (DIRTY_CLIENTS_ALL & ~(1 << DIRTY_MEMORY_CODE))
> -
> -static inline bool cpu_physical_memory_get_dirty(ram_addr_t start,
> - ram_addr_t length,
> - unsigned client)
> -{
> - DirtyMemoryBlocks *blocks;
> - unsigned long end, page;
> - unsigned long idx, offset, base;
> - bool dirty = false;
> -
> - assert(client < DIRTY_MEMORY_NUM);
> -
> - end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
> - page = start >> TARGET_PAGE_BITS;
> -
> - WITH_RCU_READ_LOCK_GUARD() {
> - blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
> -
> - idx = page / DIRTY_MEMORY_BLOCK_SIZE;
> - offset = page % DIRTY_MEMORY_BLOCK_SIZE;
> - base = page - offset;
> - while (page < end) {
> - unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
> - unsigned long num = next - base;
> - unsigned long found = find_next_bit(blocks->blocks[idx],
> - num, offset);
> - if (found < num) {
> - dirty = true;
> - break;
> - }
> -
> - page = next;
> - idx++;
> - offset = 0;
> - base += DIRTY_MEMORY_BLOCK_SIZE;
> - }
> - }
> -
> - return dirty;
> -}
> -
> -static inline bool cpu_physical_memory_all_dirty(ram_addr_t start,
> - ram_addr_t length,
> - unsigned client)
> -{
> - DirtyMemoryBlocks *blocks;
> - unsigned long end, page;
> - unsigned long idx, offset, base;
> - bool dirty = true;
> -
> - assert(client < DIRTY_MEMORY_NUM);
> -
> - end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
> - page = start >> TARGET_PAGE_BITS;
> -
> - RCU_READ_LOCK_GUARD();
> -
> - blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
> -
> - idx = page / DIRTY_MEMORY_BLOCK_SIZE;
> - offset = page % DIRTY_MEMORY_BLOCK_SIZE;
> - base = page - offset;
> - while (page < end) {
> - unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
> - unsigned long num = next - base;
> - unsigned long found = find_next_zero_bit(blocks->blocks[idx],
> - num, offset);
> - if (found < num) {
> - dirty = false;
> - break;
> - }
> -
> - page = next;
> - idx++;
> - offset = 0;
> - base += DIRTY_MEMORY_BLOCK_SIZE;
> - }
> -
> - return dirty;
> -}
> -
> -static inline bool cpu_physical_memory_get_dirty_flag(ram_addr_t addr,
> - unsigned client)
> -{
> - return cpu_physical_memory_get_dirty(addr, 1, client);
> -}
> -
> -static inline bool cpu_physical_memory_is_clean(ram_addr_t addr)
> -{
> - bool vga = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_VGA);
> - bool code = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_CODE);
> - bool migration = cpu_physical_memory_get_dirty_flag(addr,
> - DIRTY_MEMORY_MIGRATION);
> - return !(vga && code && migration);
> -}
> -
> -static inline uint8_t cpu_physical_memory_range_includes_clean(ram_addr_t start,
> - ram_addr_t length,
> - uint8_t mask)
> -{
> - uint8_t ret = 0;
> -
> - if (mask & (1 << DIRTY_MEMORY_VGA) &&
> - !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_VGA)) {
> - ret |= (1 << DIRTY_MEMORY_VGA);
> - }
> - if (mask & (1 << DIRTY_MEMORY_CODE) &&
> - !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_CODE)) {
> - ret |= (1 << DIRTY_MEMORY_CODE);
> - }
> - if (mask & (1 << DIRTY_MEMORY_MIGRATION) &&
> - !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_MIGRATION)) {
> - ret |= (1 << DIRTY_MEMORY_MIGRATION);
> - }
> - return ret;
> -}
> -
> -static inline void cpu_physical_memory_set_dirty_flag(ram_addr_t addr,
> - unsigned client)
> -{
> - unsigned long page, idx, offset;
> - DirtyMemoryBlocks *blocks;
> -
> - assert(client < DIRTY_MEMORY_NUM);
> -
> - page = addr >> TARGET_PAGE_BITS;
> - idx = page / DIRTY_MEMORY_BLOCK_SIZE;
> - offset = page % DIRTY_MEMORY_BLOCK_SIZE;
> -
> - RCU_READ_LOCK_GUARD();
> -
> - blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
> -
> - set_bit_atomic(offset, blocks->blocks[idx]);
> -}
> -
> -static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
> - ram_addr_t length,
> - uint8_t mask)
> -{
> - DirtyMemoryBlocks *blocks[DIRTY_MEMORY_NUM];
> - unsigned long end, page;
> - unsigned long idx, offset, base;
> - int i;
> -
> - if (!mask && !xen_enabled()) {
> - return;
> - }
> -
> - end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
> - page = start >> TARGET_PAGE_BITS;
> -
> - WITH_RCU_READ_LOCK_GUARD() {
> - for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
> - blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i]);
> - }
> -
> - idx = page / DIRTY_MEMORY_BLOCK_SIZE;
> - offset = page % DIRTY_MEMORY_BLOCK_SIZE;
> - base = page - offset;
> - while (page < end) {
> - unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
> -
> - if (likely(mask & (1 << DIRTY_MEMORY_MIGRATION))) {
> - bitmap_set_atomic(blocks[DIRTY_MEMORY_MIGRATION]->blocks[idx],
> - offset, next - page);
> - }
> - if (unlikely(mask & (1 << DIRTY_MEMORY_VGA))) {
> - bitmap_set_atomic(blocks[DIRTY_MEMORY_VGA]->blocks[idx],
> - offset, next - page);
> - }
> - if (unlikely(mask & (1 << DIRTY_MEMORY_CODE))) {
> - bitmap_set_atomic(blocks[DIRTY_MEMORY_CODE]->blocks[idx],
> - offset, next - page);
> - }
> -
> - page = next;
> - idx++;
> - offset = 0;
> - base += DIRTY_MEMORY_BLOCK_SIZE;
> - }
> - }
> -
> - xen_hvm_modified_memory(start, length);
> -}
> -
> -#if !defined(_WIN32)
> -static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
> - ram_addr_t start,
> - ram_addr_t pages)
> -{
> - unsigned long i, j;
> - unsigned long page_number, c;
> - hwaddr addr;
> - ram_addr_t ram_addr;
> - unsigned long len = (pages + HOST_LONG_BITS - 1) / HOST_LONG_BITS;
> - unsigned long hpratio = qemu_real_host_page_size / TARGET_PAGE_SIZE;
> - unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);
> -
> - /* start address is aligned at the start of a word? */
> - if ((((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) &&
> - (hpratio == 1)) {
> - unsigned long **blocks[DIRTY_MEMORY_NUM];
> - unsigned long idx;
> - unsigned long offset;
> - long k;
> - long nr = BITS_TO_LONGS(pages);
> -
> - idx = (start >> TARGET_PAGE_BITS) / DIRTY_MEMORY_BLOCK_SIZE;
> - offset = BIT_WORD((start >> TARGET_PAGE_BITS) %
> - DIRTY_MEMORY_BLOCK_SIZE);
> -
> - WITH_RCU_READ_LOCK_GUARD() {
> - for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
> - blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i])->blocks;
> - }
> -
> - for (k = 0; k < nr; k++) {
> - if (bitmap[k]) {
> - unsigned long temp = leul_to_cpu(bitmap[k]);
> -
> - atomic_or(&blocks[DIRTY_MEMORY_VGA][idx][offset], temp);
> -
> - if (global_dirty_log) {
> - atomic_or(&blocks[DIRTY_MEMORY_MIGRATION][idx][offset],
> - temp);
> - }
> -
> - if (tcg_enabled()) {
> - atomic_or(&blocks[DIRTY_MEMORY_CODE][idx][offset],
> - temp);
> - }
> - }
> -
> - if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
> - offset = 0;
> - idx++;
> - }
> - }
> - }
> -
> - xen_hvm_modified_memory(start, pages << TARGET_PAGE_BITS);
> - } else {
> - uint8_t clients = tcg_enabled()
> - ? DIRTY_CLIENTS_ALL : DIRTY_CLIENTS_NOCODE;
> -
> - if (!global_dirty_log) {
> - clients &= ~(1 << DIRTY_MEMORY_MIGRATION);
> - }
> -
> - /*
> - * bitmap-traveling is faster than memory-traveling (for addr...)
> - * especially when most of the memory is not dirty.
> - */
> - for (i = 0; i < len; i++) {
> - if (bitmap[i] != 0) {
> - c = leul_to_cpu(bitmap[i]);
> - do {
> - j = ctzl(c);
> - c &= ~(1ul << j);
> - page_number = (i * HOST_LONG_BITS + j) * hpratio;
> - addr = page_number * TARGET_PAGE_SIZE;
> - ram_addr = start + addr;
> - cpu_physical_memory_set_dirty_range(ram_addr,
> - TARGET_PAGE_SIZE * hpratio, clients);
> - } while (c != 0);
> - }
> - }
> - }
> -}
> -#endif /* not _WIN32 */
> -
> -bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
> - ram_addr_t length,
> - unsigned client);
> -
> -DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty(
> - MemoryRegion *mr,
> - hwaddr offset,
> - hwaddr length,
> - unsigned client);
> -
> -bool cpu_physical_memory_snapshot_get_dirty(DirtyBitmapSnapshot *snap,
> - ram_addr_t start,
> - ram_addr_t length);
> -
> -static inline void cpu_physical_memory_clear_dirty_range(ram_addr_t start,
> - ram_addr_t length)
> -{
> - cpu_physical_memory_test_and_clear_dirty(start, length,
> - DIRTY_MEMORY_MIGRATION);
> - cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_VGA);
> - cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_CODE);
> -}
> -
> +#include "exec/memory-internal.h"
>
> /* Called with RCU critical section */
> static inline
> diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
> index e3b5750c3b..922671f246 100644
> --- a/accel/tcg/cputlb.c
> +++ b/accel/tcg/cputlb.c
> @@ -26,7 +26,6 @@
> #include "exec/cpu_ldst.h"
> #include "exec/cputlb.h"
> #include "exec/memory-internal.h"
> -#include "exec/ram_addr.h"
> #include "tcg/tcg.h"
> #include "qemu/error-report.h"
> #include "exec/log.h"
> diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
> index c18eab0a23..d7c3bf3932 100644
> --- a/hw/ppc/spapr.c
> +++ b/hw/ppc/spapr.c
> @@ -69,7 +69,6 @@
> #include "hw/virtio/vhost-scsi-common.h"
>
> #include "exec/address-spaces.h"
> -#include "exec/ram_addr.h"
> #include "hw/usb.h"
> #include "qemu/config-file.h"
> #include "qemu/error-report.h"
> diff --git a/hw/ppc/spapr_pci.c b/hw/ppc/spapr_pci.c
> index 61b84a392d..8d9aeba6e6 100644
> --- a/hw/ppc/spapr_pci.c
> +++ b/hw/ppc/spapr_pci.c
> @@ -36,7 +36,6 @@
> #include "hw/ppc/spapr.h"
> #include "hw/pci-host/spapr.h"
> #include "exec/address-spaces.h"
> -#include "exec/ram_addr.h"
> #include <libfdt.h>
> #include "trace.h"
> #include "qemu/error-report.h"
> diff --git a/memory.c b/memory.c
> index e8e7bcd6c7..4e1d19c5fc 100644
> --- a/memory.c
> +++ b/memory.c
> @@ -27,7 +27,6 @@
> #include "trace-root.h"
>
> #include "exec/memory-internal.h"
> -#include "exec/ram_addr.h"
> #include "exec/ramblock.h"
> #include "sysemu/kvm.h"
> #include "sysemu/runstate.h"
--
David Gibson | I'll have my music baroque, and my code
david AT gibson.dropbear.id.au | minimalist, thank you. NOT _the_ _other_
| _way_ _around_!
http://www.ozlabs.org/~dgibson