* [Qemu-devel] [PATCH 1/9] exec: introduce cpu_reload_memory_map
2015-02-03 12:52 [Qemu-devel] [PATCH v2 0/9] RCUification of the memory API, part 2 Paolo Bonzini
@ 2015-02-03 12:52 ` Paolo Bonzini
2015-02-04 1:46 ` Fam Zheng
2015-02-03 12:52 ` [Qemu-devel] [PATCH 2/9] exec: make iotlb RCU-friendly Paolo Bonzini
` (7 subsequent siblings)
8 siblings, 1 reply; 22+ messages in thread
From: Paolo Bonzini @ 2015-02-03 12:52 UTC (permalink / raw)
To: qemu-devel; +Cc: peter.maydell, famz, fred.konrad
This for now is a simple TLB flush. This can change later for two
reasons:
1) an AddressSpaceDispatch will be cached in the CPUState object
2) it will not be possible to do tlb_flush once the TCG-generated code
runs outside the BQL.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
cpu-exec.c | 6 ++++++
exec.c | 2 +-
include/exec/exec-all.h | 1 +
3 files changed, 8 insertions(+), 1 deletion(-)
diff --git a/cpu-exec.c b/cpu-exec.c
index fa506e6..78fe382 100644
--- a/cpu-exec.c
+++ b/cpu-exec.c
@@ -141,6 +141,12 @@ void cpu_resume_from_signal(CPUState *cpu, void *puc)
cpu->exception_index = -1;
siglongjmp(cpu->jmp_env, 1);
}
+
+void cpu_reload_memory_map(CPUState *cpu)
+{
+ /* The TLB is protected by the iothread lock. */
+ tlb_flush(cpu, 1);
+}
#endif
/* Execute a TB, and fix up the CPU state afterwards if necessary */
diff --git a/exec.c b/exec.c
index 6b79ad1..5a75909 100644
--- a/exec.c
+++ b/exec.c
@@ -2026,7 +2026,7 @@ static void tcg_commit(MemoryListener *listener)
if (cpu->tcg_as_listener != listener) {
continue;
}
- tlb_flush(cpu, 1);
+ cpu_reload_memory_map(cpu);
}
}
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index 6a15448..1b30813 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -96,6 +96,7 @@ void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
int is_cpu_write_access);
#if !defined(CONFIG_USER_ONLY)
+void cpu_reload_memory_map(CPUState *cpu);
void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as);
/* cputlb.c */
void tlb_flush_page(CPUState *cpu, target_ulong addr);
--
1.8.3.1
^ permalink raw reply related [flat|nested] 22+ messages in thread
* Re: [Qemu-devel] [PATCH 1/9] exec: introduce cpu_reload_memory_map
2015-02-03 12:52 ` [Qemu-devel] [PATCH 1/9] exec: introduce cpu_reload_memory_map Paolo Bonzini
@ 2015-02-04 1:46 ` Fam Zheng
0 siblings, 0 replies; 22+ messages in thread
From: Fam Zheng @ 2015-02-04 1:46 UTC (permalink / raw)
To: Paolo Bonzini; +Cc: peter.maydell, qemu-devel, fred.konrad
On Tue, 02/03 13:52, Paolo Bonzini wrote:
> This for now is a simple TLB flush. This can change later for two
> reasons:
>
> 1) an AddressSpaceDispatch will be cached in the CPUState object
>
> 2) it will not be possible to do tlb_flush once the TCG-generated code
> runs outside the BQL.
>
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> ---
> cpu-exec.c | 6 ++++++
> exec.c | 2 +-
> include/exec/exec-all.h | 1 +
> 3 files changed, 8 insertions(+), 1 deletion(-)
>
> diff --git a/cpu-exec.c b/cpu-exec.c
> index fa506e6..78fe382 100644
> --- a/cpu-exec.c
> +++ b/cpu-exec.c
> @@ -141,6 +141,12 @@ void cpu_resume_from_signal(CPUState *cpu, void *puc)
> cpu->exception_index = -1;
> siglongjmp(cpu->jmp_env, 1);
> }
> +
> +void cpu_reload_memory_map(CPUState *cpu)
> +{
> + /* The TLB is protected by the iothread lock. */
> + tlb_flush(cpu, 1);
> +}
> #endif
>
> /* Execute a TB, and fix up the CPU state afterwards if necessary */
> diff --git a/exec.c b/exec.c
> index 6b79ad1..5a75909 100644
> --- a/exec.c
> +++ b/exec.c
> @@ -2026,7 +2026,7 @@ static void tcg_commit(MemoryListener *listener)
> if (cpu->tcg_as_listener != listener) {
> continue;
> }
> - tlb_flush(cpu, 1);
> + cpu_reload_memory_map(cpu);
> }
> }
>
> diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
> index 6a15448..1b30813 100644
> --- a/include/exec/exec-all.h
> +++ b/include/exec/exec-all.h
> @@ -96,6 +96,7 @@ void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
> void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
> int is_cpu_write_access);
> #if !defined(CONFIG_USER_ONLY)
> +void cpu_reload_memory_map(CPUState *cpu);
> void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as);
> /* cputlb.c */
> void tlb_flush_page(CPUState *cpu, target_ulong addr);
> --
> 1.8.3.1
>
>
>
Reviewed-by: Fam Zheng <famz@redhat.com>
^ permalink raw reply [flat|nested] 22+ messages in thread
* [Qemu-devel] [PATCH 2/9] exec: make iotlb RCU-friendly
2015-02-03 12:52 [Qemu-devel] [PATCH v2 0/9] RCUification of the memory API, part 2 Paolo Bonzini
2015-02-03 12:52 ` [Qemu-devel] [PATCH 1/9] exec: introduce cpu_reload_memory_map Paolo Bonzini
@ 2015-02-03 12:52 ` Paolo Bonzini
2015-02-04 2:31 ` Fam Zheng
2015-02-03 12:52 ` [Qemu-devel] [PATCH 3/9] exec: RCUify AddressSpaceDispatch Paolo Bonzini
` (6 subsequent siblings)
8 siblings, 1 reply; 22+ messages in thread
From: Paolo Bonzini @ 2015-02-03 12:52 UTC (permalink / raw)
To: qemu-devel; +Cc: peter.maydell, famz, fred.konrad
After the previous patch, TLBs will be flushed on every change to
the memory mapping. This patch augments that with synchronization
of the MemoryRegionSections referred to in the iotlb array.
With this change, it is guaranteed that iotlb_to_region will access
the correct memory map, even once the TLB will be accessed outside
the BQL.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
cpu-exec.c | 6 +++++-
cputlb.c | 5 ++---
exec.c | 13 ++++++++-----
include/exec/cputlb.h | 2 +-
include/exec/exec-all.h | 3 ++-
include/qom/cpu.h | 1 +
softmmu_template.h | 4 ++--
7 files changed, 21 insertions(+), 13 deletions(-)
diff --git a/cpu-exec.c b/cpu-exec.c
index 78fe382..98f968d 100644
--- a/cpu-exec.c
+++ b/cpu-exec.c
@@ -24,6 +24,8 @@
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
#include "qemu/timer.h"
+#include "exec/address-spaces.h"
+#include "exec/memory-internal.h"
/* -icount align implementation. */
@@ -144,7 +146,9 @@ void cpu_resume_from_signal(CPUState *cpu, void *puc)
void cpu_reload_memory_map(CPUState *cpu)
{
- /* The TLB is protected by the iothread lock. */
+ /* The CPU and TLB are protected by the iothread lock. */
+ AddressSpaceDispatch *d = cpu->as->dispatch;
+ cpu->memory_dispatch = d;
tlb_flush(cpu, 1);
}
#endif
diff --git a/cputlb.c b/cputlb.c
index 3b271d4..f92db5e 100644
--- a/cputlb.c
+++ b/cputlb.c
@@ -265,8 +265,7 @@ void tlb_set_page(CPUState *cpu, target_ulong vaddr,
}
sz = size;
- section = address_space_translate_for_iotlb(cpu->as, paddr,
- &xlat, &sz);
+ section = address_space_translate_for_iotlb(cpu, paddr, &xlat, &sz);
assert(sz >= TARGET_PAGE_SIZE);
#if defined(DEBUG_TLB)
@@ -347,7 +346,7 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
cpu_ldub_code(env1, addr);
}
pd = env1->iotlb[mmu_idx][page_index] & ~TARGET_PAGE_MASK;
- mr = iotlb_to_region(cpu->as, pd);
+ mr = iotlb_to_region(cpu, pd);
if (memory_region_is_unassigned(mr)) {
CPUClass *cc = CPU_GET_CLASS(cpu);
diff --git a/exec.c b/exec.c
index 5a75909..1854c95 100644
--- a/exec.c
+++ b/exec.c
@@ -401,11 +401,12 @@ MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
}
MemoryRegionSection *
-address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
- hwaddr *plen)
+address_space_translate_for_iotlb(CPUState *cpu, hwaddr addr,
+ hwaddr *xlat, hwaddr *plen)
{
MemoryRegionSection *section;
- section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);
+ section = address_space_translate_internal(cpu->memory_dispatch,
+ addr, xlat, plen, false);
assert(!section->mr->iommu_ops);
return section;
@@ -1961,9 +1962,11 @@ static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
return phys_section_add(map, &section);
}
-MemoryRegion *iotlb_to_region(AddressSpace *as, hwaddr index)
+MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index)
{
- return as->dispatch->map.sections[index & ~TARGET_PAGE_MASK].mr;
+ MemoryRegionSection *sections = cpu->memory_dispatch->map.sections;
+
+ return sections[index & ~TARGET_PAGE_MASK].mr;
}
static void io_mem_init(void)
diff --git a/include/exec/cputlb.h b/include/exec/cputlb.h
index b8ecd6f..e0da9d7 100644
--- a/include/exec/cputlb.h
+++ b/include/exec/cputlb.h
@@ -34,7 +34,7 @@ extern int tlb_flush_count;
void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr);
MemoryRegionSection *
-address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
+address_space_translate_for_iotlb(CPUState *cpu, hwaddr addr, hwaddr *xlat,
hwaddr *plen);
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
MemoryRegionSection *section,
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index 1b30813..bb3fd37 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -338,7 +338,8 @@ extern uintptr_t tci_tb_ptr;
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align));
-struct MemoryRegion *iotlb_to_region(AddressSpace *as, hwaddr index);
+struct MemoryRegion *iotlb_to_region(CPUState *cpu,
+ hwaddr index);
bool io_mem_read(struct MemoryRegion *mr, hwaddr addr,
uint64_t *pvalue, unsigned size);
bool io_mem_write(struct MemoryRegion *mr, hwaddr addr,
diff --git a/include/qom/cpu.h b/include/qom/cpu.h
index 2098f1c..48fd6fb 100644
--- a/include/qom/cpu.h
+++ b/include/qom/cpu.h
@@ -256,6 +256,7 @@ struct CPUState {
sigjmp_buf jmp_env;
AddressSpace *as;
+ struct AddressSpaceDispatch *memory_dispatch;
MemoryListener *tcg_as_listener;
void *env_ptr; /* CPUArchState */
diff --git a/softmmu_template.h b/softmmu_template.h
index 6b4e615..0e3dd35 100644
--- a/softmmu_template.h
+++ b/softmmu_template.h
@@ -149,7 +149,7 @@ static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
{
uint64_t val;
CPUState *cpu = ENV_GET_CPU(env);
- MemoryRegion *mr = iotlb_to_region(cpu->as, physaddr);
+ MemoryRegion *mr = iotlb_to_region(cpu, physaddr);
physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
cpu->mem_io_pc = retaddr;
@@ -369,7 +369,7 @@ static inline void glue(io_write, SUFFIX)(CPUArchState *env,
uintptr_t retaddr)
{
CPUState *cpu = ENV_GET_CPU(env);
- MemoryRegion *mr = iotlb_to_region(cpu->as, physaddr);
+ MemoryRegion *mr = iotlb_to_region(cpu, physaddr);
physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu_can_do_io(cpu)) {
--
1.8.3.1
^ permalink raw reply related [flat|nested] 22+ messages in thread
* Re: [Qemu-devel] [PATCH 2/9] exec: make iotlb RCU-friendly
2015-02-03 12:52 ` [Qemu-devel] [PATCH 2/9] exec: make iotlb RCU-friendly Paolo Bonzini
@ 2015-02-04 2:31 ` Fam Zheng
0 siblings, 0 replies; 22+ messages in thread
From: Fam Zheng @ 2015-02-04 2:31 UTC (permalink / raw)
To: Paolo Bonzini; +Cc: peter.maydell, qemu-devel, fred.konrad
On Tue, 02/03 13:52, Paolo Bonzini wrote:
> After the previous patch, TLBs will be flushed on every change to
> the memory mapping. This patch augments that with synchronization
> of the MemoryRegionSections referred to in the iotlb array.
>
> With this change, it is guaranteed that iotlb_to_region will access
> the correct memory map, even once the TLB will be accessed outside
> the BQL.
>
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> ---
> cpu-exec.c | 6 +++++-
> cputlb.c | 5 ++---
> exec.c | 13 ++++++++-----
> include/exec/cputlb.h | 2 +-
> include/exec/exec-all.h | 3 ++-
> include/qom/cpu.h | 1 +
> softmmu_template.h | 4 ++--
> 7 files changed, 21 insertions(+), 13 deletions(-)
>
> diff --git a/cpu-exec.c b/cpu-exec.c
> index 78fe382..98f968d 100644
> --- a/cpu-exec.c
> +++ b/cpu-exec.c
> @@ -24,6 +24,8 @@
> #include "qemu/atomic.h"
> #include "sysemu/qtest.h"
> #include "qemu/timer.h"
> +#include "exec/address-spaces.h"
> +#include "exec/memory-internal.h"
>
> /* -icount align implementation. */
>
> @@ -144,7 +146,9 @@ void cpu_resume_from_signal(CPUState *cpu, void *puc)
>
> void cpu_reload_memory_map(CPUState *cpu)
> {
> - /* The TLB is protected by the iothread lock. */
> + /* The CPU and TLB are protected by the iothread lock. */
> + AddressSpaceDispatch *d = cpu->as->dispatch;
> + cpu->memory_dispatch = d;
> tlb_flush(cpu, 1);
> }
> #endif
> diff --git a/cputlb.c b/cputlb.c
> index 3b271d4..f92db5e 100644
> --- a/cputlb.c
> +++ b/cputlb.c
> @@ -265,8 +265,7 @@ void tlb_set_page(CPUState *cpu, target_ulong vaddr,
> }
>
> sz = size;
> - section = address_space_translate_for_iotlb(cpu->as, paddr,
> - &xlat, &sz);
> + section = address_space_translate_for_iotlb(cpu, paddr, &xlat, &sz);
> assert(sz >= TARGET_PAGE_SIZE);
>
> #if defined(DEBUG_TLB)
> @@ -347,7 +346,7 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
> cpu_ldub_code(env1, addr);
> }
> pd = env1->iotlb[mmu_idx][page_index] & ~TARGET_PAGE_MASK;
> - mr = iotlb_to_region(cpu->as, pd);
> + mr = iotlb_to_region(cpu, pd);
> if (memory_region_is_unassigned(mr)) {
> CPUClass *cc = CPU_GET_CLASS(cpu);
>
> diff --git a/exec.c b/exec.c
> index 5a75909..1854c95 100644
> --- a/exec.c
> +++ b/exec.c
> @@ -401,11 +401,12 @@ MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
> }
>
> MemoryRegionSection *
> -address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
> - hwaddr *plen)
> +address_space_translate_for_iotlb(CPUState *cpu, hwaddr addr,
> + hwaddr *xlat, hwaddr *plen)
> {
> MemoryRegionSection *section;
> - section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);
> + section = address_space_translate_internal(cpu->memory_dispatch,
> + addr, xlat, plen, false);
>
> assert(!section->mr->iommu_ops);
> return section;
> @@ -1961,9 +1962,11 @@ static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
> return phys_section_add(map, &section);
> }
>
> -MemoryRegion *iotlb_to_region(AddressSpace *as, hwaddr index)
> +MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index)
> {
> - return as->dispatch->map.sections[index & ~TARGET_PAGE_MASK].mr;
> + MemoryRegionSection *sections = cpu->memory_dispatch->map.sections;
> +
> + return sections[index & ~TARGET_PAGE_MASK].mr;
> }
>
> static void io_mem_init(void)
> diff --git a/include/exec/cputlb.h b/include/exec/cputlb.h
> index b8ecd6f..e0da9d7 100644
> --- a/include/exec/cputlb.h
> +++ b/include/exec/cputlb.h
> @@ -34,7 +34,7 @@ extern int tlb_flush_count;
> void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr);
>
> MemoryRegionSection *
> -address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
> +address_space_translate_for_iotlb(CPUState *cpu, hwaddr addr, hwaddr *xlat,
> hwaddr *plen);
> hwaddr memory_region_section_get_iotlb(CPUState *cpu,
> MemoryRegionSection *section,
> diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
> index 1b30813..bb3fd37 100644
> --- a/include/exec/exec-all.h
> +++ b/include/exec/exec-all.h
> @@ -338,7 +338,8 @@ extern uintptr_t tci_tb_ptr;
>
> void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align));
>
> -struct MemoryRegion *iotlb_to_region(AddressSpace *as, hwaddr index);
> +struct MemoryRegion *iotlb_to_region(CPUState *cpu,
> + hwaddr index);
> bool io_mem_read(struct MemoryRegion *mr, hwaddr addr,
> uint64_t *pvalue, unsigned size);
> bool io_mem_write(struct MemoryRegion *mr, hwaddr addr,
> diff --git a/include/qom/cpu.h b/include/qom/cpu.h
> index 2098f1c..48fd6fb 100644
> --- a/include/qom/cpu.h
> +++ b/include/qom/cpu.h
> @@ -256,6 +256,7 @@ struct CPUState {
> sigjmp_buf jmp_env;
>
> AddressSpace *as;
> + struct AddressSpaceDispatch *memory_dispatch;
> MemoryListener *tcg_as_listener;
>
> void *env_ptr; /* CPUArchState */
> diff --git a/softmmu_template.h b/softmmu_template.h
> index 6b4e615..0e3dd35 100644
> --- a/softmmu_template.h
> +++ b/softmmu_template.h
> @@ -149,7 +149,7 @@ static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
> {
> uint64_t val;
> CPUState *cpu = ENV_GET_CPU(env);
> - MemoryRegion *mr = iotlb_to_region(cpu->as, physaddr);
> + MemoryRegion *mr = iotlb_to_region(cpu, physaddr);
>
> physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
> cpu->mem_io_pc = retaddr;
> @@ -369,7 +369,7 @@ static inline void glue(io_write, SUFFIX)(CPUArchState *env,
> uintptr_t retaddr)
> {
> CPUState *cpu = ENV_GET_CPU(env);
> - MemoryRegion *mr = iotlb_to_region(cpu->as, physaddr);
> + MemoryRegion *mr = iotlb_to_region(cpu, physaddr);
>
> physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
> if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu_can_do_io(cpu)) {
> --
> 1.8.3.1
>
>
Reviewed-by: Fam Zheng <famz@redhat.com>
^ permalink raw reply [flat|nested] 22+ messages in thread
* [Qemu-devel] [PATCH 3/9] exec: RCUify AddressSpaceDispatch
2015-02-03 12:52 [Qemu-devel] [PATCH v2 0/9] RCUification of the memory API, part 2 Paolo Bonzini
2015-02-03 12:52 ` [Qemu-devel] [PATCH 1/9] exec: introduce cpu_reload_memory_map Paolo Bonzini
2015-02-03 12:52 ` [Qemu-devel] [PATCH 2/9] exec: make iotlb RCU-friendly Paolo Bonzini
@ 2015-02-03 12:52 ` Paolo Bonzini
2015-02-04 2:56 ` Fam Zheng
2015-02-03 12:52 ` [Qemu-devel] [PATCH 4/9] rcu: introduce RCU-enabled QLIST Paolo Bonzini
` (5 subsequent siblings)
8 siblings, 1 reply; 22+ messages in thread
From: Paolo Bonzini @ 2015-02-03 12:52 UTC (permalink / raw)
To: qemu-devel; +Cc: peter.maydell, famz, fred.konrad
Note that even after this patch, most callers of address_space_*
functions must still be under the big QEMU lock, otherwise the memory
region returned by address_space_translate can disappear as soon as
address_space_translate returns. This will be fixed in the next part
of this series.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
cpu-exec.c | 25 ++++++++++++++++++++++++-
cpus.c | 2 +-
cputlb.c | 8 ++++++--
exec.c | 34 ++++++++++++++++++++++++++--------
hw/i386/intel_iommu.c | 3 +++
hw/pci-host/apb.c | 1 +
hw/ppc/spapr_iommu.c | 1 +
include/exec/exec-all.h | 1 +
8 files changed, 63 insertions(+), 12 deletions(-)
diff --git a/cpu-exec.c b/cpu-exec.c
index 98f968d..adb939a 100644
--- a/cpu-exec.c
+++ b/cpu-exec.c
@@ -26,6 +26,7 @@
#include "qemu/timer.h"
#include "exec/address-spaces.h"
#include "exec/memory-internal.h"
+#include "qemu/rcu.h"
/* -icount align implementation. */
@@ -146,8 +147,27 @@ void cpu_resume_from_signal(CPUState *cpu, void *puc)
void cpu_reload_memory_map(CPUState *cpu)
{
+ AddressSpaceDispatch *d;
+
+ if (qemu_in_vcpu_thread()) {
+ /* Do not let the guest prolong the critical section as much as it
+ * as it desires.
+ *
+ * Currently, this is prevented by the I/O thread's periodinc kicking
+ * of the VCPU thread (iothread_requesting_mutex, qemu_cpu_kick_thread)
+ * but this will go away once TCG's execution moves out of the global
+ * mutex.
+ *
+ * This pair matches cpu_exec's rcu_read_lock()/rcu_read_unlock(), which
+ * only protects cpu->as->dispatch. Since we reload it below, we can
+ * split the critical section.
+ */
+ rcu_read_unlock();
+ rcu_read_lock();
+ }
+
/* The CPU and TLB are protected by the iothread lock. */
- AddressSpaceDispatch *d = cpu->as->dispatch;
+ d = atomic_rcu_read(&cpu->as->dispatch);
cpu->memory_dispatch = d;
tlb_flush(cpu, 1);
}
@@ -362,6 +382,8 @@ int cpu_exec(CPUArchState *env)
* an instruction scheduling constraint on modern architectures. */
smp_mb();
+ rcu_read_lock();
+
if (unlikely(exit_request)) {
cpu->exit_request = 1;
}
@@ -564,6 +586,7 @@ int cpu_exec(CPUArchState *env)
} /* for(;;) */
cc->cpu_exec_exit(cpu);
+ rcu_read_unlock();
/* fail safe : never use current_cpu outside cpu_exec() */
current_cpu = NULL;
diff --git a/cpus.c b/cpus.c
index 0cdd1d7..b826fac 100644
--- a/cpus.c
+++ b/cpus.c
@@ -1104,7 +1104,7 @@ bool qemu_cpu_is_self(CPUState *cpu)
return qemu_thread_is_self(cpu->thread);
}
-static bool qemu_in_vcpu_thread(void)
+bool qemu_in_vcpu_thread(void)
{
return current_cpu && qemu_cpu_is_self(current_cpu);
}
diff --git a/cputlb.c b/cputlb.c
index f92db5e..38f2151 100644
--- a/cputlb.c
+++ b/cputlb.c
@@ -243,8 +243,12 @@ static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr,
}
/* Add a new TLB entry. At most one entry for a given virtual address
- is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
- supplied size is only used by tlb_flush_page. */
+ * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
+ * supplied size is only used by tlb_flush_page.
+ *
+ * Called from TCG-generated code, which is under an RCU read-side
+ * critical section.
+ */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
hwaddr paddr, int prot,
int mmu_idx, target_ulong size)
diff --git a/exec.c b/exec.c
index 1854c95..a423def 100644
--- a/exec.c
+++ b/exec.c
@@ -115,6 +115,8 @@ struct PhysPageEntry {
typedef PhysPageEntry Node[P_L2_SIZE];
typedef struct PhysPageMap {
+ struct rcu_head rcu;
+
unsigned sections_nb;
unsigned sections_nb_alloc;
unsigned nodes_nb;
@@ -124,6 +126,8 @@ typedef struct PhysPageMap {
} PhysPageMap;
struct AddressSpaceDispatch {
+ struct rcu_head rcu;
+
/* This is a multi-level map on the physical address space.
* The bottom level has pointers to MemoryRegionSections.
*/
@@ -315,6 +319,7 @@ bool memory_region_is_unassigned(MemoryRegion *mr)
&& mr != &io_mem_watch;
}
+/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
hwaddr addr,
bool resolve_subpage)
@@ -330,6 +335,7 @@ static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
return section;
}
+/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
hwaddr *plen, bool resolve_subpage)
@@ -370,8 +376,10 @@ MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
MemoryRegion *mr;
hwaddr len = *plen;
+ rcu_read_lock();
for (;;) {
- section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
+ AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
+ section = address_space_translate_internal(d, addr, &addr, plen, true);
mr = section->mr;
if (!mr->iommu_ops) {
@@ -397,9 +405,11 @@ MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
*plen = len;
*xlat = addr;
+ rcu_read_unlock();
return mr;
}
+/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, hwaddr addr,
hwaddr *xlat, hwaddr *plen)
@@ -852,6 +862,7 @@ static void cpu_physical_memory_set_dirty_tracking(bool enable)
in_migration = enable;
}
+/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
MemoryRegionSection *section,
target_ulong vaddr,
@@ -1964,7 +1975,8 @@ static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index)
{
- MemoryRegionSection *sections = cpu->memory_dispatch->map.sections;
+ AddressSpaceDispatch *d = atomic_rcu_read(&cpu->memory_dispatch);
+ MemoryRegionSection *sections = d->map.sections;
return sections[index & ~TARGET_PAGE_MASK].mr;
}
@@ -2000,6 +2012,12 @@ static void mem_begin(MemoryListener *listener)
as->next_dispatch = d;
}
+static void address_space_dispatch_free(AddressSpaceDispatch *d)
+{
+ phys_sections_free(&d->map);
+ g_free(d);
+}
+
static void mem_commit(MemoryListener *listener)
{
AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
@@ -2008,11 +2026,9 @@ static void mem_commit(MemoryListener *listener)
phys_page_compact_all(next, next->map.nodes_nb);
- as->dispatch = next;
-
+ atomic_rcu_set(&as->dispatch, next);
if (cur) {
- phys_sections_free(&cur->map);
- g_free(cur);
+ call_rcu(cur, address_space_dispatch_free, rcu);
}
}
@@ -2067,8 +2083,10 @@ void address_space_destroy_dispatch(AddressSpace *as)
AddressSpaceDispatch *d = as->dispatch;
memory_listener_unregister(&as->dispatch_listener);
- g_free(d);
- as->dispatch = NULL;
+ atomic_rcu_set(&as->dispatch, NULL);
+ if (d) {
+ call_rcu(d, address_space_dispatch_free, rcu);
+ }
}
static void memory_map_init(void)
diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c
index 0a4282a..7da70ff 100644
--- a/hw/i386/intel_iommu.c
+++ b/hw/i386/intel_iommu.c
@@ -745,6 +745,9 @@ static inline bool vtd_is_interrupt_addr(hwaddr addr)
/* Map dev to context-entry then do a paging-structures walk to do a iommu
* translation.
+ *
+ * Called from RCU critical section.
+ *
* @bus_num: The bus number
* @devfn: The devfn, which is the combined of device and function number
* @is_write: The access is a write operation
diff --git a/hw/pci-host/apb.c b/hw/pci-host/apb.c
index f573875..832b6c7 100644
--- a/hw/pci-host/apb.c
+++ b/hw/pci-host/apb.c
@@ -205,6 +205,7 @@ static AddressSpace *pbm_pci_dma_iommu(PCIBus *bus, void *opaque, int devfn)
return &is->iommu_as;
}
+/* Called from RCU critical section */
static IOMMUTLBEntry pbm_translate_iommu(MemoryRegion *iommu, hwaddr addr,
bool is_write)
{
diff --git a/hw/ppc/spapr_iommu.c b/hw/ppc/spapr_iommu.c
index da47474..ba003da 100644
--- a/hw/ppc/spapr_iommu.c
+++ b/hw/ppc/spapr_iommu.c
@@ -59,6 +59,7 @@ static sPAPRTCETable *spapr_tce_find_by_liobn(uint32_t liobn)
return NULL;
}
+/* Called from RCU critical section */
static IOMMUTLBEntry spapr_tce_translate_iommu(MemoryRegion *iommu, hwaddr addr,
bool is_write)
{
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index bb3fd37..8eb0db3 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -96,6 +96,7 @@ void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
int is_cpu_write_access);
#if !defined(CONFIG_USER_ONLY)
+bool qemu_in_vcpu_thread(void);
void cpu_reload_memory_map(CPUState *cpu);
void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as);
/* cputlb.c */
--
1.8.3.1
^ permalink raw reply related [flat|nested] 22+ messages in thread
* Re: [Qemu-devel] [PATCH 3/9] exec: RCUify AddressSpaceDispatch
2015-02-03 12:52 ` [Qemu-devel] [PATCH 3/9] exec: RCUify AddressSpaceDispatch Paolo Bonzini
@ 2015-02-04 2:56 ` Fam Zheng
0 siblings, 0 replies; 22+ messages in thread
From: Fam Zheng @ 2015-02-04 2:56 UTC (permalink / raw)
To: Paolo Bonzini; +Cc: peter.maydell, qemu-devel, fred.konrad
On Tue, 02/03 13:52, Paolo Bonzini wrote:
> Note that even after this patch, most callers of address_space_*
> functions must still be under the big QEMU lock, otherwise the memory
> region returned by address_space_translate can disappear as soon as
> address_space_translate returns. This will be fixed in the next part
> of this series.
>
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> ---
> cpu-exec.c | 25 ++++++++++++++++++++++++-
> cpus.c | 2 +-
> cputlb.c | 8 ++++++--
> exec.c | 34 ++++++++++++++++++++++++++--------
> hw/i386/intel_iommu.c | 3 +++
> hw/pci-host/apb.c | 1 +
> hw/ppc/spapr_iommu.c | 1 +
> include/exec/exec-all.h | 1 +
> 8 files changed, 63 insertions(+), 12 deletions(-)
>
> diff --git a/cpu-exec.c b/cpu-exec.c
> index 98f968d..adb939a 100644
> --- a/cpu-exec.c
> +++ b/cpu-exec.c
> @@ -26,6 +26,7 @@
> #include "qemu/timer.h"
> #include "exec/address-spaces.h"
> #include "exec/memory-internal.h"
> +#include "qemu/rcu.h"
>
> /* -icount align implementation. */
>
> @@ -146,8 +147,27 @@ void cpu_resume_from_signal(CPUState *cpu, void *puc)
>
> void cpu_reload_memory_map(CPUState *cpu)
> {
> + AddressSpaceDispatch *d;
> +
> + if (qemu_in_vcpu_thread()) {
> + /* Do not let the guest prolong the critical section as much as it
> + * as it desires.
> + *
> + * Currently, this is prevented by the I/O thread's periodinc kicking
> + * of the VCPU thread (iothread_requesting_mutex, qemu_cpu_kick_thread)
> + * but this will go away once TCG's execution moves out of the global
> + * mutex.
> + *
> + * This pair matches cpu_exec's rcu_read_lock()/rcu_read_unlock(), which
> + * only protects cpu->as->dispatch. Since we reload it below, we can
> + * split the critical section.
> + */
> + rcu_read_unlock();
> + rcu_read_lock();
> + }
> +
> /* The CPU and TLB are protected by the iothread lock. */
> - AddressSpaceDispatch *d = cpu->as->dispatch;
> + d = atomic_rcu_read(&cpu->as->dispatch);
> cpu->memory_dispatch = d;
> tlb_flush(cpu, 1);
> }
> @@ -362,6 +382,8 @@ int cpu_exec(CPUArchState *env)
> * an instruction scheduling constraint on modern architectures. */
> smp_mb();
>
> + rcu_read_lock();
> +
> if (unlikely(exit_request)) {
> cpu->exit_request = 1;
> }
> @@ -564,6 +586,7 @@ int cpu_exec(CPUArchState *env)
> } /* for(;;) */
>
> cc->cpu_exec_exit(cpu);
> + rcu_read_unlock();
>
> /* fail safe : never use current_cpu outside cpu_exec() */
> current_cpu = NULL;
> diff --git a/cpus.c b/cpus.c
> index 0cdd1d7..b826fac 100644
> --- a/cpus.c
> +++ b/cpus.c
> @@ -1104,7 +1104,7 @@ bool qemu_cpu_is_self(CPUState *cpu)
> return qemu_thread_is_self(cpu->thread);
> }
>
> -static bool qemu_in_vcpu_thread(void)
> +bool qemu_in_vcpu_thread(void)
> {
> return current_cpu && qemu_cpu_is_self(current_cpu);
> }
> diff --git a/cputlb.c b/cputlb.c
> index f92db5e..38f2151 100644
> --- a/cputlb.c
> +++ b/cputlb.c
> @@ -243,8 +243,12 @@ static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr,
> }
>
> /* Add a new TLB entry. At most one entry for a given virtual address
> - is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
> - supplied size is only used by tlb_flush_page. */
> + * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
> + * supplied size is only used by tlb_flush_page.
> + *
> + * Called from TCG-generated code, which is under an RCU read-side
> + * critical section.
> + */
> void tlb_set_page(CPUState *cpu, target_ulong vaddr,
> hwaddr paddr, int prot,
> int mmu_idx, target_ulong size)
> diff --git a/exec.c b/exec.c
> index 1854c95..a423def 100644
> --- a/exec.c
> +++ b/exec.c
> @@ -115,6 +115,8 @@ struct PhysPageEntry {
> typedef PhysPageEntry Node[P_L2_SIZE];
>
> typedef struct PhysPageMap {
> + struct rcu_head rcu;
> +
> unsigned sections_nb;
> unsigned sections_nb_alloc;
> unsigned nodes_nb;
> @@ -124,6 +126,8 @@ typedef struct PhysPageMap {
> } PhysPageMap;
>
> struct AddressSpaceDispatch {
> + struct rcu_head rcu;
> +
> /* This is a multi-level map on the physical address space.
> * The bottom level has pointers to MemoryRegionSections.
> */
> @@ -315,6 +319,7 @@ bool memory_region_is_unassigned(MemoryRegion *mr)
> && mr != &io_mem_watch;
> }
>
> +/* Called from RCU critical section */
> static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
> hwaddr addr,
> bool resolve_subpage)
> @@ -330,6 +335,7 @@ static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
> return section;
> }
>
> +/* Called from RCU critical section */
> static MemoryRegionSection *
> address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
> hwaddr *plen, bool resolve_subpage)
> @@ -370,8 +376,10 @@ MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
> MemoryRegion *mr;
> hwaddr len = *plen;
>
> + rcu_read_lock();
> for (;;) {
> - section = address_space_translate_internal(as->dispatch, addr, &addr, plen, true);
> + AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
> + section = address_space_translate_internal(d, addr, &addr, plen, true);
> mr = section->mr;
>
> if (!mr->iommu_ops) {
> @@ -397,9 +405,11 @@ MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
>
> *plen = len;
> *xlat = addr;
> + rcu_read_unlock();
> return mr;
> }
>
> +/* Called from RCU critical section */
> MemoryRegionSection *
> address_space_translate_for_iotlb(CPUState *cpu, hwaddr addr,
> hwaddr *xlat, hwaddr *plen)
> @@ -852,6 +862,7 @@ static void cpu_physical_memory_set_dirty_tracking(bool enable)
> in_migration = enable;
> }
>
> +/* Called from RCU critical section */
> hwaddr memory_region_section_get_iotlb(CPUState *cpu,
> MemoryRegionSection *section,
> target_ulong vaddr,
> @@ -1964,7 +1975,8 @@ static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
>
> MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index)
> {
> - MemoryRegionSection *sections = cpu->memory_dispatch->map.sections;
> + AddressSpaceDispatch *d = atomic_rcu_read(&cpu->memory_dispatch);
> + MemoryRegionSection *sections = d->map.sections;
>
> return sections[index & ~TARGET_PAGE_MASK].mr;
> }
> @@ -2000,6 +2012,12 @@ static void mem_begin(MemoryListener *listener)
> as->next_dispatch = d;
> }
>
> +static void address_space_dispatch_free(AddressSpaceDispatch *d)
> +{
> + phys_sections_free(&d->map);
> + g_free(d);
> +}
> +
> static void mem_commit(MemoryListener *listener)
> {
> AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
> @@ -2008,11 +2026,9 @@ static void mem_commit(MemoryListener *listener)
>
> phys_page_compact_all(next, next->map.nodes_nb);
>
> - as->dispatch = next;
> -
> + atomic_rcu_set(&as->dispatch, next);
> if (cur) {
> - phys_sections_free(&cur->map);
> - g_free(cur);
> + call_rcu(cur, address_space_dispatch_free, rcu);
> }
> }
>
> @@ -2067,8 +2083,10 @@ void address_space_destroy_dispatch(AddressSpace *as)
> AddressSpaceDispatch *d = as->dispatch;
>
> memory_listener_unregister(&as->dispatch_listener);
> - g_free(d);
> - as->dispatch = NULL;
> + atomic_rcu_set(&as->dispatch, NULL);
> + if (d) {
> + call_rcu(d, address_space_dispatch_free, rcu);
> + }
> }
>
> static void memory_map_init(void)
> diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c
> index 0a4282a..7da70ff 100644
> --- a/hw/i386/intel_iommu.c
> +++ b/hw/i386/intel_iommu.c
> @@ -745,6 +745,9 @@ static inline bool vtd_is_interrupt_addr(hwaddr addr)
>
> /* Map dev to context-entry then do a paging-structures walk to do a iommu
> * translation.
> + *
> + * Called from RCU critical section.
> + *
> * @bus_num: The bus number
> * @devfn: The devfn, which is the combined of device and function number
> * @is_write: The access is a write operation
> diff --git a/hw/pci-host/apb.c b/hw/pci-host/apb.c
> index f573875..832b6c7 100644
> --- a/hw/pci-host/apb.c
> +++ b/hw/pci-host/apb.c
> @@ -205,6 +205,7 @@ static AddressSpace *pbm_pci_dma_iommu(PCIBus *bus, void *opaque, int devfn)
> return &is->iommu_as;
> }
>
> +/* Called from RCU critical section */
> static IOMMUTLBEntry pbm_translate_iommu(MemoryRegion *iommu, hwaddr addr,
> bool is_write)
> {
> diff --git a/hw/ppc/spapr_iommu.c b/hw/ppc/spapr_iommu.c
> index da47474..ba003da 100644
> --- a/hw/ppc/spapr_iommu.c
> +++ b/hw/ppc/spapr_iommu.c
> @@ -59,6 +59,7 @@ static sPAPRTCETable *spapr_tce_find_by_liobn(uint32_t liobn)
> return NULL;
> }
>
> +/* Called from RCU critical section */
> static IOMMUTLBEntry spapr_tce_translate_iommu(MemoryRegion *iommu, hwaddr addr,
> bool is_write)
> {
> diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
> index bb3fd37..8eb0db3 100644
> --- a/include/exec/exec-all.h
> +++ b/include/exec/exec-all.h
> @@ -96,6 +96,7 @@ void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
> void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
> int is_cpu_write_access);
> #if !defined(CONFIG_USER_ONLY)
> +bool qemu_in_vcpu_thread(void);
> void cpu_reload_memory_map(CPUState *cpu);
> void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as);
> /* cputlb.c */
> --
> 1.8.3.1
>
>
Reviewed-by: Fam Zheng <famz@redhat.com>
^ permalink raw reply [flat|nested] 22+ messages in thread
* [Qemu-devel] [PATCH 4/9] rcu: introduce RCU-enabled QLIST
2015-02-03 12:52 [Qemu-devel] [PATCH v2 0/9] RCUification of the memory API, part 2 Paolo Bonzini
` (2 preceding siblings ...)
2015-02-03 12:52 ` [Qemu-devel] [PATCH 3/9] exec: RCUify AddressSpaceDispatch Paolo Bonzini
@ 2015-02-03 12:52 ` Paolo Bonzini
2015-02-04 3:42 ` Fam Zheng
2015-02-03 12:52 ` [Qemu-devel] [PATCH 5/9] exec: protect mru_block with RCU Paolo Bonzini
` (4 subsequent siblings)
8 siblings, 1 reply; 22+ messages in thread
From: Paolo Bonzini @ 2015-02-03 12:52 UTC (permalink / raw)
To: qemu-devel; +Cc: Mike Day, peter.maydell, famz, fred.konrad
From: Mike Day <ncmike@ncultra.org>
Add RCU-enabled variants on the existing bsd DQ facility. Each
operation has the same interface as the existing (non-RCU)
version. Also, each operation is implemented as a macro.
Using the RCU-enabled QLIST, existing QLIST users will be able to
convert to RCU without using a different list interface.
Signed-off-by: Mike Day <ncmike@ncultra.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
hw/9pfs/virtio-9p-synth.c | 2 +-
include/qemu/queue.h | 11 --
include/qemu/rcu_queue.h | 134 ++++++++++++++++++++
tests/Makefile | 5 +-
tests/test-rcu-list.c | 306 ++++++++++++++++++++++++++++++++++++++++++++++
5 files changed, 445 insertions(+), 13 deletions(-)
create mode 100644 include/qemu/rcu_queue.h
create mode 100644 tests/test-rcu-list.c
diff --git a/hw/9pfs/virtio-9p-synth.c b/hw/9pfs/virtio-9p-synth.c
index e75aa87..a0ab9a8 100644
--- a/hw/9pfs/virtio-9p-synth.c
+++ b/hw/9pfs/virtio-9p-synth.c
@@ -18,7 +18,7 @@
#include "fsdev/qemu-fsdev.h"
#include "virtio-9p-synth.h"
#include "qemu/rcu.h"
-
+#include "qemu/rcu_queue.h"
#include <sys/stat.h>
/* Root node for synth file system */
diff --git a/include/qemu/queue.h b/include/qemu/queue.h
index c602797..8094150 100644
--- a/include/qemu/queue.h
+++ b/include/qemu/queue.h
@@ -139,17 +139,6 @@ struct { \
(elm)->field.le_prev = &(head)->lh_first; \
} while (/*CONSTCOND*/0)
-#define QLIST_INSERT_HEAD_RCU(head, elm, field) do { \
- (elm)->field.le_prev = &(head)->lh_first; \
- (elm)->field.le_next = (head)->lh_first; \
- smp_wmb(); /* fill elm before linking it */ \
- if ((head)->lh_first != NULL) { \
- (head)->lh_first->field.le_prev = &(elm)->field.le_next; \
- } \
- (head)->lh_first = (elm); \
- smp_wmb(); \
-} while (/* CONSTCOND*/0)
-
#define QLIST_REMOVE(elm, field) do { \
if ((elm)->field.le_next != NULL) \
(elm)->field.le_next->field.le_prev = \
diff --git a/include/qemu/rcu_queue.h b/include/qemu/rcu_queue.h
new file mode 100644
index 0000000..3aca7a5
--- /dev/null
+++ b/include/qemu/rcu_queue.h
@@ -0,0 +1,134 @@
+#ifndef QEMU_RCU_QUEUE_H
+#define QEMU_RCU_QUEUE_H
+
+/*
+ * rcu_queue.h
+ *
+ * RCU-friendly versions of the queue.h primitives.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * Copyright (c) 2013 Mike D. Day, IBM Corporation.
+ *
+ * IBM's contributions to this file may be relicensed under LGPLv2 or later.
+ */
+
+#include "qemu/queue.h"
+#include "qemu/atomic.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/*
+ * List access methods.
+ */
+#define QLIST_EMPTY_RCU(head) (atomic_rcu_read(&(head)->lh_first) == NULL)
+#define QLIST_FIRST_RCU(head) (atomic_rcu_read(&(head)->lh_first))
+#define QLIST_NEXT_RCU(elm, field) (atomic_rcu_read(&(elm)->field.le_next))
+
+/*
+ * List functions.
+ */
+
+
+/*
+ * The difference between atomic_read/set and atomic_rcu_read/set
+ * is in the including of a read/write memory barrier to the volatile
+ * access. atomic_rcu_* macros include the memory barrier, the
+ * plain atomic macros do not. Therefore, it should be correct to
+ * issue a series of reads or writes to the same element using only
+ * the atomic_* macro, until the last read or write, which should be
+ * atomic_rcu_* to introduce a read or write memory barrier as
+ * appropriate.
+ */
+
+/* Upon publication of the listelm->next value, list readers
+ * will see the new node when following next pointers from
+ * antecedent nodes, but may not see the new node when following
+ * prev pointers from subsequent nodes until after the RCU grace
+ * period expires.
+ * see linux/include/rculist.h __list_add_rcu(new, prev, next)
+ */
+#define QLIST_INSERT_AFTER_RCU(listelm, elm, field) do { \
+ (elm)->field.le_next = (listelm)->field.le_next; \
+ (elm)->field.le_prev = &(listelm)->field.le_next; \
+ atomic_rcu_set(&(listelm)->field.le_next, (elm)); \
+ if ((elm)->field.le_next != NULL) { \
+ (elm)->field.le_next->field.le_prev = \
+ &(elm)->field.le_next; \
+ } \
+} while (/*CONSTCOND*/0)
+
+/* Upon publication of the listelm->prev->next value, list
+ * readers will see the new element when following prev pointers
+ * from subsequent elements, but may not see the new element
+ * when following next pointers from antecedent elements
+ * until after the RCU grace period expires.
+ */
+#define QLIST_INSERT_BEFORE_RCU(listelm, elm, field) do { \
+ (elm)->field.le_prev = (listelm)->field.le_prev; \
+ (elm)->field.le_next = (listelm); \
+ atomic_rcu_set((listelm)->field.le_prev, (elm)); \
+ (listelm)->field.le_prev = &(elm)->field.le_next; \
+} while (/*CONSTCOND*/0)
+
+/* Upon publication of the head->first value, list readers
+ * will see the new element when following the head, but may
+ * not see the new element when following prev pointers from
+ * subsequent elements until after the RCU grace period has
+ * expired.
+ */
+#define QLIST_INSERT_HEAD_RCU(head, elm, field) do { \
+ (elm)->field.le_prev = &(head)->lh_first; \
+ (elm)->field.le_next = (head)->lh_first; \
+ atomic_rcu_set((&(head)->lh_first), (elm)); \
+ if ((elm)->field.le_next != NULL) { \
+ (elm)->field.le_next->field.le_prev = \
+ &(elm)->field.le_next; \
+ } \
+} while (/*CONSTCOND*/0)
+
+
+/* prior to publication of the elm->prev->next value, some list
+ * readers may still see the removed element when following
+ * the antecedent's next pointer.
+ */
+#define QLIST_REMOVE_RCU(elm, field) do { \
+ if ((elm)->field.le_next != NULL) { \
+ (elm)->field.le_next->field.le_prev = \
+ (elm)->field.le_prev; \
+ } \
+ *(elm)->field.le_prev = (elm)->field.le_next; \
+} while (/*CONSTCOND*/0)
+
+/* List traversal must occur within an RCU critical section. */
+#define QLIST_FOREACH_RCU(var, head, field) \
+ for ((var) = atomic_rcu_read(&(head)->lh_first); \
+ (var); \
+ (var) = atomic_rcu_read(&(var)->field.le_next))
+
+/* List traversal must occur within an RCU critical section. */
+#define QLIST_FOREACH_SAFE_RCU(var, head, field, next_var) \
+ for ((var) = (atomic_rcu_read(&(head)->lh_first)); \
+ (var) && \
+ ((next_var) = atomic_rcu_read(&(var)->field.le_next), 1); \
+ (var) = (next_var))
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* QEMU_RCU_QUEUE.H */
diff --git a/tests/Makefile b/tests/Makefile
index db5b3c3..ef8d589 100644
--- a/tests/Makefile
+++ b/tests/Makefile
@@ -62,6 +62,8 @@ check-unit-y += tests/test-int128$(EXESUF)
gcov-files-test-int128-y =
check-unit-y += tests/rcutorture$(EXESUF)
gcov-files-rcutorture-y = util/rcu.c
+check-unit-y += tests/test-rcu-list$(EXESUF)
+gcov-files-test-rcu-list-y = util/rcu.c
check-unit-y += tests/test-bitops$(EXESUF)
check-unit-$(CONFIG_HAS_GLIB_SUBPROCESS_TESTS) += tests/test-qdev-global-props$(EXESUF)
check-unit-y += tests/check-qom-interface$(EXESUF)
@@ -226,7 +228,7 @@ test-obj-y = tests/check-qint.o tests/check-qstring.o tests/check-qdict.o \
tests/test-qmp-commands.o tests/test-visitor-serialization.o \
tests/test-x86-cpuid.o tests/test-mul64.o tests/test-int128.o \
tests/test-opts-visitor.o tests/test-qmp-event.o \
- tests/rcutorture.o
+ tests/rcutorture.o tests/test-rcu-list.o
test-qapi-obj-y = tests/test-qapi-visit.o tests/test-qapi-types.o \
tests/test-qapi-event.o
@@ -256,6 +258,7 @@ tests/test-xbzrle$(EXESUF): tests/test-xbzrle.o migration/xbzrle.o page_cache.o
tests/test-cutils$(EXESUF): tests/test-cutils.o util/cutils.o
tests/test-int128$(EXESUF): tests/test-int128.o
tests/rcutorture$(EXESUF): tests/rcutorture.o libqemuutil.a
+tests/test-rcu-list$(EXESUF): tests/test-rcu-list.o libqemuutil.a
tests/test-qdev-global-props$(EXESUF): tests/test-qdev-global-props.o \
hw/core/qdev.o hw/core/qdev-properties.o hw/core/hotplug.o\
diff --git a/tests/test-rcu-list.c b/tests/test-rcu-list.c
new file mode 100644
index 0000000..5ce55e9
--- /dev/null
+++ b/tests/test-rcu-list.c
@@ -0,0 +1,306 @@
+/*
+ * rcuq_test.c
+ *
+ * usage: rcuq_test <readers> <duration>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (c) 2013 Mike D. Day, IBM Corporation.
+ */
+
+/*
+ * Test variables.
+ */
+
+#include <glib.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include "qemu/atomic.h"
+#include "qemu/rcu.h"
+#include "qemu/compiler.h"
+#include "qemu/osdep.h"
+#include "qemu/thread.h"
+#include "qemu/rcu_queue.h"
+
+long long n_reads = 0LL;
+long long n_updates = 0LL;
+long long n_reclaims = 0LL;
+long long n_nodes_removed = 0LL;
+long long n_nodes = 0LL;
+int g_test_in_charge = 0;
+
+int nthreadsrunning;
+
+char argsbuf[64];
+
+#define GOFLAG_INIT 0
+#define GOFLAG_RUN 1
+#define GOFLAG_STOP 2
+
+static volatile int goflag = GOFLAG_INIT;
+
+#define RCU_READ_RUN 1000
+#define RCU_UPDATE_RUN 10
+#define NR_THREADS 100
+#define RCU_Q_LEN 100
+
+static QemuThread threads[NR_THREADS];
+static struct rcu_reader_data *data[NR_THREADS];
+static int n_threads;
+
+static int select_random_el(int max)
+{
+ return (rand() % max);
+}
+
+
+static void create_thread(void *(*func)(void *))
+{
+ if (n_threads >= NR_THREADS) {
+ fprintf(stderr, "Thread limit of %d exceeded!\n", NR_THREADS);
+ exit(-1);
+ }
+ qemu_thread_create(&threads[n_threads], "test", func, &data[n_threads],
+ QEMU_THREAD_JOINABLE);
+ n_threads++;
+}
+
+static void wait_all_threads(void)
+{
+ int i;
+
+ for (i = 0; i < n_threads; i++) {
+ qemu_thread_join(&threads[i]);
+ }
+ n_threads = 0;
+}
+
+
+struct list_element {
+ QLIST_ENTRY(list_element) entry;
+ struct rcu_head rcu;
+ long long val;
+};
+
+static void reclaim_list_el(struct rcu_head *prcu)
+{
+ struct list_element *el = container_of(prcu, struct list_element, rcu);
+ g_free(el);
+ atomic_add(&n_reclaims, 1);
+}
+
+static QLIST_HEAD(q_list_head, list_element) Q_list_head;
+
+static void *rcu_q_reader(void *arg)
+{
+ long long j, n_reads_local = 0;
+ struct list_element *el;
+
+ *(struct rcu_reader_data **)arg = &rcu_reader;
+ atomic_inc(&nthreadsrunning);
+ while (goflag == GOFLAG_INIT) {
+ g_usleep(1000);
+ }
+
+ while (goflag == GOFLAG_RUN) {
+ rcu_read_lock();
+ QLIST_FOREACH_RCU(el, &Q_list_head, entry) {
+ j = atomic_read(&el->val);
+ (void)j;
+ n_reads_local++;
+ if (goflag == GOFLAG_STOP) {
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ g_usleep(100);
+ }
+ atomic_add(&n_reads, n_reads_local);
+ return NULL;
+}
+
+
+static void *rcu_q_updater(void *arg)
+{
+ int j, target_el;
+ long long n_updates_local = 0;
+ long long n_removed_local = 0;
+ struct list_element *el, *prev_el;
+
+ *(struct rcu_reader_data **)arg = &rcu_reader;
+ atomic_inc(&nthreadsrunning);
+ while (goflag == GOFLAG_INIT) {
+ g_usleep(1000);
+ }
+
+ while (goflag == GOFLAG_RUN) {
+ target_el = select_random_el(RCU_Q_LEN);
+ j = 0;
+ /* FOREACH_RCU could work here but let's use both macros */
+ QLIST_FOREACH_SAFE_RCU(prev_el, &Q_list_head, entry, el) {
+ j++;
+ if (target_el == j) {
+ QLIST_REMOVE_RCU(prev_el, entry);
+ /* may be more than one updater in the future */
+ call_rcu1(&prev_el->rcu, reclaim_list_el);
+ n_removed_local++;
+ break;
+ }
+ }
+ if (goflag == GOFLAG_STOP) {
+ break;
+ }
+ target_el = select_random_el(RCU_Q_LEN);
+ j = 0;
+ QLIST_FOREACH_RCU(el, &Q_list_head, entry) {
+ j++;
+ if (target_el == j) {
+ prev_el = g_new(struct list_element, 1);
+ atomic_add(&n_nodes, 1);
+ prev_el->val = atomic_read(&n_nodes);
+ QLIST_INSERT_BEFORE_RCU(el, prev_el, entry);
+ break;
+ }
+ }
+
+ n_updates_local += 2;
+ synchronize_rcu();
+ }
+ synchronize_rcu();
+ atomic_add(&n_updates, n_updates_local);
+ atomic_add(&n_nodes_removed, n_removed_local);
+ return NULL;
+}
+
+static void rcu_qtest_init(void)
+{
+ struct list_element *new_el;
+ int i;
+ nthreadsrunning = 0;
+ srand(time(0));
+ for (i = 0; i < RCU_Q_LEN; i++) {
+ new_el = g_new(struct list_element, 1);
+ new_el->val = i;
+ QLIST_INSERT_HEAD_RCU(&Q_list_head, new_el, entry);
+ }
+ atomic_add(&n_nodes, RCU_Q_LEN);
+}
+
+static void rcu_qtest_run(int duration, int nreaders)
+{
+ int nthreads = nreaders + 1;
+ while (atomic_read(&nthreadsrunning) < nthreads) {
+ g_usleep(1000);
+ }
+
+ goflag = GOFLAG_RUN;
+ sleep(duration);
+ goflag = GOFLAG_STOP;
+ wait_all_threads();
+}
+
+
+static void rcu_qtest(const char *test, int duration, int nreaders)
+{
+ int i;
+ long long n_removed_local = 0;
+
+ struct list_element *el, *prev_el;
+
+ rcu_qtest_init();
+ for (i = 0; i < nreaders; i++) {
+ create_thread(rcu_q_reader);
+ }
+ create_thread(rcu_q_updater);
+ rcu_qtest_run(duration, nreaders);
+
+ QLIST_FOREACH_SAFE_RCU(prev_el, &Q_list_head, entry, el) {
+ QLIST_REMOVE_RCU(prev_el, entry);
+ call_rcu1(&prev_el->rcu, reclaim_list_el);
+ n_removed_local++;
+ }
+ atomic_add(&n_nodes_removed, n_removed_local);
+ synchronize_rcu();
+ while (n_nodes_removed > n_reclaims) {
+ g_usleep(100);
+ synchronize_rcu();
+ }
+ if (g_test_in_charge) {
+ g_assert_cmpint(n_nodes_removed, ==, n_reclaims);
+ } else {
+ printf("%s: %d readers; 1 updater; nodes read: " \
+ "%lld, nodes removed: %lld; nodes reclaimed: %lld\n",
+ test, nthreadsrunning - 1, n_reads, n_nodes_removed, n_reclaims);
+ exit(0);
+ }
+}
+
+static void usage(int argc, char *argv[])
+{
+ fprintf(stderr, "Usage: %s duration nreaders\n", argv[0]);
+ exit(-1);
+}
+
+static int gtest_seconds;
+
+static void gtest_rcuq_one(void)
+{
+ rcu_qtest("rcuqtest", gtest_seconds / 4, 1);
+}
+
+static void gtest_rcuq_few(void)
+{
+ rcu_qtest("rcuqtest", gtest_seconds / 4, 5);
+}
+
+static void gtest_rcuq_many(void)
+{
+ rcu_qtest("rcuqtest", gtest_seconds / 2, 20);
+}
+
+
+int main(int argc, char *argv[])
+{
+ int duration = 0, readers = 0;
+
+ if (argc >= 2) {
+ if (argv[1][0] == '-') {
+ g_test_init(&argc, &argv, NULL);
+ if (g_test_quick()) {
+ gtest_seconds = 4;
+ } else {
+ gtest_seconds = 20;
+ }
+ g_test_add_func("/rcu/qlist/single-threaded", gtest_rcuq_one);
+ g_test_add_func("/rcu/qlist/short-few", gtest_rcuq_few);
+ g_test_add_func("/rcu/qlist/long-many", gtest_rcuq_many);
+ g_test_in_charge = 1;
+ return g_test_run();
+ }
+ duration = strtoul(argv[1], NULL, 0);
+ }
+ if (argc >= 3) {
+ readers = strtoul(argv[2], NULL, 0);
+ }
+ if (duration && readers) {
+ rcu_qtest(argv[0], duration, readers);
+ return 0;
+ }
+
+ usage(argc, argv);
+ return -1;
+}
--
1.8.3.1
^ permalink raw reply related [flat|nested] 22+ messages in thread
* Re: [Qemu-devel] [PATCH 4/9] rcu: introduce RCU-enabled QLIST
2015-02-03 12:52 ` [Qemu-devel] [PATCH 4/9] rcu: introduce RCU-enabled QLIST Paolo Bonzini
@ 2015-02-04 3:42 ` Fam Zheng
2015-02-04 12:46 ` Paolo Bonzini
0 siblings, 1 reply; 22+ messages in thread
From: Fam Zheng @ 2015-02-04 3:42 UTC (permalink / raw)
To: Paolo Bonzini; +Cc: Mike Day, peter.maydell, qemu-devel, fred.konrad
On Tue, 02/03 13:52, Paolo Bonzini wrote:
> From: Mike Day <ncmike@ncultra.org>
>
> Add RCU-enabled variants on the existing bsd DQ facility. Each
> operation has the same interface as the existing (non-RCU)
> version. Also, each operation is implemented as a macro.
>
> Using the RCU-enabled QLIST, existing QLIST users will be able to
> convert to RCU without using a different list interface.
>
> Signed-off-by: Mike Day <ncmike@ncultra.org>
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> ---
> hw/9pfs/virtio-9p-synth.c | 2 +-
> include/qemu/queue.h | 11 --
> include/qemu/rcu_queue.h | 134 ++++++++++++++++++++
> tests/Makefile | 5 +-
> tests/test-rcu-list.c | 306 ++++++++++++++++++++++++++++++++++++++++++++++
> 5 files changed, 445 insertions(+), 13 deletions(-)
> create mode 100644 include/qemu/rcu_queue.h
> create mode 100644 tests/test-rcu-list.c
>
> diff --git a/hw/9pfs/virtio-9p-synth.c b/hw/9pfs/virtio-9p-synth.c
> index e75aa87..a0ab9a8 100644
> --- a/hw/9pfs/virtio-9p-synth.c
> +++ b/hw/9pfs/virtio-9p-synth.c
> @@ -18,7 +18,7 @@
> #include "fsdev/qemu-fsdev.h"
> #include "virtio-9p-synth.h"
> #include "qemu/rcu.h"
> -
> +#include "qemu/rcu_queue.h"
> #include <sys/stat.h>
>
> /* Root node for synth file system */
> diff --git a/include/qemu/queue.h b/include/qemu/queue.h
> index c602797..8094150 100644
> --- a/include/qemu/queue.h
> +++ b/include/qemu/queue.h
> @@ -139,17 +139,6 @@ struct { \
> (elm)->field.le_prev = &(head)->lh_first; \
> } while (/*CONSTCOND*/0)
>
> -#define QLIST_INSERT_HEAD_RCU(head, elm, field) do { \
> - (elm)->field.le_prev = &(head)->lh_first; \
> - (elm)->field.le_next = (head)->lh_first; \
> - smp_wmb(); /* fill elm before linking it */ \
> - if ((head)->lh_first != NULL) { \
> - (head)->lh_first->field.le_prev = &(elm)->field.le_next; \
> - } \
> - (head)->lh_first = (elm); \
> - smp_wmb(); \
> -} while (/* CONSTCOND*/0)
> -
> #define QLIST_REMOVE(elm, field) do { \
> if ((elm)->field.le_next != NULL) \
> (elm)->field.le_next->field.le_prev = \
> diff --git a/include/qemu/rcu_queue.h b/include/qemu/rcu_queue.h
> new file mode 100644
> index 0000000..3aca7a5
> --- /dev/null
> +++ b/include/qemu/rcu_queue.h
> @@ -0,0 +1,134 @@
> +#ifndef QEMU_RCU_QUEUE_H
> +#define QEMU_RCU_QUEUE_H
> +
> +/*
> + * rcu_queue.h
> + *
> + * RCU-friendly versions of the queue.h primitives.
> + *
> + * This library is free software; you can redistribute it and/or
> + * modify it under the terms of the GNU Lesser General Public
> + * License as published by the Free Software Foundation; either
> + * version 2.1 of the License, or (at your option) any later version.
> + *
> + * This library is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
> + * Lesser General Public License for more details.
> + *
> + * You should have received a copy of the GNU Lesser General Public
> + * License along with this library; if not, write to the Free Software
> + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
> + *
> + * Copyright (c) 2013 Mike D. Day, IBM Corporation.
> + *
> + * IBM's contributions to this file may be relicensed under LGPLv2 or later.
> + */
> +
> +#include "qemu/queue.h"
> +#include "qemu/atomic.h"
> +
> +#ifdef __cplusplus
> +extern "C" {
> +#endif
> +
> +
> +/*
> + * List access methods.
> + */
> +#define QLIST_EMPTY_RCU(head) (atomic_rcu_read(&(head)->lh_first) == NULL)
> +#define QLIST_FIRST_RCU(head) (atomic_rcu_read(&(head)->lh_first))
> +#define QLIST_NEXT_RCU(elm, field) (atomic_rcu_read(&(elm)->field.le_next))
> +
> +/*
> + * List functions.
> + */
> +
> +
> +/*
> + * The difference between atomic_read/set and atomic_rcu_read/set
> + * is in the including of a read/write memory barrier to the volatile
> + * access. atomic_rcu_* macros include the memory barrier, the
> + * plain atomic macros do not. Therefore, it should be correct to
> + * issue a series of reads or writes to the same element using only
> + * the atomic_* macro, until the last read or write, which should be
> + * atomic_rcu_* to introduce a read or write memory barrier as
> + * appropriate.
> + */
> +
> +/* Upon publication of the listelm->next value, list readers
> + * will see the new node when following next pointers from
> + * antecedent nodes, but may not see the new node when following
> + * prev pointers from subsequent nodes until after the RCU grace
> + * period expires.
> + * see linux/include/rculist.h __list_add_rcu(new, prev, next)
> + */
> +#define QLIST_INSERT_AFTER_RCU(listelm, elm, field) do { \
> + (elm)->field.le_next = (listelm)->field.le_next; \
> + (elm)->field.le_prev = &(listelm)->field.le_next; \
> + atomic_rcu_set(&(listelm)->field.le_next, (elm)); \
> + if ((elm)->field.le_next != NULL) { \
> + (elm)->field.le_next->field.le_prev = \
> + &(elm)->field.le_next; \
> + } \
> +} while (/*CONSTCOND*/0)
> +
> +/* Upon publication of the listelm->prev->next value, list
> + * readers will see the new element when following prev pointers
> + * from subsequent elements, but may not see the new element
> + * when following next pointers from antecedent elements
> + * until after the RCU grace period expires.
> + */
> +#define QLIST_INSERT_BEFORE_RCU(listelm, elm, field) do { \
> + (elm)->field.le_prev = (listelm)->field.le_prev; \
> + (elm)->field.le_next = (listelm); \
> + atomic_rcu_set((listelm)->field.le_prev, (elm)); \
> + (listelm)->field.le_prev = &(elm)->field.le_next; \
> +} while (/*CONSTCOND*/0)
> +
> +/* Upon publication of the head->first value, list readers
> + * will see the new element when following the head, but may
> + * not see the new element when following prev pointers from
> + * subsequent elements until after the RCU grace period has
> + * expired.
> + */
> +#define QLIST_INSERT_HEAD_RCU(head, elm, field) do { \
> + (elm)->field.le_prev = &(head)->lh_first; \
> + (elm)->field.le_next = (head)->lh_first; \
> + atomic_rcu_set((&(head)->lh_first), (elm)); \
> + if ((elm)->field.le_next != NULL) { \
> + (elm)->field.le_next->field.le_prev = \
> + &(elm)->field.le_next; \
> + } \
> +} while (/*CONSTCOND*/0)
> +
> +
> +/* prior to publication of the elm->prev->next value, some list
> + * readers may still see the removed element when following
> + * the antecedent's next pointer.
> + */
> +#define QLIST_REMOVE_RCU(elm, field) do { \
> + if ((elm)->field.le_next != NULL) { \
> + (elm)->field.le_next->field.le_prev = \
> + (elm)->field.le_prev; \
> + } \
> + *(elm)->field.le_prev = (elm)->field.le_next; \
> +} while (/*CONSTCOND*/0)
> +
> +/* List traversal must occur within an RCU critical section. */
> +#define QLIST_FOREACH_RCU(var, head, field) \
> + for ((var) = atomic_rcu_read(&(head)->lh_first); \
> + (var); \
> + (var) = atomic_rcu_read(&(var)->field.le_next))
> +
> +/* List traversal must occur within an RCU critical section. */
> +#define QLIST_FOREACH_SAFE_RCU(var, head, field, next_var) \
> + for ((var) = (atomic_rcu_read(&(head)->lh_first)); \
> + (var) && \
> + ((next_var) = atomic_rcu_read(&(var)->field.le_next), 1); \
> + (var) = (next_var))
> +
> +#ifdef __cplusplus
> +}
> +#endif
> +#endif /* QEMU_RCU_QUEUE.H */
> diff --git a/tests/Makefile b/tests/Makefile
> index db5b3c3..ef8d589 100644
> --- a/tests/Makefile
> +++ b/tests/Makefile
> @@ -62,6 +62,8 @@ check-unit-y += tests/test-int128$(EXESUF)
> gcov-files-test-int128-y =
> check-unit-y += tests/rcutorture$(EXESUF)
> gcov-files-rcutorture-y = util/rcu.c
> +check-unit-y += tests/test-rcu-list$(EXESUF)
> +gcov-files-test-rcu-list-y = util/rcu.c
> check-unit-y += tests/test-bitops$(EXESUF)
> check-unit-$(CONFIG_HAS_GLIB_SUBPROCESS_TESTS) += tests/test-qdev-global-props$(EXESUF)
> check-unit-y += tests/check-qom-interface$(EXESUF)
> @@ -226,7 +228,7 @@ test-obj-y = tests/check-qint.o tests/check-qstring.o tests/check-qdict.o \
> tests/test-qmp-commands.o tests/test-visitor-serialization.o \
> tests/test-x86-cpuid.o tests/test-mul64.o tests/test-int128.o \
> tests/test-opts-visitor.o tests/test-qmp-event.o \
> - tests/rcutorture.o
> + tests/rcutorture.o tests/test-rcu-list.o
>
> test-qapi-obj-y = tests/test-qapi-visit.o tests/test-qapi-types.o \
> tests/test-qapi-event.o
> @@ -256,6 +258,7 @@ tests/test-xbzrle$(EXESUF): tests/test-xbzrle.o migration/xbzrle.o page_cache.o
> tests/test-cutils$(EXESUF): tests/test-cutils.o util/cutils.o
> tests/test-int128$(EXESUF): tests/test-int128.o
> tests/rcutorture$(EXESUF): tests/rcutorture.o libqemuutil.a
> +tests/test-rcu-list$(EXESUF): tests/test-rcu-list.o libqemuutil.a
>
> tests/test-qdev-global-props$(EXESUF): tests/test-qdev-global-props.o \
> hw/core/qdev.o hw/core/qdev-properties.o hw/core/hotplug.o\
> diff --git a/tests/test-rcu-list.c b/tests/test-rcu-list.c
> new file mode 100644
> index 0000000..5ce55e9
> --- /dev/null
> +++ b/tests/test-rcu-list.c
> @@ -0,0 +1,306 @@
> +/*
> + * rcuq_test.c
> + *
> + * usage: rcuq_test <readers> <duration>
> + *
> + * This program is free software; you can redistribute it and/or modify
> + * it under the terms of the GNU General Public License as published by
> + * the Free Software Foundation; either version 2 of the License, or
> + * (at your option) any later version.
> + *
> + * This program is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
> + * GNU General Public License for more details.
> + *
> + * You should have received a copy of the GNU General Public License
> + * along with this program; if not, write to the Free Software
> + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
> + *
> + * Copyright (c) 2013 Mike D. Day, IBM Corporation.
> + */
> +
> +/*
> + * Test variables.
> + */
Move below the includes?
> +
> +#include <glib.h>
> +#include <stdlib.h>
> +#include <stdio.h>
> +#include <string.h>
> +#include "qemu/atomic.h"
> +#include "qemu/rcu.h"
> +#include "qemu/compiler.h"
> +#include "qemu/osdep.h"
> +#include "qemu/thread.h"
> +#include "qemu/rcu_queue.h"
> +
> +long long n_reads = 0LL;
> +long long n_updates = 0LL;
> +long long n_reclaims = 0LL;
> +long long n_nodes_removed = 0LL;
> +long long n_nodes = 0LL;
> +int g_test_in_charge = 0;
> +
> +int nthreadsrunning;
> +
> +char argsbuf[64];
> +
> +#define GOFLAG_INIT 0
> +#define GOFLAG_RUN 1
> +#define GOFLAG_STOP 2
> +
> +static volatile int goflag = GOFLAG_INIT;
> +
> +#define RCU_READ_RUN 1000
> +#define RCU_UPDATE_RUN 10
> +#define NR_THREADS 100
> +#define RCU_Q_LEN 100
> +
> +static QemuThread threads[NR_THREADS];
> +static struct rcu_reader_data *data[NR_THREADS];
> +static int n_threads;
> +
> +static int select_random_el(int max)
> +{
> + return (rand() % max);
> +}
> +
> +
> +static void create_thread(void *(*func)(void *))
> +{
> + if (n_threads >= NR_THREADS) {
> + fprintf(stderr, "Thread limit of %d exceeded!\n", NR_THREADS);
> + exit(-1);
> + }
> + qemu_thread_create(&threads[n_threads], "test", func, &data[n_threads],
> + QEMU_THREAD_JOINABLE);
> + n_threads++;
> +}
> +
> +static void wait_all_threads(void)
> +{
> + int i;
> +
> + for (i = 0; i < n_threads; i++) {
> + qemu_thread_join(&threads[i]);
> + }
> + n_threads = 0;
> +}
> +
> +
> +struct list_element {
> + QLIST_ENTRY(list_element) entry;
> + struct rcu_head rcu;
> + long long val;
> +};
> +
> +static void reclaim_list_el(struct rcu_head *prcu)
> +{
> + struct list_element *el = container_of(prcu, struct list_element, rcu);
> + g_free(el);
> + atomic_add(&n_reclaims, 1);
> +}
> +
> +static QLIST_HEAD(q_list_head, list_element) Q_list_head;
> +
> +static void *rcu_q_reader(void *arg)
> +{
> + long long j, n_reads_local = 0;
> + struct list_element *el;
> +
> + *(struct rcu_reader_data **)arg = &rcu_reader;
> + atomic_inc(&nthreadsrunning);
> + while (goflag == GOFLAG_INIT) {
> + g_usleep(1000);
> + }
> +
> + while (goflag == GOFLAG_RUN) {
> + rcu_read_lock();
> + QLIST_FOREACH_RCU(el, &Q_list_head, entry) {
> + j = atomic_read(&el->val);
> + (void)j;
> + n_reads_local++;
> + if (goflag == GOFLAG_STOP) {
> + break;
> + }
> + }
> + rcu_read_unlock();
> +
> + g_usleep(100);
> + }
> + atomic_add(&n_reads, n_reads_local);
> + return NULL;
> +}
> +
> +
> +static void *rcu_q_updater(void *arg)
> +{
> + int j, target_el;
> + long long n_updates_local = 0;
> + long long n_removed_local = 0;
> + struct list_element *el, *prev_el;
> +
> + *(struct rcu_reader_data **)arg = &rcu_reader;
> + atomic_inc(&nthreadsrunning);
> + while (goflag == GOFLAG_INIT) {
> + g_usleep(1000);
> + }
> +
> + while (goflag == GOFLAG_RUN) {
> + target_el = select_random_el(RCU_Q_LEN);
> + j = 0;
> + /* FOREACH_RCU could work here but let's use both macros */
> + QLIST_FOREACH_SAFE_RCU(prev_el, &Q_list_head, entry, el) {
> + j++;
> + if (target_el == j) {
> + QLIST_REMOVE_RCU(prev_el, entry);
> + /* may be more than one updater in the future */
> + call_rcu1(&prev_el->rcu, reclaim_list_el);
> + n_removed_local++;
> + break;
> + }
> + }
> + if (goflag == GOFLAG_STOP) {
> + break;
> + }
> + target_el = select_random_el(RCU_Q_LEN);
> + j = 0;
> + QLIST_FOREACH_RCU(el, &Q_list_head, entry) {
> + j++;
> + if (target_el == j) {
> + prev_el = g_new(struct list_element, 1);
> + atomic_add(&n_nodes, 1);
> + prev_el->val = atomic_read(&n_nodes);
> + QLIST_INSERT_BEFORE_RCU(el, prev_el, entry);
> + break;
> + }
> + }
> +
> + n_updates_local += 2;
> + synchronize_rcu();
> + }
> + synchronize_rcu();
Is one of two synchronize_rcu's superfluous? Otherwise looks good:
Reviewed-by: Fam Zheng <famz@redhat.com>
> + atomic_add(&n_updates, n_updates_local);
> + atomic_add(&n_nodes_removed, n_removed_local);
> + return NULL;
> +}
<...>
^ permalink raw reply [flat|nested] 22+ messages in thread
* Re: [Qemu-devel] [PATCH 4/9] rcu: introduce RCU-enabled QLIST
2015-02-04 3:42 ` Fam Zheng
@ 2015-02-04 12:46 ` Paolo Bonzini
2015-02-05 2:03 ` Fam Zheng
0 siblings, 1 reply; 22+ messages in thread
From: Paolo Bonzini @ 2015-02-04 12:46 UTC (permalink / raw)
To: Fam Zheng; +Cc: Mike Day, peter.maydell, qemu-devel, fred.konrad
On 04/02/2015 04:42, Fam Zheng wrote:
> On Tue, 02/03 13:52, Paolo Bonzini wrote:
>> From: Mike Day <ncmike@ncultra.org>
>>
>> Add RCU-enabled variants on the existing bsd DQ facility. Each
>> operation has the same interface as the existing (non-RCU)
>> version. Also, each operation is implemented as macro.
>>
>> Using the RCU-enabled QLIST, existing QLIST users will be able to
>> convert to RCU without using a different list interface.
>>
>> Signed-off-by: Mike Day <ncmike@ncultra.org>
>> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
>> ---
>> hw/9pfs/virtio-9p-synth.c | 2 +-
>> include/qemu/queue.h | 11 --
>> include/qemu/rcu_queue.h | 134 ++++++++++++++++++++
>> tests/Makefile | 5 +-
>> tests/test-rcu-list.c | 306 ++++++++++++++++++++++++++++++++++++++++++++++
>> 5 files changed, 445 insertions(+), 13 deletions(-)
>> create mode 100644 include/qemu/rcu_queue.h
>> create mode 100644 tests/test-rcu-list.c
>>
>> diff --git a/hw/9pfs/virtio-9p-synth.c b/hw/9pfs/virtio-9p-synth.c
>> index e75aa87..a0ab9a8 100644
>> --- a/hw/9pfs/virtio-9p-synth.c
>> +++ b/hw/9pfs/virtio-9p-synth.c
>> @@ -18,7 +18,7 @@
>> #include "fsdev/qemu-fsdev.h"
>> #include "virtio-9p-synth.h"
>> #include "qemu/rcu.h"
>> -
>> +#include "qemu/rcu_queue.h"
>> #include <sys/stat.h>
>>
>> /* Root node for synth file system */
>> diff --git a/include/qemu/queue.h b/include/qemu/queue.h
>> index c602797..8094150 100644
>> --- a/include/qemu/queue.h
>> +++ b/include/qemu/queue.h
>> @@ -139,17 +139,6 @@ struct { \
>> (elm)->field.le_prev = &(head)->lh_first; \
>> } while (/*CONSTCOND*/0)
>>
>> -#define QLIST_INSERT_HEAD_RCU(head, elm, field) do { \
>> - (elm)->field.le_prev = &(head)->lh_first; \
>> - (elm)->field.le_next = (head)->lh_first; \
>> - smp_wmb(); /* fill elm before linking it */ \
>> - if ((head)->lh_first != NULL) { \
>> - (head)->lh_first->field.le_prev = &(elm)->field.le_next; \
>> - } \
>> - (head)->lh_first = (elm); \
>> - smp_wmb(); \
>> -} while (/* CONSTCOND*/0)
>> -
>> #define QLIST_REMOVE(elm, field) do { \
>> if ((elm)->field.le_next != NULL) \
>> (elm)->field.le_next->field.le_prev = \
>> diff --git a/include/qemu/rcu_queue.h b/include/qemu/rcu_queue.h
>> new file mode 100644
>> index 0000000..3aca7a5
>> --- /dev/null
>> +++ b/include/qemu/rcu_queue.h
>> @@ -0,0 +1,134 @@
>> +#ifndef QEMU_RCU_QUEUE_H
>> +#define QEMU_RCU_QUEUE_H
>> +
>> +/*
>> + * rcu_queue.h
>> + *
>> + * RCU-friendly versions of the queue.h primitives.
>> + *
>> + * This library is free software; you can redistribute it and/or
>> + * modify it under the terms of the GNU Lesser General Public
>> + * License as published by the Free Software Foundation; either
>> + * version 2.1 of the License, or (at your option) any later version.
>> + *
>> + * This library is distributed in the hope that it will be useful,
>> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
>> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
>> + * Lesser General Public License for more details.
>> + *
>> + * You should have received a copy of the GNU Lesser General Public
>> + * License along with this library; if not, write to the Free Software
>> + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
>> + *
>> + * Copyright (c) 2013 Mike D. Day, IBM Corporation.
>> + *
>> + * IBM's contributions to this file may be relicensed under LGPLv2 or later.
>> + */
>> +
>> +#include "qemu/queue.h"
>> +#include "qemu/atomic.h"
>> +
>> +#ifdef __cplusplus
>> +extern "C" {
>> +#endif
>> +
>> +
>> +/*
>> + * List access methods.
>> + */
>> +#define QLIST_EMPTY_RCU(head) (atomic_rcu_read(&(head)->lh_first) == NULL)
>> +#define QLIST_FIRST_RCU(head) (atomic_rcu_read(&(head)->lh_first))
>> +#define QLIST_NEXT_RCU(elm, field) (atomic_rcu_read(&(elm)->field.le_next))
>> +
>> +/*
>> + * List functions.
>> + */
>> +
>> +
>> +/*
>> + * The difference between atomic_read/set and atomic_rcu_read/set
>> + * is in the including of a read/write memory barrier to the volatile
>> + * access. atomic_rcu_* macros include the memory barrier, the
>> + * plain atomic macros do not. Therefore, it should be correct to
>> + * issue a series of reads or writes to the same element using only
>> + * the atomic_* macro, until the last read or write, which should be
>> + * atomic_rcu_* to introduce a read or write memory barrier as
>> + * appropriate.
>> + */
>> +
>> +/* Upon publication of the listelm->next value, list readers
>> + * will see the new node when following next pointers from
>> + * antecedent nodes, but may not see the new node when following
>> + * prev pointers from subsequent nodes until after the RCU grace
>> + * period expires.
>> + * see linux/include/rculist.h __list_add_rcu(new, prev, next)
>> + */
>> +#define QLIST_INSERT_AFTER_RCU(listelm, elm, field) do { \
>> + (elm)->field.le_next = (listelm)->field.le_next; \
>> + (elm)->field.le_prev = &(listelm)->field.le_next; \
>> + atomic_rcu_set(&(listelm)->field.le_next, (elm)); \
>> + if ((elm)->field.le_next != NULL) { \
>> + (elm)->field.le_next->field.le_prev = \
>> + &(elm)->field.le_next; \
>> + } \
>> +} while (/*CONSTCOND*/0)
>> +
>> +/* Upon publication of the listelm->prev->next value, list
>> + * readers will see the new element when following prev pointers
>> + * from subsequent elements, but may not see the new element
>> + * when following next pointers from antecedent elements
>> + * until after the RCU grace period expires.
>> + */
>> +#define QLIST_INSERT_BEFORE_RCU(listelm, elm, field) do { \
>> + (elm)->field.le_prev = (listelm)->field.le_prev; \
>> + (elm)->field.le_next = (listelm); \
>> + atomic_rcu_set((listelm)->field.le_prev, (elm)); \
>> + (listelm)->field.le_prev = &(elm)->field.le_next; \
>> +} while (/*CONSTCOND*/0)
>> +
>> +/* Upon publication of the head->first value, list readers
>> + * will see the new element when following the head, but may
>> + * not see the new element when following prev pointers from
>> + * subsequent elements until after the RCU grace period has
>> + * expired.
>> + */
>> +#define QLIST_INSERT_HEAD_RCU(head, elm, field) do { \
>> + (elm)->field.le_prev = &(head)->lh_first; \
>> + (elm)->field.le_next = (head)->lh_first; \
>> + atomic_rcu_set((&(head)->lh_first), (elm)); \
>> + if ((elm)->field.le_next != NULL) { \
>> + (elm)->field.le_next->field.le_prev = \
>> + &(elm)->field.le_next; \
>> + } \
>> +} while (/*CONSTCOND*/0)
>> +
>> +
>> +/* prior to publication of the elm->prev->next value, some list
>> + * readers may still see the removed element when following
>> + * the antecedent's next pointer.
>> + */
>> +#define QLIST_REMOVE_RCU(elm, field) do { \
>> + if ((elm)->field.le_next != NULL) { \
>> + (elm)->field.le_next->field.le_prev = \
>> + (elm)->field.le_prev; \
>> + } \
>> + *(elm)->field.le_prev = (elm)->field.le_next; \
>> +} while (/*CONSTCOND*/0)
>> +
>> +/* List traversal must occur within an RCU critical section. */
>> +#define QLIST_FOREACH_RCU(var, head, field) \
>> + for ((var) = atomic_rcu_read(&(head)->lh_first); \
>> + (var); \
>> + (var) = atomic_rcu_read(&(var)->field.le_next))
>> +
>> +/* List traversal must occur within an RCU critical section. */
>> +#define QLIST_FOREACH_SAFE_RCU(var, head, field, next_var) \
>> + for ((var) = (atomic_rcu_read(&(head)->lh_first)); \
>> + (var) && \
>> + ((next_var) = atomic_rcu_read(&(var)->field.le_next), 1); \
>> + (var) = (next_var))
>> +
>> +#ifdef __cplusplus
>> +}
>> +#endif
>> +#endif /* QEMU_RCU_QUEUE.H */
>> diff --git a/tests/Makefile b/tests/Makefile
>> index db5b3c3..ef8d589 100644
>> --- a/tests/Makefile
>> +++ b/tests/Makefile
>> @@ -62,6 +62,8 @@ check-unit-y += tests/test-int128$(EXESUF)
>> gcov-files-test-int128-y =
>> check-unit-y += tests/rcutorture$(EXESUF)
>> gcov-files-rcutorture-y = util/rcu.c
>> +check-unit-y += tests/test-rcu-list$(EXESUF)
>> +gcov-files-test-rcu-list-y = util/rcu.c
>> check-unit-y += tests/test-bitops$(EXESUF)
>> check-unit-$(CONFIG_HAS_GLIB_SUBPROCESS_TESTS) += tests/test-qdev-global-props$(EXESUF)
>> check-unit-y += tests/check-qom-interface$(EXESUF)
>> @@ -226,7 +228,7 @@ test-obj-y = tests/check-qint.o tests/check-qstring.o tests/check-qdict.o \
>> tests/test-qmp-commands.o tests/test-visitor-serialization.o \
>> tests/test-x86-cpuid.o tests/test-mul64.o tests/test-int128.o \
>> tests/test-opts-visitor.o tests/test-qmp-event.o \
>> - tests/rcutorture.o
>> + tests/rcutorture.o tests/test-rcu-list.o
>>
>> test-qapi-obj-y = tests/test-qapi-visit.o tests/test-qapi-types.o \
>> tests/test-qapi-event.o
>> @@ -256,6 +258,7 @@ tests/test-xbzrle$(EXESUF): tests/test-xbzrle.o migration/xbzrle.o page_cache.o
>> tests/test-cutils$(EXESUF): tests/test-cutils.o util/cutils.o
>> tests/test-int128$(EXESUF): tests/test-int128.o
>> tests/rcutorture$(EXESUF): tests/rcutorture.o libqemuutil.a
>> +tests/test-rcu-list$(EXESUF): tests/test-rcu-list.o libqemuutil.a
>>
>> tests/test-qdev-global-props$(EXESUF): tests/test-qdev-global-props.o \
>> hw/core/qdev.o hw/core/qdev-properties.o hw/core/hotplug.o\
>> diff --git a/tests/test-rcu-list.c b/tests/test-rcu-list.c
>> new file mode 100644
>> index 0000000..5ce55e9
>> --- /dev/null
>> +++ b/tests/test-rcu-list.c
>> @@ -0,0 +1,306 @@
>> +/*
>> + * rcuq_test.c
>> + *
>> + * usage: rcuq_test <readers> <duration>
>> + *
>> + * This program is free software; you can redistribute it and/or modify
>> + * it under the terms of the GNU General Public License as published by
>> + * the Free Software Foundation; either version 2 of the License, or
>> + * (at your option) any later version.
>> + *
>> + * This program is distributed in the hope that it will be useful,
>> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
>> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
>> + * GNU General Public License for more details.
>> + *
>> + * You should have received a copy of the GNU General Public License
>> + * along with this program; if not, write to the Free Software
>> + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
>> + *
>> + * Copyright (c) 2013 Mike D. Day, IBM Corporation.
>> + */
>> +
>> +/*
>> + * Test variables.
>> + */
>
> Move below the includes?
>
>> +
>> +#include <glib.h>
>> +#include <stdlib.h>
>> +#include <stdio.h>
>> +#include <string.h>
>> +#include "qemu/atomic.h"
>> +#include "qemu/rcu.h"
>> +#include "qemu/compiler.h"
>> +#include "qemu/osdep.h"
>> +#include "qemu/thread.h"
>> +#include "qemu/rcu_queue.h"
>> +
>> +long long n_reads = 0LL;
>> +long long n_updates = 0LL;
>> +long long n_reclaims = 0LL;
>> +long long n_nodes_removed = 0LL;
>> +long long n_nodes = 0LL;
>> +int g_test_in_charge = 0;
>> +
>> +int nthreadsrunning;
>> +
>> +char argsbuf[64];
>> +
>> +#define GOFLAG_INIT 0
>> +#define GOFLAG_RUN 1
>> +#define GOFLAG_STOP 2
>> +
>> +static volatile int goflag = GOFLAG_INIT;
>> +
>> +#define RCU_READ_RUN 1000
>> +#define RCU_UPDATE_RUN 10
>> +#define NR_THREADS 100
>> +#define RCU_Q_LEN 100
>> +
>> +static QemuThread threads[NR_THREADS];
>> +static struct rcu_reader_data *data[NR_THREADS];
>> +static int n_threads;
>> +
>> +static int select_random_el(int max)
>> +{
>> + return (rand() % max);
>> +}
>> +
>> +
>> +static void create_thread(void *(*func)(void *))
>> +{
>> + if (n_threads >= NR_THREADS) {
>> + fprintf(stderr, "Thread limit of %d exceeded!\n", NR_THREADS);
>> + exit(-1);
>> + }
>> + qemu_thread_create(&threads[n_threads], "test", func, &data[n_threads],
>> + QEMU_THREAD_JOINABLE);
>> + n_threads++;
>> +}
>> +
>> +static void wait_all_threads(void)
>> +{
>> + int i;
>> +
>> + for (i = 0; i < n_threads; i++) {
>> + qemu_thread_join(&threads[i]);
>> + }
>> + n_threads = 0;
>> +}
>> +
>> +
>> +struct list_element {
>> + QLIST_ENTRY(list_element) entry;
>> + struct rcu_head rcu;
>> + long long val;
>> +};
>> +
>> +static void reclaim_list_el(struct rcu_head *prcu)
>> +{
>> + struct list_element *el = container_of(prcu, struct list_element, rcu);
>> + g_free(el);
>> + atomic_add(&n_reclaims, 1);
>> +}
>> +
>> +static QLIST_HEAD(q_list_head, list_element) Q_list_head;
>> +
>> +static void *rcu_q_reader(void *arg)
>> +{
>> + long long j, n_reads_local = 0;
>> + struct list_element *el;
>> +
>> + *(struct rcu_reader_data **)arg = &rcu_reader;
>> + atomic_inc(&nthreadsrunning);
>> + while (goflag == GOFLAG_INIT) {
>> + g_usleep(1000);
>> + }
>> +
>> + while (goflag == GOFLAG_RUN) {
>> + rcu_read_lock();
>> + QLIST_FOREACH_RCU(el, &Q_list_head, entry) {
>> + j = atomic_read(&el->val);
>> + (void)j;
>> + n_reads_local++;
>> + if (goflag == GOFLAG_STOP) {
>> + break;
>> + }
>> + }
>> + rcu_read_unlock();
>> +
>> + g_usleep(100);
>> + }
>> + atomic_add(&n_reads, n_reads_local);
>> + return NULL;
>> +}
>> +
>> +
>> +static void *rcu_q_updater(void *arg)
>> +{
>> + int j, target_el;
>> + long long n_updates_local = 0;
>> + long long n_removed_local = 0;
>> + struct list_element *el, *prev_el;
>> +
>> + *(struct rcu_reader_data **)arg = &rcu_reader;
>> + atomic_inc(&nthreadsrunning);
>> + while (goflag == GOFLAG_INIT) {
>> + g_usleep(1000);
>> + }
>> +
>> + while (goflag == GOFLAG_RUN) {
>> + target_el = select_random_el(RCU_Q_LEN);
>> + j = 0;
>> + /* FOREACH_RCU could work here but let's use both macros */
>> + QLIST_FOREACH_SAFE_RCU(prev_el, &Q_list_head, entry, el) {
>> + j++;
>> + if (target_el == j) {
>> + QLIST_REMOVE_RCU(prev_el, entry);
>> + /* may be more than one updater in the future */
>> + call_rcu1(&prev_el->rcu, reclaim_list_el);
>> + n_removed_local++;
>> + break;
>> + }
>> + }
>> + if (goflag == GOFLAG_STOP) {
>> + break;
>> + }
>> + target_el = select_random_el(RCU_Q_LEN);
>> + j = 0;
>> + QLIST_FOREACH_RCU(el, &Q_list_head, entry) {
>> + j++;
>> + if (target_el == j) {
>> + prev_el = g_new(struct list_element, 1);
>> + atomic_add(&n_nodes, 1);
>> + prev_el->val = atomic_read(&n_nodes);
>> + QLIST_INSERT_BEFORE_RCU(el, prev_el, entry);
>> + break;
>> + }
>> + }
>> +
>> + n_updates_local += 2;
>> + synchronize_rcu();
>> + }
>> + synchronize_rcu();
>
> Is one of two synchronize_rcu's superfluous? Otherwise looks good:
There is a "break" in the loop; in that case the second synchronize_rcu
is necessary.
Paolo
>
> Reviewed-by: Fam Zheng <famz@redhat.com>
>
>> + atomic_add(&n_updates, n_updates_local);
>> + atomic_add(&n_nodes_removed, n_removed_local);
>> + return NULL;
>> +}
>
> <...>
>
>
^ permalink raw reply [flat|nested] 22+ messages in thread
* Re: [Qemu-devel] [PATCH 4/9] rcu: introduce RCU-enabled QLIST
2015-02-04 12:46 ` Paolo Bonzini
@ 2015-02-05 2:03 ` Fam Zheng
0 siblings, 0 replies; 22+ messages in thread
From: Fam Zheng @ 2015-02-05 2:03 UTC (permalink / raw)
To: Paolo Bonzini; +Cc: Mike Day, peter.maydell, qemu-devel, fred.konrad
On Wed, 02/04 13:46, Paolo Bonzini wrote:
>
>
> On 04/02/2015 04:42, Fam Zheng wrote:
> > On Tue, 02/03 13:52, Paolo Bonzini wrote:
> >> From: Mike Day <ncmike@ncultra.org>
> >>
> >> Add RCU-enabled variants on the existing bsd DQ facility. Each
> >> operation has the same interface as the existing (non-RCU)
> >> version. Also, each operation is implemented as macro.
> >>
> >> Using the RCU-enabled QLIST, existing QLIST users will be able to
> >> convert to RCU without using a different list interface.
> >>
> >> Signed-off-by: Mike Day <ncmike@ncultra.org>
> >> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> >> ---
> >> hw/9pfs/virtio-9p-synth.c | 2 +-
> >> include/qemu/queue.h | 11 --
> >> include/qemu/rcu_queue.h | 134 ++++++++++++++++++++
> >> tests/Makefile | 5 +-
> >> tests/test-rcu-list.c | 306 ++++++++++++++++++++++++++++++++++++++++++++++
> >> 5 files changed, 445 insertions(+), 13 deletions(-)
> >> create mode 100644 include/qemu/rcu_queue.h
> >> create mode 100644 tests/test-rcu-list.c
> >>
> >> diff --git a/hw/9pfs/virtio-9p-synth.c b/hw/9pfs/virtio-9p-synth.c
> >> index e75aa87..a0ab9a8 100644
> >> --- a/hw/9pfs/virtio-9p-synth.c
> >> +++ b/hw/9pfs/virtio-9p-synth.c
> >> @@ -18,7 +18,7 @@
> >> #include "fsdev/qemu-fsdev.h"
> >> #include "virtio-9p-synth.h"
> >> #include "qemu/rcu.h"
> >> -
> >> +#include "qemu/rcu_queue.h"
> >> #include <sys/stat.h>
> >>
> >> /* Root node for synth file system */
> >> diff --git a/include/qemu/queue.h b/include/qemu/queue.h
> >> index c602797..8094150 100644
> >> --- a/include/qemu/queue.h
> >> +++ b/include/qemu/queue.h
> >> @@ -139,17 +139,6 @@ struct { \
> >> (elm)->field.le_prev = &(head)->lh_first; \
> >> } while (/*CONSTCOND*/0)
> >>
> >> -#define QLIST_INSERT_HEAD_RCU(head, elm, field) do { \
> >> - (elm)->field.le_prev = &(head)->lh_first; \
> >> - (elm)->field.le_next = (head)->lh_first; \
> >> - smp_wmb(); /* fill elm before linking it */ \
> >> - if ((head)->lh_first != NULL) { \
> >> - (head)->lh_first->field.le_prev = &(elm)->field.le_next; \
> >> - } \
> >> - (head)->lh_first = (elm); \
> >> - smp_wmb(); \
> >> -} while (/* CONSTCOND*/0)
> >> -
> >> #define QLIST_REMOVE(elm, field) do { \
> >> if ((elm)->field.le_next != NULL) \
> >> (elm)->field.le_next->field.le_prev = \
> >> diff --git a/include/qemu/rcu_queue.h b/include/qemu/rcu_queue.h
> >> new file mode 100644
> >> index 0000000..3aca7a5
> >> --- /dev/null
> >> +++ b/include/qemu/rcu_queue.h
> >> @@ -0,0 +1,134 @@
> >> +#ifndef QEMU_RCU_QUEUE_H
> >> +#define QEMU_RCU_QUEUE_H
> >> +
> >> +/*
> >> + * rcu_queue.h
> >> + *
> >> + * RCU-friendly versions of the queue.h primitives.
> >> + *
> >> + * This library is free software; you can redistribute it and/or
> >> + * modify it under the terms of the GNU Lesser General Public
> >> + * License as published by the Free Software Foundation; either
> >> + * version 2.1 of the License, or (at your option) any later version.
> >> + *
> >> + * This library is distributed in the hope that it will be useful,
> >> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> >> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
> >> + * Lesser General Public License for more details.
> >> + *
> >> + * You should have received a copy of the GNU Lesser General Public
> >> + * License along with this library; if not, write to the Free Software
> >> + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
> >> + *
> >> + * Copyright (c) 2013 Mike D. Day, IBM Corporation.
> >> + *
> >> + * IBM's contributions to this file may be relicensed under LGPLv2 or later.
> >> + */
> >> +
> >> +#include "qemu/queue.h"
> >> +#include "qemu/atomic.h"
> >> +
> >> +#ifdef __cplusplus
> >> +extern "C" {
> >> +#endif
> >> +
> >> +
> >> +/*
> >> + * List access methods.
> >> + */
> >> +#define QLIST_EMPTY_RCU(head) (atomic_rcu_read(&(head)->lh_first) == NULL)
> >> +#define QLIST_FIRST_RCU(head) (atomic_rcu_read(&(head)->lh_first))
> >> +#define QLIST_NEXT_RCU(elm, field) (atomic_rcu_read(&(elm)->field.le_next))
> >> +
> >> +/*
> >> + * List functions.
> >> + */
> >> +
> >> +
> >> +/*
> >> + * The difference between atomic_read/set and atomic_rcu_read/set
> >> + * is in the including of a read/write memory barrier to the volatile
> >> + * access. atomic_rcu_* macros include the memory barrier, the
> >> + * plain atomic macros do not. Therefore, it should be correct to
> >> + * issue a series of reads or writes to the same element using only
> >> + * the atomic_* macro, until the last read or write, which should be
> >> + * atomic_rcu_* to introduce a read or write memory barrier as
> >> + * appropriate.
> >> + */
> >> +
> >> +/* Upon publication of the listelm->next value, list readers
> >> + * will see the new node when following next pointers from
> >> + * antecedent nodes, but may not see the new node when following
> >> + * prev pointers from subsequent nodes until after the RCU grace
> >> + * period expires.
> >> + * see linux/include/rculist.h __list_add_rcu(new, prev, next)
> >> + */
> >> +#define QLIST_INSERT_AFTER_RCU(listelm, elm, field) do { \
> >> + (elm)->field.le_next = (listelm)->field.le_next; \
> >> + (elm)->field.le_prev = &(listelm)->field.le_next; \
> >> + atomic_rcu_set(&(listelm)->field.le_next, (elm)); \
> >> + if ((elm)->field.le_next != NULL) { \
> >> + (elm)->field.le_next->field.le_prev = \
> >> + &(elm)->field.le_next; \
> >> + } \
> >> +} while (/*CONSTCOND*/0)
> >> +
> >> +/* Upon publication of the listelm->prev->next value, list
> >> + * readers will see the new element when following prev pointers
> >> + * from subsequent elements, but may not see the new element
> >> + * when following next pointers from antecedent elements
> >> + * until after the RCU grace period expires.
> >> + */
> >> +#define QLIST_INSERT_BEFORE_RCU(listelm, elm, field) do { \
> >> + (elm)->field.le_prev = (listelm)->field.le_prev; \
> >> + (elm)->field.le_next = (listelm); \
> >> + atomic_rcu_set((listelm)->field.le_prev, (elm)); \
> >> + (listelm)->field.le_prev = &(elm)->field.le_next; \
> >> +} while (/*CONSTCOND*/0)
> >> +
> >> +/* Upon publication of the head->first value, list readers
> >> + * will see the new element when following the head, but may
> >> + * not see the new element when following prev pointers from
> >> + * subsequent elements until after the RCU grace period has
> >> + * expired.
> >> + */
> >> +#define QLIST_INSERT_HEAD_RCU(head, elm, field) do { \
> >> + (elm)->field.le_prev = &(head)->lh_first; \
> >> + (elm)->field.le_next = (head)->lh_first; \
> >> + atomic_rcu_set((&(head)->lh_first), (elm)); \
> >> + if ((elm)->field.le_next != NULL) { \
> >> + (elm)->field.le_next->field.le_prev = \
> >> + &(elm)->field.le_next; \
> >> + } \
> >> +} while (/*CONSTCOND*/0)
> >> +
> >> +
> >> +/* prior to publication of the elm->prev->next value, some list
> >> + * readers may still see the removed element when following
> >> + * the antecedent's next pointer.
> >> + */
> >> +#define QLIST_REMOVE_RCU(elm, field) do { \
> >> + if ((elm)->field.le_next != NULL) { \
> >> + (elm)->field.le_next->field.le_prev = \
> >> + (elm)->field.le_prev; \
> >> + } \
> >> + *(elm)->field.le_prev = (elm)->field.le_next; \
> >> +} while (/*CONSTCOND*/0)
> >> +
> >> +/* List traversal must occur within an RCU critical section. */
> >> +#define QLIST_FOREACH_RCU(var, head, field) \
> >> + for ((var) = atomic_rcu_read(&(head)->lh_first); \
> >> + (var); \
> >> + (var) = atomic_rcu_read(&(var)->field.le_next))
> >> +
> >> +/* List traversal must occur within an RCU critical section. */
> >> +#define QLIST_FOREACH_SAFE_RCU(var, head, field, next_var) \
> >> + for ((var) = (atomic_rcu_read(&(head)->lh_first)); \
> >> + (var) && \
> >> + ((next_var) = atomic_rcu_read(&(var)->field.le_next), 1); \
> >> + (var) = (next_var))
> >> +
> >> +#ifdef __cplusplus
> >> +}
> >> +#endif
> >> +#endif /* QEMU_RCU_QUEUE.H */
> >> diff --git a/tests/Makefile b/tests/Makefile
> >> index db5b3c3..ef8d589 100644
> >> --- a/tests/Makefile
> >> +++ b/tests/Makefile
> >> @@ -62,6 +62,8 @@ check-unit-y += tests/test-int128$(EXESUF)
> >> gcov-files-test-int128-y =
> >> check-unit-y += tests/rcutorture$(EXESUF)
> >> gcov-files-rcutorture-y = util/rcu.c
> >> +check-unit-y += tests/test-rcu-list$(EXESUF)
> >> +gcov-files-test-rcu-list-y = util/rcu.c
> >> check-unit-y += tests/test-bitops$(EXESUF)
> >> check-unit-$(CONFIG_HAS_GLIB_SUBPROCESS_TESTS) += tests/test-qdev-global-props$(EXESUF)
> >> check-unit-y += tests/check-qom-interface$(EXESUF)
> >> @@ -226,7 +228,7 @@ test-obj-y = tests/check-qint.o tests/check-qstring.o tests/check-qdict.o \
> >> tests/test-qmp-commands.o tests/test-visitor-serialization.o \
> >> tests/test-x86-cpuid.o tests/test-mul64.o tests/test-int128.o \
> >> tests/test-opts-visitor.o tests/test-qmp-event.o \
> >> - tests/rcutorture.o
> >> + tests/rcutorture.o tests/test-rcu-list.o
> >>
> >> test-qapi-obj-y = tests/test-qapi-visit.o tests/test-qapi-types.o \
> >> tests/test-qapi-event.o
> >> @@ -256,6 +258,7 @@ tests/test-xbzrle$(EXESUF): tests/test-xbzrle.o migration/xbzrle.o page_cache.o
> >> tests/test-cutils$(EXESUF): tests/test-cutils.o util/cutils.o
> >> tests/test-int128$(EXESUF): tests/test-int128.o
> >> tests/rcutorture$(EXESUF): tests/rcutorture.o libqemuutil.a
> >> +tests/test-rcu-list$(EXESUF): tests/test-rcu-list.o libqemuutil.a
> >>
> >> tests/test-qdev-global-props$(EXESUF): tests/test-qdev-global-props.o \
> >> hw/core/qdev.o hw/core/qdev-properties.o hw/core/hotplug.o\
> >> diff --git a/tests/test-rcu-list.c b/tests/test-rcu-list.c
> >> new file mode 100644
> >> index 0000000..5ce55e9
> >> --- /dev/null
> >> +++ b/tests/test-rcu-list.c
> >> @@ -0,0 +1,306 @@
> >> +/*
> >> + * rcuq_test.c
> >> + *
> >> + * usage: rcuq_test <readers> <duration>
> >> + *
> >> + * This program is free software; you can redistribute it and/or modify
> >> + * it under the terms of the GNU General Public License as published by
> >> + * the Free Software Foundation; either version 2 of the License, or
> >> + * (at your option) any later version.
> >> + *
> >> + * This program is distributed in the hope that it will be useful,
> >> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> >> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
> >> + * GNU General Public License for more details.
> >> + *
> >> + * You should have received a copy of the GNU General Public License
> >> + * along with this program; if not, write to the Free Software
> >> + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
> >> + *
> >> + * Copyright (c) 2013 Mike D. Day, IBM Corporation.
> >> + */
> >> +
> >> +/*
> >> + * Test variables.
> >> + */
> >
> > Move below the includes?
> >
> >> +
> >> +#include <glib.h>
> >> +#include <stdlib.h>
> >> +#include <stdio.h>
> >> +#include <string.h>
> >> +#include "qemu/atomic.h"
> >> +#include "qemu/rcu.h"
> >> +#include "qemu/compiler.h"
> >> +#include "qemu/osdep.h"
> >> +#include "qemu/thread.h"
> >> +#include "qemu/rcu_queue.h"
> >> +
> >> +long long n_reads = 0LL;
> >> +long long n_updates = 0LL;
> >> +long long n_reclaims = 0LL;
> >> +long long n_nodes_removed = 0LL;
> >> +long long n_nodes = 0LL;
> >> +int g_test_in_charge = 0;
> >> +
> >> +int nthreadsrunning;
> >> +
> >> +char argsbuf[64];
> >> +
> >> +#define GOFLAG_INIT 0
> >> +#define GOFLAG_RUN 1
> >> +#define GOFLAG_STOP 2
> >> +
> >> +static volatile int goflag = GOFLAG_INIT;
> >> +
> >> +#define RCU_READ_RUN 1000
> >> +#define RCU_UPDATE_RUN 10
> >> +#define NR_THREADS 100
> >> +#define RCU_Q_LEN 100
> >> +
> >> +static QemuThread threads[NR_THREADS];
> >> +static struct rcu_reader_data *data[NR_THREADS];
> >> +static int n_threads;
> >> +
> >> +static int select_random_el(int max)
> >> +{
> >> + return (rand() % max);
> >> +}
> >> +
> >> +
> >> +static void create_thread(void *(*func)(void *))
> >> +{
> >> + if (n_threads >= NR_THREADS) {
> >> + fprintf(stderr, "Thread limit of %d exceeded!\n", NR_THREADS);
> >> + exit(-1);
> >> + }
> >> + qemu_thread_create(&threads[n_threads], "test", func, &data[n_threads],
> >> + QEMU_THREAD_JOINABLE);
> >> + n_threads++;
> >> +}
> >> +
> >> +static void wait_all_threads(void)
> >> +{
> >> + int i;
> >> +
> >> + for (i = 0; i < n_threads; i++) {
> >> + qemu_thread_join(&threads[i]);
> >> + }
> >> + n_threads = 0;
> >> +}
> >> +
> >> +
> >> +struct list_element {
> >> + QLIST_ENTRY(list_element) entry;
> >> + struct rcu_head rcu;
> >> + long long val;
> >> +};
> >> +
> >> +static void reclaim_list_el(struct rcu_head *prcu)
> >> +{
> >> + struct list_element *el = container_of(prcu, struct list_element, rcu);
> >> + g_free(el);
> >> + atomic_add(&n_reclaims, 1);
> >> +}
> >> +
> >> +static QLIST_HEAD(q_list_head, list_element) Q_list_head;
> >> +
> >> +static void *rcu_q_reader(void *arg)
> >> +{
> >> + long long j, n_reads_local = 0;
> >> + struct list_element *el;
> >> +
> >> + *(struct rcu_reader_data **)arg = &rcu_reader;
> >> + atomic_inc(&nthreadsrunning);
> >> + while (goflag == GOFLAG_INIT) {
> >> + g_usleep(1000);
> >> + }
> >> +
> >> + while (goflag == GOFLAG_RUN) {
> >> + rcu_read_lock();
> >> + QLIST_FOREACH_RCU(el, &Q_list_head, entry) {
> >> + j = atomic_read(&el->val);
> >> + (void)j;
> >> + n_reads_local++;
> >> + if (goflag == GOFLAG_STOP) {
> >> + break;
> >> + }
> >> + }
> >> + rcu_read_unlock();
> >> +
> >> + g_usleep(100);
> >> + }
> >> + atomic_add(&n_reads, n_reads_local);
> >> + return NULL;
> >> +}
> >> +
> >> +
> >> +static void *rcu_q_updater(void *arg)
> >> +{
> >> + int j, target_el;
> >> + long long n_updates_local = 0;
> >> + long long n_removed_local = 0;
> >> + struct list_element *el, *prev_el;
> >> +
> >> + *(struct rcu_reader_data **)arg = &rcu_reader;
> >> + atomic_inc(&nthreadsrunning);
> >> + while (goflag == GOFLAG_INIT) {
> >> + g_usleep(1000);
> >> + }
> >> +
> >> + while (goflag == GOFLAG_RUN) {
> >> + target_el = select_random_el(RCU_Q_LEN);
> >> + j = 0;
> >> + /* FOREACH_RCU could work here but let's use both macros */
> >> + QLIST_FOREACH_SAFE_RCU(prev_el, &Q_list_head, entry, el) {
> >> + j++;
> >> + if (target_el == j) {
> >> + QLIST_REMOVE_RCU(prev_el, entry);
> >> + /* may be more than one updater in the future */
> >> + call_rcu1(&prev_el->rcu, reclaim_list_el);
> >> + n_removed_local++;
> >> + break;
> >> + }
> >> + }
> >> + if (goflag == GOFLAG_STOP) {
> >> + break;
> >> + }
> >> + target_el = select_random_el(RCU_Q_LEN);
> >> + j = 0;
> >> + QLIST_FOREACH_RCU(el, &Q_list_head, entry) {
> >> + j++;
> >> + if (target_el == j) {
> >> + prev_el = g_new(struct list_element, 1);
> >> + atomic_add(&n_nodes, 1);
> >> + prev_el->val = atomic_read(&n_nodes);
> >> + QLIST_INSERT_BEFORE_RCU(el, prev_el, entry);
> >> + break;
> >> + }
> >> + }
> >> +
> >> + n_updates_local += 2;
> >> + synchronize_rcu();
> >> + }
> >> + synchronize_rcu();
> >
> > Is one of two synchronize_rcu's superfluous? Otherwise looks good:
>
> There is a "break"s in the loop, in that case the second synchronize_rcu
> is necessary.
Oh yes. So my rev-by holds.
Fam
>
> Paolo
>
> >
> > Reviewed-by: Fam Zheng <famz@redhat.com>
> >
> >> + atomic_add(&n_updates, n_updates_local);
> >> + atomic_add(&n_nodes_removed, n_removed_local);
> >> + return NULL;
> >> +}
> >
> > <...>
> >
> >
>
^ permalink raw reply [flat|nested] 22+ messages in thread
* [Qemu-devel] [PATCH 5/9] exec: protect mru_block with RCU
2015-02-03 12:52 [Qemu-devel] [PATCH v2 0/9] RCUification of the memory API, part 2 Paolo Bonzini
` (3 preceding siblings ...)
2015-02-03 12:52 ` [Qemu-devel] [PATCH 4/9] rcu: introduce RCU-enabled QLIST Paolo Bonzini
@ 2015-02-03 12:52 ` Paolo Bonzini
2015-02-05 6:23 ` Fam Zheng
2015-02-03 12:52 ` [Qemu-devel] [PATCH 6/9] cosmetic changes preparing for the following patches Paolo Bonzini
` (3 subsequent siblings)
8 siblings, 1 reply; 22+ messages in thread
From: Paolo Bonzini @ 2015-02-03 12:52 UTC (permalink / raw)
To: qemu-devel; +Cc: peter.maydell, famz, fred.konrad
Hence, freeing a RAMBlock has to be switched to call_rcu.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
exec.c | 60 +++++++++++++++++++++++++++++++++++---------------
include/exec/cpu-all.h | 2 ++
2 files changed, 44 insertions(+), 18 deletions(-)
diff --git a/exec.c b/exec.c
index a423def..05c5b44 100644
--- a/exec.c
+++ b/exec.c
@@ -811,7 +811,7 @@ static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
RAMBlock *block;
/* The list is protected by the iothread lock here. */
- block = ram_list.mru_block;
+ block = atomic_rcu_read(&ram_list.mru_block);
if (block && addr - block->offset < block->max_length) {
goto found;
}
@@ -825,6 +825,22 @@ static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
abort();
found:
+ /* It is safe to write mru_block outside the iothread lock. This
+ * is what happens:
+ *
+ * mru_block = xxx
+ * rcu_read_unlock()
+ * xxx removed from list
+ * rcu_read_lock()
+ * read mru_block
+ * mru_block = NULL;
+ * call_rcu(reclaim_ramblock, xxx);
+ * rcu_read_unlock()
+ *
+ * atomic_rcu_set is not needed here. The block was already published
+ * when it was placed into the list. Here we're just making an extra
+ * copy of the pointer.
+ */
ram_list.mru_block = block;
return block;
}
@@ -1381,14 +1397,16 @@ static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
}
ram_list.mru_block = NULL;
+ atomic_rcu_set(&ram_list.version, ram_list.version + 1);
- ram_list.version++;
qemu_mutex_unlock_ramlist();
new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
if (new_ram_size > old_ram_size) {
int i;
+
+ /* ram_list.dirty_memory[] is protected by the iothread lock. */
for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
ram_list.dirty_memory[i] =
bitmap_zero_extend(ram_list.dirty_memory[i],
@@ -1525,14 +1543,32 @@ void qemu_ram_free_from_ptr(ram_addr_t addr)
if (addr == block->offset) {
QTAILQ_REMOVE(&ram_list.blocks, block, next);
ram_list.mru_block = NULL;
- ram_list.version++;
- g_free(block);
+ atomic_rcu_set(&ram_list.version, ram_list.version + 1);
+ call_rcu(block, (void (*)(struct RAMBlock *))g_free, rcu);
break;
}
}
qemu_mutex_unlock_ramlist();
}
+static void reclaim_ramblock(RAMBlock *block)
+{
+ if (block->flags & RAM_PREALLOC) {
+ ;
+ } else if (xen_enabled()) {
+ xen_invalidate_map_cache_entry(block->host);
+#ifndef _WIN32
+ } else if (block->fd >= 0) {
+ munmap(block->host, block->max_length);
+ close(block->fd);
+#endif
+ } else {
+ qemu_anon_ram_free(block->host, block->max_length);
+ }
+ g_free(block);
+}
+
+/* Called with the iothread lock held */
void qemu_ram_free(ram_addr_t addr)
{
RAMBlock *block;
@@ -1543,20 +1579,8 @@ void qemu_ram_free(ram_addr_t addr)
if (addr == block->offset) {
QTAILQ_REMOVE(&ram_list.blocks, block, next);
ram_list.mru_block = NULL;
- ram_list.version++;
- if (block->flags & RAM_PREALLOC) {
- ;
- } else if (xen_enabled()) {
- xen_invalidate_map_cache_entry(block->host);
-#ifndef _WIN32
- } else if (block->fd >= 0) {
- munmap(block->host, block->max_length);
- close(block->fd);
-#endif
- } else {
- qemu_anon_ram_free(block->host, block->max_length);
- }
- g_free(block);
+ atomic_rcu_set(&ram_list.version, ram_list.version + 1);
+ call_rcu(block, reclaim_ramblock, rcu);
break;
}
}
diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index 2c48286..b8781d1 100644
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -24,6 +24,7 @@
#include "exec/memory.h"
#include "qemu/thread.h"
#include "qom/cpu.h"
+#include "qemu/rcu.h"
/* some important defines:
*
@@ -268,6 +269,7 @@ CPUArchState *cpu_copy(CPUArchState *env);
typedef struct RAMBlock RAMBlock;
struct RAMBlock {
+ struct rcu_head rcu;
struct MemoryRegion *mr;
uint8_t *host;
ram_addr_t offset;
--
1.8.3.1
^ permalink raw reply related [flat|nested] 22+ messages in thread
* Re: [Qemu-devel] [PATCH 5/9] exec: protect mru_block with RCU
2015-02-03 12:52 ` [Qemu-devel] [PATCH 5/9] exec: protect mru_block with RCU Paolo Bonzini
@ 2015-02-05 6:23 ` Fam Zheng
2015-02-05 8:37 ` Paolo Bonzini
0 siblings, 1 reply; 22+ messages in thread
From: Fam Zheng @ 2015-02-05 6:23 UTC (permalink / raw)
To: Paolo Bonzini; +Cc: peter.maydell, qemu-devel, fred.konrad
On Tue, 02/03 13:52, Paolo Bonzini wrote:
> Hence, freeing a RAMBlock has to be switched to call_rcu.
>
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> ---
> exec.c | 60 +++++++++++++++++++++++++++++++++++---------------
> include/exec/cpu-all.h | 2 ++
> 2 files changed, 44 insertions(+), 18 deletions(-)
>
> diff --git a/exec.c b/exec.c
> index a423def..05c5b44 100644
> --- a/exec.c
> +++ b/exec.c
> @@ -811,7 +811,7 @@ static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
> RAMBlock *block;
>
> /* The list is protected by the iothread lock here. */
> - block = ram_list.mru_block;
> + block = atomic_rcu_read(&ram_list.mru_block);
> if (block && addr - block->offset < block->max_length) {
> goto found;
> }
> @@ -825,6 +825,22 @@ static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
> abort();
>
> found:
> + /* It is safe to write mru_block outside the iothread lock. This
> + * is what happens:
> + *
> + * mru_block = xxx
> + * rcu_read_unlock()
> + * xxx removed from list
> + * rcu_read_lock()
> + * read mru_block
> + * mru_block = NULL;
> + * call_rcu(reclaim_ramblock, xxx);
> + * rcu_read_unlock()
> + *
> + * atomic_rcu_set is not needed here. The block was already published
> + * when it was placed into the list. Here we're just making an extra
> + * copy of the pointer.
> + */
> ram_list.mru_block = block;
> return block;
> }
> @@ -1381,14 +1397,16 @@ static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
> QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
> }
> ram_list.mru_block = NULL;
> + atomic_rcu_set(&ram_list.version, ram_list.version + 1);
>
> - ram_list.version++;
Why is this not atomic_inc (or why is atomic_rcu_set necessary here)?
Fam
> qemu_mutex_unlock_ramlist();
>
> new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
>
> if (new_ram_size > old_ram_size) {
> int i;
> +
> + /* ram_list.dirty_memory[] is protected by the iothread lock. */
> for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
> ram_list.dirty_memory[i] =
> bitmap_zero_extend(ram_list.dirty_memory[i],
> @@ -1525,14 +1543,32 @@ void qemu_ram_free_from_ptr(ram_addr_t addr)
> if (addr == block->offset) {
> QTAILQ_REMOVE(&ram_list.blocks, block, next);
> ram_list.mru_block = NULL;
> - ram_list.version++;
> - g_free(block);
> + atomic_rcu_set(&ram_list.version, ram_list.version + 1);
> + call_rcu(block, (void (*)(struct RAMBlock *))g_free, rcu);
> break;
> }
> }
> qemu_mutex_unlock_ramlist();
> }
>
> +static void reclaim_ramblock(RAMBlock *block)
> +{
> + if (block->flags & RAM_PREALLOC) {
> + ;
> + } else if (xen_enabled()) {
> + xen_invalidate_map_cache_entry(block->host);
> +#ifndef _WIN32
> + } else if (block->fd >= 0) {
> + munmap(block->host, block->max_length);
> + close(block->fd);
> +#endif
> + } else {
> + qemu_anon_ram_free(block->host, block->max_length);
> + }
> + g_free(block);
> +}
> +
> +/* Called with the iothread lock held */
> void qemu_ram_free(ram_addr_t addr)
> {
> RAMBlock *block;
> @@ -1543,20 +1579,8 @@ void qemu_ram_free(ram_addr_t addr)
> if (addr == block->offset) {
> QTAILQ_REMOVE(&ram_list.blocks, block, next);
> ram_list.mru_block = NULL;
> - ram_list.version++;
> - if (block->flags & RAM_PREALLOC) {
> - ;
> - } else if (xen_enabled()) {
> - xen_invalidate_map_cache_entry(block->host);
> -#ifndef _WIN32
> - } else if (block->fd >= 0) {
> - munmap(block->host, block->max_length);
> - close(block->fd);
> -#endif
> - } else {
> - qemu_anon_ram_free(block->host, block->max_length);
> - }
> - g_free(block);
> + atomic_rcu_set(&ram_list.version, ram_list.version + 1);
> + call_rcu(block, reclaim_ramblock, rcu);
> break;
> }
> }
> diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
> index 2c48286..b8781d1 100644
> --- a/include/exec/cpu-all.h
> +++ b/include/exec/cpu-all.h
> @@ -24,6 +24,7 @@
> #include "exec/memory.h"
> #include "qemu/thread.h"
> #include "qom/cpu.h"
> +#include "qemu/rcu.h"
>
> /* some important defines:
> *
> @@ -268,6 +269,7 @@ CPUArchState *cpu_copy(CPUArchState *env);
> typedef struct RAMBlock RAMBlock;
>
> struct RAMBlock {
> + struct rcu_head rcu;
> struct MemoryRegion *mr;
> uint8_t *host;
> ram_addr_t offset;
> --
> 1.8.3.1
>
>
^ permalink raw reply [flat|nested] 22+ messages in thread
* Re: [Qemu-devel] [PATCH 5/9] exec: protect mru_block with RCU
2015-02-05 6:23 ` Fam Zheng
@ 2015-02-05 8:37 ` Paolo Bonzini
2015-02-05 9:30 ` Fam Zheng
0 siblings, 1 reply; 22+ messages in thread
From: Paolo Bonzini @ 2015-02-05 8:37 UTC (permalink / raw)
To: Fam Zheng; +Cc: peter.maydell, qemu-devel, fred.konrad
On 05/02/2015 07:23, Fam Zheng wrote:
>> > @@ -1381,14 +1397,16 @@ static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
>> > QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
>> > }
>> > ram_list.mru_block = NULL;
>> > + atomic_rcu_set(&ram_list.version, ram_list.version + 1);
>> >
>> > - ram_list.version++;
> Why is this not atomic_inc
Because writes are protected by the ramlist lock. atomic_inc is more
expensive.
> (or why is atomic_rcu_set necessary here)?
I probably should move it to patch 9; it is needed to update the list
before ram_list.version.
If you prefer I can change it to
smp_wmb();
atomic_set(&ram_list.version, ram_list.version + 1);
?
Paolo
^ permalink raw reply [flat|nested] 22+ messages in thread
* Re: [Qemu-devel] [PATCH 5/9] exec: protect mru_block with RCU
2015-02-05 8:37 ` Paolo Bonzini
@ 2015-02-05 9:30 ` Fam Zheng
0 siblings, 0 replies; 22+ messages in thread
From: Fam Zheng @ 2015-02-05 9:30 UTC (permalink / raw)
To: Paolo Bonzini; +Cc: peter.maydell, qemu-devel, fred.konrad
On Thu, 02/05 09:37, Paolo Bonzini wrote:
>
>
> On 05/02/2015 07:23, Fam Zheng wrote:
> >> > @@ -1381,14 +1397,16 @@ static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
> >> > QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
> >> > }
> >> > ram_list.mru_block = NULL;
> >> > + atomic_rcu_set(&ram_list.version, ram_list.version + 1);
> >> >
> >> > - ram_list.version++;
> > Why is this not atomic_inc
>
> Because writes are protected by the ramlist lock. atomic_inc is more
> expensive.
OK!
>
> > (or why is atomic_rcu_set necessary here)?
>
> I probably should move it to patch 9; it is needed to update the list
> before ram_list.version.
>
> If you prefer I can change it to
>
> smp_wmb();
> atomic_set(&ram_list.version, ram_list.version + 1);
>
> ?
>
Yes, this looks more obvious :)
Fam
^ permalink raw reply [flat|nested] 22+ messages in thread
* [Qemu-devel] [PATCH 6/9] cosmetic changes preparing for the following patches
2015-02-03 12:52 [Qemu-devel] [PATCH v2 0/9] RCUification of the memory API, part 2 Paolo Bonzini
` (4 preceding siblings ...)
2015-02-03 12:52 ` [Qemu-devel] [PATCH 5/9] exec: protect mru_block with RCU Paolo Bonzini
@ 2015-02-03 12:52 ` Paolo Bonzini
2015-02-04 3:10 ` Fam Zheng
2015-02-03 12:52 ` [Qemu-devel] [PATCH 7/9] rcu: prod call_rcu thread when calling synchronize_rcu Paolo Bonzini
` (2 subsequent siblings)
8 siblings, 1 reply; 22+ messages in thread
From: Paolo Bonzini @ 2015-02-03 12:52 UTC (permalink / raw)
To: qemu-devel; +Cc: Mike Day, peter.maydell, famz, fred.konrad
From: Mike Day <ncmike@ncultra.org>
Signed-off-by: Mike Day <ncmike@ncultra.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
arch_init.c | 5 +--
exec.c | 84 +++++++++++++++++++++++++++++++++-----------------
include/exec/cpu-all.h | 1 +
3 files changed, 57 insertions(+), 33 deletions(-)
diff --git a/arch_init.c b/arch_init.c
index 89c8fa4..b13f74b 100644
--- a/arch_init.c
+++ b/arch_init.c
@@ -688,9 +688,9 @@ static int ram_find_and_save_block(QEMUFile *f, bool last_stage)
}
}
}
+
last_seen_block = block;
last_offset = offset;
-
return bytes_sent;
}
@@ -1117,7 +1117,6 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
ret = -EINVAL;
break;
}
-
ch = qemu_get_byte(f);
ram_handle_compressed(host, ch, TARGET_PAGE_SIZE);
break;
@@ -1128,7 +1127,6 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
ret = -EINVAL;
break;
}
-
qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
break;
case RAM_SAVE_FLAG_XBZRLE:
@@ -1138,7 +1136,6 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
ret = -EINVAL;
break;
}
-
if (load_xbzrle(f, addr, host) < 0) {
error_report("Failed to decompress XBZRLE page at "
RAM_ADDR_FMT, addr);
diff --git a/exec.c b/exec.c
index 05c5b44..8239370 100644
--- a/exec.c
+++ b/exec.c
@@ -1265,11 +1265,12 @@ static RAMBlock *find_ram_block(ram_addr_t addr)
return NULL;
}
+/* Called with iothread lock held. */
void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
- RAMBlock *new_block = find_ram_block(addr);
- RAMBlock *block;
+ RAMBlock *new_block, *block;
+ new_block = find_ram_block(addr);
assert(new_block);
assert(!new_block->idstr[0]);
@@ -1282,7 +1283,6 @@ void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
}
pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
- /* This assumes the iothread lock is taken here too. */
qemu_mutex_lock_ramlist();
QTAILQ_FOREACH(block, &ram_list.blocks, next) {
if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
@@ -1294,10 +1294,17 @@ void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
qemu_mutex_unlock_ramlist();
}
+/* Called with iothread lock held. */
void qemu_ram_unset_idstr(ram_addr_t addr)
{
- RAMBlock *block = find_ram_block(addr);
+ RAMBlock *block;
+ /* FIXME: arch_init.c assumes that this is not called throughout
+ * migration. Ignore the problem since hot-unplug during migration
+ * does not work anyway.
+ */
+
+ block = find_ram_block(addr);
if (block) {
memset(block->idstr, 0, sizeof(block->idstr));
}
@@ -1585,7 +1592,6 @@ void qemu_ram_free(ram_addr_t addr)
}
}
qemu_mutex_unlock_ramlist();
-
}
#ifndef _WIN32
@@ -1633,7 +1639,6 @@ void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
memory_try_enable_merging(vaddr, length);
qemu_ram_setup_dump(vaddr, length);
}
- return;
}
}
}
@@ -1641,49 +1646,60 @@ void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
int qemu_get_ram_fd(ram_addr_t addr)
{
- RAMBlock *block = qemu_get_ram_block(addr);
+ RAMBlock *block;
+ int fd;
- return block->fd;
+ block = qemu_get_ram_block(addr);
+ fd = block->fd;
+ return fd;
}
void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
{
- RAMBlock *block = qemu_get_ram_block(addr);
+ RAMBlock *block;
+ void *ptr;
- return ramblock_ptr(block, 0);
+ block = qemu_get_ram_block(addr);
+ ptr = ramblock_ptr(block, 0);
+ return ptr;
}
/* Return a host pointer to ram allocated with qemu_ram_alloc.
- With the exception of the softmmu code in this file, this should
- only be used for local memory (e.g. video ram) that the device owns,
- and knows it isn't going to access beyond the end of the block.
-
- It should not be used for general purpose DMA.
- Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
+ * This should not be used for general purpose DMA. Use address_space_map
+ * or address_space_rw instead. For local memory (e.g. video ram) that the
+ * device owns, use memory_region_get_ram_ptr.
*/
void *qemu_get_ram_ptr(ram_addr_t addr)
{
- RAMBlock *block = qemu_get_ram_block(addr);
+ RAMBlock *block;
+ void *ptr;
- if (xen_enabled()) {
+ block = qemu_get_ram_block(addr);
+
+ if (xen_enabled() && block->host == NULL) {
/* We need to check if the requested address is in the RAM
* because we don't want to map the entire memory in QEMU.
* In that case just map until the end of the page.
*/
if (block->offset == 0) {
- return xen_map_cache(addr, 0, 0);
- } else if (block->host == NULL) {
- block->host =
- xen_map_cache(block->offset, block->max_length, 1);
+ ptr = xen_map_cache(addr, 0, 0);
+ goto done;
}
+
+ block->host = xen_map_cache(block->offset, block->max_length, 1);
}
- return ramblock_ptr(block, addr - block->offset);
+ ptr = ramblock_ptr(block, addr - block->offset);
+
+done:
+ return ptr;
}
/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
- * but takes a size argument */
+ * but takes a size argument.
+ */
static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
{
+ void *ptr;
if (*size == 0) {
return NULL;
}
@@ -1691,12 +1707,12 @@ static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
return xen_map_cache(addr, *size, 1);
} else {
RAMBlock *block;
-
QTAILQ_FOREACH(block, &ram_list.blocks, next) {
if (addr - block->offset < block->max_length) {
if (addr - block->offset + *size > block->max_length)
*size = block->max_length - addr + block->offset;
- return ramblock_ptr(block, addr - block->offset);
+ ptr = ramblock_ptr(block, addr - block->offset);
+ return ptr;
}
}
@@ -1706,15 +1722,24 @@ static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
}
/* Some of the softmmu routines need to translate from a host pointer
- (typically a TLB entry) back to a ram offset. */
+ * (typically a TLB entry) back to a ram offset.
+ *
+ * By the time this function returns, the returned pointer is not protected
+ * by RCU anymore. If the caller is not within an RCU critical section and
+ * does not hold the iothread lock, it must have other means of protecting the
+ * pointer, such as a reference to the region that includes the incoming
+ * ram_addr_t.
+ */
MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
RAMBlock *block;
uint8_t *host = ptr;
+ MemoryRegion *mr;
if (xen_enabled()) {
*ram_addr = xen_ram_addr_from_mapcache(ptr);
- return qemu_get_ram_block(*ram_addr)->mr;
+ mr = qemu_get_ram_block(*ram_addr)->mr;
+ return mr;
}
block = ram_list.mru_block;
@@ -1736,7 +1761,8 @@ MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
found:
*ram_addr = block->offset + (host - block->host);
- return block->mr;
+ mr = block->mr;
+ return mr;
}
static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index b8781d1..f7a3625 100644
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -277,6 +277,7 @@ struct RAMBlock {
ram_addr_t max_length;
void (*resized)(const char*, uint64_t length, void *host);
uint32_t flags;
+ /* Protected by iothread lock. */
char idstr[256];
/* Reads can take either the iothread or the ramlist lock.
* Writes must take both locks.
--
1.8.3.1
^ permalink raw reply related [flat|nested] 22+ messages in thread
* Re: [Qemu-devel] [PATCH 6/9] cosmetic changes preparing for the following patches
2015-02-03 12:52 ` [Qemu-devel] [PATCH 6/9] cosmetic changes preparing for the following patches Paolo Bonzini
@ 2015-02-04 3:10 ` Fam Zheng
2015-02-04 12:51 ` Paolo Bonzini
0 siblings, 1 reply; 22+ messages in thread
From: Fam Zheng @ 2015-02-04 3:10 UTC (permalink / raw)
To: Paolo Bonzini; +Cc: Mike Day, peter.maydell, qemu-devel, fred.konrad
On Tue, 02/03 13:52, Paolo Bonzini wrote:
> From: Mike Day <ncmike@ncultra.org>
>
> Signed-off-by: Mike Day <ncmike@ncultra.org>
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> ---
> arch_init.c | 5 +--
> exec.c | 84 +++++++++++++++++++++++++++++++++-----------------
> include/exec/cpu-all.h | 1 +
> 3 files changed, 57 insertions(+), 33 deletions(-)
>
> diff --git a/arch_init.c b/arch_init.c
> index 89c8fa4..b13f74b 100644
> --- a/arch_init.c
> +++ b/arch_init.c
> @@ -688,9 +688,9 @@ static int ram_find_and_save_block(QEMUFile *f, bool last_stage)
> }
> }
> }
> +
> last_seen_block = block;
> last_offset = offset;
> -
> return bytes_sent;
> }
>
> @@ -1117,7 +1117,6 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
> ret = -EINVAL;
> break;
> }
> -
> ch = qemu_get_byte(f);
> ram_handle_compressed(host, ch, TARGET_PAGE_SIZE);
> break;
> @@ -1128,7 +1127,6 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
> ret = -EINVAL;
> break;
> }
> -
> qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
> break;
> case RAM_SAVE_FLAG_XBZRLE:
> @@ -1138,7 +1136,6 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
> ret = -EINVAL;
> break;
> }
> -
> if (load_xbzrle(f, addr, host) < 0) {
> error_report("Failed to decompress XBZRLE page at "
> RAM_ADDR_FMT, addr);
> diff --git a/exec.c b/exec.c
> index 05c5b44..8239370 100644
> --- a/exec.c
> +++ b/exec.c
> @@ -1265,11 +1265,12 @@ static RAMBlock *find_ram_block(ram_addr_t addr)
> return NULL;
> }
>
> +/* Called with iothread lock held. */
> void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
> {
> - RAMBlock *new_block = find_ram_block(addr);
> - RAMBlock *block;
> + RAMBlock *new_block, *block;
>
> + new_block = find_ram_block(addr);
> assert(new_block);
> assert(!new_block->idstr[0]);
>
> @@ -1282,7 +1283,6 @@ void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
> }
> pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
>
> - /* This assumes the iothread lock is taken here too. */
> qemu_mutex_lock_ramlist();
> QTAILQ_FOREACH(block, &ram_list.blocks, next) {
> if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
> @@ -1294,10 +1294,17 @@ void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
> qemu_mutex_unlock_ramlist();
> }
>
> +/* Called with iothread lock held. */
> void qemu_ram_unset_idstr(ram_addr_t addr)
> {
> - RAMBlock *block = find_ram_block(addr);
> + RAMBlock *block;
>
> + /* FIXME: arch_init.c assumes that this is not called throughout
> + * migration. Ignore the problem since hot-unplug during migration
> + * does not work anyway.
> + */
> +
> + block = find_ram_block(addr);
> if (block) {
> memset(block->idstr, 0, sizeof(block->idstr));
> }
> @@ -1585,7 +1592,6 @@ void qemu_ram_free(ram_addr_t addr)
> }
> }
> qemu_mutex_unlock_ramlist();
> -
> }
>
> #ifndef _WIN32
> @@ -1633,7 +1639,6 @@ void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
> memory_try_enable_merging(vaddr, length);
> qemu_ram_setup_dump(vaddr, length);
> }
> - return;
Other changes are equivalent, but not quite for this one. But I think it is
still correct, so:
Reviewed-by: Fam Zheng <famz@redhat.com>
> }
> }
> }
> @@ -1641,49 +1646,60 @@ void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
>
> int qemu_get_ram_fd(ram_addr_t addr)
> {
> - RAMBlock *block = qemu_get_ram_block(addr);
> + RAMBlock *block;
> + int fd;
>
> - return block->fd;
> + block = qemu_get_ram_block(addr);
> + fd = block->fd;
> + return fd;
> }
>
> void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
> {
> - RAMBlock *block = qemu_get_ram_block(addr);
> + RAMBlock *block;
> + void *ptr;
>
> - return ramblock_ptr(block, 0);
> + block = qemu_get_ram_block(addr);
> + ptr = ramblock_ptr(block, 0);
> + return ptr;
> }
>
> /* Return a host pointer to ram allocated with qemu_ram_alloc.
> - With the exception of the softmmu code in this file, this should
> - only be used for local memory (e.g. video ram) that the device owns,
> - and knows it isn't going to access beyond the end of the block.
> -
> - It should not be used for general purpose DMA.
> - Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
> + * This should not be used for general purpose DMA. Use address_space_map
> + * or address_space_rw instead. For local memory (e.g. video ram) that the
> + * device owns, use memory_region_get_ram_ptr.
> */
> void *qemu_get_ram_ptr(ram_addr_t addr)
> {
> - RAMBlock *block = qemu_get_ram_block(addr);
> + RAMBlock *block;
> + void *ptr;
>
> - if (xen_enabled()) {
> + block = qemu_get_ram_block(addr);
> +
> + if (xen_enabled() && block->host == NULL) {
> /* We need to check if the requested address is in the RAM
> * because we don't want to map the entire memory in QEMU.
> * In that case just map until the end of the page.
> */
> if (block->offset == 0) {
> - return xen_map_cache(addr, 0, 0);
> - } else if (block->host == NULL) {
> - block->host =
> - xen_map_cache(block->offset, block->max_length, 1);
> + ptr = xen_map_cache(addr, 0, 0);
> + goto done;
> }
> +
> + block->host = xen_map_cache(block->offset, block->max_length, 1);
> }
> - return ramblock_ptr(block, addr - block->offset);
> + ptr = ramblock_ptr(block, addr - block->offset);
> +
> +done:
> + return ptr;
> }
>
> /* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
> - * but takes a size argument */
> + * but takes a size argument.
> + */
> static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
> {
> + void *ptr;
> if (*size == 0) {
> return NULL;
> }
> @@ -1691,12 +1707,12 @@ static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
> return xen_map_cache(addr, *size, 1);
> } else {
> RAMBlock *block;
> -
> QTAILQ_FOREACH(block, &ram_list.blocks, next) {
> if (addr - block->offset < block->max_length) {
> if (addr - block->offset + *size > block->max_length)
> *size = block->max_length - addr + block->offset;
> - return ramblock_ptr(block, addr - block->offset);
> + ptr = ramblock_ptr(block, addr - block->offset);
> + return ptr;
> }
> }
>
> @@ -1706,15 +1722,24 @@ static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
> }
>
> /* Some of the softmmu routines need to translate from a host pointer
> - (typically a TLB entry) back to a ram offset. */
> + * (typically a TLB entry) back to a ram offset.
> + *
> + * By the time this function returns, the returned pointer is not protected
> + * by RCU anymore. If the caller is not within an RCU critical section and
> + * does not hold the iothread lock, it must have other means of protecting the
> + * pointer, such as a reference to the region that includes the incoming
> + * ram_addr_t.
> + */
> MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
> {
> RAMBlock *block;
> uint8_t *host = ptr;
> + MemoryRegion *mr;
>
> if (xen_enabled()) {
> *ram_addr = xen_ram_addr_from_mapcache(ptr);
> - return qemu_get_ram_block(*ram_addr)->mr;
> + mr = qemu_get_ram_block(*ram_addr)->mr;
> + return mr;
> }
>
> block = ram_list.mru_block;
> @@ -1736,7 +1761,8 @@ MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
>
> found:
> *ram_addr = block->offset + (host - block->host);
> - return block->mr;
> + mr = block->mr;
> + return mr;
> }
>
> static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
> diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
> index b8781d1..f7a3625 100644
> --- a/include/exec/cpu-all.h
> +++ b/include/exec/cpu-all.h
> @@ -277,6 +277,7 @@ struct RAMBlock {
> ram_addr_t max_length;
> void (*resized)(const char*, uint64_t length, void *host);
> uint32_t flags;
> + /* Protected by iothread lock. */
> char idstr[256];
> /* Reads can take either the iothread or the ramlist lock.
> * Writes must take both locks.
> --
> 1.8.3.1
>
>
^ permalink raw reply [flat|nested] 22+ messages in thread
* Re: [Qemu-devel] [PATCH 6/9] cosmetic changes preparing for the following patches
2015-02-04 3:10 ` Fam Zheng
@ 2015-02-04 12:51 ` Paolo Bonzini
0 siblings, 0 replies; 22+ messages in thread
From: Paolo Bonzini @ 2015-02-04 12:51 UTC (permalink / raw)
To: Fam Zheng; +Cc: Mike Day, peter.maydell, qemu-devel, fred.konrad
On 04/02/2015 04:10, Fam Zheng wrote:
>> > - return;
> Other changes are equivalent, but not quite for this one. But I think it is
> still correct, so:
>
> Reviewed-by: Fam Zheng <famz@redhat.com>
True, this is not entirely cosmetic. Still, the loop is guaranteed to
only hit one block, because of the check on offset.
Removing the "return;" matches what we do in the RAM_PREALLOC case.
Paolo
^ permalink raw reply [flat|nested] 22+ messages in thread
* [Qemu-devel] [PATCH 7/9] rcu: prod call_rcu thread when calling synchronize_rcu
2015-02-03 12:52 [Qemu-devel] [PATCH v2 0/9] RCUification of the memory API, part 2 Paolo Bonzini
` (5 preceding siblings ...)
2015-02-03 12:52 ` [Qemu-devel] [PATCH 6/9] cosmetic changes preparing for the following patches Paolo Bonzini
@ 2015-02-03 12:52 ` Paolo Bonzini
2015-02-04 3:13 ` Fam Zheng
2015-02-03 12:52 ` [Qemu-devel] [PATCH 8/9] exec: convert ram_list to QLIST Paolo Bonzini
2015-02-03 12:52 ` [Qemu-devel] [PATCH 9/9] Convert ram_list to RCU Paolo Bonzini
8 siblings, 1 reply; 22+ messages in thread
From: Paolo Bonzini @ 2015-02-03 12:52 UTC (permalink / raw)
To: qemu-devel; +Cc: peter.maydell, famz, fred.konrad
call_rcu operates on the principle that either there is a steady stream of
incoming RCU callbacks, or it is not worthwhile to wake up and process the
few that are there.
This however makes it hard to assert in testcases that all RCU callbacks
are processed. To avoid this, make call_rcu also process callbacks if there
is a steady stream of synchronize_rcu calls.
This avoids deadlocks in the upcoming test-rcu-list unit test, which waits
for call_rcu to reclaim all nodes that it allocates. Especially with very
high load on the host, call_rcu decided to wait for a few more callbacks
to pile up, but the test was done and was not going to produce more.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
util/rcu.c | 8 ++++++--
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/util/rcu.c b/util/rcu.c
index c9c3e6e..aa9f639 100644
--- a/util/rcu.c
+++ b/util/rcu.c
@@ -48,6 +48,9 @@ unsigned long rcu_gp_ctr = RCU_GP_LOCKED;
QemuEvent rcu_gp_event;
static QemuMutex rcu_gp_lock;
+static int rcu_call_count;
+static QemuEvent rcu_call_ready_event;
+
/*
* Check whether a quiescent state was crossed between the beginning of
* update_counter_and_wait and now.
@@ -149,6 +152,9 @@ void synchronize_rcu(void)
}
qemu_mutex_unlock(&rcu_gp_lock);
+ if (atomic_read(&rcu_call_count)) {
+ qemu_event_set(&rcu_call_ready_event);
+ }
}
@@ -159,8 +165,6 @@ void synchronize_rcu(void)
*/
static struct rcu_head dummy;
static struct rcu_head *head = &dummy, **tail = &dummy.next;
-static int rcu_call_count;
-static QemuEvent rcu_call_ready_event;
static void enqueue(struct rcu_head *node)
{
--
1.8.3.1
^ permalink raw reply related [flat|nested] 22+ messages in thread
* Re: [Qemu-devel] [PATCH 7/9] rcu: prod call_rcu thread when calling synchronize_rcu
2015-02-03 12:52 ` [Qemu-devel] [PATCH 7/9] rcu: prod call_rcu thread when calling synchronize_rcu Paolo Bonzini
@ 2015-02-04 3:13 ` Fam Zheng
0 siblings, 0 replies; 22+ messages in thread
From: Fam Zheng @ 2015-02-04 3:13 UTC (permalink / raw)
To: Paolo Bonzini; +Cc: peter.maydell, qemu-devel, fred.konrad
On Tue, 02/03 13:52, Paolo Bonzini wrote:
> call_rcu operates on the principle that either there is a steady stream of
> incoming RCU callbacks, or it is not worthwhile to wake up and process the
> few that are there.
>
> This however makes it hard to assert in testcases that all RCU callbacks
> are processed. To avoid this, make call_rcu also process callbacks if there
> is a steady stream of synchronize_rcu calls.
>
> This avoids deadlocks in the upcoming test-rcu-list unit test, which waits
> for call_rcu to reclaim all nodes that it allocates. Especially with very
> high load on the host, call_rcu decided to wait for a few more callbacks
> to pile up, but the test was done and was not going to produce more.
>
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> ---
> util/rcu.c | 8 ++++++--
> 1 file changed, 6 insertions(+), 2 deletions(-)
>
> diff --git a/util/rcu.c b/util/rcu.c
> index c9c3e6e..aa9f639 100644
> --- a/util/rcu.c
> +++ b/util/rcu.c
> @@ -48,6 +48,9 @@ unsigned long rcu_gp_ctr = RCU_GP_LOCKED;
> QemuEvent rcu_gp_event;
> static QemuMutex rcu_gp_lock;
>
> +static int rcu_call_count;
> +static QemuEvent rcu_call_ready_event;
> +
> /*
> * Check whether a quiescent state was crossed between the beginning of
> * update_counter_and_wait and now.
> @@ -149,6 +152,9 @@ void synchronize_rcu(void)
> }
>
> qemu_mutex_unlock(&rcu_gp_lock);
> + if (atomic_read(&rcu_call_count)) {
> + qemu_event_set(&rcu_call_ready_event);
> + }
> }
>
>
> @@ -159,8 +165,6 @@ void synchronize_rcu(void)
> */
> static struct rcu_head dummy;
> static struct rcu_head *head = &dummy, **tail = &dummy.next;
> -static int rcu_call_count;
> -static QemuEvent rcu_call_ready_event;
>
> static void enqueue(struct rcu_head *node)
> {
> --
> 1.8.3.1
>
>
Reviewed-by: Fam Zheng <famz@redhat.com>
^ permalink raw reply [flat|nested] 22+ messages in thread
* [Qemu-devel] [PATCH 8/9] exec: convert ram_list to QLIST
2015-02-03 12:52 [Qemu-devel] [PATCH v2 0/9] RCUification of the memory API, part 2 Paolo Bonzini
` (6 preceding siblings ...)
2015-02-03 12:52 ` [Qemu-devel] [PATCH 7/9] rcu: prod call_rcu thread when calling synchronize_rcu Paolo Bonzini
@ 2015-02-03 12:52 ` Paolo Bonzini
2015-02-03 12:52 ` [Qemu-devel] [PATCH 9/9] Convert ram_list to RCU Paolo Bonzini
8 siblings, 0 replies; 22+ messages in thread
From: Paolo Bonzini @ 2015-02-03 12:52 UTC (permalink / raw)
To: qemu-devel; +Cc: Mike Day, peter.maydell, famz, fred.konrad
From: Mike Day <ncmike@ncultra.org>
QLIST has RCU-friendly primitives, so switch to it.
Signed-off-by: Mike Day <ncmike@ncultra.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Mike Day <ncmike@ncultra.org>
---
arch_init.c | 19 ++++++++--------
exec.c | 52 +++++++++++++++++++++++++-------------------
include/exec/cpu-all.h | 4 ++--
scripts/dump-guest-memory.py | 8 +++----
4 files changed, 46 insertions(+), 37 deletions(-)
diff --git a/arch_init.c b/arch_init.c
index b13f74b..757632b 100644
--- a/arch_init.c
+++ b/arch_init.c
@@ -523,7 +523,7 @@ static void migration_bitmap_sync(void)
trace_migration_bitmap_sync_start();
address_space_sync_dirty_bitmap(&address_space_memory);
- QTAILQ_FOREACH(block, &ram_list.blocks, next) {
+ QLIST_FOREACH(block, &ram_list.blocks, next) {
migration_bitmap_sync_range(block->mr->ram_addr, block->used_length);
}
trace_migration_bitmap_sync_end(migration_dirty_pages
@@ -661,7 +661,7 @@ static int ram_find_and_save_block(QEMUFile *f, bool last_stage)
MemoryRegion *mr;
if (!block)
- block = QTAILQ_FIRST(&ram_list.blocks);
+ block = QLIST_FIRST(&ram_list.blocks);
while (true) {
mr = block->mr;
@@ -672,9 +672,9 @@ static int ram_find_and_save_block(QEMUFile *f, bool last_stage)
}
if (offset >= block->used_length) {
offset = 0;
- block = QTAILQ_NEXT(block, next);
+ block = QLIST_NEXT(block, next);
if (!block) {
- block = QTAILQ_FIRST(&ram_list.blocks);
+ block = QLIST_FIRST(&ram_list.blocks);
complete_round = true;
ram_bulk_stage = false;
}
@@ -728,8 +728,9 @@ uint64_t ram_bytes_total(void)
RAMBlock *block;
uint64_t total = 0;
- QTAILQ_FOREACH(block, &ram_list.blocks, next)
+ QLIST_FOREACH(block, &ram_list.blocks, next) {
total += block->used_length;
+ }
return total;
}
@@ -830,7 +831,7 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
* gaps due to alignment or unplugs.
*/
migration_dirty_pages = 0;
- QTAILQ_FOREACH(block, &ram_list.blocks, next) {
+ QLIST_FOREACH(block, &ram_list.blocks, next) {
uint64_t block_pages;
block_pages = block->used_length >> TARGET_PAGE_BITS;
@@ -843,7 +844,7 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE);
- QTAILQ_FOREACH(block, &ram_list.blocks, next) {
+ QLIST_FOREACH(block, &ram_list.blocks, next) {
qemu_put_byte(f, strlen(block->idstr));
qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
qemu_put_be64(f, block->used_length);
@@ -1029,7 +1030,7 @@ static inline void *host_from_stream_offset(QEMUFile *f,
qemu_get_buffer(f, (uint8_t *)id, len);
id[len] = 0;
- QTAILQ_FOREACH(block, &ram_list.blocks, next) {
+ QLIST_FOREACH(block, &ram_list.blocks, next) {
if (!strncmp(id, block->idstr, sizeof(id)) &&
block->max_length > offset) {
return memory_region_get_ram_ptr(block->mr) + offset;
@@ -1086,7 +1087,7 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
id[len] = 0;
length = qemu_get_be64(f);
- QTAILQ_FOREACH(block, &ram_list.blocks, next) {
+ QLIST_FOREACH(block, &ram_list.blocks, next) {
if (!strncmp(id, block->idstr, sizeof(id))) {
if (length != block->used_length) {
Error *local_err = NULL;
diff --git a/exec.c b/exec.c
index 8239370..d1a5cef4 100644
--- a/exec.c
+++ b/exec.c
@@ -58,7 +58,7 @@
#if !defined(CONFIG_USER_ONLY)
static bool in_migration;
-RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };
+RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
static MemoryRegion *system_memory;
static MemoryRegion *system_io;
@@ -815,7 +815,7 @@ static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
if (block && addr - block->offset < block->max_length) {
goto found;
}
- QTAILQ_FOREACH(block, &ram_list.blocks, next) {
+ QLIST_FOREACH(block, &ram_list.blocks, next) {
if (addr - block->offset < block->max_length) {
goto found;
}
@@ -1197,15 +1197,16 @@ static ram_addr_t find_ram_offset(ram_addr_t size)
assert(size != 0); /* it would hand out same offset multiple times */
- if (QTAILQ_EMPTY(&ram_list.blocks))
+ if (QLIST_EMPTY(&ram_list.blocks)) {
return 0;
+ }
- QTAILQ_FOREACH(block, &ram_list.blocks, next) {
+ QLIST_FOREACH(block, &ram_list.blocks, next) {
ram_addr_t end, next = RAM_ADDR_MAX;
end = block->offset + block->max_length;
- QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
+ QLIST_FOREACH(next_block, &ram_list.blocks, next) {
if (next_block->offset >= end) {
next = MIN(next, next_block->offset);
}
@@ -1230,9 +1231,9 @@ ram_addr_t last_ram_offset(void)
RAMBlock *block;
ram_addr_t last = 0;
- QTAILQ_FOREACH(block, &ram_list.blocks, next)
+ QLIST_FOREACH(block, &ram_list.blocks, next) {
last = MAX(last, block->offset + block->max_length);
-
+ }
return last;
}
@@ -1256,7 +1257,7 @@ static RAMBlock *find_ram_block(ram_addr_t addr)
{
RAMBlock *block;
- QTAILQ_FOREACH(block, &ram_list.blocks, next) {
+ QLIST_FOREACH(block, &ram_list.blocks, next) {
if (block->offset == addr) {
return block;
}
@@ -1284,7 +1285,7 @@ void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
qemu_mutex_lock_ramlist();
- QTAILQ_FOREACH(block, &ram_list.blocks, next) {
+ QLIST_FOREACH(block, &ram_list.blocks, next) {
if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
new_block->idstr);
@@ -1366,6 +1367,7 @@ int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
{
RAMBlock *block;
+ RAMBlock *last_block = NULL;
ram_addr_t old_ram_size, new_ram_size;
old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
@@ -1392,16 +1394,22 @@ static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
}
}
- /* Keep the list sorted from biggest to smallest block. */
- QTAILQ_FOREACH(block, &ram_list.blocks, next) {
+ /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
+ * QLIST (which has an RCU-friendly variant) does not have insertion at
+ * tail, so save the last element in last_block.
+ */
+ QLIST_FOREACH(block, &ram_list.blocks, next) {
+ last_block = block;
if (block->max_length < new_block->max_length) {
break;
}
}
if (block) {
- QTAILQ_INSERT_BEFORE(block, new_block, next);
- } else {
- QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
+ QLIST_INSERT_BEFORE(block, new_block, next);
+ } else if (last_block) {
+ QLIST_INSERT_AFTER(last_block, new_block, next);
+ } else { /* list is empty */
+ QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
}
ram_list.mru_block = NULL;
atomic_rcu_set(&ram_list.version, ram_list.version + 1);
@@ -1546,9 +1554,9 @@ void qemu_ram_free_from_ptr(ram_addr_t addr)
/* This assumes the iothread lock is taken here too. */
qemu_mutex_lock_ramlist();
- QTAILQ_FOREACH(block, &ram_list.blocks, next) {
+ QLIST_FOREACH(block, &ram_list.blocks, next) {
if (addr == block->offset) {
- QTAILQ_REMOVE(&ram_list.blocks, block, next);
+ QLIST_REMOVE(block, next);
ram_list.mru_block = NULL;
atomic_rcu_set(&ram_list.version, ram_list.version + 1);
call_rcu(block, (void (*)(struct RAMBlock *))g_free, rcu);
@@ -1582,9 +1590,9 @@ void qemu_ram_free(ram_addr_t addr)
/* This assumes the iothread lock is taken here too. */
qemu_mutex_lock_ramlist();
- QTAILQ_FOREACH(block, &ram_list.blocks, next) {
+ QLIST_FOREACH(block, &ram_list.blocks, next) {
if (addr == block->offset) {
- QTAILQ_REMOVE(&ram_list.blocks, block, next);
+ QLIST_REMOVE(block, next);
ram_list.mru_block = NULL;
atomic_rcu_set(&ram_list.version, ram_list.version + 1);
call_rcu(block, reclaim_ramblock, rcu);
@@ -1602,7 +1610,7 @@ void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
int flags;
void *area, *vaddr;
- QTAILQ_FOREACH(block, &ram_list.blocks, next) {
+ QLIST_FOREACH(block, &ram_list.blocks, next) {
offset = addr - block->offset;
if (offset < block->max_length) {
vaddr = ramblock_ptr(block, offset);
@@ -1707,7 +1715,7 @@ static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
return xen_map_cache(addr, *size, 1);
} else {
RAMBlock *block;
- QTAILQ_FOREACH(block, &ram_list.blocks, next) {
+ QLIST_FOREACH(block, &ram_list.blocks, next) {
if (addr - block->offset < block->max_length) {
if (addr - block->offset + *size > block->max_length)
*size = block->max_length - addr + block->offset;
@@ -1747,7 +1755,7 @@ MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
goto found;
}
- QTAILQ_FOREACH(block, &ram_list.blocks, next) {
+ QLIST_FOREACH(block, &ram_list.blocks, next) {
/* This case append when the block is not mapped. */
if (block->host == NULL) {
continue;
@@ -3015,7 +3023,7 @@ void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
RAMBlock *block;
- QTAILQ_FOREACH(block, &ram_list.blocks, next) {
+ QLIST_FOREACH(block, &ram_list.blocks, next) {
func(block->host, block->offset, block->used_length, opaque);
}
}
diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index f7a3625..87b8658 100644
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -282,7 +282,7 @@ struct RAMBlock {
/* Reads can take either the iothread or the ramlist lock.
* Writes must take both locks.
*/
- QTAILQ_ENTRY(RAMBlock) next;
+ QLIST_ENTRY(RAMBlock) next;
int fd;
};
@@ -299,7 +299,7 @@ typedef struct RAMList {
unsigned long *dirty_memory[DIRTY_MEMORY_NUM];
RAMBlock *mru_block;
/* Protected by the ramlist lock. */
- QTAILQ_HEAD(, RAMBlock) blocks;
+ QLIST_HEAD(, RAMBlock) blocks;
uint32_t version;
} RAMList;
extern RAMList ram_list;
diff --git a/scripts/dump-guest-memory.py b/scripts/dump-guest-memory.py
index 1ed8b67..dc8e44a 100644
--- a/scripts/dump-guest-memory.py
+++ b/scripts/dump-guest-memory.py
@@ -108,16 +108,16 @@ shape and this command should mostly work."""
assert (val["hi"] == 0)
return val["lo"]
- def qtailq_foreach(self, head, field_str):
- var_p = head["tqh_first"]
+ def qlist_foreach(self, head, field_str):
+ var_p = head["lh_first"]
while (var_p != 0):
var = var_p.dereference()
yield var
- var_p = var[field_str]["tqe_next"]
+ var_p = var[field_str]["le_next"]
def qemu_get_ram_block(self, ram_addr):
ram_blocks = gdb.parse_and_eval("ram_list.blocks")
- for block in self.qtailq_foreach(ram_blocks, "next"):
+ for block in self.qlist_foreach(ram_blocks, "next"):
if (ram_addr - block["offset"] < block["length"]):
return block
raise gdb.GdbError("Bad ram offset %x" % ram_addr)
--
1.8.3.1
^ permalink raw reply related [flat|nested] 22+ messages in thread
* [Qemu-devel] [PATCH 9/9] Convert ram_list to RCU
2015-02-03 12:52 [Qemu-devel] [PATCH v2 0/9] RCUification of the memory API, part 2 Paolo Bonzini
` (7 preceding siblings ...)
2015-02-03 12:52 ` [Qemu-devel] [PATCH 8/9] exec: convert ram_list to QLIST Paolo Bonzini
@ 2015-02-03 12:52 ` Paolo Bonzini
8 siblings, 0 replies; 22+ messages in thread
From: Paolo Bonzini @ 2015-02-03 12:52 UTC (permalink / raw)
To: qemu-devel; +Cc: Mike Day, peter.maydell, famz, fred.konrad
From: Mike Day <ncmike@ncultra.org>
Allow "unlocked" reads of the ram_list by using an RCU-enabled QLIST.
The ramlist mutex is kept, because call_rcu callbacks are not run while
the iothread lock is held. Thus, writers still need to take the ramlist
mutex, but they no longer need to assume that the iothread lock is taken.
Readers of the list, instead, no longer require either the iothread
or ramlist mutex, but they need to use rcu_read_lock() and
rcu_read_unlock().
One place in arch_init.c was downgrading from write side to read side
like this:
qemu_mutex_lock_iothread()
qemu_mutex_lock_ramlist()
...
qemu_mutex_unlock_iothread()
...
qemu_mutex_unlock_ramlist()
and the equivalent idiom is:
qemu_mutex_lock_ramlist()
rcu_read_lock()
...
qemu_mutex_unlock_ramlist()
...
rcu_read_unlock()
Signed-off-by: Mike Day <ncmike@ncultra.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
arch_init.c | 66 ++++++++++++++++++++++++-----------
exec.c | 95 ++++++++++++++++++++++++++++++++++----------------
include/exec/cpu-all.h | 6 ++--
3 files changed, 112 insertions(+), 55 deletions(-)
diff --git a/arch_init.c b/arch_init.c
index 757632b..731b490 100644
--- a/arch_init.c
+++ b/arch_init.c
@@ -52,6 +52,7 @@
#include "exec/ram_addr.h"
#include "hw/acpi/acpi.h"
#include "qemu/host-utils.h"
+#include "qemu/rcu_queue.h"
#ifdef DEBUG_ARCH_INIT
#define DPRINTF(fmt, ...) \
@@ -523,9 +524,12 @@ static void migration_bitmap_sync(void)
trace_migration_bitmap_sync_start();
address_space_sync_dirty_bitmap(&address_space_memory);
- QLIST_FOREACH(block, &ram_list.blocks, next) {
+ rcu_read_lock();
+ QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
migration_bitmap_sync_range(block->mr->ram_addr, block->used_length);
}
+ rcu_read_unlock();
+
trace_migration_bitmap_sync_end(migration_dirty_pages
- num_dirty_pages_init);
num_dirty_pages_period += migration_dirty_pages - num_dirty_pages_init;
@@ -648,6 +652,8 @@ static int ram_save_page(QEMUFile *f, RAMBlock* block, ram_addr_t offset,
/*
* ram_find_and_save_block: Finds a page to send and sends it to f
*
+ * Called within an RCU critical section.
+ *
* Returns: The number of bytes written.
* 0 means no dirty pages
*/
@@ -661,7 +667,7 @@ static int ram_find_and_save_block(QEMUFile *f, bool last_stage)
MemoryRegion *mr;
if (!block)
- block = QLIST_FIRST(&ram_list.blocks);
+ block = QLIST_FIRST_RCU(&ram_list.blocks);
while (true) {
mr = block->mr;
@@ -672,9 +678,9 @@ static int ram_find_and_save_block(QEMUFile *f, bool last_stage)
}
if (offset >= block->used_length) {
offset = 0;
- block = QLIST_NEXT(block, next);
+ block = QLIST_NEXT_RCU(block, next);
if (!block) {
- block = QLIST_FIRST(&ram_list.blocks);
+ block = QLIST_FIRST_RCU(&ram_list.blocks);
complete_round = true;
ram_bulk_stage = false;
}
@@ -728,10 +734,10 @@ uint64_t ram_bytes_total(void)
RAMBlock *block;
uint64_t total = 0;
- QLIST_FOREACH(block, &ram_list.blocks, next) {
+ rcu_read_lock();
+ QLIST_FOREACH_RCU(block, &ram_list.blocks, next)
total += block->used_length;
- }
-
+ rcu_read_unlock();
return total;
}
@@ -777,6 +783,13 @@ static void reset_ram_globals(void)
#define MAX_WAIT 50 /* ms, half buffered_file limit */
+
+/* Each of ram_save_setup, ram_save_iterate and ram_save_complete has
+ * long-running RCU critical section. When rcu-reclaims in the code
+ * start to become numerous it will be necessary to reduce the
+ * granularity of these critical sections.
+ */
+
static int ram_save_setup(QEMUFile *f, void *opaque)
{
RAMBlock *block;
@@ -817,8 +830,8 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
acct_clear();
}
- qemu_mutex_lock_iothread();
qemu_mutex_lock_ramlist();
+ rcu_read_lock();
bytes_transferred = 0;
reset_ram_globals();
@@ -831,7 +844,7 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
* gaps due to alignment or unplugs.
*/
migration_dirty_pages = 0;
- QLIST_FOREACH(block, &ram_list.blocks, next) {
+ QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
uint64_t block_pages;
block_pages = block->used_length >> TARGET_PAGE_BITS;
@@ -840,17 +853,17 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
memory_global_dirty_log_start();
migration_bitmap_sync();
- qemu_mutex_unlock_iothread();
+ qemu_mutex_unlock_ramlist();
qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE);
- QLIST_FOREACH(block, &ram_list.blocks, next) {
+ QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
qemu_put_byte(f, strlen(block->idstr));
qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
qemu_put_be64(f, block->used_length);
}
- qemu_mutex_unlock_ramlist();
+ rcu_read_unlock();
ram_control_before_iterate(f, RAM_CONTROL_SETUP);
ram_control_after_iterate(f, RAM_CONTROL_SETUP);
@@ -867,9 +880,8 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
int64_t t0;
int total_sent = 0;
- qemu_mutex_lock_ramlist();
-
- if (ram_list.version != last_version) {
+ rcu_read_lock();
+ if (atomic_rcu_read(&ram_list.version) != last_version) {
reset_ram_globals();
}
@@ -903,8 +915,7 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
}
i++;
}
-
- qemu_mutex_unlock_ramlist();
+ rcu_read_unlock();
/*
* Must occur before EOS (or any QEMUFile operation)
@@ -931,7 +942,8 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
static int ram_save_complete(QEMUFile *f, void *opaque)
{
- qemu_mutex_lock_ramlist();
+ rcu_read_lock();
+
migration_bitmap_sync();
ram_control_before_iterate(f, RAM_CONTROL_FINISH);
@@ -953,7 +965,7 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
ram_control_after_iterate(f, RAM_CONTROL_FINISH);
migration_end();
- qemu_mutex_unlock_ramlist();
+ rcu_read_unlock();
qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
return 0;
@@ -967,7 +979,9 @@ static uint64_t ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size)
if (remaining_size < max_size) {
qemu_mutex_lock_iothread();
+ rcu_read_lock();
migration_bitmap_sync();
+ rcu_read_unlock();
qemu_mutex_unlock_iothread();
remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;
}
@@ -1009,6 +1023,9 @@ static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
return 0;
}
+/* Must be called from within a rcu critical section.
+ * Returns a pointer from within the RCU-protected ram_list.
+ */
static inline void *host_from_stream_offset(QEMUFile *f,
ram_addr_t offset,
int flags)
@@ -1030,7 +1047,7 @@ static inline void *host_from_stream_offset(QEMUFile *f,
qemu_get_buffer(f, (uint8_t *)id, len);
id[len] = 0;
- QLIST_FOREACH(block, &ram_list.blocks, next) {
+ QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
if (!strncmp(id, block->idstr, sizeof(id)) &&
block->max_length > offset) {
return memory_region_get_ram_ptr(block->mr) + offset;
@@ -1063,6 +1080,12 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
ret = -EINVAL;
}
+ /* This RCU critical section can be very long running.
+ * When RCU reclaims in the code start to become numerous,
+ * it will be necessary to reduce the granularity of this
+ * critical section.
+ */
+ rcu_read_lock();
while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) {
ram_addr_t addr, total_ram_bytes;
void *host;
@@ -1087,7 +1110,7 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
id[len] = 0;
length = qemu_get_be64(f);
- QLIST_FOREACH(block, &ram_list.blocks, next) {
+ QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
if (!strncmp(id, block->idstr, sizeof(id))) {
if (length != block->used_length) {
Error *local_err = NULL;
@@ -1161,6 +1184,7 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
}
}
+ rcu_read_unlock();
DPRINTF("Completed load of VM with exit code %d seq iteration "
"%" PRIu64 "\n", ret, seq_iter);
return ret;
diff --git a/exec.c b/exec.c
index d1a5cef4..e4bca65 100644
--- a/exec.c
+++ b/exec.c
@@ -44,7 +44,7 @@
#include "trace.h"
#endif
#include "exec/cpu-all.h"
-
+#include "qemu/rcu_queue.h"
#include "exec/cputlb.h"
#include "translate-all.h"
@@ -58,6 +58,9 @@
#if !defined(CONFIG_USER_ONLY)
static bool in_migration;
+/* ram_list is read under rcu_read_lock()/rcu_read_unlock(). Writes
+ * are protected by the ramlist lock.
+ */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
static MemoryRegion *system_memory;
@@ -806,16 +809,16 @@ void cpu_abort(CPUState *cpu, const char *fmt, ...)
}
#if !defined(CONFIG_USER_ONLY)
+/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
RAMBlock *block;
- /* The list is protected by the iothread lock here. */
block = atomic_rcu_read(&ram_list.mru_block);
if (block && addr - block->offset < block->max_length) {
goto found;
}
- QLIST_FOREACH(block, &ram_list.blocks, next) {
+ QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
if (addr - block->offset < block->max_length) {
goto found;
}
@@ -854,10 +857,12 @@ static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
end = TARGET_PAGE_ALIGN(start + length);
start &= TARGET_PAGE_MASK;
+ rcu_read_lock();
block = qemu_get_ram_block(start);
assert(block == qemu_get_ram_block(end - 1));
start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
cpu_tlb_reset_dirty_all(start1, length);
+ rcu_read_unlock();
}
/* Note: start and end must be within the same ram block. */
@@ -1190,6 +1195,7 @@ error:
}
#endif
+/* Called with the ramlist lock held. */
static ram_addr_t find_ram_offset(ram_addr_t size)
{
RAMBlock *block, *next_block;
@@ -1197,16 +1203,16 @@ static ram_addr_t find_ram_offset(ram_addr_t size)
assert(size != 0); /* it would hand out same offset multiple times */
- if (QLIST_EMPTY(&ram_list.blocks)) {
+ if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
return 0;
}
- QLIST_FOREACH(block, &ram_list.blocks, next) {
+ QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
ram_addr_t end, next = RAM_ADDR_MAX;
end = block->offset + block->max_length;
- QLIST_FOREACH(next_block, &ram_list.blocks, next) {
+ QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
if (next_block->offset >= end) {
next = MIN(next, next_block->offset);
}
@@ -1231,9 +1237,11 @@ ram_addr_t last_ram_offset(void)
RAMBlock *block;
ram_addr_t last = 0;
- QLIST_FOREACH(block, &ram_list.blocks, next) {
+ rcu_read_lock();
+ QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
last = MAX(last, block->offset + block->max_length);
}
+ rcu_read_unlock();
return last;
}
@@ -1253,11 +1261,14 @@ static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
}
}
+/* Called within an RCU critical section, or while the ramlist lock
+ * is held.
+ */
static RAMBlock *find_ram_block(ram_addr_t addr)
{
RAMBlock *block;
- QLIST_FOREACH(block, &ram_list.blocks, next) {
+ QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
if (block->offset == addr) {
return block;
}
@@ -1271,6 +1282,7 @@ void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
RAMBlock *new_block, *block;
+ rcu_read_lock();
new_block = find_ram_block(addr);
assert(new_block);
assert(!new_block->idstr[0]);
@@ -1284,15 +1296,14 @@ void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
}
pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
- qemu_mutex_lock_ramlist();
- QLIST_FOREACH(block, &ram_list.blocks, next) {
+ QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
new_block->idstr);
abort();
}
}
- qemu_mutex_unlock_ramlist();
+ rcu_read_unlock();
}
/* Called with iothread lock held. */
@@ -1305,10 +1316,12 @@ void qemu_ram_unset_idstr(ram_addr_t addr)
* does not work anyway.
*/
+ rcu_read_lock();
block = find_ram_block(addr);
if (block) {
memset(block->idstr, 0, sizeof(block->idstr));
}
+ rcu_read_unlock();
}
static int memory_try_enable_merging(void *addr, size_t len)
@@ -1372,7 +1385,6 @@ static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
- /* This assumes the iothread lock is taken here too. */
qemu_mutex_lock_ramlist();
new_block->offset = find_ram_offset(new_block->max_length);
@@ -1398,18 +1410,18 @@ static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
* QLIST (which has an RCU-friendly variant) does not have insertion at
* tail, so save the last element in last_block.
*/
- QLIST_FOREACH(block, &ram_list.blocks, next) {
+ QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
last_block = block;
if (block->max_length < new_block->max_length) {
break;
}
}
if (block) {
- QLIST_INSERT_BEFORE(block, new_block, next);
+ QLIST_INSERT_BEFORE_RCU(block, new_block, next);
} else if (last_block) {
- QLIST_INSERT_AFTER(last_block, new_block, next);
+ QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
} else { /* list is empty */
- QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
+ QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
}
ram_list.mru_block = NULL;
atomic_rcu_set(&ram_list.version, ram_list.version + 1);
@@ -1552,11 +1564,10 @@ void qemu_ram_free_from_ptr(ram_addr_t addr)
{
RAMBlock *block;
- /* This assumes the iothread lock is taken here too. */
qemu_mutex_lock_ramlist();
- QLIST_FOREACH(block, &ram_list.blocks, next) {
+ QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
if (addr == block->offset) {
- QLIST_REMOVE(block, next);
+ QLIST_REMOVE_RCU(block, next);
ram_list.mru_block = NULL;
atomic_rcu_set(&ram_list.version, ram_list.version + 1);
call_rcu(block, (void (*)(struct RAMBlock *))g_free, rcu);
@@ -1583,16 +1594,14 @@ static void reclaim_ramblock(RAMBlock *block)
g_free(block);
}
-/* Called with the iothread lock held */
void qemu_ram_free(ram_addr_t addr)
{
RAMBlock *block;
- /* This assumes the iothread lock is taken here too. */
qemu_mutex_lock_ramlist();
- QLIST_FOREACH(block, &ram_list.blocks, next) {
+ QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
if (addr == block->offset) {
- QLIST_REMOVE(block, next);
+ QLIST_REMOVE_RCU(block, next);
ram_list.mru_block = NULL;
atomic_rcu_set(&ram_list.version, ram_list.version + 1);
call_rcu(block, reclaim_ramblock, rcu);
@@ -1610,7 +1619,7 @@ void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
int flags;
void *area, *vaddr;
- QLIST_FOREACH(block, &ram_list.blocks, next) {
+ QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
offset = addr - block->offset;
if (offset < block->max_length) {
vaddr = ramblock_ptr(block, offset);
@@ -1657,8 +1666,10 @@ int qemu_get_ram_fd(ram_addr_t addr)
RAMBlock *block;
int fd;
+ rcu_read_lock();
block = qemu_get_ram_block(addr);
fd = block->fd;
+ rcu_read_unlock();
return fd;
}
@@ -1667,8 +1678,10 @@ void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
RAMBlock *block;
void *ptr;
+ rcu_read_lock();
block = qemu_get_ram_block(addr);
ptr = ramblock_ptr(block, 0);
+ rcu_read_unlock();
return ptr;
}
@@ -1676,12 +1689,19 @@ void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
* This should not be used for general purpose DMA. Use address_space_map
* or address_space_rw instead. For local memory (e.g. video ram) that the
* device owns, use memory_region_get_ram_ptr.
+ *
+ * By the time this function returns, the returned pointer is not protected
+ * by RCU anymore. If the caller is not within an RCU critical section and
+ * does not hold the iothread lock, it must have other means of protecting the
+ * pointer, such as a reference to the region that includes the incoming
+ * ram_addr_t.
*/
void *qemu_get_ram_ptr(ram_addr_t addr)
{
RAMBlock *block;
void *ptr;
+ rcu_read_lock();
block = qemu_get_ram_block(addr);
if (xen_enabled() && block->host == NULL) {
@@ -1691,19 +1711,26 @@ void *qemu_get_ram_ptr(ram_addr_t addr)
*/
if (block->offset == 0) {
ptr = xen_map_cache(addr, 0, 0);
- goto done;
+ goto unlock;
}
block->host = xen_map_cache(block->offset, block->max_length, 1);
}
ptr = ramblock_ptr(block, addr - block->offset);
-done:
+unlock:
+ rcu_read_unlock();
return ptr;
}
/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
* but takes a size argument.
+ *
+ * By the time this function returns, the returned pointer is not protected
+ * by RCU anymore. If the caller is not within an RCU critical section and
+ * does not hold the iothread lock, it must have other means of protecting the
+ * pointer, such as a reference to the region that includes the incoming
+ * ram_addr_t.
*/
static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
{
@@ -1715,11 +1742,13 @@ static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
return xen_map_cache(addr, *size, 1);
} else {
RAMBlock *block;
- QLIST_FOREACH(block, &ram_list.blocks, next) {
+ rcu_read_lock();
+ QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
if (addr - block->offset < block->max_length) {
if (addr - block->offset + *size > block->max_length)
*size = block->max_length - addr + block->offset;
ptr = ramblock_ptr(block, addr - block->offset);
+ rcu_read_unlock();
return ptr;
}
}
@@ -1745,17 +1774,20 @@ MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
MemoryRegion *mr;
if (xen_enabled()) {
+ rcu_read_lock();
*ram_addr = xen_ram_addr_from_mapcache(ptr);
mr = qemu_get_ram_block(*ram_addr)->mr;
+ rcu_read_unlock();
return mr;
}
- block = ram_list.mru_block;
+ rcu_read_lock();
+ block = atomic_rcu_read(&ram_list.mru_block);
if (block && block->host && host - block->host < block->max_length) {
goto found;
}
- QLIST_FOREACH(block, &ram_list.blocks, next) {
+ QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
/* This case append when the block is not mapped. */
if (block->host == NULL) {
continue;
@@ -1770,6 +1802,7 @@ MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
found:
*ram_addr = block->offset + (host - block->host);
mr = block->mr;
+ rcu_read_unlock();
return mr;
}
@@ -3023,8 +3056,10 @@ void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
{
RAMBlock *block;
- QLIST_FOREACH(block, &ram_list.blocks, next) {
+ rcu_read_lock();
+ QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
func(block->host, block->offset, block->used_length, opaque);
}
+ rcu_read_unlock();
}
#endif
diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index 87b8658..ac06c67 100644
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -279,9 +279,7 @@ struct RAMBlock {
uint32_t flags;
/* Protected by iothread lock. */
char idstr[256];
- /* Reads can take either the iothread or the ramlist lock.
- * Writes must take both locks.
- */
+ /* RCU-enabled, writes protected by the ramlist lock */
QLIST_ENTRY(RAMBlock) next;
int fd;
};
@@ -298,7 +296,7 @@ typedef struct RAMList {
/* Protected by the iothread lock. */
unsigned long *dirty_memory[DIRTY_MEMORY_NUM];
RAMBlock *mru_block;
- /* Protected by the ramlist lock. */
+ /* RCU-enabled, writes protected by the ramlist lock. */
QLIST_HEAD(, RAMBlock) blocks;
uint32_t version;
} RAMList;
--
1.8.3.1
^ permalink raw reply related [flat|nested] 22+ messages in thread