From: Paolo Bonzini <pbonzini@redhat.com>
To: qemu-devel@nongnu.org
Cc: Liu Ping Fan <qemulist@gmail.com>
Subject: [Qemu-devel] [PATCH 21/30] exec: separate current memory map from the one being built
Date: Fri, 28 Jun 2013 20:26:40 +0200
Message-ID: <1372444009-11544-22-git-send-email-pbonzini@redhat.com>
In-Reply-To: <1372444009-11544-1-git-send-email-pbonzini@redhat.com>
Currently, phys_node_map and phys_sections are shared by all of
the AddressSpaceDispatch instances. When the memory topology is
updated, every AddressSpaceDispatch rebuilds its dispatch tables
from them in sequence. In order to prepare for RCU access, leave
the old memory map alive while the next one is being built.
While rebuilding, the new dispatch tables allocate from and look
up next_map; once all dispatch tables have been rebuilt, we can
switch to next_* and free the previous map.
Based on a patch from Liu Ping Fan.
Signed-off-by: Liu Ping Fan <qemulist@gmail.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
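(Reviewer note, not part of the commit: a minimal sketch of the
double-buffering pattern this patch introduces. Map, update_begin()
and update_commit() below are invented stand-ins for PhysPageMap,
core_begin() and core_commit(); this is an illustration under those
assumptions, not the actual QEMU code.)

  #include <stdlib.h>
  #include <string.h>

  typedef struct Map {
      void *sections;          /* stand-in for the sections array */
      unsigned sections_nb;
  } Map;

  static Map cur_map;          /* the map lookups use right now */
  static Map next_map;         /* the map a topology update builds */

  static void update_begin(void)
  {
      /* Start building into an empty map; readers keep walking
       * cur_map undisturbed in the meantime. */
      memset(&next_map, 0, sizeof(next_map));
  }

  static void update_commit(void)
  {
      /* Publish the freshly built map, then release the old one.
       * Under RCU, this free would be deferred until every reader
       * has left the old map. */
      Map old = cur_map;
      cur_map = next_map;
      free(old.sections);
  }

The same split is why phys_page_find() below gains explicit nodes
and sections parameters: lookups made while rebuilding walk
next_map, while the hot path keeps walking cur_map.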
exec.c | 103 ++++++++++++++++++++++++++++++++++++++++-------------------------
1 file changed, 63 insertions(+), 40 deletions(-)
diff --git a/exec.c b/exec.c
index 7ad513c..f138e56 100644
--- a/exec.c
+++ b/exec.c
@@ -111,16 +111,24 @@ typedef struct subpage_t {
uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;
-static MemoryRegionSection *phys_sections;
-static unsigned phys_sections_nb, phys_sections_nb_alloc;
#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3
-/* Simple allocator for PhysPageEntry nodes */
-static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
-static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;
+typedef PhysPageEntry Node[L2_SIZE];
+
+typedef struct PhysPageMap {
+ unsigned sections_nb;
+ unsigned sections_nb_alloc;
+ unsigned nodes_nb;
+ unsigned nodes_nb_alloc;
+ Node *nodes;
+ MemoryRegionSection *sections;
+} PhysPageMap;
+
+static PhysPageMap cur_map;
+static PhysPageMap next_map;
#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)
@@ -135,13 +143,13 @@ static MemoryRegion io_mem_watch;
static void phys_map_node_reserve(unsigned nodes)
{
- if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
- typedef PhysPageEntry Node[L2_SIZE];
- phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
- phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
- phys_map_nodes_nb + nodes);
- phys_map_nodes = g_renew(Node, phys_map_nodes,
- phys_map_nodes_nb_alloc);
+ if (next_map.nodes_nb + nodes > next_map.nodes_nb_alloc) {
+ next_map.nodes_nb_alloc = MAX(next_map.nodes_nb_alloc * 2,
+ 16);
+ next_map.nodes_nb_alloc = MAX(next_map.nodes_nb_alloc,
+ next_map.nodes_nb + nodes);
+ next_map.nodes = g_renew(Node, next_map.nodes,
+ next_map.nodes_nb_alloc);
}
}
@@ -150,12 +158,12 @@ static uint16_t phys_map_node_alloc(void)
unsigned i;
uint16_t ret;
- ret = phys_map_nodes_nb++;
+ ret = next_map.nodes_nb++;
assert(ret != PHYS_MAP_NODE_NIL);
- assert(ret != phys_map_nodes_nb_alloc);
+ assert(ret != next_map.nodes_nb_alloc);
for (i = 0; i < L2_SIZE; ++i) {
- phys_map_nodes[ret][i].is_leaf = 0;
- phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
+ next_map.nodes[ret][i].is_leaf = 0;
+ next_map.nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
}
return ret;
}
@@ -170,7 +178,7 @@ static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
lp->ptr = phys_map_node_alloc();
- p = phys_map_nodes[lp->ptr];
+ p = next_map.nodes[lp->ptr];
if (level == 0) {
for (i = 0; i < L2_SIZE; i++) {
p[i].is_leaf = 1;
@@ -178,7 +186,7 @@ static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
}
}
} else {
- p = phys_map_nodes[lp->ptr];
+ p = next_map.nodes[lp->ptr];
}
lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];
@@ -205,20 +213,20 @@ static void phys_page_set(AddressSpaceDispatch *d,
phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
-static MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr index)
+static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr index,
+ Node *nodes, MemoryRegionSection *sections)
{
- PhysPageEntry lp = d->phys_map;
PhysPageEntry *p;
int i;
for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
if (lp.ptr == PHYS_MAP_NODE_NIL) {
- return &phys_sections[PHYS_SECTION_UNASSIGNED];
+ return &sections[PHYS_SECTION_UNASSIGNED];
}
- p = phys_map_nodes[lp.ptr];
+ p = nodes[lp.ptr];
lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
}
- return &phys_sections[lp.ptr];
+ return &sections[lp.ptr];
}
bool memory_region_is_unassigned(MemoryRegion *mr)
@@ -234,10 +242,11 @@ static MemoryRegionSection *address_space_lookup_region(AddressSpace *as,
MemoryRegionSection *section;
subpage_t *subpage;
- section = phys_page_find(as->dispatch, addr >> TARGET_PAGE_BITS);
+ section = phys_page_find(as->dispatch->phys_map, addr >> TARGET_PAGE_BITS,
+ cur_map.nodes, cur_map.sections);
if (resolve_subpage && section->mr->subpage) {
subpage = container_of(section->mr, subpage_t, iomem);
- section = &phys_sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
+ section = &cur_map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
}
return section;
}
@@ -736,7 +745,7 @@ hwaddr memory_region_section_get_iotlb(CPUArchState *env,
iotlb |= PHYS_SECTION_ROM;
}
} else {
- iotlb = section - phys_sections;
+ iotlb = section - cur_map.sections;
iotlb += xlat;
}
@@ -769,16 +778,17 @@ static uint16_t phys_section_add(MemoryRegionSection *section)
* pointer to produce the iotlb entries. Thus it should
* never overflow into the page-aligned value.
*/
- assert(phys_sections_nb < TARGET_PAGE_SIZE);
+ assert(next_map.sections_nb < TARGET_PAGE_SIZE);
- if (phys_sections_nb == phys_sections_nb_alloc) {
- phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
- phys_sections = g_renew(MemoryRegionSection, phys_sections,
- phys_sections_nb_alloc);
+ if (next_map.sections_nb == next_map.sections_nb_alloc) {
+ next_map.sections_nb_alloc = MAX(next_map.sections_nb_alloc * 2,
+ 16);
+ next_map.sections = g_renew(MemoryRegionSection, next_map.sections,
+ next_map.sections_nb_alloc);
}
- phys_sections[phys_sections_nb] = *section;
+ next_map.sections[next_map.sections_nb] = *section;
memory_region_ref(section->mr);
- return phys_sections_nb++;
+ return next_map.sections_nb++;
}
static void phys_section_destroy(MemoryRegion *mr)
@@ -792,13 +802,14 @@ static void phys_section_destroy(MemoryRegion *mr)
}
}
-static void phys_sections_clear(void)
+static void phys_sections_clear(PhysPageMap *map)
{
- while (phys_sections_nb > 0) {
- MemoryRegionSection *section = &phys_sections[--phys_sections_nb];
+ while (map->sections_nb > 0) {
+ MemoryRegionSection *section = &map->sections[--map->sections_nb];
phys_section_destroy(section->mr);
}
- phys_map_nodes_nb = 0;
+ g_free(map->sections);
+ g_free(map->nodes);
}
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
@@ -806,7 +817,8 @@ static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *secti
subpage_t *subpage;
hwaddr base = section->offset_within_address_space
& TARGET_PAGE_MASK;
- MemoryRegionSection *existing = phys_page_find(d, base >> TARGET_PAGE_BITS);
+ MemoryRegionSection *existing = phys_page_find(d->phys_map, base >> TARGET_PAGE_BITS,
+ next_map.nodes, next_map.sections);
MemoryRegionSection subsection = {
.offset_within_address_space = base,
.size = int128_make64(TARGET_PAGE_SIZE),
@@ -1689,7 +1701,7 @@ static uint16_t dummy_section(MemoryRegion *mr)
MemoryRegion *iotlb_to_region(hwaddr index)
{
- return phys_sections[index & ~TARGET_PAGE_MASK].mr;
+ return cur_map.sections[index & ~TARGET_PAGE_MASK].mr;
}
static void io_mem_init(void)
@@ -1714,7 +1726,7 @@ static void core_begin(MemoryListener *listener)
{
uint16_t n;
- phys_sections_clear();
+ memset(&next_map, 0, sizeof(next_map));
n = dummy_section(&io_mem_unassigned);
assert(n == PHYS_SECTION_UNASSIGNED);
n = dummy_section(&io_mem_notdirty);
@@ -1725,6 +1737,16 @@ static void core_begin(MemoryListener *listener)
assert(n == PHYS_SECTION_WATCH);
}
+/* This listener's commit runs after the other AddressSpaceDispatch listeners'.
+ * All AddressSpaceDispatch instances have switched to the next map.
+ */
+static void core_commit(MemoryListener *listener)
+{
+ PhysPageMap info = cur_map;
+ cur_map = next_map;
+ phys_sections_clear(&info);
+}
+
static void tcg_commit(MemoryListener *listener)
{
CPUArchState *env;
@@ -1749,6 +1771,7 @@ static void core_log_global_stop(MemoryListener *listener)
static MemoryListener core_memory_listener = {
.begin = core_begin,
+ .commit = core_commit,
.log_global_start = core_log_global_start,
.log_global_stop = core_log_global_stop,
.priority = 1,
--
1.8.1.4
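(A follow-up note on ordering, since it is what makes the swap in
core_commit() safe: the core listener registers with priority 1
while the AddressSpaceDispatch listeners use priority 0, and, if I
read the listener machinery right, commit callbacks run in
ascending priority order, so core_commit() runs only after every
dispatch table has switched to the new map. The sketch below is
illustrative only; Listener, dispatch_commit() and core_commit_hook()
are invented for the example and are not QEMU's memory.c.)

  #include <stdio.h>

  typedef struct Listener {
      int priority;
      void (*commit)(void);
      struct Listener *next;   /* list kept sorted by ascending priority */
  } Listener;

  static void dispatch_commit(void)
  {
      puts("dispatch listener (priority 0): switch to the next map");
  }

  static void core_commit_hook(void)
  {
      puts("core listener (priority 1): swap cur_map/next_map, free old");
  }

  int main(void)
  {
      Listener core = { 1, core_commit_hook, NULL };
      Listener dispatch = { 0, dispatch_commit, &core };

      /* Walking in ascending priority order guarantees the core hook
       * runs last, when no dispatch table still points into the old
       * map. */
      for (Listener *l = &dispatch; l != NULL; l = l->next) {
          l->commit();
      }
      return 0;
  }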
Thread overview: 68+ messages
2013-06-28 18:26 [Qemu-devel] [PATCH 00/30] Memory API changes for 1.6: RCU-protected address space dispatch Paolo Bonzini
2013-06-28 18:26 ` [Qemu-devel] [PATCH 01/30] memory: access FlatView from a local variable Paolo Bonzini
2013-06-28 20:01 ` Anthony Liguori
2013-06-28 18:26 ` [Qemu-devel] [PATCH 02/30] memory: use a new FlatView pointer on every topology update Paolo Bonzini
2013-06-28 20:02 ` Anthony Liguori
2013-06-28 18:26 ` [Qemu-devel] [PATCH 03/30] memory: add reference counting to FlatView Paolo Bonzini
2013-06-28 20:07 ` Anthony Liguori
2013-06-28 18:26 ` [Qemu-devel] [PATCH 04/30] add a header file for atomic operations Paolo Bonzini
2013-06-28 20:41 ` Anthony Liguori
2013-07-01 10:21 ` Paolo Bonzini
2013-07-01 13:00 ` Anthony Liguori
2013-07-01 13:04 ` Paolo Bonzini
2013-07-01 13:20 ` Anthony Liguori
2013-07-04 5:24 ` liu ping fan
2013-07-01 11:08 ` Peter Maydell
2013-07-03 2:24 ` liu ping fan
2013-07-03 5:59 ` Paolo Bonzini
2013-07-03 7:07 ` liu ping fan
2013-06-28 18:26 ` [Qemu-devel] [PATCH 05/30] exec: do not use qemu/tls.h Paolo Bonzini
2013-06-28 20:43 ` Anthony Liguori
2013-06-28 23:53 ` Ed Maste
2013-07-01 10:16 ` Paolo Bonzini
2013-06-29 10:55 ` Peter Maydell
2013-07-01 10:45 ` Paolo Bonzini
2013-07-01 11:05 ` Peter Maydell
2013-07-01 16:21 ` Paolo Bonzini
2013-07-01 16:26 ` Peter Maydell
2013-07-01 20:52 ` Paolo Bonzini
2013-07-01 21:34 ` Peter Maydell
2013-07-02 13:40 ` Andreas Färber
2013-07-02 14:06 ` Alexander Graf
2013-06-28 18:26 ` [Qemu-devel] [PATCH 06/30] qemu-thread: add TLS wrappers Paolo Bonzini
2013-06-28 18:26 ` [Qemu-devel] [PATCH 07/30] qemu-thread: add QemuEvent Paolo Bonzini
2013-06-28 18:26 ` [Qemu-devel] [PATCH 08/30] rcu: add rcu library Paolo Bonzini
2013-07-01 9:47 ` Jan Kiszka
2013-06-28 18:26 ` [Qemu-devel] [PATCH 09/30] qemu-thread: register threads with RCU Paolo Bonzini
2013-06-28 18:26 ` [Qemu-devel] [PATCH 10/30] rcu: add call_rcu Paolo Bonzini
2013-06-28 18:26 ` [Qemu-devel] [PATCH 11/30] rcu: add rcutorture Paolo Bonzini
2013-06-28 18:26 ` [Qemu-devel] [PATCH 12/30] rcu: allow nested calls to rcu_thread_offline/rcu_thread_online Paolo Bonzini
2013-06-28 18:26 ` [Qemu-devel] [PATCH 13/30] qemu-thread: report RCU quiescent states Paolo Bonzini
2013-06-28 18:26 ` [Qemu-devel] [PATCH 14/30] event loop: " Paolo Bonzini
2013-06-28 18:26 ` [Qemu-devel] [PATCH 15/30] cpus: " Paolo Bonzini
2013-06-28 18:26 ` [Qemu-devel] [PATCH 16/30] block: " Paolo Bonzini
2013-06-28 18:26 ` [Qemu-devel] [PATCH 17/30] migration: " Paolo Bonzini
2013-06-28 18:26 ` [Qemu-devel] [PATCH 18/30] memory: protect current_map by RCU Paolo Bonzini
2013-06-28 18:26 ` [Qemu-devel] [PATCH 19/30] memory: avoid ref/unref in memory_region_find Paolo Bonzini
2013-06-28 18:26 ` [Qemu-devel] [PATCH 20/30] exec: change well-known physical sections to macros Paolo Bonzini
2013-06-28 18:26 ` Paolo Bonzini [this message]
2013-07-02 14:41 ` [Qemu-devel] [PATCH 21/30] exec: separate current memory map from the one being built Jan Kiszka
2013-06-28 18:26 ` [Qemu-devel] [PATCH 22/30] memory: move MemoryListener declaration earlier Paolo Bonzini
2013-07-02 14:41 ` Jan Kiszka
2013-06-28 18:26 ` [Qemu-devel] [PATCH 23/30] exec: move listener from AddressSpaceDispatch to AddressSpace Paolo Bonzini
2013-07-02 14:41 ` Jan Kiszka
2013-06-28 18:26 ` [Qemu-devel] [PATCH 24/30] exec: separate current radix tree from the one being built Paolo Bonzini
2013-07-02 14:41 ` Jan Kiszka
2013-06-28 18:26 ` [Qemu-devel] [PATCH 25/30] exec: put memory map in AddressSpaceDispatch Paolo Bonzini
2013-07-02 14:42 ` Jan Kiszka
2013-07-02 15:08 ` Paolo Bonzini
2013-07-02 15:48 ` Jan Kiszka
2013-06-28 18:26 ` [Qemu-devel] [PATCH 26/30] exec: remove cur_map Paolo Bonzini
2013-06-28 18:26 ` [Qemu-devel] [PATCH 27/30] exec: change some APIs to take AddressSpaceDispatch Paolo Bonzini
2013-07-02 14:47 ` Jan Kiszka
2013-06-28 18:26 ` [Qemu-devel] [PATCH 28/30] exec: change iotlb " Paolo Bonzini
2013-07-02 10:00 ` Jan Kiszka
2013-06-28 18:26 ` [Qemu-devel] [PATCH 29/30] exec: add a reference to the region returned by address_space_translate Paolo Bonzini
2013-06-28 18:26 ` [Qemu-devel] [PATCH 30/30] exec: put address space dispatch under RCU critical section Paolo Bonzini
2013-06-28 19:38 ` Jan Kiszka
2013-07-01 11:48 ` Paolo Bonzini