From: <anthony.perard@citrix.com>
To: QEMU-devel <qemu-devel@nongnu.org>,
Anthony Liguori <anthony@codemonkey.ws>,
Alexander Graf <agraf@suse.de>
Cc: Anthony PERARD <anthony.perard@citrix.com>,
Xen Devel <xen-devel@lists.xensource.com>,
Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Subject: [Qemu-devel] [PATCH V15 12/18] Introduce qemu_put_ram_ptr
Date: Thu, 5 May 2011 11:58:27 +0100 [thread overview]
Message-ID: <1304593113-10689-13-git-send-email-anthony.perard@citrix.com> (raw)
In-Reply-To: <1304593113-10689-1-git-send-email-anthony.perard@citrix.com>
From: Anthony PERARD <anthony.perard@citrix.com>
This function unlocks a ram_ptr given by qemu_get_ram_ptr. After
a call to qemu_put_ram_ptr, the pointer may be unmapped from QEMU when
used with Xen.
Signed-off-by: Anthony PERARD <anthony.perard@citrix.com>
Acked-by: Alexander Graf <agraf@suse.de>
---
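Note (illustration only, mirroring the hunks below): the intended calling
pattern is to pair each qemu_get_ram_ptr() with a qemu_put_ram_ptr() once
the host pointer is no longer needed, e.g.:

    void *ptr;

    /* Map guest RAM, copy into it, then release the mapping so that,
     * under Xen, the mapcache is free to unmap it again. */
    ptr = qemu_get_ram_ptr(addr1);
    memcpy(ptr, buf, l);
    qemu_put_ram_ptr(ptr);

On non-Xen builds qemu_put_ram_ptr() is effectively a no-op apart from the
trace point, so existing callers are unaffected.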
cpu-common.h | 1 +
exec.c | 38 +++++++++++++++++++++++++++++++++++---
trace-events | 3 +++
xen-mapcache.c | 33 +++++++++++++++++++++++++++++++++
4 files changed, 72 insertions(+), 3 deletions(-)
diff --git a/cpu-common.h b/cpu-common.h
index 96c02ae..1d4fdbf 100644
--- a/cpu-common.h
+++ b/cpu-common.h
@@ -56,6 +56,7 @@ void *qemu_get_ram_ptr(ram_addr_t addr);
/* Same but slower, to use for migration, where the order of
* RAMBlocks must not change. */
void *qemu_safe_ram_ptr(ram_addr_t addr);
+void qemu_put_ram_ptr(void *addr);
/* This should not be used by devices. */
int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr);
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr);
diff --git a/exec.c b/exec.c
index 19707c5..063d2f6 100644
--- a/exec.c
+++ b/exec.c
@@ -3100,6 +3100,27 @@ void *qemu_safe_ram_ptr(ram_addr_t addr)
return NULL;
}
+void qemu_put_ram_ptr(void *addr)
+{
+ trace_qemu_put_ram_ptr(addr);
+
+ if (xen_mapcache_enabled()) {
+ RAMBlock *block;
+
+ QLIST_FOREACH(block, &ram_list.blocks, next) {
+ if (addr == block->host) {
+ break;
+ }
+ }
+ if (block && block->host) {
+ xen_unmap_block(block->host, block->length);
+ block->host = NULL;
+ } else {
+ qemu_map_cache_unlock(addr);
+ }
+ }
+}
+
int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
RAMBlock *block;
@@ -3815,6 +3836,7 @@ void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
cpu_physical_memory_set_dirty_flags(
addr1, (0xff & ~CODE_DIRTY_FLAG));
}
+ qemu_put_ram_ptr(ptr);
}
} else {
if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
@@ -3842,9 +3864,9 @@ void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
}
} else {
/* RAM case */
- ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
- (addr & ~TARGET_PAGE_MASK);
- memcpy(buf, ptr, l);
+ ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
+ memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
+ qemu_put_ram_ptr(ptr);
}
}
len -= l;
@@ -3885,6 +3907,7 @@ void cpu_physical_memory_write_rom(target_phys_addr_t addr,
/* ROM/RAM case */
ptr = qemu_get_ram_ptr(addr1);
memcpy(ptr, buf, l);
+ qemu_put_ram_ptr(ptr);
}
len -= l;
buf += l;
@@ -4026,6 +4049,15 @@ void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
access_len -= l;
}
}
+ if (xen_mapcache_enabled()) {
+ uint8_t *buffer1 = buffer;
+ uint8_t *end_buffer = buffer + len;
+
+ while (buffer1 < end_buffer) {
+ qemu_put_ram_ptr(buffer1);
+ buffer1 += TARGET_PAGE_SIZE;
+ }
+ }
return;
}
if (is_write) {
diff --git a/trace-events b/trace-events
index d703347..a00b63c 100644
--- a/trace-events
+++ b/trace-events
@@ -371,3 +371,6 @@ disable qemu_remap_bucket(uint64_t index) "index %#"PRIx64""
disable qemu_map_cache_return(void* ptr) "%p"
disable xen_map_block(uint64_t phys_addr, uint64_t size) "%#"PRIx64", size %#"PRIx64""
disable xen_unmap_block(void* addr, unsigned long size) "%p, size %#lx"
+
+# exec.c
+disable qemu_put_ram_ptr(void* addr) "%p"
diff --git a/xen-mapcache.c b/xen-mapcache.c
index 2ca18ce..349cc62 100644
--- a/xen-mapcache.c
+++ b/xen-mapcache.c
@@ -196,6 +196,39 @@ uint8_t *qemu_map_cache(target_phys_addr_t phys_addr, target_phys_addr_t size, u
return mapcache->last_address_vaddr + address_offset;
}
+void qemu_map_cache_unlock(void *buffer)
+{
+ MapCacheEntry *entry = NULL, *pentry = NULL;
+ MapCacheRev *reventry;
+ target_phys_addr_t paddr_index;
+ int found = 0;
+
+ QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
+ if (reventry->vaddr_req == buffer) {
+ paddr_index = reventry->paddr_index;
+ found = 1;
+ break;
+ }
+ }
+ if (!found) {
+ return;
+ }
+ QTAILQ_REMOVE(&mapcache->locked_entries, reventry, next);
+ qemu_free(reventry);
+
+ entry = &mapcache->entry[paddr_index % mapcache->nr_buckets];
+ while (entry && entry->paddr_index != paddr_index) {
+ pentry = entry;
+ entry = entry->next;
+ }
+ if (!entry) {
+ return;
+ }
+ if (entry->lock > 0) {
+ entry->lock--;
+ }
+}
+
ram_addr_t qemu_ram_addr_from_mapcache(void *ptr)
{
MapCacheRev *reventry;
--
1.7.2.5
Thread overview:
2011-05-05 10:58 [Qemu-devel] [PATCH V15 00/18] Xen device model support anthony.perard
2011-05-05 10:58 ` [Qemu-devel] [PATCH V15 01/18] xen: Replace some tab-indents with spaces (clean-up) anthony.perard
2011-05-05 10:58 ` [Qemu-devel] [PATCH V15 02/18] xen: Make Xen build once anthony.perard
2011-05-05 10:58 ` [Qemu-devel] [PATCH V15 03/18] xen: Support new libxc calls from xen unstable anthony.perard
2011-05-05 10:58 ` [Qemu-devel] [PATCH V15 04/18] xen: Add initialisation of Xen anthony.perard
2011-05-05 10:58 ` [Qemu-devel] [PATCH V15 05/18] pc_memory_init: Move memory calculation to the caller anthony.perard
2011-05-05 10:58 ` [Qemu-devel] [PATCH V15 06/18] xen: Add xenfv machine anthony.perard
2011-05-05 10:58 ` [Qemu-devel] [PATCH V15 07/18] pc, Disable vmport initialisation with Xen anthony.perard
2011-05-05 10:58 ` [Qemu-devel] [PATCH V15 08/18] piix_pci: Introduces Xen specific call for irq anthony.perard
2011-05-05 10:58 ` [Qemu-devel] [PATCH V15 09/18] xen: Introduce Xen Interrupt Controller anthony.perard
2011-05-05 10:58 ` [Qemu-devel] [PATCH V15 10/18] xen: Introduce the Xen mapcache anthony.perard
2011-05-05 10:58 ` [Qemu-devel] [PATCH V15 11/18] xen: Adds a cap to the number of map cache entries anthony.perard
2011-05-05 10:58 ` anthony.perard [this message]
2011-05-05 10:58 ` [Qemu-devel] [PATCH V15 13/18] configure: Always use 64bits target physical addresses with xen enabled anthony.perard
2011-05-05 10:58 ` [Qemu-devel] [PATCH V15 14/18] pci: Use of qemu_put_ram_ptr in pci_add_option_rom anthony.perard
2011-05-05 10:58 ` [Qemu-devel] [PATCH V15 15/18] vl.c: Introduce getter for shutdown_requested and reset_requested anthony.perard
2011-05-05 10:58 ` [Qemu-devel] [PATCH V15 16/18] xen: Initialize event channels and io rings anthony.perard
2011-05-05 10:58 ` [Qemu-devel] [PATCH V15 17/18] xen: Set running state in xenstore anthony.perard
2011-05-05 10:58 ` [Qemu-devel] [PATCH V15 18/18] xen: Add Xen hypercall for sleep state in the cmos_s3 callback anthony.perard
2011-05-09 13:58 ` [Qemu-devel] [PATCH V15 00/18] Xen device model support Alexander Graf
2011-05-09 14:28 ` Anthony Liguori