From: anthony.perard@citrix.com
To: QEMU-devel <qemu-devel@nongnu.org>
Cc: Anthony PERARD <anthony.perard@citrix.com>,
	Xen Devel <xen-devel@lists.xensource.com>,
	Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Subject: [Qemu-devel] [PATCH V9 12/16] Introduce qemu_ram_ptr_unlock.
Date: Tue, 25 Jan 2011 14:29:16 +0000
Message-ID: <1295965760-31508-13-git-send-email-anthony.perard@citrix.com>
In-Reply-To: <1295965760-31508-1-git-send-email-anthony.perard@citrix.com>

From: Anthony PERARD <anthony.perard@citrix.com>

This function unlocks a ram_ptr obtained with qemu_get_ram_ptr. After a
call to qemu_ram_ptr_unlock, the pointer may be unmapped from QEMU's
address space when running on Xen.
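
For illustration, a minimal sketch of the intended call pattern, assuming
a caller that copies data into guest RAM (device_write and its arguments
are hypothetical; only qemu_get_ram_ptr and qemu_ram_ptr_unlock come from
this series):

  #include <stdint.h>
  #include <string.h>
  #include "cpu-common.h"   /* qemu_get_ram_ptr(), qemu_ram_ptr_unlock() */

  /* Copy 'len' bytes into guest RAM at 'addr', then release the mapping so
   * that, when running on Xen, the mapcache entry may be unmapped again. */
  static void device_write(ram_addr_t addr, const uint8_t *buf, size_t len)
  {
      void *ptr = qemu_get_ram_ptr(addr);  /* may lock a mapcache entry on Xen */

      memcpy(ptr, buf, len);
      qemu_ram_ptr_unlock(ptr);            /* no-op when Xen is not in use */
  }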

Signed-off-by: Anthony PERARD <anthony.perard@citrix.com>
---
 cpu-common.h   |    1 +
 exec.c         |   10 ++++++++++
 xen-mapcache.c |   34 ++++++++++++++++++++++++++++++++++
 3 files changed, 45 insertions(+), 0 deletions(-)

diff --git a/cpu-common.h b/cpu-common.h
index 6d4a898..8fa6d80 100644
--- a/cpu-common.h
+++ b/cpu-common.h
@@ -55,6 +55,7 @@ void *qemu_get_ram_ptr(ram_addr_t addr);
 /* Same but slower, to use for migration, where the order of
  * RAMBlocks must not change. */
 void *qemu_safe_ram_ptr(ram_addr_t addr);
+void qemu_ram_ptr_unlock(void *addr);
 /* This should not be used by devices.  */
 int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr);
 ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr);
diff --git a/exec.c b/exec.c
index 3b137dc..8acf2a9 100644
--- a/exec.c
+++ b/exec.c
@@ -2977,6 +2977,13 @@ void *qemu_safe_ram_ptr(ram_addr_t addr)
     return NULL;
 }
 
+void qemu_ram_ptr_unlock(void *addr)
+{
+    if (xen_mapcache_enabled()) {
+        qemu_map_cache_unlock(addr);
+    }
+}
+
 int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
 {
     RAMBlock *block;
@@ -3692,6 +3699,7 @@ void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                     cpu_physical_memory_set_dirty_flags(
                         addr1, (0xff & ~CODE_DIRTY_FLAG));
                 }
+                qemu_ram_ptr_unlock(ptr);
             }
         } else {
             if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
@@ -3722,6 +3730,7 @@ void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
                     (addr & ~TARGET_PAGE_MASK);
                 memcpy(buf, ptr, l);
+                qemu_ram_ptr_unlock(ptr);
             }
         }
         len -= l;
@@ -3762,6 +3771,7 @@ void cpu_physical_memory_write_rom(target_phys_addr_t addr,
             /* ROM/RAM case */
             ptr = qemu_get_ram_ptr(addr1);
             memcpy(ptr, buf, l);
+            qemu_ram_ptr_unlock(ptr);
         }
         len -= l;
         buf += l;
diff --git a/xen-mapcache.c b/xen-mapcache.c
index 3e1cca9..23a23f9 100644
--- a/xen-mapcache.c
+++ b/xen-mapcache.c
@@ -187,6 +187,40 @@ uint8_t *qemu_map_cache(target_phys_addr_t phys_addr, target_phys_addr_t size, u
     return mapcache->last_address_vaddr + address_offset;
 }
 
+void qemu_map_cache_unlock(void *buffer)
+{
+    MapCacheEntry *entry = NULL, *pentry = NULL;
+    MapCacheRev *reventry;
+    target_phys_addr_t paddr_index;
+    int found = 0;
+
+    QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
+        if (reventry->vaddr_req == buffer) {
+            paddr_index = reventry->paddr_index;
+            found = 1;
+            break;
+        }
+    }
+    if (!found) {
+        return;
+    }
+    QTAILQ_REMOVE(&mapcache->locked_entries, reventry, next);
+    qemu_free(reventry);
+
+    entry = &mapcache->entry[paddr_index % mapcache->nr_buckets];
+    while (entry && entry->paddr_index != paddr_index) {
+        pentry = entry;
+        entry = entry->next;
+    }
+    if (!entry) {
+        return;
+    }
+    entry->lock--;
+    if (entry->lock < 0) {
+        entry->lock = 0;
+    }
+}
+
 ram_addr_t qemu_ram_addr_from_mapcache(void *ptr)
 {
     MapCacheRev *reventry;
-- 
1.7.1

Thread overview: 42+ messages
2011-01-25 14:29 [Qemu-devel] [PATCH V9 00/16] Xen device model support anthony.perard
2011-01-25 14:29 ` [Qemu-devel] [PATCH V9 01/16] xen: Replace some tab-indents with spaces (clean-up) anthony.perard
2011-01-25 14:29 ` [Qemu-devel] [PATCH V9 02/16] xen: Make xen build only on x86 target anthony.perard
2011-01-25 14:29 ` [Qemu-devel] [PATCH V9 03/16] xen: Add a generic layer for xc calls anthony.perard
2011-01-26 22:49   ` Anthony Liguori
2011-01-28 15:09     ` [Xen-devel] " Anthony PERARD
2011-01-28 15:12       ` Alexander Graf
2011-01-25 14:29 ` [Qemu-devel] [PATCH V9 04/16] xen: Support new libxc calls from xen unstable anthony.perard
2011-01-26 22:53   ` Anthony Liguori
2011-01-27 12:03     ` Stefano Stabellini
2011-01-27 12:16       ` [Xen-devel] " Keir Fraser
2011-01-27 12:21         ` Stefano Stabellini
2011-01-27 17:24     ` Anthony PERARD
2011-01-25 14:29 ` [Qemu-devel] [PATCH V9 05/16] xen: Add xen_machine_fv anthony.perard
2011-01-26 22:56   ` Anthony Liguori
2011-01-27 12:04     ` Stefano Stabellini
2011-01-25 14:29 ` [Qemu-devel] [PATCH V9 06/16] xen: Add initialisation of Xen anthony.perard
2011-01-26 22:57   ` Anthony Liguori
2011-01-25 14:29 ` [Qemu-devel] [PATCH V9 07/16] xen: Add the Xen platform pci device anthony.perard
2011-01-26 23:01   ` Anthony Liguori
2011-01-25 14:29 ` [Qemu-devel] [PATCH V9 08/16] piix_pci: Introduces Xen specific call for irq anthony.perard
2011-01-25 14:29 ` [Qemu-devel] [PATCH V9 09/16] xen: add a 8259 Interrupt Controller anthony.perard
2011-01-26 23:04   ` Anthony Liguori
2011-01-27 17:19     ` Anthony PERARD
2011-01-25 14:29 ` [Qemu-devel] [PATCH V9 10/16] xen: Introduce the Xen mapcache anthony.perard
2011-01-26 23:07   ` Anthony Liguori
2011-01-27 12:04     ` Stefano Stabellini
2011-01-31 16:30     ` Stefano Stabellini
2011-01-25 14:29 ` [Qemu-devel] [PATCH V9 11/16] configure: Always use 64bits target physical addresses with xen enabled anthony.perard
2011-01-25 14:29 ` anthony.perard [this message]
2011-01-26 23:10   ` [Qemu-devel] [PATCH V9 12/16] Introduce qemu_ram_ptr_unlock Anthony Liguori
2011-01-25 14:29 ` [Qemu-devel] [PATCH V9 13/16] vl.c: Introduce getter for shutdown_requested and reset_requested anthony.perard
2011-01-25 14:29 ` [Qemu-devel] [PATCH V9 14/16] xen: Initialize event channels and io rings anthony.perard
2011-01-25 14:29 ` [Qemu-devel] [PATCH V9 15/16] xen: Set running state in xenstore anthony.perard
2011-01-26 23:12   ` Anthony Liguori
2011-01-25 14:29 ` [Qemu-devel] [PATCH V9 16/16] acpi-piix4: Add Xen hypercall for sleep state anthony.perard
2011-01-26  2:49   ` Isaku Yamahata
2011-01-26 13:47     ` Anthony PERARD
2011-01-26 14:36     ` [Qemu-devel] Re: [PATCH V9 16/16] xen: Add Xen hypercall for sleep state in the cmos_s3 callback anthony.perard
2011-01-26 23:14       ` Anthony Liguori
2011-01-26 23:11   ` [Qemu-devel] [PATCH V9 16/16] acpi-piix4: Add Xen hypercall for sleep state Anthony Liguori
2011-01-25 14:41 ` [Qemu-devel] Re: [PATCH V9 00/16] Xen device model support Anthony PERARD
