public inbox for kvm@vger.kernel.org
 help / color / mirror / Atom feed
From: Glauber Costa <glommer@redhat.com>
To: kvm@vger.kernel.org
Cc: jes@sgi.com, avi@qumranet.com, aliguori@us.ibm.com
Subject: [PATCH 5/9] move kvm_cpu_register_memory_area into qemu's
Date: Fri, 12 Sep 2008 12:10:46 -0300	[thread overview]
Message-ID: <1221232250-9653-6-git-send-email-glommer@redhat.com> (raw)
In-Reply-To: <1221232250-9653-1-git-send-email-glommer@redhat.com>

Turn the explicit calls to kvm_cpu_register_memory_area()
into an empty function. Provide a __kvm_cpu_register_memory_area()
that is called from within cpu_register_memory_area().
To avoid registering mmio regions to the hypervisor, since we depend on
them faulting, we keep track of what regions are mmio regions too.

This is to be bisection friendly. Direct calls are to be removed
in a later commit.

Signed-off-by: Glauber Costa <glommer@redhat.com>
---
 libkvm/libkvm.c |   84 ++++++++++++++++++++++++++++++++++++++++++++++++++++--
 libkvm/libkvm.h |    6 ++++
 qemu/exec.c     |    3 ++
 qemu/qemu-kvm.c |   28 +++++++++++++++---
 4 files changed, 112 insertions(+), 9 deletions(-)

diff --git a/libkvm/libkvm.c b/libkvm/libkvm.c
index a5e20bb..5df201e 100644
--- a/libkvm/libkvm.c
+++ b/libkvm/libkvm.c
@@ -62,14 +62,22 @@ struct slot_info {
 	unsigned flags;
 };
 
+struct mmio_slot_info {
+    uint64_t phys_addr;
+    unsigned int len;
+};
+
 struct slot_info slots[KVM_MAX_NUM_MEM_REGIONS];
+struct mmio_slot_info mmio_slots[KVM_MAX_NUM_MEM_REGIONS];
 
 void init_slots(void)
 {
 	int i;
 
-	for (i = 0; i < KVM_MAX_NUM_MEM_REGIONS; ++i)
+	for (i = 0; i < KVM_MAX_NUM_MEM_REGIONS; ++i) {
 		slots[i].len = 0;
+		mmio_slots[i].len = 0;
+	}
 }
 
 int get_free_slot(kvm_context_t kvm)
@@ -99,6 +107,16 @@ int get_free_slot(kvm_context_t kvm)
 	return -1;
 }
 
+int get_free_mmio_slot(kvm_context_t kvm)
+{
+
+       unsigned int i;
+       for (i = 0; i < KVM_MAX_NUM_MEM_REGIONS; ++i)
+		if (!mmio_slots[i].len)
+			return i;
+       return -1;
+}
+
 void register_slot(int slot, unsigned long phys_addr, unsigned long len,
 		   unsigned long userspace_addr, unsigned flags)
 {
@@ -149,14 +167,47 @@ int get_container_slot(uint64_t phys_addr, unsigned long size)
 	return -1;
 }
 
+int get_container_mmio_slot(kvm_context_t kvm, uint64_t phys_addr, unsigned long size)
+{
+       int i;
+
+       for (i = 0; i < KVM_MAX_NUM_MEM_REGIONS ; ++i)
+               if (mmio_slots[i].len && mmio_slots[i].phys_addr <= phys_addr &&
+                   (mmio_slots[i].phys_addr + mmio_slots[i].len) >= phys_addr + size)
+                       return i;
+       return -1;
+}
+
+int kvm_register_mmio_slot(kvm_context_t kvm, uint64_t phys_addr, unsigned int size)
+{
+       int slot = get_free_mmio_slot(kvm);
+
+       if (slot == -1)
+               goto out;
+
+#ifdef DEBUG_MEMREG
+	printf("Registering mmio region %llx (%lx)\n", phys_addr, size);
+#endif
+       mmio_slots[slot].phys_addr = phys_addr;
+       mmio_slots[slot].len = size;
+out:
+       return slot;
+}
+
 int kvm_is_containing_region(kvm_context_t kvm, unsigned long phys_addr, unsigned long size)
 {
 	int slot = get_container_slot(phys_addr, size);
-	if (slot == -1)
-		return 0;
-	return 1;
+
+	if (slot != -1)
+		return 1;
+	slot = get_container_mmio_slot(kvm, phys_addr, size);
+	if (slot != -1)
+		return 1;
+
+	return 0;
 }
 
+
 /* 
  * dirty pages logging control 
  */
@@ -509,6 +560,31 @@ void kvm_destroy_phys_mem(kvm_context_t kvm, unsigned long phys_start,
 	free_slot(memory.slot);
 }
 
+void kvm_unregister_memory_area(kvm_context_t kvm, uint64_t phys_addr, unsigned long size)
+{
+
+       int slot = get_container_slot(phys_addr, size);
+
+       if (slot != -1) {
+#ifdef DEBUG_MEMREG
+               printf("Unregistering memory region %llx (%lx)\n", phys_addr, size);
+#endif
+               kvm_destroy_phys_mem(kvm, phys_addr, size);
+               return;
+       }
+
+       slot = get_container_mmio_slot(kvm, phys_addr, size);
+       if (slot != -1) {
+#ifdef DEBUG_MEMREG
+               printf("Unregistering mmio region %llx (%lx)\n", phys_addr, size);
+#endif
+               kvm_unregister_coalesced_mmio(kvm, phys_addr, size);
+               mmio_slots[slot].len = 0;
+       }
+
+       return;
+}
+
 static int kvm_get_map(kvm_context_t kvm, int ioctl_num, int slot, void *buf)
 {
 	int r;
diff --git a/libkvm/libkvm.h b/libkvm/libkvm.h
index 1e89993..f9cdb9c 100644
--- a/libkvm/libkvm.h
+++ b/libkvm/libkvm.h
@@ -454,6 +454,10 @@ void *kvm_create_phys_mem(kvm_context_t, unsigned long phys_start,
 			  unsigned long len, int log, int writable);
 void kvm_destroy_phys_mem(kvm_context_t, unsigned long phys_start, 
 			  unsigned long len);
+
+void kvm_unregister_memory_area(kvm_context_t, uint64_t phys_start,
+				 unsigned long len);
+
 int kvm_is_containing_region(kvm_context_t kvm, unsigned long phys_start, unsigned long size);
 int kvm_is_allocated_mem(kvm_context_t kvm, unsigned long phys_start,
 			 unsigned long len);
@@ -467,6 +471,8 @@ int kvm_get_dirty_pages_range(kvm_context_t kvm, unsigned long phys_addr,
 			      unsigned long end_addr, void *buf, void*opaque,
 			      int (*cb)(unsigned long start, unsigned long len,
 					void*bitmap, void *opaque));
+int kvm_register_mmio_slot(kvm_context_t kvm,
+				uint64_t addr, uint32_t size);
 int kvm_register_coalesced_mmio(kvm_context_t kvm,
 				uint64_t addr, uint32_t size);
 int kvm_unregister_coalesced_mmio(kvm_context_t kvm,
diff --git a/qemu/exec.c b/qemu/exec.c
index bf037f0..f0e84c8 100644
--- a/qemu/exec.c
+++ b/qemu/exec.c
@@ -2203,6 +2203,9 @@ void cpu_register_physical_memory(target_phys_addr_t start_addr,
         kqemu_set_phys_mem(start_addr, size, phys_offset);
     }
 #endif
+
+    __kvm_cpu_register_physical_memory(start_addr, size, phys_offset);
+
     size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
     end_addr = start_addr + (target_phys_addr_t)size;
     for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
diff --git a/qemu/qemu-kvm.c b/qemu/qemu-kvm.c
index e0b114a..444f79e 100644
--- a/qemu/qemu-kvm.c
+++ b/qemu/qemu-kvm.c
@@ -775,12 +775,34 @@ void kvm_cpu_register_physical_memory(target_phys_addr_t start_addr,
                                       unsigned long size,
                                       unsigned long phys_offset)
 {
-    int r = 0;
+}
 
+void __kvm_cpu_register_physical_memory(target_phys_addr_t start_addr,
+                                      unsigned long size,
+                                      unsigned long phys_offset)
+{
+    int r = 0;
+    unsigned long area_flags = phys_offset & ~TARGET_PAGE_MASK;
     phys_offset &= ~IO_MEM_ROM;
+
+
+    if (area_flags == IO_MEM_UNASSIGNED) {
+        kvm_unregister_memory_area(kvm_context, start_addr, size);
+        return;
+    }
+
     r = kvm_is_containing_region(kvm_context, start_addr, size);
     if (r)
         return;
+
+    if (area_flags >= TLB_MMIO) {
+        r = kvm_register_mmio_slot(kvm_context, start_addr, size);
+        if (r < 0) {
+            printf("No free mmio slots\n");
+            exit(1);
+        }
+        return;
+    }
     r = kvm_is_intersecting_mem(kvm_context, start_addr);
     if (r) {
         printf("Ignoring intersecting memory %llx (%lx)\n", start_addr, size);
@@ -788,10 +810,6 @@ void kvm_cpu_register_physical_memory(target_phys_addr_t start_addr,
         r = kvm_register_phys_mem(kvm_context, start_addr,
                                   phys_ram_base + phys_offset,
                                   size, 0);
-    if (r < 0) {
-        printf("kvm_cpu_register_physical_memory: failed\n");
-        exit(1);
-    }
     return;
 }
 
-- 
1.5.5.1


  parent reply	other threads:[~2008-09-12 15:32 UTC|newest]

Thread overview: 19+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2008-09-12 15:10 [PATCH 0/9] Simplify memory registration Glauber Costa
2008-09-12 15:10 ` [PATCH 1/9] Don't separate registrations with IO_MEM_ROM set Glauber Costa
2008-09-12 15:47   ` Jan Kiszka
2008-09-12 16:04     ` Glauber Costa
2008-09-12 16:26       ` Jan Kiszka
2008-09-12 18:47         ` Glauber Costa
2008-09-13  6:26           ` Jan Kiszka
2008-09-15 12:44             ` Glauber Costa
2008-09-15 13:08               ` Jan Kiszka
2008-09-15 13:15                 ` Glauber Costa
2008-09-19 23:12               ` Avi Kivity
2008-09-12 15:10 ` [PATCH 2/9] do not use mem_hole anymore Glauber Costa
2008-09-12 15:10 ` [PATCH 3/9] allow intersecting region to be on the boundary Glauber Costa
2008-09-12 15:10 ` [PATCH 4/9] substitute is_allocated_mem with more general is_containing_region Glauber Costa
2008-09-12 15:10 ` Glauber Costa [this message]
2008-09-12 15:10 ` [PATCH 6/9] cleanup kvm memory registration Glauber Costa
2008-09-12 15:10 ` [PATCH 7/9] add debuging facilities to memory registration at libkvm Glauber Costa
2008-09-12 15:10 ` [PATCH 8/9] coalesce mmio regions without an explicit call Glauber Costa
2008-09-12 15:10 ` [PATCH 9/9] remove explicit calls to kvm_qemu_register_coalesced_mmio Glauber Costa

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1221232250-9653-6-git-send-email-glommer@redhat.com \
    --to=glommer@redhat.com \
    --cc=aliguori@us.ibm.com \
    --cc=avi@qumranet.com \
    --cc=jes@sgi.com \
    --cc=kvm@vger.kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox