public inbox for kvm@vger.kernel.org
 help / color / mirror / Atom feed
From: Glauber Costa <gcosta@redhat.com>
To: kvm@vger.kernel.org
Cc: avi@qumranet.com, aliguori@us.ibm.com
Subject: [PATCH 6/9] move kvm_cpu_register_memory_area into qemu's
Date: Tue, 12 Aug 2008 21:48:06 -0300	[thread overview]
Message-ID: <1218588489-17182-7-git-send-email-gcosta@redhat.com> (raw)
In-Reply-To: <1218588489-17182-6-git-send-email-gcosta@redhat.com>

Turn the explicit calls to kvm_cpu_register_memory_area()
into an empty function. Provide a __kvm_cpu_register_memory_area()
that is called from within cpu_register_memory_area().
To avoid registering mmio regions with the hypervisor — since we depend on
them faulting — we also keep track of which regions are mmio regions.

This is to be bisection friendly. Direct calls are to be removed
in a later commit.

Signed-off-by: Glauber Costa <gcosta@redhat.com>
---
 libkvm/libkvm.c |   84 ++++++++++++++++++++++++++++++++++++++++++++++++++++--
 libkvm/libkvm.h |    6 ++++
 qemu/exec.c     |    3 ++
 qemu/qemu-kvm.c |   22 ++++++++++++++
 4 files changed, 111 insertions(+), 4 deletions(-)

diff --git a/libkvm/libkvm.c b/libkvm/libkvm.c
index c885dee..d62cb2a 100644
--- a/libkvm/libkvm.c
+++ b/libkvm/libkvm.c
@@ -65,14 +65,22 @@ struct slot_info {
 	unsigned flags;
 };
 
+struct mmio_slot_info {
+    uint64_t phys_addr;
+    unsigned int len;
+};
+
 struct slot_info slots[KVM_MAX_NUM_MEM_REGIONS];
+struct mmio_slot_info mmio_slots[KVM_MAX_NUM_MEM_REGIONS];
 
 void init_slots(void)
 {
 	int i;
 
-	for (i = 0; i < KVM_MAX_NUM_MEM_REGIONS; ++i)
+	for (i = 0; i < KVM_MAX_NUM_MEM_REGIONS; ++i) {
 		slots[i].len = 0;
+		mmio_slots[i].len = 0;
+	}
 }
 
 int get_free_slot(kvm_context_t kvm)
@@ -102,6 +110,16 @@ int get_free_slot(kvm_context_t kvm)
 	return -1;
 }
 
+int get_free_mmio_slot(kvm_context_t kvm)
+{
+
+       unsigned int i;
+       for (i = 0; i < KVM_MAX_NUM_MEM_REGIONS; ++i)
+		if (!mmio_slots[i].len)
+			return i;
+       return -1;
+}
+
 void register_slot(int slot, unsigned long phys_addr, unsigned long len,
 		   int user_alloc, unsigned long userspace_addr, unsigned flags)
 {
@@ -153,14 +171,47 @@ int get_container_slot(uint64_t phys_addr, unsigned long size)
 	return -1;
 }
 
+int get_container_mmio_slot(kvm_context_t kvm, uint64_t phys_addr, unsigned long size)
+{
+       int i;
+
+       for (i = 0; i < KVM_MAX_NUM_MEM_REGIONS ; ++i)
+               if (mmio_slots[i].len && mmio_slots[i].phys_addr <= phys_addr &&
+                   (mmio_slots[i].phys_addr + mmio_slots[i].len) >= phys_addr + size)
+                       return i;
+       return -1;
+}
+
+int kvm_register_mmio_slot(kvm_context_t kvm, uint64_t phys_addr, unsigned int size)
+{
+       int slot = get_free_mmio_slot(kvm);
+
+       if (slot == -1)
+               goto out;
+
+#ifdef DEBUG_MEMREG
+	printf("Registering mmio region %llx (%lx)\n", phys_addr, size);
+#endif
+       mmio_slots[slot].phys_addr = phys_addr;
+       mmio_slots[slot].len = size;
+out:
+       return slot;
+}
+
 int kvm_is_containing_region(kvm_context_t kvm, unsigned long phys_addr, unsigned long size)
 {
 	int slot = get_container_slot(phys_addr, size);
-	if (slot == -1)
-		return 0;
-	return 1;
+
+	if (slot != -1)
+		return 1;
+	slot = get_container_mmio_slot(kvm, phys_addr, size);
+	if (slot != -1)
+		return 1;
+
+	return 0;
 }
 
+
 /* 
  * dirty pages logging control 
  */
@@ -576,6 +627,31 @@ void kvm_destroy_phys_mem(kvm_context_t kvm, unsigned long phys_start,
 		kvm_create_kernel_phys_mem(kvm, phys_start, 0, 0, 0);
 }
 
+void kvm_unregister_memory_area(kvm_context_t kvm, uint64_t phys_addr, unsigned long size)
+{
+       
+       int slot = get_container_slot(phys_addr, size);
+
+       if (slot != -1) {
+#ifdef DEBUG_MEMREG
+               printf("Unregistering memory region %llx (%lx)\n", phys_addr, size);
+#endif
+               kvm_destroy_phys_mem(kvm, phys_addr, size);
+               return;
+       }
+
+       slot = get_container_mmio_slot(kvm, phys_addr, size);
+       if (slot != -1) {
+#ifdef DEBUG_MEMREG
+               printf("Unregistering mmio region %llx (%lx)\n", phys_addr, size);
+#endif
+               kvm_unregister_coalesced_mmio(kvm, phys_addr, size);
+               mmio_slots[slot].len = 0;
+       }
+
+       return;
+}
+
 static int kvm_get_map(kvm_context_t kvm, int ioctl_num, int slot, void *buf)
 {
 	int r;
diff --git a/libkvm/libkvm.h b/libkvm/libkvm.h
index d762323..ceadc45 100644
--- a/libkvm/libkvm.h
+++ b/libkvm/libkvm.h
@@ -454,6 +454,10 @@ void *kvm_create_phys_mem(kvm_context_t, unsigned long phys_start,
 			  unsigned long len, int log, int writable);
 void kvm_destroy_phys_mem(kvm_context_t, unsigned long phys_start, 
 			  unsigned long len);
+
+void kvm_unregister_memory_area(kvm_context_t, uint64_t phys_start, 
+			  	unsigned long len);
+
 int kvm_is_containing_region(kvm_context_t kvm, unsigned long phys_start, unsigned long size);
 int kvm_is_allocated_mem(kvm_context_t kvm, unsigned long phys_start,
 			 unsigned long len);
@@ -467,6 +471,8 @@ int kvm_get_dirty_pages_range(kvm_context_t kvm, unsigned long phys_addr,
 			      unsigned long end_addr, void *buf, void*opaque,
 			      int (*cb)(unsigned long start, unsigned long len,
 					void*bitmap, void *opaque));
+int kvm_register_mmio_slot(kvm_context_t kvm,
+				uint64_t addr, uint32_t size);
 int kvm_register_coalesced_mmio(kvm_context_t kvm,
 				uint64_t addr, uint32_t size);
 int kvm_unregister_coalesced_mmio(kvm_context_t kvm,
diff --git a/qemu/exec.c b/qemu/exec.c
index 7a68062..14c3852 100644
--- a/qemu/exec.c
+++ b/qemu/exec.c
@@ -2196,6 +2196,9 @@ void cpu_register_physical_memory(target_phys_addr_t start_addr,
         kqemu_set_phys_mem(start_addr, size, phys_offset);
     }
 #endif
+
+    __kvm_cpu_register_physical_memory(start_addr, size, phys_offset);
+
     size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
     end_addr = start_addr + (target_phys_addr_t)size;
     for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
diff --git a/qemu/qemu-kvm.c b/qemu/qemu-kvm.c
index bfbaacc..225fbe6 100644
--- a/qemu/qemu-kvm.c
+++ b/qemu/qemu-kvm.c
@@ -776,16 +776,38 @@ void kvm_cpu_register_physical_memory(target_phys_addr_t start_addr,
                                       unsigned long size,
                                       unsigned long phys_offset)
 {
+}
+
+void __kvm_cpu_register_physical_memory(target_phys_addr_t start_addr,
+                                      unsigned long size,
+                                      unsigned long phys_offset)
+{
 #ifdef KVM_CAP_USER_MEMORY
     int r = 0;
 
 
     r = kvm_check_extension(kvm_context, KVM_CAP_USER_MEMORY);
     if (r) {
+        unsigned long area_flags = phys_offset & ~TARGET_PAGE_MASK;
         phys_offset &= ~IO_MEM_ROM;
+
+        if (area_flags == IO_MEM_UNASSIGNED) {
+            kvm_unregister_memory_area(kvm_context, start_addr, size);
+            return;
+        }
+
         r = kvm_is_containing_region(kvm_context, start_addr, size);
         if (r)
             return;
+
+        if (area_flags >= TLB_MMIO) {
+            r = kvm_register_mmio_slot(kvm_context, start_addr, size);
+            if (r < 0) {
+                printf("No free mmio slots\n");
+                exit(1);
+            }
+            return;
+        }
         r = kvm_is_intersecting_mem(kvm_context, start_addr);
         if (r) {
             printf("Ignoring intersecting memory %llx (%lx)\n", start_addr, size);
-- 
1.5.5.1


  reply	other threads:[~2008-08-13  0:56 UTC|newest]

Thread overview: 16+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2008-08-13  0:48 [RFC 0/9] Memory registration rework Glauber Costa
2008-08-13  0:48 ` [PATCH 1/9] add debuging facilities to memory registration at libkvm Glauber Costa
2008-08-13  0:48   ` [PATCH 2/9] experimental change to avoid doing the same thing twice Glauber Costa
2008-08-13  0:48     ` [PATCH 3/9] do not use mem_hole anymore Glauber Costa
2008-08-13  0:48       ` [PATCH 4/9] allow intersecting region to be on the boundary Glauber Costa
2008-08-13  0:48         ` [PATCH 5/9] substitute is_allocated_mem with more general is_containing_region Glauber Costa
2008-08-13  0:48           ` Glauber Costa [this message]
2008-08-13  0:48             ` [PATCH 7/9] cleanup kvm memory registration Glauber Costa
2008-08-13  0:48               ` [PATCH 8/9] coalesce mmio regions without an explicit call Glauber Costa
2008-08-13  0:48                 ` [PATCH 9/9] remove explicit calls to kvm_qemu_register_coalesced_mmio Glauber Costa
2008-08-13 14:11             ` [PATCH 6/9] move kvm_cpu_register_memory_area into qemu's Anthony Liguori
2008-08-13 14:33               ` Glauber Costa
2008-08-13 11:41           ` [PATCH 5/9] substitute is_allocated_mem with more general is_containing_region Avi Kivity
2008-08-13 13:02             ` Glauber Costa
2008-08-13 11:43 ` [RFC 0/9] Memory registration rework Avi Kivity
2008-08-13 14:13   ` Anthony Liguori

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1218588489-17182-7-git-send-email-gcosta@redhat.com \
    --to=gcosta@redhat.com \
    --cc=aliguori@us.ibm.com \
    --cc=avi@qumranet.com \
    --cc=kvm@vger.kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox