[Qemu-devel] kqemu descriptor table cache patch
From: Henning Schild @ 2008-04-22 15:08 UTC
  To: qemu-devel

[-- Attachment #1: Type: text/plain, Size: 813 bytes --]

Hello,

I am currently porting kqemu to L4Linux
(http://os.inf.tu-dresden.de/L4/LinuxOnL4/).
During this work I found that NetBSD does not map its GDT completely,
which is why it can't be used with kqemu: the GDT limit is 0xffff on
NetBSD, but only the first page of the GDT is mapped, so walking the
page table for the 2nd page fails.
Other OSs out there may well do the same thing.
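Just to illustrate the numbers: a limit of 0xffff means the table spans
16 pages of 4 KiB, of which NetBSD maps only the first. A minimal,
hypothetical sketch of that arithmetic (not kqemu code):

#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
    unsigned long gdt_limit = 0xffff;                /* as reported on NetBSD */
    unsigned long gdt_bytes = gdt_limit + 1;         /* the limit is inclusive */
    unsigned long gdt_pages = (gdt_bytes + PAGE_SIZE - 1) / PAGE_SIZE;

    /* Prints 16; since only the first of these pages is mapped, walking
     * the page table for the 2nd page already fails. */
    printf("GDT covers %lu pages\n", gdt_pages);
    return 0;
}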

kqemu raises page faults in the guest OS when the LDT/GDT are not mapped
up to the given limits. I wrote a patch that keeps kqemu from faulting
while it updates its descriptor table (DT) cache; instead, all pages
that are not mapped are simply skipped, as sketched below.
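A rough, self-contained sketch of that per-page walk (illustration only,
with a made-up helper lookup_page() standing in for kqemu's map_vaddr();
the real change is in the attached patch against common/monitor.c):

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Hypothetical stand-in for map_vaddr(): pretend only the first page of
 * the descriptor table is mapped, as on NetBSD. */
static void *lookup_page(unsigned long offset)
{
    static char first_page[PAGE_SIZE];
    return (offset < PAGE_SIZE) ? first_page + offset : NULL;
}

int main(void)
{
    unsigned long dt_limit = 0xffff;        /* e.g. the NetBSD GDT limit */
    unsigned long dt_end = dt_limit + 1;

    for (unsigned long offset = 0; offset < dt_end; offset += PAGE_SIZE) {
        void *ptr = lookup_page(offset);
        if (!ptr) {
            /* Before the patch kqemu would fault here; with the patch
             * the unmapped page is skipped and the walk continues. */
            printf("offset 0x%05lx: not mapped, skipped\n", offset);
            continue;
        }
        printf("offset 0x%05lx: mapped, descriptors checked\n", offset);
    }
    return 0;
}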

ftp://ftp.netbsd.org/pub/NetBSD/iso/livecd/netbsd-live-2007.iso
won't work with kqemu-1.3.0pre11; after applying the patch, it does.

Tested on a Linux host.

regards,
Henning Schild

[-- Attachment #2: kqemu-1.3.0pre11_dont_fault_dt.patch --]
[-- Type: text/x-patch, Size: 5006 bytes --]

--- /tmp/kqemu-1.3.0pre11/common/monitor.c	2007-02-06 22:02:00.000000000 +0100
+++ common/monitor.c	2008-04-19 18:27:18.000000000 +0200
@@ -548,6 +548,8 @@
 static inline int ram_get_dirty(struct kqemu_state *s, unsigned long ram_addr,
                                 int dirty_flags)
 {
+    if (ram_addr == -1)
+        return 0;
     return s->ram_dirty[ram_addr >> PAGE_SHIFT] & dirty_flags;
 }
 
@@ -961,8 +963,8 @@
     return 1;
 }
 
-static void soft_tlb_fill(struct kqemu_state *s, unsigned long vaddr,
-                          int is_write, int is_user)
+static int soft_tlb_fill(struct kqemu_state *s, unsigned long vaddr,
+                          int is_write, int is_user, int fault)
 {
     long ret;
 #ifdef PROFILE_SOFTMMU
@@ -972,13 +974,18 @@
     ret = cpu_x86_handle_mmu_fault(s, vaddr, is_write, is_user, 1);
 #ifdef PROFILE_SOFTMMU
     ti = getclock() - ti;
-    monitor_log(s, "soft_tlb_fill: w=%d u=%d addr=%p cycle=%d\n",
-                is_write, is_user, (void *)vaddr, ti);
+    monitor_log(s, "soft_tlb_fill: w=%d u=%d addr=%p cycle=%d ret=%08lx\n",
+                is_write, is_user, (void *)vaddr, ti, ret);
 #endif
-    if (ret == 1)
-        raise_exception(s, EXCP0E_PAGE);
+    if (ret == 1) {
+        if (fault)
+            raise_exception(s, EXCP0E_PAGE);
+        else 
+            return 1;
+    }
     else if (ret == 2)
         raise_exception(s, KQEMU_RET_SOFTMMU);
+    return 0;
 }
 
 static void *map_vaddr(struct kqemu_state *s, unsigned long addr, 
@@ -990,8 +997,10 @@
     e = &s->soft_tlb[(addr >> PAGE_SHIFT) & (SOFT_TLB_SIZE - 1)];
  redo:
     if (e->vaddr[(is_user << 1) + is_write] != (addr & PAGE_MASK)) {
-        soft_tlb_fill(s, addr, is_write, is_user);
-        goto redo;
+        if (!soft_tlb_fill(s, addr, is_write, is_user, 0))
+            goto redo;
+        else
+            return NULL;
     } else {
         taddr = e->addend + addr;
     }
@@ -1008,7 +1017,7 @@
     e = &s->soft_tlb[(addr >> PAGE_SHIFT) & (SOFT_TLB_SIZE - 1)];
  redo:
     if (unlikely(e->vaddr[(is_user << 1)] != (addr & PAGE_MASK))) {
-        soft_tlb_fill(s, addr, 0, is_user);
+        soft_tlb_fill(s, addr, 0, is_user, 1);
         goto redo;
     } else {
         taddr = e->addend + addr;
@@ -1039,7 +1048,7 @@
                 val = v0 | (v1 << 8);
             }
         } else {
-            soft_tlb_fill(s, addr, 0, is_user);
+            soft_tlb_fill(s, addr, 0, is_user, 1);
             goto redo;
         }
     } else {
@@ -1075,7 +1084,7 @@
                 val = (v0 >> shift) | (v1 << (32 - shift));
             }
         } else {
-            soft_tlb_fill(s, addr, 0, is_user);
+            soft_tlb_fill(s, addr, 0, is_user, 1);
             goto redo;
         }
     } else {
@@ -1111,7 +1120,7 @@
                 val = (v0 >> shift) | (v1 << (64 - shift));
             }
         } else {
-            soft_tlb_fill(s, addr, 0, is_user);
+            soft_tlb_fill(s, addr, 0, is_user, 1);
             goto redo;
         }
     } else {
@@ -1131,7 +1140,7 @@
     e = &s->soft_tlb[(addr >> PAGE_SHIFT) & (SOFT_TLB_SIZE - 1)];
  redo:
     if (unlikely(e->vaddr[(is_user << 1) + 1] != (addr & PAGE_MASK))) {
-        soft_tlb_fill(s, addr, 1, is_user);
+        soft_tlb_fill(s, addr, 1, is_user, 1);
         goto redo;
     } else {
         taddr = e->addend + addr;
@@ -1158,7 +1167,7 @@
                 stb_slow(s, addr + 1, val >> 8, is_user);
             }
         } else {
-            soft_tlb_fill(s, addr, 1, is_user);
+            soft_tlb_fill(s, addr, 1, is_user, 1);
             goto redo;
         }
     } else {
@@ -1189,7 +1198,7 @@
                 stb_slow(s, addr + 3, val >> 24, is_user);
             }
         } else {
-            soft_tlb_fill(s, addr, 1, is_user);
+            soft_tlb_fill(s, addr, 1, is_user, 1);
             goto redo;
         }
     } else {
@@ -1224,7 +1233,7 @@
                 stb_slow(s, addr + 7, val >> 56, is_user);
             }
         } else {
-            soft_tlb_fill(s, addr, 1, is_user);
+            soft_tlb_fill(s, addr, 1, is_user, 1);
             goto redo;
         }
     } else {
@@ -1802,13 +1811,18 @@
             page_end = dt_end;
         sel2 = sel + (page_end - dt_ptr);
         ptr = map_vaddr(s, dt_ptr, 0, 0);
-        ram_addr = ram_ptr_to_ram_addr(s, ptr);
+        if (ptr)
+            ram_addr = ram_ptr_to_ram_addr(s, ptr);
+        else 
+            ram_addr = -1;
         if (dt_changed || 
             s->dt_ram_addr[dt_type][pindex] != ram_addr ||
             ram_get_dirty(s, ram_addr, DT_DIRTY_FLAG)) {
             s->dt_ram_addr[dt_type][pindex] = ram_addr;
-            check_dt_entries_page(s, dt_type, sel, sel2, ptr);
-            ram_reset_dirty(s, ram_addr, DT_DIRTY_FLAG);
+            if (ptr) {
+                check_dt_entries_page(s, dt_type, sel, sel2, ptr);
+                ram_reset_dirty(s, ram_addr, DT_DIRTY_FLAG);
+            }
         }
         sel = sel2;
         pindex++;
