xen-devel.lists.xenproject.org archive mirror
* [PATCH][XEN] p2m cleanup: remove unused function
@ 2010-03-02 17:20 Christoph Egger
  2010-03-02 18:03 ` Keir Fraser
  0 siblings, 1 reply; 3+ messages in thread
From: Christoph Egger @ 2010-03-02 17:20 UTC (permalink / raw)
  To: xen-devel

[-- Attachment #1: Type: text/plain, Size: 593 bytes --]


Hi!

The attached patch removes gfn_to_mfn_current(). It is redundant and
effectively unused.
All remaining users now use the proper replacement functions.
=> No functional change.

While here, silence a stdvga warning and use paging_mode_hap()
consistently.
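
To illustrate, the conversion is mechanical -- an illustrative sketch,
using the signatures from the patch below:

    p2m_type_t p2mt;
    mfn_t mfn;

    /* before: the domain argument was implicitly current->domain */
    mfn = gfn_to_mfn_current(gfn, &p2mt);

    /* after: the domain is passed explicitly */
    mfn = gfn_to_mfn(current->domain, gfn, &p2mt);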

Signed-off-by: Christoph Egger <Christoph.Egger@amd.com>


-- 
---to satisfy European law for business letters:
Advanced Micro Devices GmbH
Karl-Hammerschmidt-Str. 34, 85609 Dornach b. Muenchen
Managing directors: Andrew Bowd, Thomas M. McCoy, Giuliano Meroni
Registered office: Dornach, municipality of Aschheim, district of Muenchen
Registration court: Muenchen, HRB No. 43632

[-- Attachment #2: xen_p2m.diff --]
[-- Type: text/x-diff, Size: 18052 bytes --]

diff -r 8f91d6e94674 xen/arch/x86/hvm/emulate.c
--- a/xen/arch/x86/hvm/emulate.c	Tue Feb 23 09:10:59 2010 +0100
+++ b/xen/arch/x86/hvm/emulate.c	Tue Mar 02 18:12:53 2010 +0100
@@ -62,7 +62,7 @@ static int hvmemul_do_io(
     int rc;
 
     /* Check for paged out page */
-    ram_mfn = gfn_to_mfn_unshare(current->domain, ram_gfn, &p2mt, 0);
+    ram_mfn = gfn_to_mfn_unshare(curr->domain, ram_gfn, &p2mt, 0);
     if ( p2m_is_paging(p2mt) )
     {
         p2m_mem_paging_populate(curr->domain, ram_gfn);
@@ -638,6 +638,7 @@ static int hvmemul_rep_movs(
     unsigned long saddr, daddr, bytes;
     paddr_t sgpa, dgpa;
     uint32_t pfec = PFEC_page_present;
+    struct domain *d = current->domain;
     p2m_type_t p2mt;
     int rc, df = !!(ctxt->regs->eflags & X86_EFLAGS_DF);
     char *buf;
@@ -668,12 +669,12 @@ static int hvmemul_rep_movs(
     if ( rc != X86EMUL_OKAY )
         return rc;
 
-    (void)gfn_to_mfn_current(sgpa >> PAGE_SHIFT, &p2mt);
+    (void)gfn_to_mfn(d, sgpa >> PAGE_SHIFT, &p2mt);
     if ( !p2m_is_ram(p2mt) && !p2m_is_grant(p2mt) )
         return hvmemul_do_mmio(
             sgpa, reps, bytes_per_rep, dgpa, IOREQ_READ, df, NULL);
 
-    (void)gfn_to_mfn_current(dgpa >> PAGE_SHIFT, &p2mt);
+    (void)gfn_to_mfn(d, dgpa >> PAGE_SHIFT, &p2mt);
     if ( !p2m_is_ram(p2mt) && !p2m_is_grant(p2mt) )
         return hvmemul_do_mmio(
             dgpa, reps, bytes_per_rep, sgpa, IOREQ_WRITE, df, NULL);
diff -r 8f91d6e94674 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c	Tue Feb 23 09:10:59 2010 +0100
+++ b/xen/arch/x86/hvm/hvm.c	Tue Mar 02 18:12:53 2010 +0100
@@ -933,8 +933,9 @@ bool_t hvm_hap_nested_page_fault(unsigne
 {
     p2m_type_t p2mt;
     mfn_t mfn;
+    struct domain *d = current->domain;
 
-    mfn = gfn_to_mfn_type_current(gfn, &p2mt, p2m_guest);
+    mfn = gfn_to_mfn_guest(d, gfn, &p2mt);
 
     /*
      * If this GFN is emulated MMIO or marked as read-only, pass the fault
@@ -949,20 +950,20 @@ bool_t hvm_hap_nested_page_fault(unsigne
 
     /* Check if the page has been paged out */
     if ( p2m_is_paged(p2mt) || (p2mt == p2m_ram_paging_out) )
-        p2m_mem_paging_populate(current->domain, gfn);
+        p2m_mem_paging_populate(d, gfn);
 
     /* Mem sharing: unshare the page and try again */
     if ( p2mt == p2m_ram_shared )
     {
-        mem_sharing_unshare_page(current->domain, gfn, 0);
+        mem_sharing_unshare_page(d, gfn, 0);
         return 1;
     }
  
     /* Spurious fault? PoD and log-dirty also take this path. */
     if ( p2m_is_ram(p2mt) )
     {
-        paging_mark_dirty(current->domain, mfn_x(mfn));
-        p2m_change_type(current->domain, gfn, p2m_ram_logdirty, p2m_ram_rw);
+        paging_mark_dirty(d, mfn_x(mfn));
+        p2m_change_type(d, gfn, p2m_ram_logdirty, p2m_ram_rw);
         return 1;
     }
 
@@ -1087,7 +1088,7 @@ int hvm_set_cr0(unsigned long value)
         {
             /* The guest CR3 must be pointing to the guest physical. */
             gfn = v->arch.hvm_vcpu.guest_cr[3]>>PAGE_SHIFT;
-            mfn = mfn_x(gfn_to_mfn_current(gfn, &p2mt));
+            mfn = mfn_x(gfn_to_mfn(v->domain, gfn, &p2mt));
             if ( !p2m_is_ram(p2mt) || !mfn_valid(mfn) ||
                  !get_page(mfn_to_page(mfn), v->domain))
             {
@@ -1174,7 +1175,7 @@ int hvm_set_cr3(unsigned long value)
     {
         /* Shadow-mode CR3 change. Check PDBR and update refcounts. */
         HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 value = %lx", value);
-        mfn = mfn_x(gfn_to_mfn_current(value >> PAGE_SHIFT, &p2mt));
+        mfn = mfn_x(gfn_to_mfn(v->domain, value >> PAGE_SHIFT, &p2mt));
         if ( !p2m_is_ram(p2mt) || !mfn_valid(mfn) ||
              !get_page(mfn_to_page(mfn), v->domain) )
               goto bad_cr3;
@@ -1317,6 +1318,7 @@ static void *hvm_map_entry(unsigned long
     unsigned long gfn, mfn;
     p2m_type_t p2mt;
     uint32_t pfec;
+    struct vcpu *v = current;
 
     if ( ((va & ~PAGE_MASK) + 8) > PAGE_SIZE )
     {
@@ -1330,13 +1332,13 @@ static void *hvm_map_entry(unsigned long
      * write the accessed flags in the descriptors (in 32-bit mode), but
      * we still treat it as a kernel-mode read (i.e. no access checks). */
     pfec = PFEC_page_present;
-    gfn = paging_gva_to_gfn(current, va, &pfec);
+    gfn = paging_gva_to_gfn(v, va, &pfec);
     if ( pfec == PFEC_page_paged || pfec == PFEC_page_shared )
         return NULL;
-    mfn = mfn_x(gfn_to_mfn_unshare(current->domain, gfn, &p2mt, 0));
+    mfn = mfn_x(gfn_to_mfn_unshare(v->domain, gfn, &p2mt, 0));
     if ( p2m_is_paging(p2mt) )
     {
-        p2m_mem_paging_populate(current->domain, gfn);
+        p2m_mem_paging_populate(v->domain, gfn);
         return NULL;
     }
     if ( p2m_is_shared(p2mt) )
@@ -1344,13 +1346,13 @@ static void *hvm_map_entry(unsigned long
     if ( !p2m_is_ram(p2mt) )
     {
         gdprintk(XENLOG_ERR, "Failed to look up descriptor table entry\n");
-        domain_crash(current->domain);
+        domain_crash(v->domain);
         return NULL;
     }
 
     ASSERT(mfn_valid(mfn));
 
-    paging_mark_dirty(current->domain, mfn);
+    paging_mark_dirty(v->domain, mfn);
 
     return (char *)map_domain_page(mfn) + (va & ~PAGE_MASK);
 }
@@ -1731,7 +1733,7 @@ static enum hvm_copy_result __hvm_copy(
             gfn = addr >> PAGE_SHIFT;
         }
 
-        mfn = mfn_x(gfn_to_mfn_unshare(current->domain, gfn, &p2mt, 0));
+        mfn = mfn_x(gfn_to_mfn_unshare(curr->domain, gfn, &p2mt, 0));
 
         if ( p2m_is_paging(p2mt) )
         {
diff -r 8f91d6e94674 xen/arch/x86/hvm/stdvga.c
--- a/xen/arch/x86/hvm/stdvga.c	Tue Feb 23 09:10:59 2010 +0100
+++ b/xen/arch/x86/hvm/stdvga.c	Tue Mar 02 18:12:53 2010 +0100
@@ -481,7 +481,8 @@ static int mmio_move(struct hvm_hw_stdvg
                 if ( hvm_copy_to_guest_phys(data, &tmp, p->size) !=
                      HVMCOPY_okay )
                 {
-                    (void)gfn_to_mfn_current(data >> PAGE_SHIFT, &p2mt);
+                    (void)gfn_to_mfn(current->domain,
+                        data >> PAGE_SHIFT, &p2mt);
                     /*
                      * The only case we handle is vga_mem <-> vga_mem.
                      * Anything else disables caching and leaves it to qemu-dm.
@@ -503,7 +504,8 @@ static int mmio_move(struct hvm_hw_stdvg
                 if ( hvm_copy_from_guest_phys(&tmp, data, p->size) !=
                      HVMCOPY_okay )
                 {
-                    (void)gfn_to_mfn_current(data >> PAGE_SHIFT, &p2mt);
+                    (void)gfn_to_mfn(current->domain,
+                        data >> PAGE_SHIFT, &p2mt);
                     if ( (p2mt != p2m_mmio_dm) || (data < VGA_MEM_BASE) ||
                          ((data + p->size) > (VGA_MEM_BASE + VGA_MEM_SIZE)) )
                         return 0;
@@ -561,8 +563,9 @@ static int stdvga_intercept_mmio(ioreq_t
         {
         case IOREQ_TYPE_COPY:
             buf = mmio_move(s, p);
-            if ( buf )
-                break;
+            if ( !buf )
+                s->cache = 0;
+            break;
         default:
             gdprintk(XENLOG_WARNING, "unsupported mmio request type:%d "
                      "addr:0x%04x data:0x%04x size:%d count:%d state:%d "
diff -r 8f91d6e94674 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c	Tue Feb 23 09:10:59 2010 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c	Tue Mar 02 18:12:53 2010 +0100
@@ -915,7 +915,7 @@ static void svm_do_nested_pgfault(paddr_
         return;
 
     /* Everything else is an error. */
-    mfn = gfn_to_mfn_type_current(gfn, &p2mt, p2m_guest);
+    mfn = gfn_to_mfn_guest(current->domain, gfn, &p2mt);
     gdprintk(XENLOG_ERR, "SVM violation gpa %#"PRIpaddr", mfn %#lx, type %i\n",
              gpa, mfn_x(mfn), p2mt);
     domain_crash(current->domain);
diff -r 8f91d6e94674 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c	Tue Feb 23 09:10:59 2010 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c	Tue Mar 02 18:12:53 2010 +0100
@@ -2121,7 +2121,7 @@ static void ept_handle_violation(unsigne
         return;
 
     /* Everything else is an error. */
-    mfn = gfn_to_mfn_type_current(gfn, &p2mt, p2m_guest);
+    mfn = gfn_to_mfn_guest(current->domain, gfn, &p2mt);
     gdprintk(XENLOG_ERR, "EPT violation %#lx (%c%c%c/%c%c%c), "
              "gpa %#"PRIpaddr", mfn %#lx, type %i.\n", 
              qualification, 
diff -r 8f91d6e94674 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c	Tue Feb 23 09:10:59 2010 +0100
+++ b/xen/arch/x86/mm.c	Tue Mar 02 18:12:53 2010 +0100
@@ -3673,7 +3673,7 @@ static int replace_grant_p2m_mapping(
     if ( new_addr != 0 || (flags & GNTMAP_contains_pte) )
         return GNTST_general_error;
 
-    old_mfn = gfn_to_mfn_current(gfn, &type);
+    old_mfn = gfn_to_mfn(current->domain, gfn, &type);
     if ( !p2m_is_grant(type) || mfn_x(old_mfn) != frame )
     {
         gdprintk(XENLOG_WARNING,
diff -r 8f91d6e94674 xen/arch/x86/mm/hap/p2m-ept.c
--- a/xen/arch/x86/mm/hap/p2m-ept.c	Tue Feb 23 09:10:59 2010 +0100
+++ b/xen/arch/x86/mm/hap/p2m-ept.c	Tue Mar 02 18:12:53 2010 +0100
@@ -542,12 +542,6 @@ out:
     return;
 }
 
-static mfn_t ept_get_entry_current(unsigned long gfn, p2m_type_t *t,
-                                   p2m_query_t q)
-{
-    return ept_get_entry(current->domain, gfn, t, q);
-}
-
 /* 
  * To test if the new emt type is the same with old,
  * return 1 to not to reset ept entry.
@@ -718,7 +712,6 @@ void ept_p2m_init(struct domain *d)
 {
     d->arch.p2m->set_entry = ept_set_entry;
     d->arch.p2m->get_entry = ept_get_entry;
-    d->arch.p2m->get_entry_current = ept_get_entry_current;
     d->arch.p2m->change_entry_type_global = ept_change_entry_type_global;
 }
 
diff -r 8f91d6e94674 xen/arch/x86/mm/hap/private.h
--- a/xen/arch/x86/mm/hap/private.h	Tue Feb 23 09:10:59 2010 +0100
+++ b/xen/arch/x86/mm/hap/private.h	Tue Mar 02 18:12:53 2010 +0100
@@ -31,4 +31,4 @@ unsigned long hap_gva_to_gfn_4_levels(st
                                      uint32_t *pfec);
 
 
-#endif /* __SVM_NPT_H__ */
+#endif /* __HAP_PRIVATE_H__ */
diff -r 8f91d6e94674 xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c	Tue Feb 23 09:10:59 2010 +0100
+++ b/xen/arch/x86/mm/p2m.c	Tue Mar 02 18:12:53 2010 +0100
@@ -1231,7 +1231,7 @@ p2m_set_entry(struct domain *d, unsigned
     if ( !p2m_next_level(d, &table_mfn, &table, &gfn_remainder, gfn,
                          L3_PAGETABLE_SHIFT - PAGE_SHIFT,
                          ((CONFIG_PAGING_LEVELS == 3)
-                          ? (d->arch.hvm_domain.hap_enabled ? 4 : 8)
+                          ? (paging_mode_hap(d) ? 4 : 8)
                           : L3_PAGETABLE_ENTRIES),
                          PGT_l2_page_table) )
         goto out;
@@ -1421,131 +1421,6 @@ pod_retry_l1:
     return (p2m_is_valid(*t) || p2m_is_grant(*t)) ? mfn : _mfn(INVALID_MFN);
 }
 
-/* Read the current domain's p2m table (through the linear mapping). */
-static mfn_t p2m_gfn_to_mfn_current(unsigned long gfn, p2m_type_t *t,
-                                    p2m_query_t q)
-{
-    mfn_t mfn = _mfn(INVALID_MFN);
-    p2m_type_t p2mt = p2m_mmio_dm;
-    paddr_t addr = ((paddr_t)gfn) << PAGE_SHIFT;
-    /* XXX This is for compatibility with the old model, where anything not 
-     * XXX marked as RAM was considered to be emulated MMIO space.
-     * XXX Once we start explicitly registering MMIO regions in the p2m 
-     * XXX we will return p2m_invalid for unmapped gfns */
-
-    if ( gfn <= current->domain->arch.p2m->max_mapped_pfn )
-    {
-        l1_pgentry_t l1e = l1e_empty(), *p2m_entry;
-        l2_pgentry_t l2e = l2e_empty();
-        int ret;
-
-        ASSERT(gfn < (RO_MPT_VIRT_END - RO_MPT_VIRT_START) 
-               / sizeof(l1_pgentry_t));
-
-        /*
-         * Read & process L2
-         */
-        p2m_entry = &__linear_l1_table[l1_linear_offset(RO_MPT_VIRT_START)
-                                       + l2_linear_offset(addr)];
-
-    pod_retry_l2:
-        ret = __copy_from_user(&l2e,
-                               p2m_entry,
-                               sizeof(l2e));
-        if ( ret != 0
-             || !(l2e_get_flags(l2e) & _PAGE_PRESENT) )
-        {
-            if( (l2e_get_flags(l2e) & _PAGE_PSE)
-                && ( p2m_flags_to_type(l2e_get_flags(l2e))
-                     == p2m_populate_on_demand ) )
-            {
-                /* The read has succeeded, so we know that the mapping
-                 * exits at this point.  */
-                if ( q != p2m_query )
-                {
-                    if ( !p2m_pod_check_and_populate(current->domain, gfn,
-                                                            p2m_entry, 9, q) )
-                        goto pod_retry_l2;
-
-                    /* Allocate failed. */
-                    p2mt = p2m_invalid;
-                    printk("%s: Allocate failed!\n", __func__);
-                    goto out;
-                }
-                else
-                {
-                    p2mt = p2m_populate_on_demand;
-                    goto out;
-                }
-            }
-
-            goto pod_retry_l1;
-        }
-        
-        if (l2e_get_flags(l2e) & _PAGE_PSE)
-        {
-            p2mt = p2m_flags_to_type(l2e_get_flags(l2e));
-            ASSERT(l2e_get_pfn(l2e) != INVALID_MFN || !p2m_is_ram(p2mt));
-
-            if ( p2m_is_valid(p2mt) )
-                mfn = _mfn(l2e_get_pfn(l2e) + l1_table_offset(addr));
-            else
-                p2mt = p2m_mmio_dm;
-
-            goto out;
-        }
-
-        /*
-         * Read and process L1
-         */
-
-        /* Need to __copy_from_user because the p2m is sparse and this
-         * part might not exist */
-    pod_retry_l1:
-        p2m_entry = &phys_to_machine_mapping[gfn];
-
-        ret = __copy_from_user(&l1e,
-                               p2m_entry,
-                               sizeof(l1e));
-            
-        if ( ret == 0 ) {
-            p2mt = p2m_flags_to_type(l1e_get_flags(l1e));
-            ASSERT(l1e_get_pfn(l1e) != INVALID_MFN || !p2m_is_ram(p2mt));
-
-            if ( p2m_flags_to_type(l1e_get_flags(l1e))
-                 == p2m_populate_on_demand )
-            {
-                /* The read has succeeded, so we know that the mapping
-                 * exits at this point.  */
-                if ( q != p2m_query )
-                {
-                    if ( !p2m_pod_check_and_populate(current->domain, gfn,
-                                                            (l1_pgentry_t *)p2m_entry, 0, q) )
-                        goto pod_retry_l1;
-
-                    /* Allocate failed. */
-                    p2mt = p2m_invalid;
-                    goto out;
-                }
-                else
-                {
-                    p2mt = p2m_populate_on_demand;
-                    goto out;
-                }
-            }
-
-            if ( p2m_is_valid(p2mt) || p2m_is_grant(p2mt) )
-                mfn = _mfn(l1e_get_pfn(l1e));
-            else 
-                /* XXX see above */
-                p2mt = p2m_mmio_dm;
-        }
-    }
-out:
-    *t = p2mt;
-    return mfn;
-}
-
 /* Init the datastructures for later use by the p2m code */
 int p2m_init(struct domain *d)
 {
@@ -1565,10 +1440,9 @@ int p2m_init(struct domain *d)
 
     p2m->set_entry = p2m_set_entry;
     p2m->get_entry = p2m_gfn_to_mfn;
-    p2m->get_entry_current = p2m_gfn_to_mfn_current;
     p2m->change_entry_type_global = p2m_change_type_global;
 
-    if ( is_hvm_domain(d) && d->arch.hvm_domain.hap_enabled &&
+    if ( is_hvm_domain(d) && paging_mode_hap(d) &&
          (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) )
         ept_p2m_init(d);
 
@@ -1595,7 +1469,7 @@ int set_p2m_entry(struct domain *d, unsi
 
     while ( todo )
     {
-        if ( is_hvm_domain(d) && d->arch.hvm_domain.hap_enabled )
+        if ( is_hvm_domain(d) && paging_mode_hap(d) )
             order = (((gfn | mfn_x(mfn) | todo) & (SUPERPAGE_PAGES - 1)) == 0) ?
                 9 : 0;
         else
diff -r 8f91d6e94674 xen/include/asm-x86/p2m.h
--- a/xen/include/asm-x86/p2m.h	Tue Feb 23 09:10:59 2010 +0100
+++ b/xen/include/asm-x86/p2m.h	Tue Mar 02 18:12:53 2010 +0100
@@ -179,9 +179,6 @@ struct p2m_domain {
     mfn_t              (*get_entry   )(struct domain *d, unsigned long gfn,
                                        p2m_type_t *p2mt,
                                        p2m_query_t q);
-    mfn_t              (*get_entry_current)(unsigned long gfn,
-                                            p2m_type_t *p2mt,
-                                            p2m_query_t q);
     void               (*change_entry_type_global)(struct domain *d,
                                                    p2m_type_t ot,
                                                    p2m_type_t nt);
@@ -267,13 +264,6 @@ static inline p2m_type_t p2m_flags_to_ty
 #endif
 }
 
-/* Read the current domain's p2m table.  Do not populate PoD pages. */
-static inline mfn_t gfn_to_mfn_type_current(unsigned long gfn, p2m_type_t *t,
-                                            p2m_query_t q)
-{
-    return current->domain->arch.p2m->get_entry_current(gfn, t, q);
-}
-
 /* Read another domain's P2M table, mapping pages as we go.
  * Do not populate PoD pages. */
 static inline
@@ -295,17 +285,13 @@ static inline mfn_t _gfn_to_mfn_type(str
         *t = p2m_ram_rw;
         return _mfn(gfn);
     }
-    if ( likely(current->domain == d) )
-        return gfn_to_mfn_type_current(gfn, t, q);
-    else
-        return gfn_to_mfn_type_foreign(d, gfn, t, q);
+    return gfn_to_mfn_type_foreign(d, gfn, t, q);
 }
 
 #define gfn_to_mfn(d, g, t) _gfn_to_mfn_type((d), (g), (t), p2m_alloc)
 #define gfn_to_mfn_query(d, g, t) _gfn_to_mfn_type((d), (g), (t), p2m_query)
 #define gfn_to_mfn_guest(d, g, t) _gfn_to_mfn_type((d), (g), (t), p2m_guest)
 
-#define gfn_to_mfn_current(g, t) gfn_to_mfn_type_current((g), (t), p2m_alloc)
 #define gfn_to_mfn_foreign(d, g, t) gfn_to_mfn_type_foreign((d), (g), (t), p2m_alloc)
 
 static inline mfn_t gfn_to_mfn_unshare(struct domain *d,


* Re: [PATCH][XEN] p2m cleanup: remove unused function
  2010-03-02 17:20 [PATCH][XEN] p2m cleanup: remove unused function Christoph Egger
@ 2010-03-02 18:03 ` Keir Fraser
  2010-03-03 11:25   ` George Dunlap
  0 siblings, 1 reply; 3+ messages in thread
From: Keir Fraser @ 2010-03-02 18:03 UTC (permalink / raw)
  To: Christoph Egger, xen-devel@lists.xensource.com

On 02/03/2010 17:20, "Christoph Egger" <Christoph.Egger@amd.com> wrote:

> The attached patch removes gfn_to_mfn_current(). It is redundant and
> effectively unused.

Hm, well, it's unused after your patch, but it's used a fair bit right now.
It'll need an Ack from Tim Deegan, who's on holiday until the middle of
next week.

> All remaining users now use the proper replacement functions.
> => No functional change.
> 
> While here, silence a stdvga warning

Is that a bug fix? Anyhow, it would belong in a separate, properly
explained patch.

> and use paging_mode_hap()
> consistently.

I see you change over to using paging_mode_hap() in a few places. I wasn't
sure of the justification. If you could get rid of
arch.hvm_domain.hap_enabled entirely, that might make it seem more
worthwhile. Again, it would need an Ack from Tim.
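
For reference, the two spellings side by side; the macro body shown is
paraphrased from memory of xen/include/asm-x86/paging.h and may not be
exact, so check the header:

    /* open-coded HVM-only field, as tested before the patch */
    if ( is_hvm_domain(d) && d->arch.hvm_domain.hap_enabled )
        /* ... */;

    /* predicate form, in line with the other paging_mode_*() helpers;
     * roughly: #define paging_mode_hap(_d) \
     *              ((_d)->arch.paging.mode & PG_HAP_enable) */
    if ( is_hvm_domain(d) && paging_mode_hap(d) )
        /* ... */;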

 -- Keir


* Re: [PATCH][XEN] p2m cleanup: remove unused function
  2010-03-02 18:03 ` Keir Fraser
@ 2010-03-03 11:25   ` George Dunlap
  0 siblings, 0 replies; 3+ messages in thread
From: George Dunlap @ 2010-03-03 11:25 UTC (permalink / raw)
  To: Keir Fraser; +Cc: Christoph Egger, xen-devel@lists.xensource.com

I thought the whole point of gfn_to_mfn_current() was that it can walk
the p2m table more efficiently when the domain in question is current.
This isn't true with EPT-HAP, but I thought it was true for both shadow
and NPT-HAP. Is this no longer the case?
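
For reference, the fast path I mean, condensed from the
p2m_gfn_to_mfn_current() code the patch removes (PoD and L2 superpage
handling omitted): the current domain's p2m is read directly through the
linear mapping, with __copy_from_user() guarding against holes in the
sparse table, instead of mapping each p2m page with map_domain_page():

    l1_pgentry_t l1e, *p2m_entry;
    p2m_type_t p2mt;
    mfn_t mfn = _mfn(INVALID_MFN);

    /* gfn assumed <= current->domain->arch.p2m->max_mapped_pfn */
    p2m_entry = &phys_to_machine_mapping[gfn];
    if ( __copy_from_user(&l1e, p2m_entry, sizeof(l1e)) == 0 )
    {
        p2mt = p2m_flags_to_type(l1e_get_flags(l1e));
        if ( p2m_is_valid(p2mt) || p2m_is_grant(p2mt) )
            mfn = _mfn(l1e_get_pfn(l1e)); /* no per-page mapping needed */
    }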

 -George

On Tue, Mar 2, 2010 at 6:03 PM, Keir Fraser <keir.fraser@eu.citrix.com> wrote:
> On 02/03/2010 17:20, "Christoph Egger" <Christoph.Egger@amd.com> wrote:
>
>> The attached patch removes gfn_to_mfn_current(). It is redundant and
>> effectively unused.
>
> Hm, well, it's unused after your patch, but it's used a fair bit right now.
> It'll need an Ack from Tim Deegan, who's on holiday until the middle of
> next week.
>
>> All remaining users now use the proper replacement functions.
>> => No functional change.
>>
>> While here, silence a stdvga warning
>
> Is that a bug fix? Anyhow, it would belong in a separate, properly
> explained patch.
>
>> and use paging_mode_hap()
>> consistently.
>
> I see you change over to using paging_mode_hap() in a few places. I wasn't
> sure of the justification. If you could get rid of
> arch.hvm_domain.hap_enabled entirely, that might make it seem more
> worthwhile. Again, it would need an Ack from Tim.
>
>  -- Keir