xen-devel.lists.xenproject.org archive mirror
* [PATCH 3/6] add_to_physmap: Move the code for XENMEM_add_to_physmap.
  2011-11-04 10:38   ` [PATCH 2/6] iommu: Introduce iommu_flush and iommu_flush_all Jean Guyader
@ 2011-11-04 10:38     ` Jean Guyader
  0 siblings, 0 replies; 15+ messages in thread
From: Jean Guyader @ 2011-11-04 10:38 UTC (permalink / raw)
  To: xen-devel; +Cc: allen.m.kay, tim, Jean Guyader

[-- Attachment #1: Type: text/plain, Size: 293 bytes --]


Move the code for the XENMEM_add_to_physmap case into its own
function (xenmem_add_to_physmap).

Signed-off-by: Jean Guyader <jean.guyader@eu.citrix.com>
---
 xen/arch/x86/mm.c |  188 ++++++++++++++++++++++++++++-------------------------
 1 files changed, 99 insertions(+), 89 deletions(-)


[-- Warning: decoded text below may be mangled, UTF-8 assumed --]
[-- Attachment #2: 0003-add_to_physmap-Move-the-code-for-XENMEM_add_to_physm.patch --]
[-- Type: text/x-patch; name="0003-add_to_physmap-Move-the-code-for-XENMEM_add_to_physm.patch", Size: 7065 bytes --]

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index acc1f34..f75011e 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -4592,119 +4592,129 @@ static int handle_iomem_range(unsigned long s, unsigned long e, void *p)
     return 0;
 }
 
-long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
+static int xenmem_add_to_physmap(struct domain *d, struct xen_add_to_physmap xatp)
 {
     struct page_info *page = NULL;
+    unsigned long prev_mfn, mfn = 0, gpfn;
     int rc;
 
-    switch ( op )
-    {
-    case XENMEM_add_to_physmap:
+    switch ( xatp.space )
     {
-        struct xen_add_to_physmap xatp;
-        unsigned long prev_mfn, mfn = 0, gpfn;
-        struct domain *d;
-
-        if ( copy_from_guest(&xatp, arg, 1) )
-            return -EFAULT;
+    case XENMAPSPACE_shared_info:
+        if ( xatp.idx == 0 )
+            mfn = virt_to_mfn(d->shared_info);
+        break;
+    case XENMAPSPACE_grant_table:
+        spin_lock(&d->grant_table->lock);
 
-        rc = rcu_lock_target_domain_by_id(xatp.domid, &d);
-        if ( rc != 0 )
-            return rc;
+        if ( d->grant_table->gt_version == 0 )
+            d->grant_table->gt_version = 1;
 
-        if ( xsm_add_to_physmap(current->domain, d) )
+        if ( d->grant_table->gt_version == 2 &&
+             (xatp.idx & XENMAPIDX_grant_table_status) )
         {
-            rcu_unlock_domain(d);
-            return -EPERM;
+            xatp.idx &= ~XENMAPIDX_grant_table_status;
+            if ( xatp.idx < nr_status_frames(d->grant_table) )
+                mfn = virt_to_mfn(d->grant_table->status[xatp.idx]);
+        }
+        else
+        {
+            if ( (xatp.idx >= nr_grant_frames(d->grant_table)) &&
+                 (xatp.idx < max_nr_grant_frames) )
+                gnttab_grow_table(d, xatp.idx + 1);
+
+            if ( xatp.idx < nr_grant_frames(d->grant_table) )
+                mfn = virt_to_mfn(d->grant_table->shared_raw[xatp.idx]);
         }
 
-        switch ( xatp.space )
+        spin_unlock(&d->grant_table->lock);
+        break;
+    case XENMAPSPACE_gmfn:
+    {
+        p2m_type_t p2mt;
+
+        xatp.idx = mfn_x(gfn_to_mfn_unshare(d, xatp.idx, &p2mt));
+        /* If the page is still shared, exit early */
+        if ( p2m_is_shared(p2mt) )
         {
-        case XENMAPSPACE_shared_info:
-            if ( xatp.idx == 0 )
-                mfn = virt_to_mfn(d->shared_info);
+            rcu_unlock_domain(d);
+            return -ENOMEM;
+        }
+        if ( !get_page_from_pagenr(xatp.idx, d) )
             break;
-        case XENMAPSPACE_grant_table:
-            spin_lock(&d->grant_table->lock);
+        mfn = xatp.idx;
+        page = mfn_to_page(mfn);
+        break;
+    }
+    default:
+        break;
+    }
 
-            if ( d->grant_table->gt_version == 0 )
-                d->grant_table->gt_version = 1;
+    if ( !paging_mode_translate(d) || (mfn == 0) )
+    {
+        if ( page )
+            put_page(page);
+        rcu_unlock_domain(d);
+        return -EINVAL;
+    }
 
-            if ( d->grant_table->gt_version == 2 &&
-                 (xatp.idx & XENMAPIDX_grant_table_status) )
-            {
-                xatp.idx &= ~XENMAPIDX_grant_table_status;
-                if ( xatp.idx < nr_status_frames(d->grant_table) )
-                    mfn = virt_to_mfn(d->grant_table->status[xatp.idx]);
-            }
-            else
-            {
-                if ( (xatp.idx >= nr_grant_frames(d->grant_table)) &&
-                     (xatp.idx < max_nr_grant_frames) )
-                    gnttab_grow_table(d, xatp.idx + 1);
+    domain_lock(d);
 
-                if ( xatp.idx < nr_grant_frames(d->grant_table) )
-                    mfn = virt_to_mfn(d->grant_table->shared_raw[xatp.idx]);
-            }
+    if ( page )
+        put_page(page);
 
-            spin_unlock(&d->grant_table->lock);
-            break;
-        case XENMAPSPACE_gmfn:
-        {
-            p2m_type_t p2mt;
+    /* Remove previously mapped page if it was present. */
+    prev_mfn = gmfn_to_mfn(d, xatp.gpfn);
+    if ( mfn_valid(prev_mfn) )
+    {
+        if ( is_xen_heap_mfn(prev_mfn) )
+            /* Xen heap frames are simply unhooked from this phys slot. */
+            guest_physmap_remove_page(d, xatp.gpfn, prev_mfn, 0);
+        else
+            /* Normal domain memory is freed, to avoid leaking memory. */
+            guest_remove_page(d, xatp.gpfn);
+    }
 
-            xatp.idx = mfn_x(gfn_to_mfn_unshare(d, xatp.idx, &p2mt));
-            /* If the page is still shared, exit early */
-            if ( p2m_is_shared(p2mt) )
-            {
-                rcu_unlock_domain(d);
-                return -ENOMEM;
-            }
-            if ( !get_page_from_pagenr(xatp.idx, d) )
-                break;
-            mfn = xatp.idx;
-            page = mfn_to_page(mfn);
-            break;
-        }
-        default:
-            break;
-        }
+    /* Unmap from old location, if any. */
+    gpfn = get_gpfn_from_mfn(mfn);
+    ASSERT( gpfn != SHARED_M2P_ENTRY );
+    if ( gpfn != INVALID_M2P_ENTRY )
+        guest_physmap_remove_page(d, gpfn, mfn, 0);
 
-        if ( !paging_mode_translate(d) || (mfn == 0) )
-        {
-            if ( page )
-                put_page(page);
-            rcu_unlock_domain(d);
-            return -EINVAL;
-        }
+    /* Map at new location. */
+    rc = guest_physmap_add_page(d, xatp.gpfn, mfn, 0);
 
-        domain_lock(d);
+    domain_unlock(d);
 
-        if ( page )
-            put_page(page);
+    return rc;
+}
 
-        /* Remove previously mapped page if it was present. */
-        prev_mfn = gmfn_to_mfn(d, xatp.gpfn);
-        if ( mfn_valid(prev_mfn) )
-        {
-            if ( is_xen_heap_mfn(prev_mfn) )
-                /* Xen heap frames are simply unhooked from this phys slot. */
-                guest_physmap_remove_page(d, xatp.gpfn, prev_mfn, 0);
-            else
-                /* Normal domain memory is freed, to avoid leaking memory. */
-                guest_remove_page(d, xatp.gpfn);
-        }
 
-        /* Unmap from old location, if any. */
-        gpfn = get_gpfn_from_mfn(mfn);
-        ASSERT( gpfn != SHARED_M2P_ENTRY );
-        if ( gpfn != INVALID_M2P_ENTRY )
-            guest_physmap_remove_page(d, gpfn, mfn, 0);
+long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
+{
+    int rc;
+
+    switch ( op )
+    {
+    case XENMEM_add_to_physmap:
+    {
+        struct xen_add_to_physmap xatp;
+        struct domain *d;
 
-        /* Map at new location. */
-        rc = guest_physmap_add_page(d, xatp.gpfn, mfn, 0);
+        if ( copy_from_guest(&xatp, arg, 1) )
+            return -EFAULT;
+
+        rc = rcu_lock_target_domain_by_id(xatp.domid, &d);
+        if ( rc != 0 )
+            return rc;
+
+        if ( xsm_add_to_physmap(current->domain, d) )
+        {
+            rcu_unlock_domain(d);
+            return -EPERM;
+        }
 
-        domain_unlock(d);
+        xenmem_add_to_physmap(d, xatp);
 
         rcu_unlock_domain(d);
 


* [PATCH 3/6] add_to_physmap: Move the code for XENMEM_add_to_physmap.
  2011-11-07 15:16   ` [PATCH 2/6] iommu: Introduce iommu_flush and iommu_flush_all Jean Guyader
@ 2011-11-07 15:16     ` Jean Guyader
  0 siblings, 0 replies; 15+ messages in thread
From: Jean Guyader @ 2011-11-07 15:16 UTC (permalink / raw)
  To: xen-devel; +Cc: tim, allen.m.kay, Jean Guyader

[-- Attachment #1: Type: text/plain, Size: 293 bytes --]


Move the code for the XENMEM_add_to_physmap case into its own
function (xenmem_add_to_physmap).

Signed-off-by: Jean Guyader <jean.guyader@eu.citrix.com>
---
 xen/arch/x86/mm.c |  188 ++++++++++++++++++++++++++++-------------------------
 1 files changed, 99 insertions(+), 89 deletions(-)


[-- Warning: decoded text below may be mangled, UTF-8 assumed --]
[-- Attachment #2: 0003-add_to_physmap-Move-the-code-for-XENMEM_add_to_physm.patch --]
[-- Type: text/x-patch; name="0003-add_to_physmap-Move-the-code-for-XENMEM_add_to_physm.patch", Size: 7065 bytes --]

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index acc1f34..f75011e 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -4592,119 +4592,129 @@ static int handle_iomem_range(unsigned long s, unsigned long e, void *p)
     return 0;
 }
 
-long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
+static int xenmem_add_to_physmap(struct domain *d, struct xen_add_to_physmap xatp)
 {
     struct page_info *page = NULL;
+    unsigned long prev_mfn, mfn = 0, gpfn;
     int rc;
 
-    switch ( op )
-    {
-    case XENMEM_add_to_physmap:
+    switch ( xatp.space )
     {
-        struct xen_add_to_physmap xatp;
-        unsigned long prev_mfn, mfn = 0, gpfn;
-        struct domain *d;
-
-        if ( copy_from_guest(&xatp, arg, 1) )
-            return -EFAULT;
+    case XENMAPSPACE_shared_info:
+        if ( xatp.idx == 0 )
+            mfn = virt_to_mfn(d->shared_info);
+        break;
+    case XENMAPSPACE_grant_table:
+        spin_lock(&d->grant_table->lock);
 
-        rc = rcu_lock_target_domain_by_id(xatp.domid, &d);
-        if ( rc != 0 )
-            return rc;
+        if ( d->grant_table->gt_version == 0 )
+            d->grant_table->gt_version = 1;
 
-        if ( xsm_add_to_physmap(current->domain, d) )
+        if ( d->grant_table->gt_version == 2 &&
+             (xatp.idx & XENMAPIDX_grant_table_status) )
         {
-            rcu_unlock_domain(d);
-            return -EPERM;
+            xatp.idx &= ~XENMAPIDX_grant_table_status;
+            if ( xatp.idx < nr_status_frames(d->grant_table) )
+                mfn = virt_to_mfn(d->grant_table->status[xatp.idx]);
+        }
+        else
+        {
+            if ( (xatp.idx >= nr_grant_frames(d->grant_table)) &&
+                 (xatp.idx < max_nr_grant_frames) )
+                gnttab_grow_table(d, xatp.idx + 1);
+
+            if ( xatp.idx < nr_grant_frames(d->grant_table) )
+                mfn = virt_to_mfn(d->grant_table->shared_raw[xatp.idx]);
         }
 
-        switch ( xatp.space )
+        spin_unlock(&d->grant_table->lock);
+        break;
+    case XENMAPSPACE_gmfn:
+    {
+        p2m_type_t p2mt;
+
+        xatp.idx = mfn_x(gfn_to_mfn_unshare(d, xatp.idx, &p2mt));
+        /* If the page is still shared, exit early */
+        if ( p2m_is_shared(p2mt) )
         {
-        case XENMAPSPACE_shared_info:
-            if ( xatp.idx == 0 )
-                mfn = virt_to_mfn(d->shared_info);
+            rcu_unlock_domain(d);
+            return -ENOMEM;
+        }
+        if ( !get_page_from_pagenr(xatp.idx, d) )
             break;
-        case XENMAPSPACE_grant_table:
-            spin_lock(&d->grant_table->lock);
+        mfn = xatp.idx;
+        page = mfn_to_page(mfn);
+        break;
+    }
+    default:
+        break;
+    }
 
-            if ( d->grant_table->gt_version == 0 )
-                d->grant_table->gt_version = 1;
+    if ( !paging_mode_translate(d) || (mfn == 0) )
+    {
+        if ( page )
+            put_page(page);
+        rcu_unlock_domain(d);
+        return -EINVAL;
+    }
 
-            if ( d->grant_table->gt_version == 2 &&
-                 (xatp.idx & XENMAPIDX_grant_table_status) )
-            {
-                xatp.idx &= ~XENMAPIDX_grant_table_status;
-                if ( xatp.idx < nr_status_frames(d->grant_table) )
-                    mfn = virt_to_mfn(d->grant_table->status[xatp.idx]);
-            }
-            else
-            {
-                if ( (xatp.idx >= nr_grant_frames(d->grant_table)) &&
-                     (xatp.idx < max_nr_grant_frames) )
-                    gnttab_grow_table(d, xatp.idx + 1);
+    domain_lock(d);
 
-                if ( xatp.idx < nr_grant_frames(d->grant_table) )
-                    mfn = virt_to_mfn(d->grant_table->shared_raw[xatp.idx]);
-            }
+    if ( page )
+        put_page(page);
 
-            spin_unlock(&d->grant_table->lock);
-            break;
-        case XENMAPSPACE_gmfn:
-        {
-            p2m_type_t p2mt;
+    /* Remove previously mapped page if it was present. */
+    prev_mfn = gmfn_to_mfn(d, xatp.gpfn);
+    if ( mfn_valid(prev_mfn) )
+    {
+        if ( is_xen_heap_mfn(prev_mfn) )
+            /* Xen heap frames are simply unhooked from this phys slot. */
+            guest_physmap_remove_page(d, xatp.gpfn, prev_mfn, 0);
+        else
+            /* Normal domain memory is freed, to avoid leaking memory. */
+            guest_remove_page(d, xatp.gpfn);
+    }
 
-            xatp.idx = mfn_x(gfn_to_mfn_unshare(d, xatp.idx, &p2mt));
-            /* If the page is still shared, exit early */
-            if ( p2m_is_shared(p2mt) )
-            {
-                rcu_unlock_domain(d);
-                return -ENOMEM;
-            }
-            if ( !get_page_from_pagenr(xatp.idx, d) )
-                break;
-            mfn = xatp.idx;
-            page = mfn_to_page(mfn);
-            break;
-        }
-        default:
-            break;
-        }
+    /* Unmap from old location, if any. */
+    gpfn = get_gpfn_from_mfn(mfn);
+    ASSERT( gpfn != SHARED_M2P_ENTRY );
+    if ( gpfn != INVALID_M2P_ENTRY )
+        guest_physmap_remove_page(d, gpfn, mfn, 0);
 
-        if ( !paging_mode_translate(d) || (mfn == 0) )
-        {
-            if ( page )
-                put_page(page);
-            rcu_unlock_domain(d);
-            return -EINVAL;
-        }
+    /* Map at new location. */
+    rc = guest_physmap_add_page(d, xatp.gpfn, mfn, 0);
 
-        domain_lock(d);
+    domain_unlock(d);
 
-        if ( page )
-            put_page(page);
+    return rc;
+}
 
-        /* Remove previously mapped page if it was present. */
-        prev_mfn = gmfn_to_mfn(d, xatp.gpfn);
-        if ( mfn_valid(prev_mfn) )
-        {
-            if ( is_xen_heap_mfn(prev_mfn) )
-                /* Xen heap frames are simply unhooked from this phys slot. */
-                guest_physmap_remove_page(d, xatp.gpfn, prev_mfn, 0);
-            else
-                /* Normal domain memory is freed, to avoid leaking memory. */
-                guest_remove_page(d, xatp.gpfn);
-        }
 
-        /* Unmap from old location, if any. */
-        gpfn = get_gpfn_from_mfn(mfn);
-        ASSERT( gpfn != SHARED_M2P_ENTRY );
-        if ( gpfn != INVALID_M2P_ENTRY )
-            guest_physmap_remove_page(d, gpfn, mfn, 0);
+long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
+{
+    int rc;
+
+    switch ( op )
+    {
+    case XENMEM_add_to_physmap:
+    {
+        struct xen_add_to_physmap xatp;
+        struct domain *d;
 
-        /* Map at new location. */
-        rc = guest_physmap_add_page(d, xatp.gpfn, mfn, 0);
+        if ( copy_from_guest(&xatp, arg, 1) )
+            return -EFAULT;
+
+        rc = rcu_lock_target_domain_by_id(xatp.domid, &d);
+        if ( rc != 0 )
+            return rc;
+
+        if ( xsm_add_to_physmap(current->domain, d) )
+        {
+            rcu_unlock_domain(d);
+            return -EPERM;
+        }
 
-        domain_unlock(d);
+        xenmem_add_to_physmap(d, xatp);
 
         rcu_unlock_domain(d);
 


* [PATCH 3/6] add_to_physmap: Move the code for XENMEM_add_to_physmap.
  2011-11-07 18:25   ` [PATCH 2/6] iommu: Introduce iommu_flush and iommu_flush_all Jean Guyader
@ 2011-11-07 18:25     ` Jean Guyader
  2011-11-08 13:24       ` Tim Deegan
  0 siblings, 1 reply; 15+ messages in thread
From: Jean Guyader @ 2011-11-07 18:25 UTC (permalink / raw)
  To: xen-devel; +Cc: allen.m.kay, tim, Jean Guyader

[-- Attachment #1: Type: text/plain, Size: 293 bytes --]


Move the code for the XENMEM_add_to_physmap case into its own
function (xenmem_add_to_physmap).

Signed-off-by: Jean Guyader <jean.guyader@eu.citrix.com>
---
 xen/arch/x86/mm.c |  188 ++++++++++++++++++++++++++++-------------------------
 1 files changed, 99 insertions(+), 89 deletions(-)


[-- Warning: decoded text below may be mangled, UTF-8 assumed --]
[-- Attachment #2: 0003-add_to_physmap-Move-the-code-for-XENMEM_add_to_physm.patch --]
[-- Type: text/x-patch; name="0003-add_to_physmap-Move-the-code-for-XENMEM_add_to_physm.patch", Size: 7065 bytes --]

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index acc1f34..f75011e 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -4592,119 +4592,129 @@ static int handle_iomem_range(unsigned long s, unsigned long e, void *p)
     return 0;
 }
 
-long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
+static int xenmem_add_to_physmap(struct domain *d, struct xen_add_to_physmap xatp)
 {
     struct page_info *page = NULL;
+    unsigned long prev_mfn, mfn = 0, gpfn;
     int rc;
 
-    switch ( op )
-    {
-    case XENMEM_add_to_physmap:
+    switch ( xatp.space )
     {
-        struct xen_add_to_physmap xatp;
-        unsigned long prev_mfn, mfn = 0, gpfn;
-        struct domain *d;
-
-        if ( copy_from_guest(&xatp, arg, 1) )
-            return -EFAULT;
+    case XENMAPSPACE_shared_info:
+        if ( xatp.idx == 0 )
+            mfn = virt_to_mfn(d->shared_info);
+        break;
+    case XENMAPSPACE_grant_table:
+        spin_lock(&d->grant_table->lock);
 
-        rc = rcu_lock_target_domain_by_id(xatp.domid, &d);
-        if ( rc != 0 )
-            return rc;
+        if ( d->grant_table->gt_version == 0 )
+            d->grant_table->gt_version = 1;
 
-        if ( xsm_add_to_physmap(current->domain, d) )
+        if ( d->grant_table->gt_version == 2 &&
+             (xatp.idx & XENMAPIDX_grant_table_status) )
         {
-            rcu_unlock_domain(d);
-            return -EPERM;
+            xatp.idx &= ~XENMAPIDX_grant_table_status;
+            if ( xatp.idx < nr_status_frames(d->grant_table) )
+                mfn = virt_to_mfn(d->grant_table->status[xatp.idx]);
+        }
+        else
+        {
+            if ( (xatp.idx >= nr_grant_frames(d->grant_table)) &&
+                 (xatp.idx < max_nr_grant_frames) )
+                gnttab_grow_table(d, xatp.idx + 1);
+
+            if ( xatp.idx < nr_grant_frames(d->grant_table) )
+                mfn = virt_to_mfn(d->grant_table->shared_raw[xatp.idx]);
         }
 
-        switch ( xatp.space )
+        spin_unlock(&d->grant_table->lock);
+        break;
+    case XENMAPSPACE_gmfn:
+    {
+        p2m_type_t p2mt;
+
+        xatp.idx = mfn_x(gfn_to_mfn_unshare(d, xatp.idx, &p2mt));
+        /* If the page is still shared, exit early */
+        if ( p2m_is_shared(p2mt) )
         {
-        case XENMAPSPACE_shared_info:
-            if ( xatp.idx == 0 )
-                mfn = virt_to_mfn(d->shared_info);
+            rcu_unlock_domain(d);
+            return -ENOMEM;
+        }
+        if ( !get_page_from_pagenr(xatp.idx, d) )
             break;
-        case XENMAPSPACE_grant_table:
-            spin_lock(&d->grant_table->lock);
+        mfn = xatp.idx;
+        page = mfn_to_page(mfn);
+        break;
+    }
+    default:
+        break;
+    }
 
-            if ( d->grant_table->gt_version == 0 )
-                d->grant_table->gt_version = 1;
+    if ( !paging_mode_translate(d) || (mfn == 0) )
+    {
+        if ( page )
+            put_page(page);
+        rcu_unlock_domain(d);
+        return -EINVAL;
+    }
 
-            if ( d->grant_table->gt_version == 2 &&
-                 (xatp.idx & XENMAPIDX_grant_table_status) )
-            {
-                xatp.idx &= ~XENMAPIDX_grant_table_status;
-                if ( xatp.idx < nr_status_frames(d->grant_table) )
-                    mfn = virt_to_mfn(d->grant_table->status[xatp.idx]);
-            }
-            else
-            {
-                if ( (xatp.idx >= nr_grant_frames(d->grant_table)) &&
-                     (xatp.idx < max_nr_grant_frames) )
-                    gnttab_grow_table(d, xatp.idx + 1);
+    domain_lock(d);
 
-                if ( xatp.idx < nr_grant_frames(d->grant_table) )
-                    mfn = virt_to_mfn(d->grant_table->shared_raw[xatp.idx]);
-            }
+    if ( page )
+        put_page(page);
 
-            spin_unlock(&d->grant_table->lock);
-            break;
-        case XENMAPSPACE_gmfn:
-        {
-            p2m_type_t p2mt;
+    /* Remove previously mapped page if it was present. */
+    prev_mfn = gmfn_to_mfn(d, xatp.gpfn);
+    if ( mfn_valid(prev_mfn) )
+    {
+        if ( is_xen_heap_mfn(prev_mfn) )
+            /* Xen heap frames are simply unhooked from this phys slot. */
+            guest_physmap_remove_page(d, xatp.gpfn, prev_mfn, 0);
+        else
+            /* Normal domain memory is freed, to avoid leaking memory. */
+            guest_remove_page(d, xatp.gpfn);
+    }
 
-            xatp.idx = mfn_x(gfn_to_mfn_unshare(d, xatp.idx, &p2mt));
-            /* If the page is still shared, exit early */
-            if ( p2m_is_shared(p2mt) )
-            {
-                rcu_unlock_domain(d);
-                return -ENOMEM;
-            }
-            if ( !get_page_from_pagenr(xatp.idx, d) )
-                break;
-            mfn = xatp.idx;
-            page = mfn_to_page(mfn);
-            break;
-        }
-        default:
-            break;
-        }
+    /* Unmap from old location, if any. */
+    gpfn = get_gpfn_from_mfn(mfn);
+    ASSERT( gpfn != SHARED_M2P_ENTRY );
+    if ( gpfn != INVALID_M2P_ENTRY )
+        guest_physmap_remove_page(d, gpfn, mfn, 0);
 
-        if ( !paging_mode_translate(d) || (mfn == 0) )
-        {
-            if ( page )
-                put_page(page);
-            rcu_unlock_domain(d);
-            return -EINVAL;
-        }
+    /* Map at new location. */
+    rc = guest_physmap_add_page(d, xatp.gpfn, mfn, 0);
 
-        domain_lock(d);
+    domain_unlock(d);
 
-        if ( page )
-            put_page(page);
+    return rc;
+}
 
-        /* Remove previously mapped page if it was present. */
-        prev_mfn = gmfn_to_mfn(d, xatp.gpfn);
-        if ( mfn_valid(prev_mfn) )
-        {
-            if ( is_xen_heap_mfn(prev_mfn) )
-                /* Xen heap frames are simply unhooked from this phys slot. */
-                guest_physmap_remove_page(d, xatp.gpfn, prev_mfn, 0);
-            else
-                /* Normal domain memory is freed, to avoid leaking memory. */
-                guest_remove_page(d, xatp.gpfn);
-        }
 
-        /* Unmap from old location, if any. */
-        gpfn = get_gpfn_from_mfn(mfn);
-        ASSERT( gpfn != SHARED_M2P_ENTRY );
-        if ( gpfn != INVALID_M2P_ENTRY )
-            guest_physmap_remove_page(d, gpfn, mfn, 0);
+long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
+{
+    int rc;
+
+    switch ( op )
+    {
+    case XENMEM_add_to_physmap:
+    {
+        struct xen_add_to_physmap xatp;
+        struct domain *d;
 
-        /* Map at new location. */
-        rc = guest_physmap_add_page(d, xatp.gpfn, mfn, 0);
+        if ( copy_from_guest(&xatp, arg, 1) )
+            return -EFAULT;
+
+        rc = rcu_lock_target_domain_by_id(xatp.domid, &d);
+        if ( rc != 0 )
+            return rc;
+
+        if ( xsm_add_to_physmap(current->domain, d) )
+        {
+            rcu_unlock_domain(d);
+            return -EPERM;
+        }
 
-        domain_unlock(d);
+        xenmem_add_to_physmap(d, xatp);
 
         rcu_unlock_domain(d);
 


* Re: [PATCH 3/6] add_to_physmap: Move the code for XENMEM_add_to_physmap.
  2011-11-07 18:25     ` [PATCH 3/6] add_to_physmap: Move the code for XENMEM_add_to_physmap Jean Guyader
@ 2011-11-08 13:24       ` Tim Deegan
  0 siblings, 0 replies; 15+ messages in thread
From: Tim Deegan @ 2011-11-08 13:24 UTC (permalink / raw)
  To: Jean Guyader; +Cc: xen-devel, allen.m.kay

At 18:25 +0000 on 07 Nov (1320690324), Jean Guyader wrote:
> 
> Move the code for the XENMEM_add_to_physmap case into its own
> function (xenmem_add_to_physmap).
> 
> Signed-off-by: Jean Guyader <jean.guyader@eu.citrix.com>

Acked-by: Tim Deegan <tim@xen.org>


* [PATCH 3/6] add_to_physmap: Move the code for XENMEM_add_to_physmap.
  2011-11-08 20:04   ` [PATCH 2/6] iommu: Introduce iommu_flush and iommu_flush_all Jean Guyader
@ 2011-11-08 20:04     ` Jean Guyader
  0 siblings, 0 replies; 15+ messages in thread
From: Jean Guyader @ 2011-11-08 20:04 UTC (permalink / raw)
  To: xen-devel; +Cc: allen.m.kay, tim, Jean Guyader

[-- Attachment #1: Type: text/plain, Size: 293 bytes --]


Move the code for the XENMEM_add_to_physmap case into its own
function (xenmem_add_to_physmap).

Signed-off-by: Jean Guyader <jean.guyader@eu.citrix.com>
---
 xen/arch/x86/mm.c |  188 ++++++++++++++++++++++++++++-------------------------
 1 files changed, 99 insertions(+), 89 deletions(-)


[-- Warning: decoded text below may be mangled, UTF-8 assumed --]
[-- Attachment #2: 0003-add_to_physmap-Move-the-code-for-XENMEM_add_to_physm.patch --]
[-- Type: text/x-patch; name="0003-add_to_physmap-Move-the-code-for-XENMEM_add_to_physm.patch", Size: 7065 bytes --]

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index acc1f34..f75011e 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -4592,119 +4592,129 @@ static int handle_iomem_range(unsigned long s, unsigned long e, void *p)
     return 0;
 }
 
-long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
+static int xenmem_add_to_physmap(struct domain *d, struct xen_add_to_physmap xatp)
 {
     struct page_info *page = NULL;
+    unsigned long prev_mfn, mfn = 0, gpfn;
     int rc;
 
-    switch ( op )
-    {
-    case XENMEM_add_to_physmap:
+    switch ( xatp.space )
     {
-        struct xen_add_to_physmap xatp;
-        unsigned long prev_mfn, mfn = 0, gpfn;
-        struct domain *d;
-
-        if ( copy_from_guest(&xatp, arg, 1) )
-            return -EFAULT;
+    case XENMAPSPACE_shared_info:
+        if ( xatp.idx == 0 )
+            mfn = virt_to_mfn(d->shared_info);
+        break;
+    case XENMAPSPACE_grant_table:
+        spin_lock(&d->grant_table->lock);
 
-        rc = rcu_lock_target_domain_by_id(xatp.domid, &d);
-        if ( rc != 0 )
-            return rc;
+        if ( d->grant_table->gt_version == 0 )
+            d->grant_table->gt_version = 1;
 
-        if ( xsm_add_to_physmap(current->domain, d) )
+        if ( d->grant_table->gt_version == 2 &&
+             (xatp.idx & XENMAPIDX_grant_table_status) )
         {
-            rcu_unlock_domain(d);
-            return -EPERM;
+            xatp.idx &= ~XENMAPIDX_grant_table_status;
+            if ( xatp.idx < nr_status_frames(d->grant_table) )
+                mfn = virt_to_mfn(d->grant_table->status[xatp.idx]);
+        }
+        else
+        {
+            if ( (xatp.idx >= nr_grant_frames(d->grant_table)) &&
+                 (xatp.idx < max_nr_grant_frames) )
+                gnttab_grow_table(d, xatp.idx + 1);
+
+            if ( xatp.idx < nr_grant_frames(d->grant_table) )
+                mfn = virt_to_mfn(d->grant_table->shared_raw[xatp.idx]);
         }
 
-        switch ( xatp.space )
+        spin_unlock(&d->grant_table->lock);
+        break;
+    case XENMAPSPACE_gmfn:
+    {
+        p2m_type_t p2mt;
+
+        xatp.idx = mfn_x(gfn_to_mfn_unshare(d, xatp.idx, &p2mt));
+        /* If the page is still shared, exit early */
+        if ( p2m_is_shared(p2mt) )
         {
-        case XENMAPSPACE_shared_info:
-            if ( xatp.idx == 0 )
-                mfn = virt_to_mfn(d->shared_info);
+            rcu_unlock_domain(d);
+            return -ENOMEM;
+        }
+        if ( !get_page_from_pagenr(xatp.idx, d) )
             break;
-        case XENMAPSPACE_grant_table:
-            spin_lock(&d->grant_table->lock);
+        mfn = xatp.idx;
+        page = mfn_to_page(mfn);
+        break;
+    }
+    default:
+        break;
+    }
 
-            if ( d->grant_table->gt_version == 0 )
-                d->grant_table->gt_version = 1;
+    if ( !paging_mode_translate(d) || (mfn == 0) )
+    {
+        if ( page )
+            put_page(page);
+        rcu_unlock_domain(d);
+        return -EINVAL;
+    }
 
-            if ( d->grant_table->gt_version == 2 &&
-                 (xatp.idx & XENMAPIDX_grant_table_status) )
-            {
-                xatp.idx &= ~XENMAPIDX_grant_table_status;
-                if ( xatp.idx < nr_status_frames(d->grant_table) )
-                    mfn = virt_to_mfn(d->grant_table->status[xatp.idx]);
-            }
-            else
-            {
-                if ( (xatp.idx >= nr_grant_frames(d->grant_table)) &&
-                     (xatp.idx < max_nr_grant_frames) )
-                    gnttab_grow_table(d, xatp.idx + 1);
+    domain_lock(d);
 
-                if ( xatp.idx < nr_grant_frames(d->grant_table) )
-                    mfn = virt_to_mfn(d->grant_table->shared_raw[xatp.idx]);
-            }
+    if ( page )
+        put_page(page);
 
-            spin_unlock(&d->grant_table->lock);
-            break;
-        case XENMAPSPACE_gmfn:
-        {
-            p2m_type_t p2mt;
+    /* Remove previously mapped page if it was present. */
+    prev_mfn = gmfn_to_mfn(d, xatp.gpfn);
+    if ( mfn_valid(prev_mfn) )
+    {
+        if ( is_xen_heap_mfn(prev_mfn) )
+            /* Xen heap frames are simply unhooked from this phys slot. */
+            guest_physmap_remove_page(d, xatp.gpfn, prev_mfn, 0);
+        else
+            /* Normal domain memory is freed, to avoid leaking memory. */
+            guest_remove_page(d, xatp.gpfn);
+    }
 
-            xatp.idx = mfn_x(gfn_to_mfn_unshare(d, xatp.idx, &p2mt));
-            /* If the page is still shared, exit early */
-            if ( p2m_is_shared(p2mt) )
-            {
-                rcu_unlock_domain(d);
-                return -ENOMEM;
-            }
-            if ( !get_page_from_pagenr(xatp.idx, d) )
-                break;
-            mfn = xatp.idx;
-            page = mfn_to_page(mfn);
-            break;
-        }
-        default:
-            break;
-        }
+    /* Unmap from old location, if any. */
+    gpfn = get_gpfn_from_mfn(mfn);
+    ASSERT( gpfn != SHARED_M2P_ENTRY );
+    if ( gpfn != INVALID_M2P_ENTRY )
+        guest_physmap_remove_page(d, gpfn, mfn, 0);
 
-        if ( !paging_mode_translate(d) || (mfn == 0) )
-        {
-            if ( page )
-                put_page(page);
-            rcu_unlock_domain(d);
-            return -EINVAL;
-        }
+    /* Map at new location. */
+    rc = guest_physmap_add_page(d, xatp.gpfn, mfn, 0);
 
-        domain_lock(d);
+    domain_unlock(d);
 
-        if ( page )
-            put_page(page);
+    return rc;
+}
 
-        /* Remove previously mapped page if it was present. */
-        prev_mfn = gmfn_to_mfn(d, xatp.gpfn);
-        if ( mfn_valid(prev_mfn) )
-        {
-            if ( is_xen_heap_mfn(prev_mfn) )
-                /* Xen heap frames are simply unhooked from this phys slot. */
-                guest_physmap_remove_page(d, xatp.gpfn, prev_mfn, 0);
-            else
-                /* Normal domain memory is freed, to avoid leaking memory. */
-                guest_remove_page(d, xatp.gpfn);
-        }
 
-        /* Unmap from old location, if any. */
-        gpfn = get_gpfn_from_mfn(mfn);
-        ASSERT( gpfn != SHARED_M2P_ENTRY );
-        if ( gpfn != INVALID_M2P_ENTRY )
-            guest_physmap_remove_page(d, gpfn, mfn, 0);
+long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
+{
+    int rc;
+
+    switch ( op )
+    {
+    case XENMEM_add_to_physmap:
+    {
+        struct xen_add_to_physmap xatp;
+        struct domain *d;
 
-        /* Map at new location. */
-        rc = guest_physmap_add_page(d, xatp.gpfn, mfn, 0);
+        if ( copy_from_guest(&xatp, arg, 1) )
+            return -EFAULT;
+
+        rc = rcu_lock_target_domain_by_id(xatp.domid, &d);
+        if ( rc != 0 )
+            return rc;
+
+        if ( xsm_add_to_physmap(current->domain, d) )
+        {
+            rcu_unlock_domain(d);
+            return -EPERM;
+        }
 
-        domain_unlock(d);
+        xenmem_add_to_physmap(d, xatp);
 
         rcu_unlock_domain(d);
 


* [PATCH 3/6] add_to_physmap: Move the code for XENMEM_add_to_physmap.
  2011-11-10  8:44   ` [PATCH 2/6] iommu: Introduce iommu_flush and iommu_flush_all Jean Guyader
@ 2011-11-10  8:44     ` Jean Guyader
  0 siblings, 0 replies; 15+ messages in thread
From: Jean Guyader @ 2011-11-10  8:44 UTC (permalink / raw)
  To: xen-devel; +Cc: tim, allen.m.kay, keir, Jean Guyader, JBeulich

[-- Attachment #1: Type: text/plain, Size: 328 bytes --]


Move the code for the XENMEM_add_to_physmap case into its own
function (xenmem_add_to_physmap).

Signed-off-by: Jean Guyader <jean.guyader@eu.citrix.com>
Acked-by: Tim Deegan <tim@xen.org>
---
 xen/arch/x86/mm.c |  188 ++++++++++++++++++++++++++++-------------------------
 1 files changed, 99 insertions(+), 89 deletions(-)


[-- Warning: decoded text below may be mangled, UTF-8 assumed --]
[-- Attachment #2: 0003-add_to_physmap-Move-the-code-for-XENMEM_add_to_physm.patch --]
[-- Type: text/x-patch; name="0003-add_to_physmap-Move-the-code-for-XENMEM_add_to_physm.patch", Size: 7065 bytes --]

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index acc1f34..f75011e 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -4592,119 +4592,129 @@ static int handle_iomem_range(unsigned long s, unsigned long e, void *p)
     return 0;
 }
 
-long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
+static int xenmem_add_to_physmap(struct domain *d, struct xen_add_to_physmap xatp)
 {
     struct page_info *page = NULL;
+    unsigned long prev_mfn, mfn = 0, gpfn;
     int rc;
 
-    switch ( op )
-    {
-    case XENMEM_add_to_physmap:
+    switch ( xatp.space )
     {
-        struct xen_add_to_physmap xatp;
-        unsigned long prev_mfn, mfn = 0, gpfn;
-        struct domain *d;
-
-        if ( copy_from_guest(&xatp, arg, 1) )
-            return -EFAULT;
+    case XENMAPSPACE_shared_info:
+        if ( xatp.idx == 0 )
+            mfn = virt_to_mfn(d->shared_info);
+        break;
+    case XENMAPSPACE_grant_table:
+        spin_lock(&d->grant_table->lock);
 
-        rc = rcu_lock_target_domain_by_id(xatp.domid, &d);
-        if ( rc != 0 )
-            return rc;
+        if ( d->grant_table->gt_version == 0 )
+            d->grant_table->gt_version = 1;
 
-        if ( xsm_add_to_physmap(current->domain, d) )
+        if ( d->grant_table->gt_version == 2 &&
+             (xatp.idx & XENMAPIDX_grant_table_status) )
         {
-            rcu_unlock_domain(d);
-            return -EPERM;
+            xatp.idx &= ~XENMAPIDX_grant_table_status;
+            if ( xatp.idx < nr_status_frames(d->grant_table) )
+                mfn = virt_to_mfn(d->grant_table->status[xatp.idx]);
+        }
+        else
+        {
+            if ( (xatp.idx >= nr_grant_frames(d->grant_table)) &&
+                 (xatp.idx < max_nr_grant_frames) )
+                gnttab_grow_table(d, xatp.idx + 1);
+
+            if ( xatp.idx < nr_grant_frames(d->grant_table) )
+                mfn = virt_to_mfn(d->grant_table->shared_raw[xatp.idx]);
         }
 
-        switch ( xatp.space )
+        spin_unlock(&d->grant_table->lock);
+        break;
+    case XENMAPSPACE_gmfn:
+    {
+        p2m_type_t p2mt;
+
+        xatp.idx = mfn_x(gfn_to_mfn_unshare(d, xatp.idx, &p2mt));
+        /* If the page is still shared, exit early */
+        if ( p2m_is_shared(p2mt) )
         {
-        case XENMAPSPACE_shared_info:
-            if ( xatp.idx == 0 )
-                mfn = virt_to_mfn(d->shared_info);
+            rcu_unlock_domain(d);
+            return -ENOMEM;
+        }
+        if ( !get_page_from_pagenr(xatp.idx, d) )
             break;
-        case XENMAPSPACE_grant_table:
-            spin_lock(&d->grant_table->lock);
+        mfn = xatp.idx;
+        page = mfn_to_page(mfn);
+        break;
+    }
+    default:
+        break;
+    }
 
-            if ( d->grant_table->gt_version == 0 )
-                d->grant_table->gt_version = 1;
+    if ( !paging_mode_translate(d) || (mfn == 0) )
+    {
+        if ( page )
+            put_page(page);
+        rcu_unlock_domain(d);
+        return -EINVAL;
+    }
 
-            if ( d->grant_table->gt_version == 2 &&
-                 (xatp.idx & XENMAPIDX_grant_table_status) )
-            {
-                xatp.idx &= ~XENMAPIDX_grant_table_status;
-                if ( xatp.idx < nr_status_frames(d->grant_table) )
-                    mfn = virt_to_mfn(d->grant_table->status[xatp.idx]);
-            }
-            else
-            {
-                if ( (xatp.idx >= nr_grant_frames(d->grant_table)) &&
-                     (xatp.idx < max_nr_grant_frames) )
-                    gnttab_grow_table(d, xatp.idx + 1);
+    domain_lock(d);
 
-                if ( xatp.idx < nr_grant_frames(d->grant_table) )
-                    mfn = virt_to_mfn(d->grant_table->shared_raw[xatp.idx]);
-            }
+    if ( page )
+        put_page(page);
 
-            spin_unlock(&d->grant_table->lock);
-            break;
-        case XENMAPSPACE_gmfn:
-        {
-            p2m_type_t p2mt;
+    /* Remove previously mapped page if it was present. */
+    prev_mfn = gmfn_to_mfn(d, xatp.gpfn);
+    if ( mfn_valid(prev_mfn) )
+    {
+        if ( is_xen_heap_mfn(prev_mfn) )
+            /* Xen heap frames are simply unhooked from this phys slot. */
+            guest_physmap_remove_page(d, xatp.gpfn, prev_mfn, 0);
+        else
+            /* Normal domain memory is freed, to avoid leaking memory. */
+            guest_remove_page(d, xatp.gpfn);
+    }
 
-            xatp.idx = mfn_x(gfn_to_mfn_unshare(d, xatp.idx, &p2mt));
-            /* If the page is still shared, exit early */
-            if ( p2m_is_shared(p2mt) )
-            {
-                rcu_unlock_domain(d);
-                return -ENOMEM;
-            }
-            if ( !get_page_from_pagenr(xatp.idx, d) )
-                break;
-            mfn = xatp.idx;
-            page = mfn_to_page(mfn);
-            break;
-        }
-        default:
-            break;
-        }
+    /* Unmap from old location, if any. */
+    gpfn = get_gpfn_from_mfn(mfn);
+    ASSERT( gpfn != SHARED_M2P_ENTRY );
+    if ( gpfn != INVALID_M2P_ENTRY )
+        guest_physmap_remove_page(d, gpfn, mfn, 0);
 
-        if ( !paging_mode_translate(d) || (mfn == 0) )
-        {
-            if ( page )
-                put_page(page);
-            rcu_unlock_domain(d);
-            return -EINVAL;
-        }
+    /* Map at new location. */
+    rc = guest_physmap_add_page(d, xatp.gpfn, mfn, 0);
 
-        domain_lock(d);
+    domain_unlock(d);
 
-        if ( page )
-            put_page(page);
+    return rc;
+}
 
-        /* Remove previously mapped page if it was present. */
-        prev_mfn = gmfn_to_mfn(d, xatp.gpfn);
-        if ( mfn_valid(prev_mfn) )
-        {
-            if ( is_xen_heap_mfn(prev_mfn) )
-                /* Xen heap frames are simply unhooked from this phys slot. */
-                guest_physmap_remove_page(d, xatp.gpfn, prev_mfn, 0);
-            else
-                /* Normal domain memory is freed, to avoid leaking memory. */
-                guest_remove_page(d, xatp.gpfn);
-        }
 
-        /* Unmap from old location, if any. */
-        gpfn = get_gpfn_from_mfn(mfn);
-        ASSERT( gpfn != SHARED_M2P_ENTRY );
-        if ( gpfn != INVALID_M2P_ENTRY )
-            guest_physmap_remove_page(d, gpfn, mfn, 0);
+long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
+{
+    int rc;
+
+    switch ( op )
+    {
+    case XENMEM_add_to_physmap:
+    {
+        struct xen_add_to_physmap xatp;
+        struct domain *d;
 
-        /* Map at new location. */
-        rc = guest_physmap_add_page(d, xatp.gpfn, mfn, 0);
+        if ( copy_from_guest(&xatp, arg, 1) )
+            return -EFAULT;
+
+        rc = rcu_lock_target_domain_by_id(xatp.domid, &d);
+        if ( rc != 0 )
+            return rc;
+
+        if ( xsm_add_to_physmap(current->domain, d) )
+        {
+            rcu_unlock_domain(d);
+            return -EPERM;
+        }
 
-        domain_unlock(d);
+        xenmem_add_to_physmap(d, xatp);
 
         rcu_unlock_domain(d);
 


* [PATCH 3/6] add_to_physmap: Move the code for XENMEM_add_to_physmap
  2011-11-13 17:40   ` [PATCH 2/6] iommu: Introduce iommu_flush and iommu_flush_all Jean Guyader
@ 2011-11-13 17:40     ` Jean Guyader
  0 siblings, 0 replies; 15+ messages in thread
From: Jean Guyader @ 2011-11-13 17:40 UTC (permalink / raw)
  To: xen-devel; +Cc: tim, allen.m.kay, keir, Jean Guyader, JBeulich

[-- Attachment #1: Type: text/plain, Size: 294 bytes --]


Move the code for the XENMEM_add_to_physmap case into its own
function (xenmem_add_to_physmap).

Signed-off-by: Jean Guyader <jean.guyader@eu.citrix.com>
---
 xen/arch/x86/mm.c |  189 ++++++++++++++++++++++++++++-------------------------
 1 files changed, 100 insertions(+), 89 deletions(-)


[-- Warning: decoded text below may be mangled, UTF-8 assumed --]
[-- Attachment #2: 0003-add_to_physmap-Move-the-code-for-XENMEM_add_to_physm.patch --]
[-- Type: text/x-patch; name="0003-add_to_physmap-Move-the-code-for-XENMEM_add_to_physm.patch", Size: 6630 bytes --]

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index acc1f34..7cbbb07 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -4592,9 +4592,107 @@ static int handle_iomem_range(unsigned long s, unsigned long e, void *p)
     return 0;
 }
 
+static int xenmem_add_to_physmap(struct domain *d,
+                                 struct xen_add_to_physmap xatp)
+{
+    struct page_info* page = NULL;
+    unsigned long mfn = 0;
+    unsigned long prev_mfn, gpfn;
+    int rc;
+
+    switch ( xatp.space )
+    {
+    case XENMAPSPACE_shared_info:
+        if ( xatp.idx == 0 )
+            mfn = virt_to_mfn(d->shared_info);
+        break;
+    case XENMAPSPACE_grant_table:
+        spin_lock(&d->grant_table->lock);
+
+        if ( d->grant_table->gt_version == 0 )
+            d->grant_table->gt_version = 1;
+
+        if ( d->grant_table->gt_version == 2 &&
+             (xatp.idx & XENMAPIDX_grant_table_status) )
+        {
+            xatp.idx &= ~XENMAPIDX_grant_table_status;
+            if ( xatp.idx < nr_status_frames(d->grant_table) )
+                mfn = virt_to_mfn(d->grant_table->status[xatp.idx]);
+        }
+        else
+        {
+            if ( (xatp.idx >= nr_grant_frames(d->grant_table)) &&
+                 (xatp.idx < max_nr_grant_frames) )
+                gnttab_grow_table(d, xatp.idx + 1);
+
+            if ( xatp.idx < nr_grant_frames(d->grant_table) )
+                mfn = virt_to_mfn(d->grant_table->shared_raw[xatp.idx]);
+        }
+
+        spin_unlock(&d->grant_table->lock);
+        break;
+    case XENMAPSPACE_gmfn:
+    {
+        p2m_type_t p2mt;
+
+        xatp.idx = mfn_x(gfn_to_mfn_unshare(d, xatp.idx, &p2mt));
+        /* If the page is still shared, exit early */
+        if ( p2m_is_shared(p2mt) )
+        {
+            rcu_unlock_domain(d);
+            return -ENOMEM;
+        }
+        if ( !get_page_from_pagenr(xatp.idx, d) )
+            break;
+        mfn = xatp.idx;
+        page = mfn_to_page(mfn);
+        break;
+    }
+    default:
+        break;
+    }
+
+    if ( !paging_mode_translate(d) || (mfn == 0) )
+    {
+        if ( page )
+            put_page(page);
+        rcu_unlock_domain(d);
+        return -EINVAL;
+    }
+
+    domain_lock(d);
+
+    if ( page )
+        put_page(page);
+
+    /* Remove previously mapped page if it was present. */
+    prev_mfn = gmfn_to_mfn(d, xatp.gpfn);
+    if ( mfn_valid(prev_mfn) )
+    {
+        if ( is_xen_heap_mfn(prev_mfn) )
+            /* Xen heap frames are simply unhooked from this phys slot. */
+            guest_physmap_remove_page(d, xatp.gpfn, prev_mfn, 0);
+        else
+            /* Normal domain memory is freed, to avoid leaking memory. */
+            guest_remove_page(d, xatp.gpfn);
+    }
+
+    /* Unmap from old location, if any. */
+    gpfn = get_gpfn_from_mfn(mfn);
+    ASSERT( gpfn != SHARED_M2P_ENTRY );
+    if ( gpfn != INVALID_M2P_ENTRY )
+        guest_physmap_remove_page(d, gpfn, mfn, 0);
+
+    /* Map at new location. */
+    rc = guest_physmap_add_page(d, xatp.gpfn, mfn, 0);
+
+    domain_unlock(d);
+
+    return rc;
+}
+
 long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
 {
-    struct page_info *page = NULL;
     int rc;
 
     switch ( op )
@@ -4602,7 +4700,6 @@ long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
     case XENMEM_add_to_physmap:
     {
         struct xen_add_to_physmap xatp;
-        unsigned long prev_mfn, mfn = 0, gpfn;
         struct domain *d;
 
         if ( copy_from_guest(&xatp, arg, 1) )
@@ -4618,93 +4715,7 @@ long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
             return -EPERM;
         }
 
-        switch ( xatp.space )
-        {
-        case XENMAPSPACE_shared_info:
-            if ( xatp.idx == 0 )
-                mfn = virt_to_mfn(d->shared_info);
-            break;
-        case XENMAPSPACE_grant_table:
-            spin_lock(&d->grant_table->lock);
-
-            if ( d->grant_table->gt_version == 0 )
-                d->grant_table->gt_version = 1;
-
-            if ( d->grant_table->gt_version == 2 &&
-                 (xatp.idx & XENMAPIDX_grant_table_status) )
-            {
-                xatp.idx &= ~XENMAPIDX_grant_table_status;
-                if ( xatp.idx < nr_status_frames(d->grant_table) )
-                    mfn = virt_to_mfn(d->grant_table->status[xatp.idx]);
-            }
-            else
-            {
-                if ( (xatp.idx >= nr_grant_frames(d->grant_table)) &&
-                     (xatp.idx < max_nr_grant_frames) )
-                    gnttab_grow_table(d, xatp.idx + 1);
-
-                if ( xatp.idx < nr_grant_frames(d->grant_table) )
-                    mfn = virt_to_mfn(d->grant_table->shared_raw[xatp.idx]);
-            }
-
-            spin_unlock(&d->grant_table->lock);
-            break;
-        case XENMAPSPACE_gmfn:
-        {
-            p2m_type_t p2mt;
-
-            xatp.idx = mfn_x(gfn_to_mfn_unshare(d, xatp.idx, &p2mt));
-            /* If the page is still shared, exit early */
-            if ( p2m_is_shared(p2mt) )
-            {
-                rcu_unlock_domain(d);
-                return -ENOMEM;
-            }
-            if ( !get_page_from_pagenr(xatp.idx, d) )
-                break;
-            mfn = xatp.idx;
-            page = mfn_to_page(mfn);
-            break;
-        }
-        default:
-            break;
-        }
-
-        if ( !paging_mode_translate(d) || (mfn == 0) )
-        {
-            if ( page )
-                put_page(page);
-            rcu_unlock_domain(d);
-            return -EINVAL;
-        }
-
-        domain_lock(d);
-
-        if ( page )
-            put_page(page);
-
-        /* Remove previously mapped page if it was present. */
-        prev_mfn = gmfn_to_mfn(d, xatp.gpfn);
-        if ( mfn_valid(prev_mfn) )
-        {
-            if ( is_xen_heap_mfn(prev_mfn) )
-                /* Xen heap frames are simply unhooked from this phys slot. */
-                guest_physmap_remove_page(d, xatp.gpfn, prev_mfn, 0);
-            else
-                /* Normal domain memory is freed, to avoid leaking memory. */
-                guest_remove_page(d, xatp.gpfn);
-        }
-
-        /* Unmap from old location, if any. */
-        gpfn = get_gpfn_from_mfn(mfn);
-        ASSERT( gpfn != SHARED_M2P_ENTRY );
-        if ( gpfn != INVALID_M2P_ENTRY )
-            guest_physmap_remove_page(d, gpfn, mfn, 0);
-
-        /* Map at new location. */
-        rc = guest_physmap_add_page(d, xatp.gpfn, mfn, 0);
-
-        domain_unlock(d);
+        rc = xenmem_add_to_physmap(d, xatp);
 
         rcu_unlock_domain(d);
 


* Re: [PATCH 3/6] add_to_physmap: Move the code for XENMEM_add_to_physmap
       [not found] <20111113175006.1EF1C72C36F@homiemail-mx8.g.dreamhost.com>
@ 2011-11-14 14:17 ` Andres Lagar-Cavilla
  0 siblings, 0 replies; 15+ messages in thread
From: Andres Lagar-Cavilla @ 2011-11-14 14:17 UTC (permalink / raw)
  To: xen-devel, jean.guyader

> Date: Sun, 13 Nov 2011 17:40:49 +0000
> From: Jean Guyader <jean.guyader@eu.citrix.com>
> Subject: [Xen-devel] [PATCH 3/6] add_to_physmap: Move the code for
> 	XENMEM_add_to_physmap
> To: <xen-devel@lists.xensource.com>
> Cc: tim@xen.org, allen.m.kay@intel.com, keir@xen.org,	Jean Guyader
> 	<jean.guyader@eu.citrix.com>, JBeulich@suse.com
> Message-ID:
> 	<1321206052-18833-4-git-send-email-jean.guyader@eu.citrix.com>
> Content-Type: text/plain; charset="utf-8"
>
>
> Move the code for the XENMEM_add_to_physmap case into its own
> function (xenmem_add_to_physmap).
>
> Signed-off-by: Jean Guyader <jean.guyader@eu.citrix.com>
> ---
Jean, with the p2m API changes, your patch won't compile any more.

gfn_to_mfn* is now get_gfn*, with the need to put_gfn once done working
with the p2m translation. From what I've seen, your patch need not worry
about put_gfn, although it needs rebasing.
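
For reference, a minimal sketch of the lookup-pattern change Andres describes.
The wrapper functions below (lookup_old, lookup_new) are hypothetical and only
illustrative; the p2m calls themselves (gfn_to_mfn_unshare, get_gfn_unshare,
put_gfn, mfn_x) are the ones used in the patches elsewhere in this thread.

/* Before the p2m API change: translate a gfn, nothing to release after. */
static unsigned long lookup_old(struct domain *d, unsigned long gfn)
{
    p2m_type_t p2mt;

    return mfn_x(gfn_to_mfn_unshare(d, gfn, &p2mt));
}

/* After the p2m API change: every get_gfn* lookup must be balanced by
 * put_gfn() once the caller is done with the translation.  (The two
 * patterns never coexist in one tree; they are shown together only to
 * contrast old and new.) */
static unsigned long lookup_new(struct domain *d, unsigned long gfn)
{
    p2m_type_t p2mt;
    unsigned long mfn = mfn_x(get_gfn_unshare(d, gfn, &p2mt));

    /* ... use mfn while the p2m entry is held ... */

    put_gfn(d, gfn);
    return mfn;
}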

Andres

>  xen/arch/x86/mm.c |  189
> ++++++++++++++++++++++++++++-------------------------
>  1 files changed, 100 insertions(+), 89 deletions(-)
>
> -------------- next part --------------
> A non-text attachment was scrubbed...
> Name: 0003-add_to_physmap-Move-the-code-for-XENMEM_add_to_physm.patch
> Type: text/x-patch
> Size: 6630 bytes
> Desc: not available
> Url :
> http://lists.xensource.com/archives/html/xen-devel/attachments/20111113/13c22ac8/0003-add_to_physmap-Move-the-code-for-XENMEM_add_to_physm.bin
>
> ------------------------------
>
> _______________________________________________
> Xen-devel mailing list
> Xen-devel@lists.xensource.com
> http://lists.xensource.com/xen-devel
>
>
> End of Xen-devel Digest, Vol 81, Issue 163
> ******************************************
>


* [PATCH 3/6] add_to_physmap: Move the code for XENMEM_add_to_physmap
  2011-11-16 19:25   ` [PATCH 2/6] iommu: Introduce iommu_flush and iommu_flush_all Jean Guyader
@ 2011-11-16 19:25     ` Jean Guyader
  2011-11-19 21:58       ` Olaf Hering
  0 siblings, 1 reply; 15+ messages in thread
From: Jean Guyader @ 2011-11-16 19:25 UTC (permalink / raw)
  To: xen-devel; +Cc: tim, allen.m.kay, keir, Jean Guyader, JBeulich

[-- Attachment #1: Type: text/plain, Size: 292 bytes --]


Move the code for the XENMEM_add_to_physmap case into its own
function (xenmem_add_to_physmap).

Signed-off-by: Jean Guyader <jean.guyader@eu.citrix.com>
---
 xen/arch/x86/mm.c |  161 ++++++++++++++++++++++++++++------------------------
 1 files changed, 87 insertions(+), 74 deletions(-)


[-- Warning: decoded text below may be mangled, UTF-8 assumed --]
[-- Attachment #2: 0003-add_to_physmap-Move-the-code-for-XENMEM_add_to_physm.patch --]
[-- Type: text/x-patch; name="0003-add_to_physmap-Move-the-code-for-XENMEM_add_to_physm.patch", Size: 7450 bytes --]

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index a41a1d6..f093e93 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -4677,37 +4677,18 @@ static int handle_iomem_range(unsigned long s, unsigned long e, void *p)
     return 0;
 }
 
-long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
+static int xenmem_add_to_physmap(struct domain *d,
+                                 const struct xen_add_to_physmap *xatp)
 {
     struct page_info *page = NULL;
     unsigned long gfn = 0; /* gcc ... */
+    unsigned long prev_mfn, mfn = 0, gpfn, idx;
     int rc;
 
-    switch ( op )
+    switch ( xatp->space )
     {
-    case XENMEM_add_to_physmap:
-    {
-        struct xen_add_to_physmap xatp;
-        unsigned long prev_mfn, mfn = 0, gpfn;
-        struct domain *d;
-
-        if ( copy_from_guest(&xatp, arg, 1) )
-            return -EFAULT;
-
-        rc = rcu_lock_target_domain_by_id(xatp.domid, &d);
-        if ( rc != 0 )
-            return rc;
-
-        if ( xsm_add_to_physmap(current->domain, d) )
-        {
-            rcu_unlock_domain(d);
-            return -EPERM;
-        }
-
-        switch ( xatp.space )
-        {
         case XENMAPSPACE_shared_info:
-            if ( xatp.idx == 0 )
+            if ( xatp->idx == 0 )
                 mfn = virt_to_mfn(d->shared_info);
             break;
         case XENMAPSPACE_grant_table:
@@ -4716,21 +4697,22 @@ long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
             if ( d->grant_table->gt_version == 0 )
                 d->grant_table->gt_version = 1;
 
+            idx = xatp->idx;
             if ( d->grant_table->gt_version == 2 &&
-                 (xatp.idx & XENMAPIDX_grant_table_status) )
+                 (xatp->idx & XENMAPIDX_grant_table_status) )
             {
-                xatp.idx &= ~XENMAPIDX_grant_table_status;
-                if ( xatp.idx < nr_status_frames(d->grant_table) )
-                    mfn = virt_to_mfn(d->grant_table->status[xatp.idx]);
+                idx &= ~XENMAPIDX_grant_table_status;
+                if ( xatp->idx < nr_status_frames(d->grant_table) )
+                    mfn = virt_to_mfn(d->grant_table->status[idx]);
             }
             else
             {
-                if ( (xatp.idx >= nr_grant_frames(d->grant_table)) &&
-                     (xatp.idx < max_nr_grant_frames) )
-                    gnttab_grow_table(d, xatp.idx + 1);
+                if ( (idx >= nr_grant_frames(d->grant_table)) &&
+                     (idx < max_nr_grant_frames) )
+                    gnttab_grow_table(d, idx + 1);
 
-                if ( xatp.idx < nr_grant_frames(d->grant_table) )
-                    mfn = virt_to_mfn(d->grant_table->shared_raw[xatp.idx]);
+                if ( idx < nr_grant_frames(d->grant_table) )
+                    mfn = virt_to_mfn(d->grant_table->shared_raw[idx]);
             }
 
             spin_unlock(&d->grant_table->lock);
@@ -4738,9 +4720,9 @@ long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
         case XENMAPSPACE_gmfn:
         {
             p2m_type_t p2mt;
-            gfn = xatp.idx;
+            gfn = xatp->idx;
 
-            xatp.idx = mfn_x(get_gfn_unshare(d, xatp.idx, &p2mt));
+            idx = mfn_x(get_gfn_unshare(d, xatp->idx, &p2mt));
             /* If the page is still shared, exit early */
             if ( p2m_is_shared(p2mt) )
             {
@@ -4748,58 +4730,89 @@ long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
                 rcu_unlock_domain(d);
                 return -ENOMEM;
             }
-            if ( !get_page_from_pagenr(xatp.idx, d) )
+            if ( !get_page_from_pagenr(idx, d) )
                 break;
-            mfn = xatp.idx;
+            mfn = idx;
             page = mfn_to_page(mfn);
             break;
         }
         default:
             break;
-        }
-
-        if ( !paging_mode_translate(d) || (mfn == 0) )
-        {
-            if ( page )
-                put_page(page);
-            if ( xatp.space == XENMAPSPACE_gmfn )
-                put_gfn(d, gfn);
-            rcu_unlock_domain(d);
-            return -EINVAL;
-        }
-
-        domain_lock(d);
+    }
 
+    if ( !paging_mode_translate(d) || (mfn == 0) )
+    {
         if ( page )
             put_page(page);
+        if ( xatp->space == XENMAPSPACE_gmfn )
+            put_gfn(d, gfn);
+        rcu_unlock_domain(d);
+        return -EINVAL;
+    }
 
-        /* Remove previously mapped page if it was present. */
-        prev_mfn = get_gfn_untyped(d, xatp.gpfn);
-        if ( mfn_valid(prev_mfn) )
-        {
-            if ( is_xen_heap_mfn(prev_mfn) )
-                /* Xen heap frames are simply unhooked from this phys slot. */
-                guest_physmap_remove_page(d, xatp.gpfn, prev_mfn, PAGE_ORDER_4K);
-            else
-                /* Normal domain memory is freed, to avoid leaking memory. */
-                guest_remove_page(d, xatp.gpfn);
-        }
-        /* In the XENMAPSPACE_gmfn case we still hold a ref on the old page. */
-        put_gfn(d, xatp.gpfn);
+    domain_lock(d);
+
+    if ( page )
+        put_page(page);
+
+    /* Remove previously mapped page if it was present. */
+    prev_mfn = get_gfn_untyped(d, xatp->gpfn);
+    if ( mfn_valid(prev_mfn) )
+    {
+        if ( is_xen_heap_mfn(prev_mfn) )
+            /* Xen heap frames are simply unhooked from this phys slot. */
+            guest_physmap_remove_page(d, xatp->gpfn, prev_mfn, PAGE_ORDER_4K);
+        else
+            /* Normal domain memory is freed, to avoid leaking memory. */
+            guest_remove_page(d, xatp->gpfn);
+    }
+    /* In the XENMAPSPACE_gmfn case we still hold a ref on the old page. */
+    put_gfn(d, xatp->gpfn);
 
-        /* Unmap from old location, if any. */
-        gpfn = get_gpfn_from_mfn(mfn);
-        ASSERT( gpfn != SHARED_M2P_ENTRY );
-        if ( gpfn != INVALID_M2P_ENTRY )
-            guest_physmap_remove_page(d, gpfn, mfn, PAGE_ORDER_4K);
+    /* Unmap from old location, if any. */
+    gpfn = get_gpfn_from_mfn(mfn);
+    ASSERT( gpfn != SHARED_M2P_ENTRY );
+    if ( gpfn != INVALID_M2P_ENTRY )
+        guest_physmap_remove_page(d, gpfn, mfn, PAGE_ORDER_4K);
 
-        /* Map at new location. */
-        rc = guest_physmap_add_page(d, xatp.gpfn, mfn, PAGE_ORDER_4K);
+    /* Map at new location. */
+    rc = guest_physmap_add_page(d, xatp->gpfn, mfn, PAGE_ORDER_4K);
 
-        /* In the XENMAPSPACE_gmfn, we took a ref and locked the p2m at the top */
-        if ( xatp.space == XENMAPSPACE_gmfn )
-            put_gfn(d, gfn);
-        domain_unlock(d);
+    /* In the XENMAPSPACE_gmfn, we took a ref and locked the p2m at the top */
+    if ( xatp->space == XENMAPSPACE_gmfn )
+        put_gfn(d, gfn);
+    domain_unlock(d);
+
+    rcu_unlock_domain(d);
+
+    return rc;
+}
+
+long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
+{
+    int rc;
+
+    switch ( op )
+    {
+    case XENMEM_add_to_physmap:
+    {
+        struct xen_add_to_physmap xatp;
+        struct domain *d;
+
+        if ( copy_from_guest(&xatp, arg, 1) )
+            return -EFAULT;
+
+        rc = rcu_lock_target_domain_by_id(xatp.domid, &d);
+        if ( rc != 0 )
+            return rc;
+
+        if ( xsm_add_to_physmap(current->domain, d) )
+        {
+            rcu_unlock_domain(d);
+            return -EPERM;
+        }
+
+        rc = xenmem_add_to_physmap(d, &xatp);
 
         rcu_unlock_domain(d);
 

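For readers who want to see the interface this helper services from the guest
side, here is a minimal sketch.  struct xen_add_to_physmap, XENMAPSPACE_shared_info,
XENMEM_add_to_physmap, DOMID_SELF and HYPERVISOR_memory_op are the public Xen
guest interface; the header paths and the caller-chosen gpfn follow the Linux
guest convention and are illustrative assumptions, not part of this patch.

/* Illustrative guest-side use of XENMEM_add_to_physmap: ask Xen to make
 * this domain's shared_info page appear at a guest-chosen frame (gpfn).
 * Header paths assume a Linux guest; adjust for other environments.
 */
#include <xen/interface/xen.h>      /* DOMID_SELF */
#include <xen/interface/memory.h>   /* struct xen_add_to_physmap, XENMAPSPACE_* */
#include <asm/xen/hypercall.h>      /* HYPERVISOR_memory_op */

static int map_shared_info(unsigned long gpfn)
{
    struct xen_add_to_physmap xatp = {
        .domid = DOMID_SELF,
        .space = XENMAPSPACE_shared_info,
        .idx   = 0,      /* only index 0 is meaningful for shared_info */
        .gpfn  = gpfn,   /* frame where the guest wants the page mapped */
    };

    /* Handled by arch_memory_op()/xenmem_add_to_physmap() in the patch above. */
    return HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp);
}

A non-zero return corresponds to the -EINVAL/-EPERM/-ENOMEM paths visible in
the diff.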
[-- Attachment #3: Type: text/plain, Size: 138 bytes --]

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xensource.com
http://lists.xensource.com/xen-devel

^ permalink raw reply related	[flat|nested] 15+ messages in thread

* Re: [PATCH 3/6] add_to_physmap: Move the code for XENMEM_add_to_physmap
  2011-11-16 19:25     ` [PATCH 3/6] add_to_physmap: Move the code for XENMEM_add_to_physmap Jean Guyader
@ 2011-11-19 21:58       ` Olaf Hering
  2011-11-19 22:14         ` Keir Fraser
  0 siblings, 1 reply; 15+ messages in thread
From: Olaf Hering @ 2011-11-19 21:58 UTC (permalink / raw)
  To: Jean Guyader; +Cc: allen.m.kay, keir, xen-devel, tim, JBeulich

On Wed, Nov 16, Jean Guyader wrote:

> Move the code for the XENMEM_add_to_physmap case into its own
> function (xenmem_add_to_physmap).

This changeset 24163:7a9a1261a6b0 seems to cause the current testsuite failures.
(XEN) Assertion '!in_atomic()' failed at softirq.c:61

preempt_count is like fffffc52 or fffffc00 in my testing.

Olaf
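
Values like fffffc52 suggest the counter has gone negative.  One reading of the
diff earlier in the thread is that xenmem_add_to_physmap() ends with
rcu_unlock_domain(d) while arch_memory_op() unlocks the same domain again after
the call returns; whether that imbalance is the actual cause is not established
here, but a self-contained sketch of how a doubled unlock underflows a
preempt-style counter and trips an '!in_atomic()' assertion could look like
this (hypothetical names, not Xen internals):

/* Hypothetical illustration only: a kernel-style preemption counter that
 * must balance to zero.  An extra unlock underflows the unsigned counter,
 * so it reads as a huge value (e.g. ffffffff) and !in_atomic() fails.
 */
#include <assert.h>
#include <stdio.h>

static unsigned int preempt_count;

static void fake_lock(void)   { preempt_count++; }   /* e.g. an rcu-style lock   */
static void fake_unlock(void) { preempt_count--; }   /* e.g. an rcu-style unlock */
static int  in_atomic(void)   { return preempt_count != 0; }

static void helper(void)
{
    /* ... work ... */
    fake_unlock();               /* helper already drops the lock */
}

int main(void)
{
    fake_lock();
    helper();
    fake_unlock();               /* caller drops it a second time: underflow */

    printf("preempt_count = %08x\n", preempt_count);  /* prints ffffffff */
    assert(!in_atomic());        /* fires, like ASSERT(!in_atomic()) in Xen */
    return 0;
}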

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH 3/6] add_to_physmap: Move the code for XENMEM_add_to_physmap
  2011-11-19 21:58       ` Olaf Hering
@ 2011-11-19 22:14         ` Keir Fraser
  2011-11-19 22:37           ` Jean Guyader
  2011-11-20 13:25           ` Olaf Hering
  0 siblings, 2 replies; 15+ messages in thread
From: Keir Fraser @ 2011-11-19 22:14 UTC (permalink / raw)
  To: Olaf Hering, Jean Guyader; +Cc: allen.m.kay, xen-devel, tim, JBeulich

On 19/11/2011 21:58, "Olaf Hering" <olaf@aepfle.de> wrote:

> On Wed, Nov 16, Jean Guyader wrote:
> 
>> Move the code for the XENMEM_add_to_physmap case into its own
>> function (xenmem_add_to_physmap).
> 
> This changeset 24163:7a9a1261a6b0 seems to cause the current testsuite
> failures.
> (XEN) Assertion '!in_atomic()' failed at softirq.c:61
> 
> preempt_count is like fffffc52 or fffffc00 in my testing.

Thanks, hopefully fixed by c/s 24167.

 -- keir

> Olaf

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH 3/6] add_to_physmap: Move the code for XENMEM_add_to_physmap
  2011-11-19 22:14         ` Keir Fraser
@ 2011-11-19 22:37           ` Jean Guyader
  2011-11-20 13:25           ` Olaf Hering
  1 sibling, 0 replies; 15+ messages in thread
From: Jean Guyader @ 2011-11-19 22:37 UTC (permalink / raw)
  To: Keir Fraser
  Cc: Olaf Hering, xen-devel, allen.m.kay, tim, Jean Guyader, JBeulich

On 19 November 2011 14:14, Keir Fraser <keir.xen@gmail.com> wrote:
> On 19/11/2011 21:58, "Olaf Hering" <olaf@aepfle.de> wrote:
>
>> On Wed, Nov 16, Jean Guyader wrote:
>>
>>> Move the code for the XENMEM_add_to_physmap case into its own
>>> function (xenmem_add_to_physmap).
>>
>> This changeset 24163:7a9a1261a6b0 seems to cause the current testsuite
>> failures.
>> (XEN) Assertion '!in_atomic()' failed at softirq.c:61
>>
>> preempt_count is like fffffc52 or fffffc00 in my testing.
>
> Thanks, hopefully fixed by c/s 24167.
>

Thanks, sorry about that.

Jean

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH 3/6] add_to_physmap: Move the code for XENMEM_add_to_physmap
  2011-11-19 22:14         ` Keir Fraser
  2011-11-19 22:37           ` Jean Guyader
@ 2011-11-20 13:25           ` Olaf Hering
  2011-11-20 19:59             ` Keir Fraser
  1 sibling, 1 reply; 15+ messages in thread
From: Olaf Hering @ 2011-11-20 13:25 UTC (permalink / raw)
  To: Keir Fraser; +Cc: allen.m.kay, xen-devel, tim, Jean Guyader, JBeulich

On Sat, Nov 19, Keir Fraser wrote:

> On 19/11/2011 21:58, "Olaf Hering" <olaf@aepfle.de> wrote:
> 
> > On Wed, Nov 16, Jean Guyader wrote:
> > 
> >> Move the code for the XENMEM_add_to_physmap case into its own
> >> function (xenmem_add_to_physmap).
> > 
> > This changeset 24163:7a9a1261a6b0 seems to cause the current testsuite
> > failures.
> > (XEN) Assertion '!in_atomic()' failed at softirq.c:61
> > 
> > preempt_count is like fffffc52 or fffffc00 in my testing.
> 
> Thanks, hopefully fixed by c/s 24167.

Yes, the ASSERT does not trigger anymore.

The remaining issue is this:

Nov 20 06:21:11.744519 (XEN) hvm.c:2312:d1 guest attempted write to read-only memory page. gfn=0xc0, mfn=0x201979

See
http://www.chiark.greenend.org.uk/~xensrcts/logs/9893/test-amd64-i386-rhel6hvm-amd/serial-potato-beetle.log

Olaf

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH 3/6] add_to_physmap: Move the code for XENMEM_add_to_physmap
  2011-11-20 13:25           ` Olaf Hering
@ 2011-11-20 19:59             ` Keir Fraser
  2011-11-21  8:39               ` Olaf Hering
  0 siblings, 1 reply; 15+ messages in thread
From: Keir Fraser @ 2011-11-20 19:59 UTC (permalink / raw)
  To: Olaf Hering; +Cc: allen.m.kay, xen-devel, tim, Jean Guyader, JBeulich

On 20/11/2011 13:25, "Olaf Hering" <olaf@aepfle.de> wrote:

> On Sat, Nov 19, Keir Fraser wrote:
> 
>> On 19/11/2011 21:58, "Olaf Hering" <olaf@aepfle.de> wrote:
>> 
>>> On Wed, Nov 16, Jean Guyader wrote:
>>> 
>>>> Move the code for the XENMEM_add_to_physmap case into its own
>>>> function (xenmem_add_to_physmap).
>>> 
>>> This changeset 24163:7a9a1261a6b0 seems to cause the current testsuite
>>> failures.
>>> (XEN) Assertion '!in_atomic()' failed at softirq.c:61
>>> 
>>> preempt_count is like fffffc52 or fffffc00 in my testing.
>> 
>> Thanks, hopefully fixed by c/s 24167.
> 
> Yes, the ASSERT does not trigger anymore.
> 
> The remaining issue is this:
> 
> Nov 20 06:21:11.744519 (XEN) hvm.c:2312:d1 guest attempted write to read-only
> memory page. gfn=0xc0, mfn=0x201979

Is that new behaviour? It may be unrelated to whatever HVM test failure
we're seeing, or else be a mere symptom of a guest gone haywire for other
reasons (we write-protect that memory range because it is supposed to be
ROM).

 -- Keir

> See
> http://www.chiark.greenend.org.uk/~xensrcts/logs/9893/test-amd64-i386-rhel6hvm
> -amd/serial-potato-beetle.log
> 
> Olaf
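
As a quick sanity check on the log line quoted above: with the usual 4 KiB
pages (an assumption, but the standard case), gfn 0xc0 is guest-physical
0xc0000, i.e. the start of the legacy VGA/option-ROM window that the reply
describes as write-protected.

/* Tiny worked example: gfn -> guest-physical address at 4 KiB page size. */
#include <stdio.h>

int main(void)
{
    unsigned long gfn = 0xc0;
    printf("gpa = %#lx\n", gfn << 12);   /* prints 0xc0000 */
    return 0;
}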

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH 3/6] add_to_physmap: Move the code for XENMEM_add_to_physmap
  2011-11-20 19:59             ` Keir Fraser
@ 2011-11-21  8:39               ` Olaf Hering
  0 siblings, 0 replies; 15+ messages in thread
From: Olaf Hering @ 2011-11-21  8:39 UTC (permalink / raw)
  To: Keir Fraser; +Cc: allen.m.kay, xen-devel, tim, Jean Guyader, JBeulich

On Sun, Nov 20, Keir Fraser wrote:

> > Nov 20 06:21:11.744519 (XEN) hvm.c:2312:d1 guest attempted write to read-only
> > memory page. gfn=0xc0, mfn=0x201979
> 
> Is that new behaviour? It may be unrelated to whatever HVM test failure
> we're seeing, or else be a mere symptom of a guest gone haywire for other
> reasons (we write-protect that memory range because it is supposed to be
> ROM).

The message does not trigger with changeset 24162, but it does with
24167.

Olaf

^ permalink raw reply	[flat|nested] 15+ messages in thread

end of thread, other threads:[~2011-11-21  8:39 UTC | newest]

Thread overview: 15+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
     [not found] <20111113175006.1EF1C72C36F@homiemail-mx8.g.dreamhost.com>
2011-11-14 14:17 ` [PATCH 3/6] add_to_physmap: Move the code for XENMEM_add_to_physmap Andres Lagar-Cavilla
2011-11-16 19:25 [PATCH 0/6] IOMMU, vtd and iotlb flush rework (v8) Jean Guyader
2011-11-16 19:25 ` [PATCH 1/6] vtd: Refactor iotlb flush code Jean Guyader
2011-11-16 19:25   ` [PATCH 2/6] iommu: Introduce iommu_flush and iommu_flush_all Jean Guyader
2011-11-16 19:25     ` [PATCH 3/6] add_to_physmap: Move the code for XENMEM_add_to_physmap Jean Guyader
2011-11-19 21:58       ` Olaf Hering
2011-11-19 22:14         ` Keir Fraser
2011-11-19 22:37           ` Jean Guyader
2011-11-20 13:25           ` Olaf Hering
2011-11-20 19:59             ` Keir Fraser
2011-11-21  8:39               ` Olaf Hering
  -- strict thread matches above, loose matches on Subject: below --
2011-11-13 17:40 [PATCH 0/6] IOMMU, vtd and iotlb flush rework (v7) Jean Guyader
2011-11-13 17:40 ` [PATCH 1/6] vtd: Refactor iotlb flush code Jean Guyader
2011-11-13 17:40   ` [PATCH 2/6] iommu: Introduce iommu_flush and iommu_flush_all Jean Guyader
2011-11-13 17:40     ` [PATCH 3/6] add_to_physmap: Move the code for XENMEM_add_to_physmap Jean Guyader
2011-11-10  8:43 [PATCH 0/6] IOMMU, vtd and iotlb flush rework (v5) Jean Guyader
2011-11-10  8:43 ` [PATCH 1/6] vtd: Refactor iotlb flush code Jean Guyader
2011-11-10  8:44   ` [PATCH 2/6] iommu: Introduce iommu_flush and iommu_flush_all Jean Guyader
2011-11-10  8:44     ` [PATCH 3/6] add_to_physmap: Move the code for XENMEM_add_to_physmap Jean Guyader
2011-11-08 20:04 [PATCH 0/6] IOMMU, vtd and iotlb flush rework (v4) Jean Guyader
2011-11-08 20:04 ` [PATCH 1/6] vtd: Refactor iotlb flush code Jean Guyader
2011-11-08 20:04   ` [PATCH 2/6] iommu: Introduce iommu_flush and iommu_flush_all Jean Guyader
2011-11-08 20:04     ` [PATCH 3/6] add_to_physmap: Move the code for XENMEM_add_to_physmap Jean Guyader
2011-11-07 18:25 IOMMU, vtd and iotlb flush rework (v3) Jean Guyader
2011-11-07 18:25 ` [PATCH 1/6] vtd: Refactor iotlb flush code Jean Guyader
2011-11-07 18:25   ` [PATCH 2/6] iommu: Introduce iommu_flush and iommu_flush_all Jean Guyader
2011-11-07 18:25     ` [PATCH 3/6] add_to_physmap: Move the code for XENMEM_add_to_physmap Jean Guyader
2011-11-08 13:24       ` Tim Deegan
2011-11-07 15:16 IOMMU, vtd and iotlb flush rework (v2) Jean Guyader
2011-11-07 15:16 ` [PATCH 1/6] vtd: Refactor iotlb flush code Jean Guyader
2011-11-07 15:16   ` [PATCH 2/6] iommu: Introduce iommu_flush and iommu_flush_all Jean Guyader
2011-11-07 15:16     ` [PATCH 3/6] add_to_physmap: Move the code for XENMEM_add_to_physmap Jean Guyader
2011-11-04 10:38 [PATCH 0/6] IOMMU, vtd and iotlb flush rework Jean Guyader
2011-11-04 10:38 ` [PATCH 1/6] vtd: Refactor iotlb flush code Jean Guyader
2011-11-04 10:38   ` [PATCH 2/6] iommu: Introduce iommu_flush and iommu_flush_all Jean Guyader
2011-11-04 10:38     ` [PATCH 3/6] add_to_physmap: Move the code for XENMEM_add_to_physmap Jean Guyader
