From: Paul Durrant <paul.durrant@citrix.com>
To: xen-devel@lists.xenproject.org
Cc: Wei Liu <wei.liu2@citrix.com>,
	Daniel De Graaf <dgdegra@tycho.nsa.gov>,
	Paul Durrant <paul.durrant@citrix.com>,
	Ian Jackson <ian.jackson@eu.citrix.com>,
	Andrew Cooper <andrew.cooper3@citrix.com>
Subject: [PATCH-for-4.9 v1 6/8] dm_op: convert HVMOP_set_mem_type
Date: Fri, 18 Nov 2016 17:14:02 +0000	[thread overview]
Message-ID: <1479489244-2201-7-git-send-email-paul.durrant@citrix.com> (raw)
In-Reply-To: <1479489244-2201-1-git-send-email-paul.durrant@citrix.com>

In addition to converting HVMOP_set_mem_type to DMOP_set_mem_type, this patch
removes the need for handling HVMOP restarts, so that infrastructure is also
removed.
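
For illustration only (not part of this patch): the caller-visible libxc
interface is unchanged by the conversion; a device model still marks a range
of guest pages for emulation in the same way, and only the underlying
hypercall differs. The helper name below is hypothetical.

    #include <xenctrl.h>

    /* Hypothetical example: mark a range of guest pages as emulated MMIO.
     * xc_hvm_set_mem_type() keeps its existing signature; only its
     * implementation switches from HVMOP_set_mem_type to DMOP_set_mem_type. */
    static int mark_range_mmio_dm(xc_interface *xch, domid_t dom,
                                  uint64_t first_pfn, uint64_t nr)
    {
        return xc_hvm_set_mem_type(xch, dom, HVMMEM_mmio_dm, first_pfn, nr);
    }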

Suggested-by: Jan Beulich <jbeulich@suse.com>
Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
---
Cc: Ian Jackson <ian.jackson@eu.citrix.com>
Cc: Wei Liu <wei.liu2@citrix.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
Cc: Daniel De Graaf <dgdegra@tycho.nsa.gov>
---
 tools/libxc/xc_misc.c               |  28 +++-----
 xen/arch/x86/hvm/dm.c               |  92 ++++++++++++++++++++++++
 xen/arch/x86/hvm/hvm.c              | 136 +-----------------------------------
 xen/include/public/hvm/dm_op.h      |  16 +++++
 xen/include/public/hvm/hvm_op.h     |   4 +-
 xen/xsm/flask/policy/access_vectors |   2 +-
 6 files changed, 121 insertions(+), 157 deletions(-)

diff --git a/tools/libxc/xc_misc.c b/tools/libxc/xc_misc.c
index a97864e..607cf80 100644
--- a/tools/libxc/xc_misc.c
+++ b/tools/libxc/xc_misc.c
@@ -584,28 +584,18 @@ int xc_hvm_modified_memory(
 int xc_hvm_set_mem_type(
     xc_interface *xch, domid_t dom, hvmmem_type_t mem_type, uint64_t first_pfn, uint64_t nr)
 {
-    DECLARE_HYPERCALL_BUFFER(struct xen_hvm_set_mem_type, arg);
-    int rc;
-
-    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
-    if ( arg == NULL )
-    {
-        PERROR("Could not allocate memory for xc_hvm_set_mem_type hypercall");
-        return -1;
-    }
+    struct xen_dm_op op;
+    struct xen_dm_op_set_mem_type *data;
 
-    arg->domid        = dom;
-    arg->hvmmem_type  = mem_type;
-    arg->first_pfn    = first_pfn;
-    arg->nr           = nr;
+    op.op = DMOP_set_mem_type;
+    data = &op.u.set_mem_type;
 
-    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
-                  HVMOP_set_mem_type,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
-
-    xc_hypercall_buffer_free(xch, arg);
+    data->mem_type = mem_type;
+    data->first_pfn = first_pfn;
+    /* NOTE: The following assignment truncates nr to 32 bits */
+    data->nr = nr;
 
-    return rc;
+    return do_dm_op(xch, dom, 1, &op, sizeof(op));
 }
 
 int xc_hvm_inject_trap(
diff --git a/xen/arch/x86/hvm/dm.c b/xen/arch/x86/hvm/dm.c
index 0dcd454..969b68c 100644
--- a/xen/arch/x86/hvm/dm.c
+++ b/xen/arch/x86/hvm/dm.c
@@ -160,6 +160,16 @@ static int dm_op_set_pci_link_route(struct domain *d, uint8_t link,
     return 0;
 }
 
+static bool_t dm_op_allow_p2m_type_change(p2m_type_t old, p2m_type_t new)
+{
+    if ( p2m_is_ram(old) ||
+         (p2m_is_hole(old) && new == p2m_mmio_dm) ||
+         (old == p2m_ioreq_server && new == p2m_ram_rw) )
+        return 1;
+
+    return 0;
+}
+
 static int dm_op_modified_memory(struct domain *d, xen_pfn_t *first_pfn,
                                  unsigned int *nr)
 {
@@ -205,6 +215,79 @@ static int dm_op_modified_memory(struct domain *d, xen_pfn_t *first_pfn,
     return rc;
 }
 
+
+static int dm_op_set_mem_type(struct domain *d, hvmmem_type_t mem_type,
+                              xen_pfn_t *first_pfn, unsigned int *nr)
+{
+    xen_pfn_t last_pfn = *first_pfn + *nr - 1;
+    unsigned int iter;
+    int rc;
+
+    /* Interface types to internal p2m types */
+    static const p2m_type_t memtype[] = {
+        [HVMMEM_ram_rw]  = p2m_ram_rw,
+        [HVMMEM_ram_ro]  = p2m_ram_ro,
+        [HVMMEM_mmio_dm] = p2m_mmio_dm,
+        [HVMMEM_unused] = p2m_invalid,
+        [HVMMEM_ioreq_server] = p2m_ioreq_server
+    };
+
+    if ( (*first_pfn > last_pfn) ||
+         (last_pfn > domain_get_maximum_gpfn(d)) )
+        return -EINVAL;
+
+    if ( mem_type >= ARRAY_SIZE(memtype) ||
+         unlikely(mem_type == HVMMEM_unused) )
+        return -EINVAL;
+
+    iter = 0;
+    rc = 0;
+    while ( iter < *nr )
+    {
+        unsigned long pfn = *first_pfn + iter;
+        p2m_type_t t;
+
+        get_gfn_unshare(d, pfn, &t);
+        if ( p2m_is_paging(t) )
+        {
+            put_gfn(d, pfn);
+            p2m_mem_paging_populate(d, pfn);
+            rc = -EAGAIN;
+            break;
+        }
+        if ( p2m_is_shared(t) )
+        {
+            put_gfn(d, pfn);
+            rc = -EAGAIN;
+            break;
+        }
+        if ( !dm_op_allow_p2m_type_change(t, memtype[mem_type]) )
+        {
+            put_gfn(d, pfn);
+            rc = -EINVAL;
+            break;
+        }
+
+        rc = p2m_change_type_one(d, pfn, t, memtype[mem_type]);
+        put_gfn(d, pfn);
+
+        if ( rc )
+            break;
+
+        /* Check for continuation if it's not the last iteration */
+        if ( (++iter < *nr) && hypercall_preempt_check() )
+        {
+            rc = -ERESTART;
+            break;
+        }
+    }
+
+    *first_pfn += iter;
+    *nr -= iter;
+
+    return rc;
+}
+
 long do_dm_op(domid_t domid,
               unsigned int nr_bufs,
               XEN_GUEST_HANDLE_PARAM(xen_dm_op_buf_t) bufs)
@@ -331,6 +414,15 @@ long do_dm_op(domid_t domid,
         rc = dm_op_modified_memory(d, &data->first_pfn, &data->nr);
         break;
     }
+    case DMOP_set_mem_type:
+    {
+        struct xen_dm_op_set_mem_type *data =
+            &op.u.set_mem_type;
+
+        rc = dm_op_set_mem_type(d, data->mem_type, &data->first_pfn,
+                                &data->nr);
+        break;
+    }
     default:
         rc = -EOPNOTSUPP;
         break;
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 3b2e9d5..83c4063 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -5249,132 +5249,11 @@ static int hvmop_get_mem_type(
     return rc;
 }
 
-/*
- * Note that this value is effectively part of the ABI, even if we don't need
- * to make it a formal part of it: A guest suspended for migration in the
- * middle of a continuation would fail to work if resumed on a hypervisor
- * using a different value.
- */
-#define HVMOP_op_mask 0xff
-
-static bool_t hvm_allow_p2m_type_change(p2m_type_t old, p2m_type_t new)
-{
-    if ( p2m_is_ram(old) ||
-         (p2m_is_hole(old) && new == p2m_mmio_dm) ||
-         (old == p2m_ioreq_server && new == p2m_ram_rw) )
-        return 1;
-
-    return 0;
-}
-
-static int hvmop_set_mem_type(
-    XEN_GUEST_HANDLE_PARAM(xen_hvm_set_mem_type_t) arg,
-    unsigned long *iter)
-{
-    unsigned long start_iter = *iter;
-    struct xen_hvm_set_mem_type a;
-    struct domain *d;
-    int rc;
-
-    /* Interface types to internal p2m types */
-    static const p2m_type_t memtype[] = {
-        [HVMMEM_ram_rw]  = p2m_ram_rw,
-        [HVMMEM_ram_ro]  = p2m_ram_ro,
-        [HVMMEM_mmio_dm] = p2m_mmio_dm,
-        [HVMMEM_unused] = p2m_invalid,
-        [HVMMEM_ioreq_server] = p2m_ioreq_server
-    };
-
-    if ( copy_from_guest(&a, arg, 1) )
-        return -EFAULT;
-
-    rc = rcu_lock_remote_domain_by_id(a.domid, &d);
-    if ( rc != 0 )
-        return rc;
-
-    rc = -EINVAL;
-    if ( !is_hvm_domain(d) )
-        goto out;
-
-    rc = xsm_hvm_control(XSM_DM_PRIV, d, HVMOP_set_mem_type);
-    if ( rc )
-        goto out;
-
-    rc = -EINVAL;
-    if ( a.nr < start_iter ||
-         ((a.first_pfn + a.nr - 1) < a.first_pfn) ||
-         ((a.first_pfn + a.nr - 1) > domain_get_maximum_gpfn(d)) )
-        goto out;
-
-    if ( a.hvmmem_type >= ARRAY_SIZE(memtype) ||
-         unlikely(a.hvmmem_type == HVMMEM_unused) )
-        goto out;
-
-    while ( a.nr > start_iter )
-    {
-        unsigned long pfn = a.first_pfn + start_iter;
-        p2m_type_t t;
-
-        get_gfn_unshare(d, pfn, &t);
-        if ( p2m_is_paging(t) )
-        {
-            put_gfn(d, pfn);
-            p2m_mem_paging_populate(d, pfn);
-            rc = -EAGAIN;
-            goto out;
-        }
-        if ( p2m_is_shared(t) )
-        {
-            put_gfn(d, pfn);
-            rc = -EAGAIN;
-            goto out;
-        }
-        if ( !hvm_allow_p2m_type_change(t, memtype[a.hvmmem_type]) )
-        {
-            put_gfn(d, pfn);
-            goto out;
-        }
-
-        rc = p2m_change_type_one(d, pfn, t, memtype[a.hvmmem_type]);
-        put_gfn(d, pfn);
-
-        if ( rc )
-            goto out;
-
-        /* Check for continuation if it's not the last interation */
-        if ( a.nr > ++start_iter && !(start_iter & HVMOP_op_mask) &&
-             hypercall_preempt_check() )
-        {
-            rc = -ERESTART;
-            goto out;
-        }
-    }
-    rc = 0;
-
- out:
-    rcu_unlock_domain(d);
-    *iter = start_iter;
-
-    return rc;
-}
-
 long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
 {
-    unsigned long start_iter, mask;
     long rc = 0;
 
-    switch ( op & HVMOP_op_mask )
-    {
-    default:
-        mask = ~0UL;
-        break;
-    case HVMOP_set_mem_type:
-        mask = HVMOP_op_mask;
-        break;
-    }
-
-    start_iter = op & ~mask;
-    switch ( op &= mask )
+    switch ( op )
     {
     case HVMOP_set_evtchn_upcall_vector:
         rc = hvmop_set_evtchn_upcall_vector(
@@ -5405,12 +5284,6 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
             guest_handle_cast(arg, xen_hvm_get_mem_type_t));
         break;
 
-    case HVMOP_set_mem_type:
-        rc = hvmop_set_mem_type(
-            guest_handle_cast(arg, xen_hvm_set_mem_type_t),
-            &start_iter);
-        break;
-
     case HVMOP_pagetable_dying:
     {
         struct xen_hvm_pagetable_dying a;
@@ -5519,13 +5392,6 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
     }
     }
 
-    if ( rc == -ERESTART )
-    {
-        ASSERT(!(start_iter & mask));
-        rc = hypercall_create_continuation(__HYPERVISOR_hvm_op, "lh",
-                                           op | start_iter, arg);
-    }
-
     return rc;
 }
 
diff --git a/xen/include/public/hvm/dm_op.h b/xen/include/public/hvm/dm_op.h
index d2065f2..247cac6 100644
--- a/xen/include/public/hvm/dm_op.h
+++ b/xen/include/public/hvm/dm_op.h
@@ -245,6 +245,21 @@ struct xen_dm_op_modified_memory {
     uint64_t first_pfn;
 };
 
+/*
+ * DMOP_set_mem_type: Notify that a region of memory is to be treated in a
+ *                    specific way. (See definition of hvmmem_type_t).
+ */
+#define DMOP_set_mem_type 12
+
+struct xen_dm_op_set_mem_type {
+    /* IN - number of contiguous pages */
+    uint32_t nr;
+    /* IN - first pfn in region */
+    uint64_t first_pfn;
+    /* IN - new hvmmem_type_t of region */
+    uint16_t mem_type;
+};
+
 
 struct xen_dm_op {
     uint32_t op;
@@ -260,6 +275,7 @@ struct xen_dm_op {
         struct xen_dm_op_set_isa_irq_level set_isa_irq_level;
         struct xen_dm_op_set_pci_link_route set_pci_link_route;
         struct xen_dm_op_modified_memory modified_memory;
+        struct xen_dm_op_set_mem_type set_mem_type;
     } u;
 };
 
diff --git a/xen/include/public/hvm/hvm_op.h b/xen/include/public/hvm/hvm_op.h
index 45879cf..2e9a1f6 100644
--- a/xen/include/public/hvm/hvm_op.h
+++ b/xen/include/public/hvm/hvm_op.h
@@ -131,8 +131,6 @@ struct xen_hvm_modified_memory {
 typedef struct xen_hvm_modified_memory xen_hvm_modified_memory_t;
 DEFINE_XEN_GUEST_HANDLE(xen_hvm_modified_memory_t);
 
-#endif /* __XEN_INTERFACE_VERSION__ < 0x00040900 */
-
 #define HVMOP_set_mem_type    8
 /* Notify that a region of memory is to be treated in a specific way. */
 struct xen_hvm_set_mem_type {
@@ -148,6 +146,8 @@ struct xen_hvm_set_mem_type {
 typedef struct xen_hvm_set_mem_type xen_hvm_set_mem_type_t;
 DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_mem_type_t);
 
+#endif /* __XEN_INTERFACE_VERSION__ < 0x00040900 */
+
 #endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
 
 /* Hint from PV drivers for pagetable destruction. */
diff --git a/xen/xsm/flask/policy/access_vectors b/xen/xsm/flask/policy/access_vectors
index 2041ca5..125210b 100644
--- a/xen/xsm/flask/policy/access_vectors
+++ b/xen/xsm/flask/policy/access_vectors
@@ -260,7 +260,7 @@ class hvm
     bind_irq
 # XEN_DOMCTL_pin_mem_cacheattr
     cacheattr
-# HVMOP_get_mem_type, HVMOP_set_mem_type,
+# HVMOP_get_mem_type,
 # HVMOP_set_mem_access, HVMOP_get_mem_access, HVMOP_pagetable_dying,
 # HVMOP_inject_trap
     hvmctl
-- 
2.1.4
