From: Paul Durrant <paul.durrant@citrix.com>
To: xen-devel@lists.xenproject.org
Cc: Stefano Stabellini <sstabellini@kernel.org>,
	Wei Liu <wei.liu2@citrix.com>,
	George Dunlap <George.Dunlap@eu.citrix.com>,
	Andrew Cooper <andrew.cooper3@citrix.com>,
	Ian Jackson <ian.jackson@eu.citrix.com>, Tim Deegan <tim@xen.org>,
	Julien Grall <julien.grall@arm.com>,
	Paul Durrant <paul.durrant@citrix.com>,
	Jan Beulich <jbeulich@suse.com>
Subject: [PATCH v5 14/15] x86: add iommu_ops to modify and flush IOMMU mappings
Date: Fri, 3 Aug 2018 18:22:19 +0100	[thread overview]
Message-ID: <20180803172220.1657-15-paul.durrant@citrix.com> (raw)
In-Reply-To: <20180803172220.1657-1-paul.durrant@citrix.com>

This patch adds iommu_ops to add (map) or remove (unmap) frames in the
domain's IOMMU mappings, and an iommu_op to synchronize (flush) those
manipulations with the hardware.

Mappings added by the map operation are tracked and only those mappings
may be removed by a subsequent unmap operation. Frames are specified by the
owning domain and GFN. It is, of course, permissible for a domain to map
its own frames using DOMID_SELF.
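
As an illustrative sketch only (not part of this patch): a caller mapping one
of its own frames might populate the new op as below. The helper
submit_single_iommu_op() and its parameters are hypothetical stand-ins for
the hypercall buffer submission introduced earlier in this series.

static int example_map_own_frame(xen_pfn_t gfn, xen_bfn_t bfn, bool readonly)
{
    struct xen_iommu_op op = {
        .op = XEN_IOMMUOP_map,
        .u.map = {
            .domid = DOMID_SELF,   /* a domain may map its own frames */
            .flags = readonly ? XEN_IOMMUOP_map_readonly : 0,
            .gfn = gfn,            /* guest frame to be mapped */
            .bfn = bfn,            /* IOMMU frame that will hold the mapping */
        },
    };

    /*
     * submit_single_iommu_op() is a hypothetical wrapper that issues the
     * op via the iommu_op hypercall buffers and returns op.status: 0 on
     * success, otherwise a negative errno (e.g. -EINVAL if bfn falls
     * within a reserved range).
     */
    return submit_single_iommu_op(&op);
}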

NOTE: The owning domain and GFN must also be specified in the unmap
      operation, as well as the BFN, so that they can be cross-checked
      against the existing mapping.
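
The corresponding teardown, again only a sketch using the same hypothetical
wrapper, re-supplies that triple and then issues a flush, since map/unmap
are performed with IOTLB flushing suppressed:

static int example_unmap_and_flush(xen_pfn_t gfn, xen_bfn_t bfn)
{
    struct xen_iommu_op unmap = {
        .op = XEN_IOMMUOP_unmap,
        .u.unmap = {
            .domid = DOMID_SELF,   /* must match the original map op */
            .gfn = gfn,            /* must match the original map op */
            .bfn = bfn,            /* must match the original map op */
        },
    };
    struct xen_iommu_op flush = {
        .op = XEN_IOMMUOP_flush,
    };
    int rc;

    /* Fails with -ENOENT if bfn was not mapped by a prior XEN_IOMMUOP_map */
    rc = submit_single_iommu_op(&unmap);
    if ( rc )
        return rc;

    /* Make the change visible to the hardware */
    return submit_single_iommu_op(&flush);
}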

Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
---
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
Cc: George Dunlap <George.Dunlap@eu.citrix.com>
Cc: Ian Jackson <ian.jackson@eu.citrix.com>
Cc: Julien Grall <julien.grall@arm.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Stefano Stabellini <sstabellini@kernel.org>
Cc: Tim Deegan <tim@xen.org>
Cc: Wei Liu <wei.liu2@citrix.com>

v4:
 - Fixed logic inversion when checking return of iommu_unmap_page().

v3:
 - Add type pinning.

v2:
 - Heavily re-worked in v2, including explicit tracking of mappings.
   This avoids the need to clear non-reserved mappings from the IOMMU
   at start of day, which would be prohibitively slow on a large host.
---
 xen/arch/x86/iommu_op.c       | 151 ++++++++++++++++++++++++++++++++++++++++++
 xen/include/public/iommu_op.h |  43 ++++++++++++
 xen/include/xlat.lst          |   2 +
 3 files changed, 196 insertions(+)

diff --git a/xen/arch/x86/iommu_op.c b/xen/arch/x86/iommu_op.c
index b29547bffd..35daeeed92 100644
--- a/xen/arch/x86/iommu_op.c
+++ b/xen/arch/x86/iommu_op.c
@@ -114,6 +114,131 @@ static int iommu_op_enable_modification(void)
     return 0;
 }
 
+static int iommuop_map(struct xen_iommu_op_map *op)
+{
+    struct domain *d, *currd = current->domain;
+    struct domain_iommu *iommu = dom_iommu(currd);
+    bool readonly = op->flags & XEN_IOMMUOP_map_readonly;
+    bfn_t bfn = _bfn(op->bfn);
+    struct page_info *page;
+    unsigned int prot;
+    int rc, ignore;
+
+    if ( op->pad || (op->flags & ~XEN_IOMMUOP_map_readonly) )
+        return -EINVAL;
+
+    if ( !iommu->iommu_op_ranges )
+        return -EOPNOTSUPP;
+
+    /* Check whether the specified BFN falls in a reserved region */
+    if ( rangeset_contains_singleton(iommu->reserved_ranges, bfn_x(bfn)) )
+        return -EINVAL;
+
+    d = rcu_lock_domain_by_any_id(op->domid);
+    if ( !d )
+        return -ESRCH;
+
+    rc = get_paged_gfn(d, _gfn(op->gfn), readonly, NULL, &page);
+    if ( rc )
+        goto unlock;
+
+    rc = -EINVAL;
+    if ( !readonly && !get_page_type(page, PGT_writable_page) )
+    {
+        put_page(page);
+        goto unlock;
+    }
+
+    prot = IOMMUF_readable;
+    if ( !readonly )
+        prot |= IOMMUF_writable;
+
+    rc = -EIO;
+    if ( iommu_map_page(currd, bfn, page_to_mfn(page), prot) )
+        goto release;
+
+    rc = rangeset_add_singleton(iommu->iommu_op_ranges, bfn_x(bfn));
+    if ( rc )
+        goto unmap;
+
+    rc = 0;
+    goto unlock; /* retain mapping and references */
+
+ unmap:
+    ignore = iommu_unmap_page(currd, bfn);
+
+ release:
+    if ( !readonly )
+        put_page_type(page);
+    put_page(page);
+
+ unlock:
+    rcu_unlock_domain(d);
+    return rc;
+}
+
+static int iommuop_unmap(struct xen_iommu_op_unmap *op)
+{
+    struct domain *d, *currd = current->domain;
+    struct domain_iommu *iommu = dom_iommu(currd);
+    bfn_t bfn = _bfn(op->bfn);
+    mfn_t mfn;
+    bool readonly;
+    unsigned int prot;
+    struct page_info *page;
+    int rc;
+
+    if ( op->pad0 || op->pad1 )
+        return -EINVAL;
+
+    if ( !iommu->iommu_op_ranges )
+        return -EOPNOTSUPP;
+
+    if ( !rangeset_contains_singleton(iommu->iommu_op_ranges, bfn_x(bfn)) ||
+         iommu_lookup_page(currd, bfn, &mfn, &prot) ||
+         !mfn_valid(mfn) )
+        return -ENOENT;
+
+    readonly = !(prot & IOMMUF_writable);
+
+    d = rcu_lock_domain_by_any_id(op->domid);
+    if ( !d )
+        return -ESRCH;
+
+    rc = get_paged_gfn(d, _gfn(op->gfn), !(prot & IOMMUF_writable), NULL,
+                       &page);
+    if ( rc )
+        goto unlock;
+
+    put_page(page); /* release extra reference just taken */
+
+    rc = -EINVAL;
+    if ( !mfn_eq(page_to_mfn(page), mfn) )
+        goto unlock;
+
+    /* release reference taken in map */
+    if ( !readonly )
+        put_page_type(page);
+    put_page(page);
+
+    rc = rangeset_remove_singleton(iommu->iommu_op_ranges, bfn_x(bfn));
+    if ( rc )
+        goto unlock;
+
+    if ( iommu_unmap_page(currd, bfn) )
+        rc = -EIO;
+
+ unlock:
+    rcu_unlock_domain(d);
+
+    return rc;
+}
+
+static int iommuop_flush(void)
+{
+    return !iommu_iotlb_flush_all(current->domain) ? 0 : -EIO;
+}
+
 static void iommu_op(xen_iommu_op_t *op)
 {
     switch ( op->op )
@@ -126,6 +251,22 @@ static void iommu_op(xen_iommu_op_t *op)
         op->status = iommu_op_enable_modification();
         break;
 
+    case XEN_IOMMUOP_map:
+        this_cpu(iommu_dont_flush_iotlb) = 1;
+        op->status = iommuop_map(&op->u.map);
+        this_cpu(iommu_dont_flush_iotlb) = 0;
+        break;
+
+    case XEN_IOMMUOP_unmap:
+        this_cpu(iommu_dont_flush_iotlb) = 1;
+        op->status = iommuop_unmap(&op->u.unmap);
+        this_cpu(iommu_dont_flush_iotlb) = 0;
+        break;
+
+    case XEN_IOMMUOP_flush:
+        op->status = iommuop_flush();
+        break;
+
     default:
         op->status = -EOPNOTSUPP;
         break;
@@ -139,6 +280,9 @@ int do_one_iommu_op(xen_iommu_op_buf_t *buf)
     static const size_t op_size[] = {
         [XEN_IOMMUOP_query_reserved] = sizeof(struct xen_iommu_op_query_reserved),
         [XEN_IOMMUOP_enable_modification] = 0,
+        [XEN_IOMMUOP_map] = sizeof(struct xen_iommu_op_map),
+        [XEN_IOMMUOP_unmap] = sizeof(struct xen_iommu_op_unmap),
+        [XEN_IOMMUOP_flush] = 0,
     };
     size_t size;
     int rc;
@@ -226,6 +370,9 @@ int compat_one_iommu_op(compat_iommu_op_buf_t *buf)
     static const size_t op_size[] = {
         [XEN_IOMMUOP_query_reserved] = sizeof(struct compat_iommu_op_query_reserved),
         [XEN_IOMMUOP_enable_modification] = 0,
+        [XEN_IOMMUOP_map] = sizeof(struct compat_iommu_op_map),
+        [XEN_IOMMUOP_unmap] = sizeof(struct compat_iommu_op_unmap),
+        [XEN_IOMMUOP_flush] = 0,
     };
     size_t size;
     xen_iommu_op_t nat;
@@ -263,6 +410,8 @@ int compat_one_iommu_op(compat_iommu_op_buf_t *buf)
      * we need to fix things up here.
      */
 #define XLAT_iommu_op_u_query_reserved XEN_IOMMUOP_query_reserved
+#define XLAT_iommu_op_u_map XEN_IOMMUOP_map
+#define XLAT_iommu_op_u_unmap XEN_IOMMUOP_unmap
     u = cmp.op;
 
 #define XLAT_iommu_op_query_reserved_HNDL_ranges(_d_, _s_)            \
@@ -322,6 +471,8 @@ int compat_one_iommu_op(compat_iommu_op_buf_t *buf)
                                 &cmp, status) )
         return -EFAULT;
 
+#undef XLAT_iommu_op_u_unmap
+#undef XLAT_iommu_op_u_map
 #undef XLAT_iommu_op_u_query_reserved
 
     return 0;
diff --git a/xen/include/public/iommu_op.h b/xen/include/public/iommu_op.h
index 9bf74bd007..c8dc531c83 100644
--- a/xen/include/public/iommu_op.h
+++ b/xen/include/public/iommu_op.h
@@ -67,6 +67,47 @@ struct xen_iommu_op_query_reserved {
  */
 #define XEN_IOMMUOP_enable_modification 2
 
+/*
+ * XEN_IOMMUOP_map: Map a guest page in the IOMMU.
+ */
+#define XEN_IOMMUOP_map 3
+
+struct xen_iommu_op_map {
+    /* IN - The domid of the guest */
+    domid_t domid;
+    uint16_t flags;
+
+#define _XEN_IOMMUOP_map_readonly 0
+#define XEN_IOMMUOP_map_readonly (1 << (_XEN_IOMMUOP_map_readonly))
+
+    uint32_t pad;
+    /* IN - The IOMMU frame number which will hold the new mapping */
+    xen_bfn_t bfn;
+    /* IN - The guest frame number of the page to be mapped */
+    xen_pfn_t gfn;
+};
+
+/*
+ * XEN_IOMMUOP_unmap: Remove a mapping in the IOMMU.
+ */
+#define XEN_IOMMUOP_unmap 4
+
+struct xen_iommu_op_unmap {
+    /* IN - The domid of the guest */
+    domid_t domid;
+    uint16_t pad0;
+    uint32_t pad1;
+    /* IN - The IOMMU frame number which holds the mapping to be removed */
+    xen_bfn_t bfn;
+    /* IN - The guest frame number of the page that is mapped */
+    xen_pfn_t gfn;
+};
+
+/*
+ * XEN_IOMMUOP_flush: Flush the IOMMU TLB.
+ */
+#define XEN_IOMMUOP_flush 5
+
 struct xen_iommu_op {
     uint16_t op;    /* op type */
     uint16_t pad;
@@ -74,6 +115,8 @@ struct xen_iommu_op {
                     /* 0 for success otherwise, negative errno */
     union {
         struct xen_iommu_op_query_reserved query_reserved;
+        struct xen_iommu_op_map map;
+        struct xen_iommu_op_unmap unmap;
     } u;
 };
 typedef struct xen_iommu_op xen_iommu_op_t;
diff --git a/xen/include/xlat.lst b/xen/include/xlat.lst
index d2f9b1034b..3ad7eadb5a 100644
--- a/xen/include/xlat.lst
+++ b/xen/include/xlat.lst
@@ -79,7 +79,9 @@
 ?	vcpu_hvm_x86_64			hvm/hvm_vcpu.h
 !	iommu_op			iommu_op.h
 !	iommu_op_buf			iommu_op.h
+!	iommu_op_map			iommu_op.h
 !	iommu_op_query_reserved		iommu_op.h
+!	iommu_op_unmap			iommu_op.h
 ?	iommu_reserved_range		iommu_op.h
 ?	kexec_exec			kexec.h
 !	kexec_image			kexec.h
-- 
2.11.0

