xen-devel.lists.xenproject.org archive mirror
 help / color / mirror / Atom feed
From: Paul Durrant <paul.durrant@citrix.com>
To: xen-devel@lists.xenproject.org
Cc: Stefano Stabellini <sstabellini@kernel.org>,
	Wei Liu <wei.liu2@citrix.com>,
	George Dunlap <George.Dunlap@eu.citrix.com>,
	Andrew Cooper <andrew.cooper3@citrix.com>,
	Ian Jackson <ian.jackson@eu.citrix.com>, Tim Deegan <tim@xen.org>,
	Paul Durrant <paul.durrant@citrix.com>,
	Jan Beulich <jbeulich@suse.com>
Subject: [PATCH 7/7] x86: add iommu_ops to map and unmap pages, and also to flush the IOTLB
Date: Mon, 12 Feb 2018 10:47:14 +0000	[thread overview]
Message-ID: <20180212104714.1922-8-paul.durrant@citrix.com> (raw)
In-Reply-To: <20180212104714.1922-1-paul.durrant@citrix.com>

This patch adds iommu_ops to allow a domain with control_iommu privilege
to map and unmap pages in the IOMMU on behalf of any guest over which it
holds mapping privilege.
These operations implicitly disable IOTLB flushing so that the caller can
batch operations and then explicitly flush the IOTLB using the iommu_op
also added by this patch.

Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
---
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
Cc: George Dunlap <George.Dunlap@eu.citrix.com>
Cc: Ian Jackson <ian.jackson@eu.citrix.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Stefano Stabellini <sstabellini@kernel.org>
Cc: Tim Deegan <tim@xen.org>
Cc: Wei Liu <wei.liu2@citrix.com>
---
 xen/arch/x86/iommu_op.c       | 186 ++++++++++++++++++++++++++++++++++++++++++
 xen/include/public/iommu_op.h |  37 +++++++++
 xen/include/xlat.lst          |   2 +
 3 files changed, 225 insertions(+)

diff --git a/xen/arch/x86/iommu_op.c b/xen/arch/x86/iommu_op.c
index ac81b98b7a..b10c916279 100644
--- a/xen/arch/x86/iommu_op.c
+++ b/xen/arch/x86/iommu_op.c
@@ -24,6 +24,174 @@
 #include <xen/hypercall.h>
 #include <xen/iommu.h>
 
+/* Override macros from asm/page.h to make them work with mfn_t */
+#undef mfn_to_page
+#define mfn_to_page(mfn) __mfn_to_page(mfn_x(mfn))
+#undef page_to_mfn
+#define page_to_mfn(page) _mfn(__page_to_mfn(page))
+
+struct check_rdm_ctxt {
+    bfn_t bfn;
+};
+
+static int check_rdm(xen_pfn_t start, xen_ulong_t nr, u32 id, void *arg)
+{
+    struct check_rdm_ctxt *ctxt = arg;
+
+    if ( bfn_x(ctxt->bfn) >= start &&
+         bfn_x(ctxt->bfn) < start + nr )
+        return -EINVAL;
+
+    return 1;
+}
+
+static int iommuop_map(struct xen_iommu_op_map *op, unsigned int flags)
+{
+    struct domain *d, *od, *currd = current->domain;
+    struct domain_iommu *iommu = dom_iommu(currd);
+    const struct iommu_ops *ops = iommu->platform_ops;
+    domid_t domid = op->domid;
+    gfn_t gfn = _gfn(op->gfn);
+    bfn_t bfn = _bfn(op->bfn);
+    mfn_t mfn;
+    struct check_rdm_ctxt ctxt = {
+        .bfn = bfn,
+    };
+    p2m_type_t p2mt;
+    p2m_query_t p2mq;
+    struct page_info *page;
+    unsigned int prot;
+    int rc;
+
+    if (op->pad0 != 0 || op->pad1 != 0)
+        return -EINVAL;
+
+    /*
+     * Both map_page and lookup_page operations must be implemented.
+     * The lookup_page method is not used here but is relied upon by
+     * iommuop_unmap() to drop the page reference taken here.
+     */
+    if ( !ops->map_page || !ops->lookup_page )
+        return -ENOSYS;
+
+    /* Check whether the specified BFN falls in a reserved region */
+    rc = iommu_get_reserved_device_memory(check_rdm, &ctxt);
+    if ( rc )
+        return rc;
+
+    d = rcu_lock_domain_by_any_id(domid);
+    if ( !d )
+        return -ESRCH;
+
+    p2mq = (flags & XEN_IOMMUOP_map_readonly) ?
+        P2M_UNSHARE : P2M_ALLOC;
+    page = get_page_from_gfn(d, gfn_x(gfn), &p2mt, p2mq);
+
+    rc = -ENOENT;
+    if ( !page )
+        goto unlock;
+
+    if ( p2m_is_paged(p2mt) )
+    {
+        p2m_mem_paging_populate(d, gfn_x(gfn));
+        goto release;
+    }
+
+    if ( (p2mq & P2M_UNSHARE) && p2m_is_shared(p2mt) )
+        goto release;
+
+    /*
+     * Make sure the page is RAM and, if it is read-only, that the
+     * read-only flag is present.
+     */
+    rc = -EPERM;
+    if ( !p2m_is_any_ram(p2mt) ||
+         (p2m_is_readonly(p2mt) && !(flags & XEN_IOMMUOP_map_readonly)) )
+        goto release;
+
+    /*
+     * If the calling domain does not own the page then make sure it
+     * has mapping privilege over the page owner.
+     */
+    od = page_get_owner(page);
+    if ( od != currd )
+    {
+        rc = xsm_domain_memory_map(XSM_TARGET, od);
+        if ( rc )
+            goto release;
+    }
+
+    prot = IOMMUF_readable;
+    if ( !(flags & XEN_IOMMUOP_map_readonly) )
+        prot |= IOMMUF_writable;
+
+    mfn = page_to_mfn(page);
+
+    rc = 0;
+    if ( !ops->map_page(currd, bfn, mfn, prot) )
+        goto unlock; /* keep the page ref */
+
+    rc = -EIO;
+
+ release:
+    put_page(page);
+
+ unlock:
+    rcu_unlock_domain(d);
+
+    return rc;
+}
+
+static int iommuop_unmap(struct xen_iommu_op_unmap *op)
+{
+    struct domain *currd = current->domain;
+    struct domain_iommu *iommu = dom_iommu(currd);
+    const struct iommu_ops *ops = iommu->platform_ops;
+    bfn_t bfn = _bfn(op->bfn);
+    mfn_t mfn;
+    struct check_rdm_ctxt ctxt = {
+        .bfn = bfn,
+    };
+    unsigned int flags;
+    struct page_info *page;
+    int rc;
+
+    /*
+     * Both unmap_page and lookup_page operations must be implemented.
+     */
+    if ( !ops->unmap_page || !ops->lookup_page )
+        return -ENOSYS;
+
+    /* Check whether the specified BFN falls in a reserved region */
+    rc = iommu_get_reserved_device_memory(check_rdm, &ctxt);
+    if ( rc )
+        return rc;
+
+    if ( ops->lookup_page(currd, bfn, &mfn, &flags) ||
+         !mfn_valid(mfn) )
+        return -ENOENT;
+
+    page = mfn_to_page(mfn);
+
+    if ( ops->unmap_page(currd, bfn) )
+        return -EIO;
+
+    put_page(page);
+    return 0;
+}
+
+static int iommuop_flush(void)
+{
+    struct domain *currd = current->domain;
+    struct domain_iommu *iommu = dom_iommu(currd);
+    const struct iommu_ops *ops = iommu->platform_ops;
+
+    if ( ops->iotlb_flush_all(currd) )
+        return -EIO;
+
+    return 0;
+}
+
 struct get_rdm_ctxt {
     unsigned int max_entries;
     unsigned int nr_entries;
@@ -101,6 +269,22 @@ static void iommu_op(xen_iommu_op_t *op)
         op->status = iommuop_query_reserved(&op->u.query_reserved);
         break;
 
+    case XEN_IOMMUOP_map:
+        this_cpu(iommu_dont_flush_iotlb) = 1;
+        op->status = iommuop_map(&op->u.map, op->flags);
+        this_cpu(iommu_dont_flush_iotlb) = 0;
+        break;
+
+    case XEN_IOMMUOP_unmap:
+        this_cpu(iommu_dont_flush_iotlb) = 1;
+        op->status = iommuop_unmap(&op->u.unmap);
+        this_cpu(iommu_dont_flush_iotlb) = 0;
+        break;
+
+    case XEN_IOMMUOP_flush:
+        op->status = iommuop_flush();
+        break;
+
     default:
         op->status = -EOPNOTSUPP;
         break;
@@ -195,6 +379,8 @@ int compat_iommu_op(XEN_GUEST_HANDLE_PARAM(compat_iommu_op_t) uops,
          * we need to fix things up here.
          */
 #define XLAT_iommu_op_u_query_reserved XEN_IOMMUOP_query_reserved
+#define XLAT_iommu_op_u_map XEN_IOMMUOP_map
+#define XLAT_iommu_op_u_unmap XEN_IOMMUOP_unmap
         u = cmp.op;
 
 #define XLAT_iommu_op_query_reserved_HNDL_regions(_d_, _s_) \
diff --git a/xen/include/public/iommu_op.h b/xen/include/public/iommu_op.h
index 24b8b9e0cc..9a782603de 100644
--- a/xen/include/public/iommu_op.h
+++ b/xen/include/public/iommu_op.h
@@ -57,13 +57,50 @@ struct xen_iommu_op_query_reserved {
     XEN_GUEST_HANDLE(xen_iommu_reserved_region_t) regions;
 };
 
+/*
+ * XEN_IOMMUOP_map: Map a page in the IOMMU.
+ */
+#define XEN_IOMMUOP_map 2
+
+struct xen_iommu_op_map {
+    /* IN - The IOMMU frame number which will hold the new mapping */
+    xen_bfn_t bfn;
+    /* IN - The guest frame number of the page to be mapped */
+    xen_pfn_t gfn;
+    /* IN - The domid of the guest */
+    domid_t domid;
+    unsigned short pad0;
+    unsigned int pad1;
+};
+
+/*
+ * XEN_IOMMUOP_unmap: Remove a mapping in the IOMMU.
+ */
+#define XEN_IOMMUOP_unmap 3
+
+struct xen_iommu_op_unmap {
+    /* IN - The IOMMU frame number holding the mapping to be cleared */
+    xen_bfn_t bfn;
+};
+
+/*
+ * XEN_IOMMUOP_flush: Flush the IOMMU TLB.
+ */
+#define XEN_IOMMUOP_flush 4
+
 struct xen_iommu_op {
     uint16_t op;
     uint16_t flags; /* op specific flags */
+
+#define _XEN_IOMMUOP_map_readonly 0
+#define XEN_IOMMUOP_map_readonly (1 << (_XEN_IOMMUOP_map_readonly))
+
     int32_t status; /* op completion status: */
                     /* 0 for success otherwise, negative errno */
     union {
         struct xen_iommu_op_query_reserved query_reserved;
+        struct xen_iommu_op_map map;
+        struct xen_iommu_op_unmap unmap;
     } u;
 };
 typedef struct xen_iommu_op xen_iommu_op_t;
diff --git a/xen/include/xlat.lst b/xen/include/xlat.lst
index a2070b6d7d..dddafc7422 100644
--- a/xen/include/xlat.lst
+++ b/xen/include/xlat.lst
@@ -78,6 +78,8 @@
 ?	vcpu_hvm_x86_64			hvm/hvm_vcpu.h
 !	iommu_reserved_region		iommu_op.h
 !	iommu_op_query_reserved		iommu_op.h
+!	iommu_op_map			iommu_op.h
+!	iommu_op_unmap			iommu_op.h
 !	iommu_op			iommu_op.h
 ?	kexec_exec			kexec.h
 !	kexec_image			kexec.h
-- 
2.11.0


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

  parent reply	other threads:[~2018-02-12 10:48 UTC|newest]

Thread overview: 68+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2018-02-12 10:47 [PATCH 0/7] paravirtual IOMMU interface Paul Durrant
2018-02-12 10:47 ` [PATCH 1/7] iommu: introduce the concept of BFN Paul Durrant
2018-03-15 13:39   ` Jan Beulich
2018-03-16 10:31     ` Paul Durrant
2018-03-16 10:39       ` Jan Beulich
2018-02-12 10:47 ` [PATCH 2/7] iommu: make use of type-safe BFN and MFN in exported functions Paul Durrant
2018-03-15 15:44   ` Jan Beulich
2018-03-16 10:26     ` Paul Durrant
2018-07-10 14:29     ` George Dunlap
2018-07-10 14:34       ` Jan Beulich
2018-07-10 14:37         ` Andrew Cooper
2018-07-10 14:58         ` George Dunlap
2018-07-10 15:19           ` Jan Beulich
2018-02-12 10:47 ` [PATCH 3/7] iommu: push use of type-safe BFN and MFN into iommu_ops Paul Durrant
2018-03-15 16:15   ` Jan Beulich
2018-03-16 10:22     ` Paul Durrant
2018-02-12 10:47 ` [PATCH 4/7] vtd: add lookup_page method to iommu_ops Paul Durrant
2018-03-15 16:54   ` Jan Beulich
2018-03-16 10:19     ` Paul Durrant
2018-03-16 10:28       ` Jan Beulich
2018-03-16 10:41         ` Paul Durrant
2018-02-12 10:47 ` [PATCH 5/7] public / x86: introduce __HYPERCALL_iommu_op Paul Durrant
2018-02-13  6:43   ` Tian, Kevin
2018-02-13  9:22     ` Paul Durrant
2018-02-23  5:17       ` Tian, Kevin
2018-02-23  9:41         ` Paul Durrant
2018-02-24  2:57           ` Tian, Kevin
2018-02-26  9:57             ` Paul Durrant
2018-02-26 11:55               ` Tian, Kevin
2018-02-27  5:05               ` Tian, Kevin
2018-02-27  9:32                 ` Paul Durrant
2018-02-28  2:53                   ` Tian, Kevin
2018-02-28  8:55                     ` Paul Durrant
2018-03-16 12:25   ` Jan Beulich
2018-06-07 11:42     ` Paul Durrant
2018-06-07 13:21       ` Jan Beulich
2018-06-07 13:45         ` George Dunlap
2018-06-07 14:06           ` Paul Durrant
2018-06-07 14:21             ` Ian Jackson
2018-06-07 15:21               ` Paul Durrant
2018-06-07 15:41                 ` Jan Beulich
2018-02-12 10:47 ` [PATCH 6/7] x86: add iommu_op to query reserved ranges Paul Durrant
2018-02-13  6:51   ` Tian, Kevin
2018-02-13  9:25     ` Paul Durrant
2018-02-23  5:23       ` Tian, Kevin
2018-02-23  9:02         ` Jan Beulich
2018-03-19 14:10   ` Jan Beulich
2018-03-19 15:13     ` Paul Durrant
2018-03-19 16:30       ` Jan Beulich
2018-03-19 15:13   ` Jan Beulich
2018-03-19 15:36     ` Paul Durrant
2018-03-19 16:31       ` Jan Beulich
2018-02-12 10:47 ` Paul Durrant [this message]
2018-02-13  6:55   ` [PATCH 7/7] x86: add iommu_ops to map and unmap pages, and also to flush the IOTLB Tian, Kevin
2018-02-13  9:55     ` Paul Durrant
2018-02-23  5:35       ` Tian, Kevin
2018-02-23  9:35         ` Paul Durrant
2018-02-24  3:01           ` Tian, Kevin
2018-02-26  9:38             ` Paul Durrant
2018-03-19 15:11   ` Jan Beulich
2018-03-19 15:34     ` Paul Durrant
2018-03-19 16:49       ` Jan Beulich
2018-03-19 16:57         ` Paul Durrant
2018-03-20  8:11           ` Jan Beulich
2018-03-20  9:32             ` Paul Durrant
2018-03-20  9:49               ` Jan Beulich
2018-02-13  6:21 ` [PATCH 0/7] paravirtual IOMMU interface Tian, Kevin
2018-02-13  9:18   ` Paul Durrant

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20180212104714.1922-8-paul.durrant@citrix.com \
    --to=paul.durrant@citrix.com \
    --cc=George.Dunlap@eu.citrix.com \
    --cc=andrew.cooper3@citrix.com \
    --cc=ian.jackson@eu.citrix.com \
    --cc=jbeulich@suse.com \
    --cc=sstabellini@kernel.org \
    --cc=tim@xen.org \
    --cc=wei.liu2@citrix.com \
    --cc=xen-devel@lists.xenproject.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).