xen-devel.lists.xenproject.org archive mirror
From: Paul Durrant <paul.durrant@citrix.com>
To: xen-devel@lists.xenproject.org
Cc: Stefano Stabellini <sstabellini@kernel.org>,
	Wei Liu <wei.liu2@citrix.com>,
	George Dunlap <George.Dunlap@eu.citrix.com>,
	Andrew Cooper <andrew.cooper3@citrix.com>,
	Ian Jackson <ian.jackson@eu.citrix.com>, Tim Deegan <tim@xen.org>,
	Paul Durrant <paul.durrant@citrix.com>,
	Jan Beulich <jbeulich@suse.com>
Subject: [PATCH v5 08/15] x86: add iommu_op to query reserved ranges
Date: Fri, 3 Aug 2018 18:22:13 +0100	[thread overview]
Message-ID: <20180803172220.1657-9-paul.durrant@citrix.com> (raw)
In-Reply-To: <20180803172220.1657-1-paul.durrant@citrix.com>

Add an iommu_op to allow a guest to query its domain's IOMMU reserved ranges.

NOTE: The number of reserved ranges is determined by system firmware, in
      conjunction with Xen command line options, and is expected to be
      small. Thus, to avoid over-complicating the code, there is no
      pre-emption check within the operation.
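
For illustration only (not part of the patch): a guest might drive this
op roughly as follows. This is a minimal sketch assuming a
HYPERVISOR_iommu_op() wrapper matching the hypercall introduced earlier
in the series, together with Linux-style guest-handle and allocation
helpers; those helper names are placeholders rather than definitions
from this series.

    static int query_iommu_reserved(void)
    {
        xen_iommu_reserved_range_t *ranges;
        xen_iommu_op_t op = {
            .op = XEN_IOMMUOP_query_reserved,
        };
        xen_iommu_op_buf_t buf = {
            .size = sizeof(op),
        };
        unsigned int nr;
        int rc;

        /*
         * First pass: a NULL handle with nr_entries == 0 is valid and
         * simply reports the number of reserved ranges via nr_entries.
         */
        set_xen_guest_handle(op.u.query_reserved.ranges, NULL);
        op.u.query_reserved.nr_entries = 0;
        set_xen_guest_handle(buf.h, &op);

        rc = HYPERVISOR_iommu_op(1, &buf);
        if ( rc || op.status )
            return rc ?: op.status;

        nr = op.u.query_reserved.nr_entries;
        if ( !nr )
            return 0;

        ranges = kcalloc(nr, sizeof(*ranges), GFP_KERNEL);
        if ( !ranges )
            return -ENOMEM;

        /*
         * Second pass: collect the ranges themselves. A status of
         * -ENOBUFS here would mean the count grew since the first pass.
         */
        op.u.query_reserved.nr_entries = nr;
        set_xen_guest_handle(op.u.query_reserved.ranges, ranges);

        rc = HYPERVISOR_iommu_op(1, &buf);
        if ( !rc && !op.status )
            nr = op.u.query_reserved.nr_entries; /* consume ranges[0..nr-1] */

        kfree(ranges);
        return rc ?: op.status;
    }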

Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
---
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
Cc: George Dunlap <George.Dunlap@eu.citrix.com>
Cc: Ian Jackson <ian.jackson@eu.citrix.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Stefano Stabellini <sstabellini@kernel.org>
Cc: Tim Deegan <tim@xen.org>
Cc: Wei Liu <wei.liu2@citrix.com>

v4:
 - Make xen_bfn_t strictly 64 bits wide and drop associated compat
   translation.

v3:
 - Avoid speculation beyond array bounds check.

v2:
 - Re-implemented for v2 based on new rangeset.
---
 xen/arch/x86/iommu_op.c       | 164 ++++++++++++++++++++++++++++++++++++++++--
 xen/include/public/iommu_op.h |  39 ++++++++++
 xen/include/xlat.lst          |   2 +
 3 files changed, 199 insertions(+), 6 deletions(-)

diff --git a/xen/arch/x86/iommu_op.c b/xen/arch/x86/iommu_op.c
index 744c0fce27..bcfcd49102 100644
--- a/xen/arch/x86/iommu_op.c
+++ b/xen/arch/x86/iommu_op.c
@@ -22,11 +22,70 @@
 #include <xen/event.h>
 #include <xen/guest_access.h>
 #include <xen/hypercall.h>
+#include <xen/nospec.h>
+
+struct get_reserved_ctxt {
+    unsigned int max_entries;
+    unsigned int nr_entries;
+    XEN_GUEST_HANDLE(xen_iommu_reserved_range_t) ranges;
+};
+
+static int get_reserved(unsigned long s, unsigned long e, void *arg)
+{
+    struct get_reserved_ctxt *ctxt = arg;
+
+    if ( ctxt->nr_entries < ctxt->max_entries )
+    {
+        xen_iommu_reserved_range_t range = {
+            .start_bfn = s,
+            .nr_frames = e - s,
+        };
+
+        if ( copy_to_guest_offset(ctxt->ranges, ctxt->nr_entries, &range,
+                                  1) )
+            return -EFAULT;
+    }
+
+    ctxt->nr_entries++;
+    return 0;
+}
+
+static int iommu_op_query_reserved(struct xen_iommu_op_query_reserved *op)
+{
+    struct domain *currd = current->domain;
+    struct domain_iommu *iommu = dom_iommu(currd);
+    struct get_reserved_ctxt ctxt = {
+        .max_entries = op->nr_entries,
+        .ranges = op->ranges,
+    };
+    int rc;
+
+    if ( op->pad )
+        return -EINVAL;
+
+    rc = rangeset_report_ranges(iommu->reserved_ranges, 0, ~0ul,
+                                get_reserved, &ctxt);
+    if ( rc )
+        return rc;
+
+    /* Pass back the actual number of reserved ranges */
+    op->nr_entries = ctxt.nr_entries;
+
+    if ( !guest_handle_is_null(ctxt.ranges) &&
+         ctxt.nr_entries > ctxt.max_entries )
+        return -ENOBUFS;
+
+    return 0;
+}
 
 static void iommu_op(xen_iommu_op_t *op)
 {
     switch ( op->op )
     {
+    case XEN_IOMMUOP_query_reserved:
+        op->status = iommu_op_query_reserved(&op->u.query_reserved);
+        break;
+
     default:
         op->status = -EOPNOTSUPP;
         break;
@@ -35,13 +94,20 @@ static void iommu_op(xen_iommu_op_t *op)
 
 int do_one_iommu_op(xen_iommu_op_buf_t *buf)
 {
-    xen_iommu_op_t op;
+    xen_iommu_op_t op = {};
+    size_t offset;
+    static const size_t op_size[] = {
+        [XEN_IOMMUOP_query_reserved] = sizeof(struct xen_iommu_op_query_reserved),
+    };
+    size_t size;
     int rc;
 
-    if ( buf->size < sizeof(op) )
+    offset = offsetof(struct xen_iommu_op, u);
+
+    if ( buf->size < offset )
         return -EFAULT;
 
-    if ( copy_from_guest((void *)&op, buf->h, sizeof(op)) )
+    if ( copy_from_guest((void *)&op, buf->h, offset) )
         return -EFAULT;
 
     if ( op.pad )
@@ -51,6 +117,16 @@ int do_one_iommu_op(xen_iommu_op_buf_t *buf)
     if ( rc )
         return rc;
 
+    if ( op.op >= ARRAY_SIZE(op_size) )
+        return -EOPNOTSUPP;
+
+    size = op_size[array_index_nospec(op.op, ARRAY_SIZE(op_size))];
+    if ( buf->size < offset + size )
+        return -EFAULT;
+
+    if ( copy_from_guest_offset((void *)&op.u, buf->h, offset, size) )
+        return -EFAULT;
+
     iommu_op(&op);
 
     if ( __copy_field_to_guest(guest_handle_cast(buf->h, xen_iommu_op_t),
@@ -100,16 +176,27 @@ long do_iommu_op(unsigned int nr_bufs,
     return rc;
 }
 
+CHECK_iommu_reserved_range;
+
 int compat_one_iommu_op(compat_iommu_op_buf_t *buf)
 {
-    compat_iommu_op_t cmp;
+    compat_iommu_op_t cmp = {};
+    size_t offset;
+    static const size_t op_size[] = {
+        [XEN_IOMMUOP_query_reserved] = sizeof(struct compat_iommu_op_query_reserved),
+    };
+    size_t size;
     xen_iommu_op_t nat;
+    unsigned int u;
+    int32_t status;
     int rc;
 
-    if ( buf->size < sizeof(cmp) )
+    offset = offsetof(struct compat_iommu_op, u);
+
+    if ( buf->size < offset )
         return -EFAULT;
 
-    if ( copy_from_compat((void *)&cmp, buf->h, sizeof(cmp)) )
+    if ( copy_from_compat((void *)&cmp, buf->h, offset) )
         return -EFAULT;
 
     if ( cmp.pad )
@@ -119,17 +206,82 @@ int compat_one_iommu_op(compat_iommu_op_buf_t *buf)
     if ( rc )
         return rc;
 
+    if ( cmp.op >= ARRAY_SIZE(op_size) )
+        return -EOPNOTSUPP;
+
+    size = op_size[array_index_nospec(cmp.op, ARRAY_SIZE(op_size))];
+    if ( buf->size < offset + size )
+        return -EFAULT;
+
+    if ( copy_from_compat_offset((void *)&cmp.u, buf->h, offset, size) )
+        return -EFAULT;
+
+    /*
+     * The xlat magic doesn't quite know how to handle the union so
+     * we need to fix things up here.
+     */
+#define XLAT_iommu_op_u_query_reserved XEN_IOMMUOP_query_reserved
+    u = cmp.op;
+
+#define XLAT_iommu_op_query_reserved_HNDL_ranges(_d_, _s_)            \
+    do                                                                \
+    {                                                                 \
+        if ( !compat_handle_is_null((_s_)->ranges) )                  \
+        {                                                             \
+            unsigned int *nr_entries = COMPAT_ARG_XLAT_VIRT_BASE;     \
+            xen_iommu_reserved_range_t *ranges =                      \
+                (void *)(nr_entries + 1);                             \
+                                                                      \
+            if ( sizeof(*nr_entries) +                                \
+                 (sizeof(*ranges) * (_s_)->nr_entries) >              \
+                 COMPAT_ARG_XLAT_SIZE )                               \
+                return -E2BIG;                                        \
+                                                                      \
+            *nr_entries = (_s_)->nr_entries;                          \
+            set_xen_guest_handle((_d_)->ranges, ranges);              \
+        }                                                             \
+        else                                                          \
+            set_xen_guest_handle((_d_)->ranges, NULL);                \
+    } while (false)
+
     XLAT_iommu_op(&nat, &cmp);
 
+#undef XLAT_iommu_op_query_reserved_HNDL_ranges
+
     iommu_op(&nat);
 
+    status = nat.status;
+
+#define XLAT_iommu_op_query_reserved_HNDL_ranges(_d_, _s_)               \
+    do                                                                   \
+    {                                                                    \
+        if ( !compat_handle_is_null((_d_)->ranges) )                     \
+        {                                                                \
+            unsigned int *nr_entries = COMPAT_ARG_XLAT_VIRT_BASE;        \
+            compat_iommu_reserved_range_t *ranges =                      \
+                (void *)(nr_entries + 1);                                \
+            unsigned int nr =                                            \
+                min_t(unsigned int, (_d_)->nr_entries, *nr_entries);     \
+                                                                         \
+            if ( __copy_to_compat_offset((_d_)->ranges, 0, ranges, nr) ) \
+                status = -EFAULT;                                        \
+        }                                                                \
+    } while (false)
+
     XLAT_iommu_op(&cmp, &nat);
 
+    /* status will have been modified if __copy_to_compat_offset() failed */
+    cmp.status = status;
+
+#undef XLAT_iommu_op_query_reserved_HNDL_ranges
+
     if ( __copy_field_to_compat(compat_handle_cast(buf->h,
                                                    compat_iommu_op_t),
                                 &cmp, status) )
         return -EFAULT;
 
+#undef XLAT_iommu_op_u_query_reserved
+
     return 0;
 }
 
diff --git a/xen/include/public/iommu_op.h b/xen/include/public/iommu_op.h
index c3b68f665a..ade404a877 100644
--- a/xen/include/public/iommu_op.h
+++ b/xen/include/public/iommu_op.h
@@ -25,11 +25,50 @@
 
 #include "xen.h"
 
+typedef uint64_t xen_bfn_t;
+
+/* Structure describing a single range reserved in the IOMMU */
+struct xen_iommu_reserved_range {
+    xen_bfn_t start_bfn;
+    unsigned int nr_frames;
+    unsigned int pad;
+};
+typedef struct xen_iommu_reserved_range xen_iommu_reserved_range_t;
+DEFINE_XEN_GUEST_HANDLE(xen_iommu_reserved_range_t);
+
+/*
+ * XEN_IOMMUOP_query_reserved: Query ranges reserved in the IOMMU.
+ */
+#define XEN_IOMMUOP_query_reserved 1
+
+struct xen_iommu_op_query_reserved {
+    /*
+     * IN/OUT - On entry this is the number of entries available
+     *          in the ranges array below.
+     *          On exit this is the actual number of reserved ranges.
+     */
+    unsigned int nr_entries;
+    unsigned int pad;
+    /*
+     * OUT - This array is populated with reserved ranges. If it is not
+     *       large enough then as many entries as will fit are written,
+     *       but the op status code will be set to -ENOBUFS.
+     *       It is permissible to set this to NULL if nr_entries is also
+     *       set to zero. In this case, on exit, nr_entries will still be
+     *       set to the actual number of reserved ranges but the status
+     *       code will be set to zero.
+     */
+    XEN_GUEST_HANDLE(xen_iommu_reserved_range_t) ranges;
+};
+
 struct xen_iommu_op {
     uint16_t op;    /* op type */
     uint16_t pad;
     int32_t status; /* op completion status: */
                     /* 0 for success otherwise, negative errno */
+    union {
+        struct xen_iommu_op_query_reserved query_reserved;
+    } u;
 };
 typedef struct xen_iommu_op xen_iommu_op_t;
 DEFINE_XEN_GUEST_HANDLE(xen_iommu_op_t);
diff --git a/xen/include/xlat.lst b/xen/include/xlat.lst
index 3b15c18c4e..d2f9b1034b 100644
--- a/xen/include/xlat.lst
+++ b/xen/include/xlat.lst
@@ -79,6 +79,8 @@
 ?	vcpu_hvm_x86_64			hvm/hvm_vcpu.h
 !	iommu_op			iommu_op.h
 !	iommu_op_buf			iommu_op.h
+!	iommu_op_query_reserved		iommu_op.h
+?	iommu_reserved_range		iommu_op.h
 ?	kexec_exec			kexec.h
 !	kexec_image			kexec.h
 !	kexec_range			kexec.h
-- 
2.11.0


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel


Thread overview: 57+ messages
2018-08-03 17:22 [PATCH v5 00/15] paravirtual IOMMU interface Paul Durrant
2018-08-03 17:22 ` [PATCH v5 01/15] iommu: turn need_iommu back into a boolean Paul Durrant
2018-08-08 13:39   ` Jan Beulich
2018-08-08 13:56     ` Paul Durrant
2018-08-03 17:22 ` [PATCH v5 02/15] iommu: introduce the concept of BFN Paul Durrant
2018-08-07  2:38   ` Tian, Kevin
2018-08-07  7:59     ` Paul Durrant
2018-08-07  8:26       ` Tian, Kevin
2018-08-03 17:22 ` [PATCH v5 03/15] iommu: make use of type-safe BFN and MFN in exported functions Paul Durrant
2018-08-07  2:45   ` Tian, Kevin
2018-08-03 17:22 ` [PATCH v5 04/15] iommu: push use of type-safe BFN and MFN into iommu_ops Paul Durrant
2018-08-07  2:49   ` Tian, Kevin
2018-08-03 17:22 ` [PATCH v5 05/15] iommu: don't domain_crash() inside iommu_map/unmap_page() Paul Durrant
2018-08-07  2:55   ` Tian, Kevin
2018-08-07  8:05     ` Paul Durrant
2018-08-07  8:23       ` Jan Beulich
2018-08-03 17:22 ` [PATCH v5 06/15] public / x86: introduce __HYPERCALL_iommu_op Paul Durrant
2018-08-07  3:00   ` Tian, Kevin
2018-08-07  8:10     ` Paul Durrant
2018-08-07  8:25       ` Jan Beulich
2018-08-17 21:10   ` Daniel De Graaf
2018-08-03 17:22 ` [PATCH v5 07/15] iommu: track reserved ranges using a rangeset Paul Durrant
2018-08-07  3:04   ` Tian, Kevin
2018-08-07  8:16     ` Paul Durrant
2018-08-07  8:23       ` Tian, Kevin
2018-08-03 17:22 ` Paul Durrant [this message]
2018-08-03 17:22 ` [PATCH v5 09/15] vtd: add lookup_page method to iommu_ops Paul Durrant
2018-08-07  3:25   ` Tian, Kevin
2018-08-07  8:21     ` Paul Durrant
2018-08-07  8:29       ` Jan Beulich
2018-08-07  8:32         ` Tian, Kevin
2018-08-07  8:37           ` Paul Durrant
2018-08-07  8:48             ` Tian, Kevin
2018-08-07  8:56               ` Paul Durrant
2018-08-07  9:03                 ` Tian, Kevin
2018-08-07  9:07                   ` Paul Durrant
2018-08-07  8:31       ` Tian, Kevin
2018-08-07  8:35         ` Paul Durrant
2018-08-07  8:47           ` Tian, Kevin
2018-08-03 17:22 ` [PATCH v5 10/15] mm / iommu: include need_iommu() test in iommu_use_hap_pt() Paul Durrant
2018-08-07  3:32   ` Tian, Kevin
2018-08-03 17:22 ` [PATCH v5 11/15] mm / iommu: split need_iommu() into has_iommu_pt() and sync_iommu_pt() Paul Durrant
2018-08-03 18:18   ` Razvan Cojocaru
2018-08-07  3:41   ` Tian, Kevin
2018-08-07  8:24     ` Paul Durrant
2018-08-03 17:22 ` [PATCH v5 12/15] x86: add iommu_op to enable modification of IOMMU mappings Paul Durrant
2018-08-07  4:08   ` Tian, Kevin
2018-08-07  8:32     ` Paul Durrant
2018-08-07  8:37       ` Tian, Kevin
2018-08-07  8:44         ` Paul Durrant
2018-08-07  9:01           ` Tian, Kevin
2018-08-07  9:12             ` Paul Durrant
2018-08-07  9:19               ` Tian, Kevin
2018-08-07  9:22                 ` Paul Durrant
2018-08-03 17:22 ` [PATCH v5 13/15] memory: add get_paged_gfn() as a wrapper Paul Durrant
2018-08-03 17:22 ` [PATCH v5 14/15] x86: add iommu_ops to modify and flush IOMMU mappings Paul Durrant
2018-08-03 17:22 ` [PATCH v5 15/15] x86: extend the map and unmap iommu_ops to support grant references Paul Durrant
