xen-devel.lists.xenproject.org archive mirror
From: Tiejun Chen <tiejun.chen@intel.com>
To: JBeulich@suse.com, ian.campbell@citrix.com,
	ian.jackson@eu.citrix.com, stefano.stabellini@eu.citrix.com,
	kevin.tian@intel.com, yang.z.zhang@intel.com
Cc: xen-devel@lists.xen.org
Subject: [v6][PATCH 1/7] introduce XENMEM_reserved_device_memory_map
Date: Wed, 10 Sep 2014 13:49:44 +0800	[thread overview]
Message-ID: <1410328190-6372-2-git-send-email-tiejun.chen@intel.com> (raw)
In-Reply-To: <1410328190-6372-1-git-send-email-tiejun.chen@intel.com>

From: Jan Beulich <jbeulich@suse.com>

This is a prerequisite for punching holes into HVM and PVH guests' P2M
to allow passing through devices that are associated with (on VT-d)
RMRRs.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
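
For reference, a minimal sketch of the intended two-pass usage of the new
subop (illustrative only, not part of this patch): call once with
nr_entries = 0 to learn how many reserved regions exist, then call again
with a suitably sized buffer. Here issue_memory_op() is a placeholder for
whatever XENMEM_* hypercall wrapper the caller has available (a libxc
wrapper is only introduced in patch 2/7), assumed to return the raw
hypercall value, and the buffer is assumed to be hypercall-safe memory.

    struct xen_mem_reserved_device_memory_map rdmmap = { .nr_entries = 0 };
    xen_mem_reserved_device_memory_t *entries = NULL;
    int rc;

    set_xen_guest_handle(rdmmap.buffer, entries);

    /* Pass 1: no buffer space -- Xen returns -ENOBUFS (if any regions
     * exist) and rewrites nr_entries to the number of reserved regions. */
    rc = issue_memory_op(XENMEM_reserved_device_memory_map, &rdmmap);

    if ( rc == -ENOBUFS && rdmmap.nr_entries )
    {
        entries = calloc(rdmmap.nr_entries, sizeof(*entries));
        if ( !entries )
            return -ENOMEM;
        set_xen_guest_handle(rdmmap.buffer, entries);

        /* Pass 2: Xen fills in one (start_pfn, nr_pages) record per
         * reserved region and returns 0. */
        rc = issue_memory_op(XENMEM_reserved_device_memory_map, &rdmmap);
    }

Note that the compat variant of the handler (xen/common/compat/memory.c
below) additionally returns -ERANGE for regions whose start_pfn/nr_pages
would not fit the narrower compat field widths.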

diff --git a/xen/common/compat/memory.c b/xen/common/compat/memory.c
index 25dc016..01154f6 100644
--- a/xen/common/compat/memory.c
+++ b/xen/common/compat/memory.c
@@ -15,6 +15,35 @@ CHECK_TYPE(domid);
 
 CHECK_mem_access_op;
 
+struct get_reserved_device_memory {
+    struct compat_mem_reserved_device_memory_map map;
+    unsigned int used_entries;
+};
+
+static int get_reserved_device_memory(xen_pfn_t start,
+                                      xen_ulong_t nr, void *ctxt)
+{
+    struct get_reserved_device_memory *grdm = ctxt;
+
+    if ( grdm->used_entries < grdm->map.nr_entries )
+    {
+        struct compat_mem_reserved_device_memory rdm = {
+            .start_pfn = start, .nr_pages = nr
+        };
+
+        if ( rdm.start_pfn != start || rdm.nr_pages != nr )
+            return -ERANGE;
+
+        if ( __copy_to_compat_offset(grdm->map.buffer, grdm->used_entries,
+                                     &rdm, 1) )
+            return -EFAULT;
+    }
+
+    ++grdm->used_entries;
+
+    return 0;
+}
+
 int compat_memory_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) compat)
 {
     int split, op = cmd & MEMOP_CMD_MASK;
@@ -272,6 +301,29 @@ int compat_memory_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) compat)
             break;
         }
 
+#ifdef HAS_PASSTHROUGH
+        case XENMEM_reserved_device_memory_map:
+        {
+            struct get_reserved_device_memory grdm;
+
+            if ( copy_from_guest(&grdm.map, compat, 1) ||
+                 !compat_handle_okay(grdm.map.buffer, grdm.map.nr_entries) )
+                return -EFAULT;
+
+            grdm.used_entries = 0;
+            rc = iommu_get_reserved_device_memory(get_reserved_device_memory,
+                                                  &grdm);
+
+            if ( !rc && grdm.map.nr_entries < grdm.used_entries )
+                rc = -ENOBUFS;
+            grdm.map.nr_entries = grdm.used_entries;
+            if ( __copy_to_guest(compat, &grdm.map, 1) )
+                rc = -EFAULT;
+
+            return rc;
+        }
+#endif
+
         default:
             return compat_arch_memory_op(cmd, compat);
         }
diff --git a/xen/common/memory.c b/xen/common/memory.c
index c2dd31b..c7efd6b 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -695,6 +695,32 @@ out:
     return rc;
 }
 
+struct get_reserved_device_memory {
+    struct xen_mem_reserved_device_memory_map map;
+    unsigned int used_entries;
+};
+
+static int get_reserved_device_memory(xen_pfn_t start,
+                                      xen_ulong_t nr, void *ctxt)
+{
+    struct get_reserved_device_memory *grdm = ctxt;
+
+    if ( grdm->used_entries < grdm->map.nr_entries )
+    {
+        struct xen_mem_reserved_device_memory rdm = {
+            .start_pfn = start, .nr_pages = nr
+        };
+
+        if ( __copy_to_guest_offset(grdm->map.buffer, grdm->used_entries,
+                                    &rdm, 1) )
+            return -EFAULT;
+    }
+
+    ++grdm->used_entries;
+
+    return 0;
+}
+
 long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
 {
     struct domain *d;
@@ -969,6 +995,29 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
 
         break;
 
+#ifdef HAS_PASSTHROUGH
+    case XENMEM_reserved_device_memory_map:
+    {
+        struct get_reserved_device_memory grdm;
+
+        if ( copy_from_guest(&grdm.map, arg, 1) ||
+             !guest_handle_okay(grdm.map.buffer, grdm.map.nr_entries) )
+            return -EFAULT;
+
+        grdm.used_entries = 0;
+        rc = iommu_get_reserved_device_memory(get_reserved_device_memory,
+                                              &grdm);
+
+        if ( !rc && grdm.map.nr_entries < grdm.used_entries )
+            rc = -ENOBUFS;
+        grdm.map.nr_entries = grdm.used_entries;
+        if ( __copy_to_guest(arg, &grdm.map, 1) )
+            rc = -EFAULT;
+
+        break;
+    }
+#endif
+
     default:
         rc = arch_memory_op(cmd, arg);
         break;
diff --git a/xen/drivers/passthrough/iommu.c b/xen/drivers/passthrough/iommu.c
index cc12735..7c17e8d 100644
--- a/xen/drivers/passthrough/iommu.c
+++ b/xen/drivers/passthrough/iommu.c
@@ -344,6 +344,16 @@ void iommu_crash_shutdown(void)
     iommu_enabled = iommu_intremap = 0;
 }
 
+int iommu_get_reserved_device_memory(iommu_grdm_t *func, void *ctxt)
+{
+    const struct iommu_ops *ops = iommu_get_ops();
+
+    if ( !iommu_enabled || !ops->get_reserved_device_memory )
+        return 0;
+
+    return ops->get_reserved_device_memory(func, ctxt);
+}
+
 bool_t iommu_has_feature(struct domain *d, enum iommu_feature feature)
 {
     const struct hvm_iommu *hd = domain_hvm_iommu(d);
diff --git a/xen/drivers/passthrough/vtd/dmar.c b/xen/drivers/passthrough/vtd/dmar.c
index 1152c3a..141e735 100644
--- a/xen/drivers/passthrough/vtd/dmar.c
+++ b/xen/drivers/passthrough/vtd/dmar.c
@@ -893,3 +893,20 @@ int platform_supports_x2apic(void)
     unsigned int mask = ACPI_DMAR_INTR_REMAP | ACPI_DMAR_X2APIC_OPT_OUT;
     return cpu_has_x2apic && ((dmar_flags & mask) == ACPI_DMAR_INTR_REMAP);
 }
+
+int intel_iommu_get_reserved_device_memory(iommu_grdm_t *func, void *ctxt)
+{
+    struct acpi_rmrr_unit *rmrr;
+    int rc = 0;
+
+    list_for_each_entry(rmrr, &acpi_rmrr_units, list)
+    {
+        rc = func(PFN_DOWN(rmrr->base_address),
+                  PFN_UP(rmrr->end_address) - PFN_DOWN(rmrr->base_address),
+                  ctxt);
+        if ( rc )
+            break;
+    }
+
+    return rc;
+}
diff --git a/xen/drivers/passthrough/vtd/extern.h b/xen/drivers/passthrough/vtd/extern.h
index 5524dba..f9ee9b0 100644
--- a/xen/drivers/passthrough/vtd/extern.h
+++ b/xen/drivers/passthrough/vtd/extern.h
@@ -75,6 +75,7 @@ int domain_context_mapping_one(struct domain *domain, struct iommu *iommu,
                                u8 bus, u8 devfn, const struct pci_dev *);
 int domain_context_unmap_one(struct domain *domain, struct iommu *iommu,
                              u8 bus, u8 devfn);
+int intel_iommu_get_reserved_device_memory(iommu_grdm_t *func, void *ctxt);
 
 unsigned int io_apic_read_remap_rte(unsigned int apic, unsigned int reg);
 void io_apic_write_remap_rte(unsigned int apic,
diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
index 042b882..d513dba 100644
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -2485,6 +2485,7 @@ const struct iommu_ops intel_iommu_ops = {
     .crash_shutdown = vtd_crash_shutdown,
     .iotlb_flush = intel_iommu_iotlb_flush,
     .iotlb_flush_all = intel_iommu_iotlb_flush_all,
+    .get_reserved_device_memory = intel_iommu_get_reserved_device_memory,
     .dump_p2m_table = vtd_dump_p2m_table,
 };
 
diff --git a/xen/include/public/memory.h b/xen/include/public/memory.h
index 2c57aa0..c59d42d 100644
--- a/xen/include/public/memory.h
+++ b/xen/include/public/memory.h
@@ -523,7 +523,29 @@ DEFINE_XEN_GUEST_HANDLE(xen_mem_sharing_op_t);
 
 #endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
 
-/* Next available subop number is 26 */
+/*
+ * For legacy reasons, some devices must be configured with special memory
+ * regions to function correctly.  The guest must avoid using any of these
+ * regions.
+ */
+#define XENMEM_reserved_device_memory_map   26
+struct xen_mem_reserved_device_memory {
+    xen_pfn_t start_pfn;
+    xen_ulong_t nr_pages;
+};
+typedef struct xen_mem_reserved_device_memory xen_mem_reserved_device_memory_t;
+DEFINE_XEN_GUEST_HANDLE(xen_mem_reserved_device_memory_t);
+
+struct xen_mem_reserved_device_memory_map {
+    /* IN/OUT */
+    unsigned int nr_entries;
+    /* OUT */
+    XEN_GUEST_HANDLE(xen_mem_reserved_device_memory_t) buffer;
+};
+typedef struct xen_mem_reserved_device_memory_map xen_mem_reserved_device_memory_map_t;
+DEFINE_XEN_GUEST_HANDLE(xen_mem_reserved_device_memory_map_t);
+
+/* Next available subop number is 27 */
 
 #endif /* __XEN_PUBLIC_MEMORY_H__ */
 
diff --git a/xen/include/xen/iommu.h b/xen/include/xen/iommu.h
index 8eb764a..409f6f8 100644
--- a/xen/include/xen/iommu.h
+++ b/xen/include/xen/iommu.h
@@ -120,6 +120,8 @@ void iommu_dt_domain_destroy(struct domain *d);
 
 struct page_info;
 
+typedef int iommu_grdm_t(xen_pfn_t start, xen_ulong_t nr, void *ctxt);
+
 struct iommu_ops {
     int (*init)(struct domain *d);
     void (*hwdom_init)(struct domain *d);
@@ -156,12 +158,14 @@ struct iommu_ops {
     void (*crash_shutdown)(void);
     void (*iotlb_flush)(struct domain *d, unsigned long gfn, unsigned int page_count);
     void (*iotlb_flush_all)(struct domain *d);
+    int (*get_reserved_device_memory)(iommu_grdm_t *, void *);
     void (*dump_p2m_table)(struct domain *d);
 };
 
 void iommu_suspend(void);
 void iommu_resume(void);
 void iommu_crash_shutdown(void);
+int iommu_get_reserved_device_memory(iommu_grdm_t *, void *);
 
 void iommu_share_p2m_table(struct domain *d);
 
diff --git a/xen/include/xlat.lst b/xen/include/xlat.lst
index 9a35dd7..3ec1749 100644
--- a/xen/include/xlat.lst
+++ b/xen/include/xlat.lst
@@ -60,7 +60,8 @@
 !	memory_exchange			memory.h
 !	memory_map			memory.h
 !	memory_reservation		memory.h
-?	mem_access_op		memory.h
+?	mem_access_op			memory.h
+!	mem_reserved_device_memory_map	memory.h
 !	pod_target			memory.h
 !	remove_from_physmap		memory.h
 ?	physdev_eoi			physdev.h
-- 
1.9.1


Thread overview: 37+ messages
2014-09-10  5:49 [v6][PATCH 0/7] xen: reserve RMRR to avoid conflicting MMIO/RAM Tiejun Chen
2014-09-10  5:49 ` Tiejun Chen [this message]
2014-09-10 21:34   ` [v6][PATCH 1/7] introduce XENMEM_reserved_device_memory_map Tian, Kevin
2014-09-10  5:49 ` [v6][PATCH 2/7] tools/libxc: introduce hypercall for xc_reserved_device_memory_map Tiejun Chen
2014-09-11 15:21   ` Jan Beulich
2014-09-11 15:23     ` Ian Campbell
2014-09-11 15:55     ` Andrew Cooper
2014-09-12  2:43     ` Chen, Tiejun
2014-09-12  6:20       ` Jan Beulich
2014-09-10  5:49 ` [v6][PATCH 3/7] tools/libxc: check if mmio BAR is out of reserved device memory maps Tiejun Chen
2014-09-10 21:37   ` Tian, Kevin
2014-09-11  1:14     ` Chen, Tiejun
2014-09-11 22:55       ` Tian, Kevin
2014-09-11 15:38   ` Jan Beulich
2014-09-12  2:56     ` Chen, Tiejun
2014-09-12  6:19     ` Jan Beulich
2014-09-10  5:49 ` [v6][PATCH 4/7] libxc/hvm_info_table: introduce a new field nr_reserved_device_memory_map Tiejun Chen
2014-09-10 21:39   ` Tian, Kevin
2014-09-11  1:16     ` Chen, Tiejun
2014-09-10  5:49 ` [v6][PATCH 5/7] hvmloader: introduce hypercall for xc_reserved_device_memory_map Tiejun Chen
2014-09-10 21:41   ` Tian, Kevin
2014-09-11  1:32     ` Chen, Tiejun
2014-09-11  7:52     ` Jan Beulich
2014-09-11 15:45   ` Jan Beulich
2014-09-12  4:52     ` Chen, Tiejun
2014-09-10  5:49 ` [v6][PATCH 6/7] hvmloader: check to reserved device memory maps in e820 Tiejun Chen
2014-09-11 15:57   ` Jan Beulich
2014-09-12  6:08     ` Jan Beulich
2014-09-12  6:28     ` Chen, Tiejun
2014-09-12  6:44       ` Jan Beulich
2014-09-10  5:49 ` [v6][PATCH 7/7] xen/vtd: make USB RMRR mapping safe Tiejun Chen
2014-09-18  9:11   ` Jan Beulich
2014-09-10 21:44 ` [v6][PATCH 0/7] xen: reserve RMRR to avoid conflicting MMIO/RAM Tian, Kevin
2014-09-11  1:38   ` Chen, Tiejun
2014-09-11  7:48     ` Jan Beulich
2014-09-11  9:39       ` Chen, Tiejun
2014-09-11 10:01         ` Jan Beulich
