From: "Chen, Tiejun" <tiejun.chen@intel.com>
To: Jan Beulich <JBeulich@suse.com>
Cc: kevin.tian@intel.com, ian.campbell@citrix.com,
stefano.stabellini@eu.citrix.com,
Andrew Cooper <andrew.cooper3@citrix.com>,
ian.jackson@eu.citrix.com, xen-devel@lists.xen.org,
yang.z.zhang@intel.com
Subject: Re: [v5][PATCH 03/10] xen:x86: define a new hypercall to get RMRR mappings
Date: Thu, 04 Sep 2014 09:15:37 +0800
Message-ID: <5407BD39.30504@intel.com>
In-Reply-To: <54072B9502000078000304D0@mail.emea.novell.com>
On 2014/9/3 20:54, Jan Beulich wrote:
>>>> On 03.09.14 at 10:41, <JBeulich@suse.com> wrote:
>> I'm afraid I have to give up and instead go and implement this for
>> you (which already by now would clearly have been the much less
>> time consuming thing at least on my end).
>
> So here's something to try (only compile tested).
This works, so I will rebase the other patches on top of it.
Thanks
Tiejun
>
> Jan
>
> introduce XENMEM_reserved_device_memory_map
>
> This is a prerequisite for punching holes into HVM and PVH guests' P2M
> to allow passing through devices that are associated with (on VT-d)
> RMRRs.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>
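To make the intended consumption concrete, here is a rough, hypothetical sketch (not part of this patch; the real work happens in later patches of this series, e.g. the hvmloader E820 one) of how a guest-side consumer could take the ranges reported by the new subop and mark them reserved in the guest E820, so that no RAM gets placed on top of them. struct e820entry, E820_RESERVED and PAGE_SHIFT below simply follow the usual conventions and should be read as assumptions.

/*
 * Hypothetical illustration only: reserve each reported range in the
 * guest E820.  Field names (addr/size/type) and the constants used
 * follow common e820 conventions and are assumptions here.
 */
static unsigned int reserve_rdm_ranges(struct e820entry *e820, unsigned int nr,
                                       const xen_mem_reserved_device_memory_t *rdm,
                                       unsigned int nr_rdm)
{
    unsigned int i;

    for ( i = 0; i < nr_rdm; i++ )
    {
        e820[nr].addr = (uint64_t)rdm[i].start_pfn << PAGE_SHIFT;
        e820[nr].size = (uint64_t)rdm[i].nr_pages << PAGE_SHIFT;
        e820[nr].type = E820_RESERVED;
        nr++;
    }

    return nr;   /* updated number of E820 entries */
}
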
> --- a/xen/common/compat/memory.c
> +++ b/xen/common/compat/memory.c
> @@ -15,6 +15,35 @@ CHECK_TYPE(domid);
>
> CHECK_mem_access_op;
>
> +struct get_reserved_device_memory {
> +    struct compat_mem_reserved_device_memory_map map;
> +    unsigned int used_entries;
> +};
> +
> +static int get_reserved_device_memory(xen_pfn_t start,
> +                                      xen_ulong_t nr, void *ctxt)
> +{
> +    struct get_reserved_device_memory *grdm = ctxt;
> +
> +    if ( grdm->used_entries < grdm->map.nr_entries )
> +    {
> +        struct compat_mem_reserved_device_memory rdm = {
> +            .start_pfn = start, .nr_pages = nr
> +        };
> +
> +        if ( rdm.start_pfn != start || rdm.nr_pages != nr )
> +            return -ERANGE;
> +
> +        if ( __copy_to_compat_offset(grdm->map.buffer, grdm->used_entries,
> +                                     &rdm, 1) )
> +            return -EFAULT;
> +    }
> +
> +    ++grdm->used_entries;
> +
> +    return 0;
> +}
> +
> int compat_memory_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) compat)
> {
>     int split, op = cmd & MEMOP_CMD_MASK;
> @@ -272,6 +301,29 @@ int compat_memory_op(unsigned int cmd, X
>             break;
>         }
>
> +#ifdef HAS_PASSTHROUGH
> +        case XENMEM_reserved_device_memory_map:
> +        {
> +            struct get_reserved_device_memory grdm;
> +
> +            if ( copy_from_guest(&grdm.map, compat, 1) ||
> +                 !compat_handle_okay(grdm.map.buffer, grdm.map.nr_entries) )
> +                return -EFAULT;
> +
> +            grdm.used_entries = 0;
> +            rc = iommu_get_reserved_device_memory(get_reserved_device_memory,
> +                                                  &grdm);
> +
> +            if ( !rc && grdm.map.nr_entries < grdm.used_entries )
> +                rc = -ENOBUFS;
> +            grdm.map.nr_entries = grdm.used_entries;
> +            if ( __copy_to_guest(compat, &grdm.map, 1) )
> +                rc = -EFAULT;
> +
> +            return rc;
> +        }
> +#endif
> +
>         default:
>             return compat_arch_memory_op(cmd, compat);
>         }
> --- a/xen/common/memory.c
> +++ b/xen/common/memory.c
> @@ -695,6 +695,32 @@ out:
>     return rc;
> }
>
> +struct get_reserved_device_memory {
> +    struct xen_mem_reserved_device_memory_map map;
> +    unsigned int used_entries;
> +};
> +
> +static int get_reserved_device_memory(xen_pfn_t start,
> +                                      xen_ulong_t nr, void *ctxt)
> +{
> +    struct get_reserved_device_memory *grdm = ctxt;
> +
> +    if ( grdm->used_entries < grdm->map.nr_entries )
> +    {
> +        struct xen_mem_reserved_device_memory rdm = {
> +            .start_pfn = start, .nr_pages = nr
> +        };
> +
> +        if ( __copy_to_guest_offset(grdm->map.buffer, grdm->used_entries,
> +                                    &rdm, 1) )
> +            return -EFAULT;
> +    }
> +
> +    ++grdm->used_entries;
> +
> +    return 0;
> +}
> +
> long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
> {
>     struct domain *d;
> @@ -969,6 +995,29 @@ long do_memory_op(unsigned long cmd, XEN
>
>         break;
>
> +#ifdef HAS_PASSTHROUGH
> +    case XENMEM_reserved_device_memory_map:
> +    {
> +        struct get_reserved_device_memory grdm;
> +
> +        if ( copy_from_guest(&grdm.map, arg, 1) ||
> +             !guest_handle_okay(grdm.map.buffer, grdm.map.nr_entries) )
> +            return -EFAULT;
> +
> +        grdm.used_entries = 0;
> +        rc = iommu_get_reserved_device_memory(get_reserved_device_memory,
> +                                              &grdm);
> +
> +        if ( !rc && grdm.map.nr_entries < grdm.used_entries )
> +            rc = -ENOBUFS;
> +        grdm.map.nr_entries = grdm.used_entries;
> +        if ( __copy_to_guest(arg, &grdm.map, 1) )
> +            rc = -EFAULT;
> +
> +        break;
> +    }
> +#endif
> +
>     default:
>         rc = arch_memory_op(cmd, arg);
>         break;
> --- a/xen/drivers/passthrough/iommu.c
> +++ b/xen/drivers/passthrough/iommu.c
> @@ -344,6 +344,16 @@ void iommu_crash_shutdown(void)
>     iommu_enabled = iommu_intremap = 0;
> }
>
> +int iommu_get_reserved_device_memory(iommu_grdm_t *func, void *ctxt)
> +{
> +    const struct iommu_ops *ops = iommu_get_ops();
> +
> +    if ( !iommu_enabled || !ops->get_reserved_device_memory )
> +        return 0;
> +
> +    return ops->get_reserved_device_memory(func, ctxt);
> +}
> +
> bool_t iommu_has_feature(struct domain *d, enum iommu_feature feature)
> {
>     const struct hvm_iommu *hd = domain_hvm_iommu(d);
> --- a/xen/drivers/passthrough/vtd/dmar.c
> +++ b/xen/drivers/passthrough/vtd/dmar.c
> @@ -893,3 +893,20 @@ int platform_supports_x2apic(void)
>     unsigned int mask = ACPI_DMAR_INTR_REMAP | ACPI_DMAR_X2APIC_OPT_OUT;
>     return cpu_has_x2apic && ((dmar_flags & mask) == ACPI_DMAR_INTR_REMAP);
> }
> +
> +int intel_iommu_get_reserved_device_memory(iommu_grdm_t *func, void *ctxt)
> +{
> +    struct acpi_rmrr_unit *rmrr;
> +    int rc = 0;
> +
> +    list_for_each_entry(rmrr, &acpi_rmrr_units, list)
> +    {
> +        rc = func(PFN_DOWN(rmrr->base_address),
> +                  PFN_UP(rmrr->end_address) - PFN_DOWN(rmrr->base_address),
> +                  ctxt);
> +        if ( rc )
> +            break;
> +    }
> +
> +    return rc;
> +}
> --- a/xen/drivers/passthrough/vtd/extern.h
> +++ b/xen/drivers/passthrough/vtd/extern.h
> @@ -75,6 +75,7 @@ int domain_context_mapping_one(struct do
>                                u8 bus, u8 devfn, const struct pci_dev *);
> int domain_context_unmap_one(struct domain *domain, struct iommu *iommu,
>                              u8 bus, u8 devfn);
> +int intel_iommu_get_reserved_device_memory(iommu_grdm_t *func, void *ctxt);
>
> unsigned int io_apic_read_remap_rte(unsigned int apic, unsigned int reg);
> void io_apic_write_remap_rte(unsigned int apic,
> --- a/xen/drivers/passthrough/vtd/iommu.c
> +++ b/xen/drivers/passthrough/vtd/iommu.c
> @@ -2485,6 +2485,7 @@ const struct iommu_ops intel_iommu_ops =
>     .crash_shutdown = vtd_crash_shutdown,
>     .iotlb_flush = intel_iommu_iotlb_flush,
>     .iotlb_flush_all = intel_iommu_iotlb_flush_all,
> +    .get_reserved_device_memory = intel_iommu_get_reserved_device_memory,
>     .dump_p2m_table = vtd_dump_p2m_table,
> };
>
> --- a/xen/include/xen/iommu.h
> +++ b/xen/include/xen/iommu.h
> @@ -120,6 +120,8 @@ void iommu_dt_domain_destroy(struct doma
>
> struct page_info;
>
> +typedef int iommu_grdm_t(xen_pfn_t start, xen_ulong_t nr, void *ctxt);
> +
> struct iommu_ops {
>     int (*init)(struct domain *d);
>     void (*hwdom_init)(struct domain *d);
> @@ -156,12 +158,14 @@ struct iommu_ops {
>     void (*crash_shutdown)(void);
>     void (*iotlb_flush)(struct domain *d, unsigned long gfn, unsigned int page_count);
>     void (*iotlb_flush_all)(struct domain *d);
> +    int (*get_reserved_device_memory)(iommu_grdm_t *, void *);
>     void (*dump_p2m_table)(struct domain *d);
> };
>
> void iommu_suspend(void);
> void iommu_resume(void);
> void iommu_crash_shutdown(void);
> +int iommu_get_reserved_device_memory(iommu_grdm_t *, void *);
>
> void iommu_share_p2m_table(struct domain *d);
>
> --- a/xen/include/public/memory.h
> +++ b/xen/include/public/memory.h
> @@ -523,7 +523,29 @@ DEFINE_XEN_GUEST_HANDLE(xen_mem_sharing_
>
> #endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
>
> -/* Next available subop number is 26 */
> +/*
> + * For legacy reasons, some devices must be configured with special memory
> + * regions to function correctly. The guest must avoid using any of these
> + * regions.
> + */
> +#define XENMEM_reserved_device_memory_map 26
> +struct xen_mem_reserved_device_memory {
> +    xen_pfn_t start_pfn;
> +    xen_ulong_t nr_pages;
> +};
> +typedef struct xen_mem_reserved_device_memory xen_mem_reserved_device_memory_t;
> +DEFINE_XEN_GUEST_HANDLE(xen_mem_reserved_device_memory_t);
> +
> +struct xen_mem_reserved_device_memory_map {
> +    /* IN/OUT */
> +    unsigned int nr_entries;
> +    /* OUT */
> +    XEN_GUEST_HANDLE(xen_mem_reserved_device_memory_t) buffer;
> +};
> +typedef struct xen_mem_reserved_device_memory_map xen_mem_reserved_device_memory_map_t;
> +DEFINE_XEN_GUEST_HANDLE(xen_mem_reserved_device_memory_map_t);
> +
> +/* Next available subop number is 27 */
>
> #endif /* __XEN_PUBLIC_MEMORY_H__ */
>
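As a usage note on the interface above, here is a minimal caller sketch. It assumes the environment provides some XENMEM hypercall wrapper (written below as memory_op(), a placeholder name, e.g. HYPERVISOR_memory_op in a kernel or hvmloader's hypercall wrapper) plus the standard set_xen_guest_handle() helper; MAX_RDM is an arbitrary illustration value. The point is the sizing contract: on success nr_entries holds the number of entries written, while -ENOBUFS means the buffer was too small and nr_entries now holds the required count.

/* Sketch only; memory_op() and MAX_RDM are placeholders, not real names. */
#define MAX_RDM 16

static xen_mem_reserved_device_memory_t rdm[MAX_RDM];

static int get_rdm_map(unsigned int *nr_entries)
{
    struct xen_mem_reserved_device_memory_map map = {
        .nr_entries = MAX_RDM,
    };
    int rc;

    set_xen_guest_handle(map.buffer, rdm);
    rc = memory_op(XENMEM_reserved_device_memory_map, &map);

    /*
     * rc == 0: all entries fit, map.nr_entries is the count written.
     * rc == -ENOBUFS: map.nr_entries is the count needed; retry with a
     * larger buffer.
     */
    *nr_entries = map.nr_entries;
    return rc;
}

This matches the handling in the do_memory_op/compat_memory_op hunks above, where used_entries keeps counting past the end of the supplied buffer precisely so the caller can learn the required size.
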
> --- a/xen/include/xlat.lst
> +++ b/xen/include/xlat.lst
> @@ -60,7 +60,8 @@
> ! memory_exchange memory.h
> ! memory_map memory.h
> ! memory_reservation memory.h
> -? mem_access_op memory.h
> +? mem_access_op memory.h
> +! mem_reserved_device_memory_map memory.h
> ! pod_target memory.h
> ! remove_from_physmap memory.h
> ? physdev_eoi physdev.h
>
>
Thread overview: 53+ messages
2014-08-26 11:02 [v5][PATCH 0/10] xen: reserve RMRR to avoid conflicting MMIO/RAM Tiejun Chen
2014-08-26 11:02 ` [v5][PATCH 01/10] xen:vtd:rmrr: export acpi_rmrr_units Tiejun Chen
2014-08-26 11:02 ` [v5][PATCH 02/10] xen:vtd:rmrr: introduce acpi_rmrr_unit_entries Tiejun Chen
2014-08-26 11:02 ` [v5][PATCH 03/10] xen:x86: define a new hypercall to get RMRR mappings Tiejun Chen
2014-08-26 12:02 ` Andrew Cooper
2014-08-26 12:37 ` Jan Beulich
2014-08-27 1:37 ` Chen, Tiejun
2014-08-27 6:51 ` Jan Beulich
2014-08-27 7:21 ` Chen, Tiejun
2014-08-28 2:24 ` Chen, Tiejun
2014-08-28 6:50 ` Jan Beulich
2014-08-28 7:09 ` Chen, Tiejun
2014-08-28 7:19 ` Chen, Tiejun
2014-08-28 7:29 ` Chen, Tiejun
2014-08-28 7:44 ` Jan Beulich
2014-08-29 3:02 ` Chen, Tiejun
2014-08-29 9:18 ` Jan Beulich
2014-09-01 9:44 ` Chen, Tiejun
2014-09-01 10:29 ` Jan Beulich
2014-09-02 9:59 ` Chen, Tiejun
2014-09-02 10:15 ` Jan Beulich
2014-09-02 11:10 ` Chen, Tiejun
2014-09-02 13:15 ` Jan Beulich
2014-09-03 1:45 ` Chen, Tiejun
2014-09-03 8:31 ` Chen, Tiejun
2014-09-03 8:41 ` Jan Beulich
2014-09-03 8:59 ` Chen, Tiejun
2014-09-03 9:01 ` Chen, Tiejun
2014-09-03 9:54 ` Chen, Tiejun
2014-09-03 12:54 ` Jan Beulich
2014-09-04 1:15 ` Chen, Tiejun [this message]
2014-09-03 8:35 ` Jan Beulich
2014-08-27 1:15 ` Chen, Tiejun
2014-09-02 8:25 ` Jan Beulich
2014-08-26 11:02 ` [v5][PATCH 04/10] tools:libxc: introduce hypercall for xc_reserved_device_memory_map Tiejun Chen
2014-08-26 11:02 ` [v5][PATCH 05/10] tools:libxc: check if mmio BAR is out of RMRR mappings Tiejun Chen
2014-08-26 11:02 ` [v5][PATCH 06/10] hvm_info_table: introduce nr_reserved_device_memory_map Tiejun Chen
2014-09-02 8:34 ` Jan Beulich
2014-09-04 2:07 ` Chen, Tiejun
2014-09-04 6:32 ` Jan Beulich
2014-09-04 6:55 ` Chen, Tiejun
[not found] ` <54082E3B0200007800030BCB@mail.emea.novell.com>
2014-09-09 6:40 ` Chen, Tiejun
2014-08-26 11:02 ` [v5][PATCH 07/10] xen:x86:: support xc_reserved_device_memory_map in compat case Tiejun Chen
2014-09-02 8:35 ` Jan Beulich
2014-09-04 2:13 ` Chen, Tiejun
2014-08-26 11:02 ` [v5][PATCH 08/10] tools:firmware:hvmloader: introduce hypercall for xc_reserved_device_memory_map Tiejun Chen
2014-09-02 8:37 ` Jan Beulich
2014-08-26 11:02 ` [v5][PATCH 09/10] tools:firmware:hvmloader: check to reserve RMRR mappings in e820 Tiejun Chen
2014-09-02 8:47 ` Jan Beulich
2014-09-04 3:04 ` Chen, Tiejun
2014-09-04 4:32 ` Chen, Tiejun
2014-09-04 6:36 ` Jan Beulich
2014-08-26 11:03 ` [v5][PATCH 10/10] xen:vtd: make USB RMRR mapping safe Tiejun Chen