* [RFC PATCH 13/16]: PVH xen: introduce p2m_map_foreign
From: Mukesh Rathor @ 2013-01-12 2:09 UTC
To: Xen-devel@lists.xensource.com
In this patch, I introduce a new p2m type, p2m_map_foreign, for pages that
dom0 maps from the foreign domains it is creating. Also, add
set_foreign_p2m_entry() to map pages with the p2m_map_foreign type. Finally,
allow XENMEM_remove_from_physmap to remove p2m_map_foreign pages; note that
on this path we must release the refcount that was taken during the map
phase.
Signed-off-by: Mukesh Rathor <mukesh.rathor@oracle.com>
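For context, the map side that takes the refcount released here is
xenmem_add_foreign_to_pmap(), added elsewhere in this series. The following
is only an illustrative sketch of that pairing, not the series' actual code;
the helper name, signature and error handling are placeholders:

    /* Illustrative sketch only: mapping a foreign page takes a page
     * reference, which the XENMEM_remove_from_physmap path in this patch
     * drops again via put_page(). */
    static int add_foreign_mapping_sketch(struct domain *currd, domid_t fdomid,
                                          unsigned long fgfn, unsigned long gpfn)
    {
        p2m_type_t p2mt;
        struct page_info *page;
        struct domain *fdom = rcu_lock_domain_by_id(fdomid);
        int rc = -EINVAL;

        if ( fdom == NULL || fdom == currd )
            goto out;

        /* Takes the reference that the remove path must later release. */
        page = get_page_from_gfn(fdom, fgfn, &p2mt, P2M_ALLOC);
        if ( page == NULL )
            goto out;
        if ( !p2m_is_ram(p2mt) )
        {
            put_page(page);
            goto out;
        }

        /* Map the foreign mfn at gpfn in the current (dom0) p2m. */
        if ( set_foreign_p2m_entry(currd, gpfn, _mfn(page_to_mfn(page))) )
            rc = 0;
        else
            put_page(page);          /* mapping failed: drop the reference */

     out:
        if ( fdom != NULL )
            rcu_unlock_domain(fdom);
        return rc;
    }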
diff -r 31a145002453 -r 2c894340b16f xen/arch/x86/domctl.c
--- a/xen/arch/x86/domctl.c Fri Jan 11 16:38:07 2013 -0800
+++ b/xen/arch/x86/domctl.c Fri Jan 11 16:43:02 2013 -0800
@@ -69,9 +69,10 @@ long domctl_memory_mapping(struct domain
if ( add_map )
{
- printk(XENLOG_G_INFO
- "memory_map:add: dom%d gfn=%lx mfn=%lx nr=%lx\n",
- d->domain_id, gfn, mfn, nr_mfns);
+ if ( !is_pvh_domain(d) ) /* PVH maps lots and lots */
+ printk(XENLOG_G_INFO
+ "memory_map:add: dom%d gfn=%lx mfn=%lx nr=%lx\n",
+ d->domain_id, gfn, mfn, nr_mfns);
ret = iomem_permit_access(d, mfn, mfn + nr_mfns - 1);
if ( !ret && paging_mode_translate(d) )
@@ -94,9 +95,10 @@ long domctl_memory_mapping(struct domain
}
}
} else {
- printk(XENLOG_G_INFO
- "memory_map:remove: dom%d gfn=%lx mfn=%lx nr=%lx\n",
- d->domain_id, gfn, mfn, nr_mfns);
+ if ( !is_pvh_domain(d) ) /* PVH unmaps lots and lots */
+ printk(XENLOG_G_INFO
+ "memory_map:remove: dom%d gfn=%lx mfn=%lx nr=%lx\n",
+ d->domain_id, gfn, mfn, nr_mfns);
if ( paging_mode_translate(d) )
for ( i = 0; i < nr_mfns; i++ )
@@ -1651,6 +1653,11 @@ void arch_get_info_guest(struct vcpu *v,
c.nat->gs_base_kernel = hvm_get_shadow_gs_base(v);
}
}
+ else if ( is_pvh_vcpu(v) )
+ {
+ /* fixme: phase II work */
+ printk("PVH: FIXME: arch_get_info_guest()\n");
+ }
else
{
c(ldt_base = v->arch.pv_vcpu.ldt_base);
diff -r 31a145002453 -r 2c894340b16f xen/arch/x86/mm/p2m-ept.c
--- a/xen/arch/x86/mm/p2m-ept.c Fri Jan 11 16:38:07 2013 -0800
+++ b/xen/arch/x86/mm/p2m-ept.c Fri Jan 11 16:43:02 2013 -0800
@@ -75,6 +75,7 @@ static void ept_p2m_type_to_flags(ept_en
entry->w = 0;
break;
case p2m_grant_map_rw:
+ case p2m_map_foreign:
entry->r = entry->w = 1;
entry->x = 0;
break;
@@ -428,7 +429,7 @@ ept_set_entry(struct p2m_domain *p2m, un
}
/* Track the highest gfn for which we have ever had a valid mapping */
- if ( p2mt != p2m_invalid &&
+ if ( p2mt != p2m_invalid && p2mt != p2m_mmio_dm &&
(gfn + (1UL << order) - 1 > p2m->max_mapped_pfn) )
p2m->max_mapped_pfn = gfn + (1UL << order) - 1;
@@ -472,7 +473,6 @@ out:
}
}
}
-
/* Release the old intermediate tables, if any. This has to be the
last thing we do, after the ept_sync_domain() and removal
from the iommu tables, so as to avoid a potential
diff -r 31a145002453 -r 2c894340b16f xen/arch/x86/mm/p2m-pt.c
--- a/xen/arch/x86/mm/p2m-pt.c Fri Jan 11 16:38:07 2013 -0800
+++ b/xen/arch/x86/mm/p2m-pt.c Fri Jan 11 16:43:02 2013 -0800
@@ -89,6 +89,7 @@ static unsigned long p2m_type_to_flags(p
case p2m_ram_rw:
return flags | P2M_BASE_FLAGS | _PAGE_RW;
case p2m_grant_map_rw:
+ case p2m_map_foreign:
return flags | P2M_BASE_FLAGS | _PAGE_RW | _PAGE_NX_BIT;
case p2m_mmio_direct:
if ( !rangeset_contains_singleton(mmio_ro_ranges, mfn_x(mfn)) )
@@ -429,7 +430,7 @@ p2m_set_entry(struct p2m_domain *p2m, un
}
/* Track the highest gfn for which we have ever had a valid mapping */
- if ( p2mt != p2m_invalid
+ if ( p2mt != p2m_invalid && p2mt != p2m_mmio_dm
&& (gfn + (1UL << page_order) - 1 > p2m->max_mapped_pfn) )
p2m->max_mapped_pfn = gfn + (1UL << page_order) - 1;
diff -r 31a145002453 -r 2c894340b16f xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c Fri Jan 11 16:38:07 2013 -0800
+++ b/xen/arch/x86/mm/p2m.c Fri Jan 11 16:43:02 2013 -0800
@@ -488,7 +488,7 @@ p2m_remove_page(struct p2m_domain *p2m,
for ( i = 0; i < (1UL << page_order); i++ )
{
mfn_return = p2m->get_entry(p2m, gfn + i, &t, &a, 0, NULL);
- if ( !p2m_is_grant(t) && !p2m_is_shared(t) )
+ if ( !p2m_is_grant(t) && !p2m_is_shared(t) && !p2m_is_foreign(t) )
set_gpfn_from_mfn(mfn+i, INVALID_M2P_ENTRY);
ASSERT( !p2m_is_valid(t) || mfn + i == mfn_x(mfn_return) );
}
@@ -584,6 +584,11 @@ guest_physmap_add_entry(struct domain *d
{
ASSERT(mfn_valid(omfn));
set_gpfn_from_mfn(mfn_x(omfn), INVALID_M2P_ENTRY);
+
+ /* Because PVH domU uses kmalloc for grant pfn, we need to save
+ * and restore the old mfn */
+ if (is_pvh_domain(d) && p2m_is_grant(t))
+ free_domheap_page(mfn_to_page(omfn));
}
else if ( ot == p2m_populate_on_demand )
{
@@ -715,7 +720,34 @@ void p2m_change_type_range(struct domain
p2m_unlock(p2m);
}
+/* Returns: True for success. 0 for failure */
+int set_foreign_p2m_entry(struct domain *dp, unsigned long gfn, mfn_t mfn)
+{
+ int rc = 0;
+ p2m_type_t ot;
+ mfn_t omfn;
+ struct p2m_domain *p2m = p2m_get_hostp2m(dp);
+ if ( !paging_mode_translate(dp) )
+ return 0;
+
+ omfn = get_gfn_query_unlocked(dp, gfn, &ot);
+ if (mfn_valid(omfn)) {
+ gdprintk(XENLOG_ERR, "Already mapped mfn %lx at gfn:%lx\n",
+ mfn_x(omfn), gfn);
+ set_gpfn_from_mfn(mfn_x(omfn), INVALID_M2P_ENTRY);
+ }
+
+ P2M_DEBUG("set foreign %lx %lx\n", gfn, mfn_x(mfn));
+ p2m_lock(p2m);
+ rc = set_p2m_entry(p2m, gfn, mfn, 0, p2m_map_foreign, p2m->default_access);
+ p2m_unlock(p2m);
+ if ( rc == 0 )
+ gdprintk(XENLOG_ERR,
+ "set_foreign_p2m_entry: set_p2m_entry failed! gfn:%lx mfn=%08lx\n",
+ gfn, mfn_x(get_gfn_query(dp, gfn, &ot)));
+ return rc;
+}
int
set_mmio_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn)
diff -r 31a145002453 -r 2c894340b16f xen/arch/x86/physdev.c
--- a/xen/arch/x86/physdev.c Fri Jan 11 16:38:07 2013 -0800
+++ b/xen/arch/x86/physdev.c Fri Jan 11 16:43:02 2013 -0800
@@ -485,6 +485,7 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
case PHYSDEVOP_set_iopl: {
struct physdev_set_iopl set_iopl;
+ NO_PVH_ASSERT_VCPU(current);
ret = -EFAULT;
if ( copy_from_guest(&set_iopl, arg, 1) != 0 )
break;
@@ -498,6 +499,7 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
case PHYSDEVOP_set_iobitmap: {
struct physdev_set_iobitmap set_iobitmap;
+ NO_PVH_ASSERT_VCPU(current);
ret = -EFAULT;
if ( copy_from_guest(&set_iobitmap, arg, 1) != 0 )
break;
@@ -738,7 +740,8 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
struct domain *d = current->domain;
ret = -EPERM;
-
+ if ( !IS_PRIV(d) || !is_pvh_domain(d))
+ break;
d = rcu_lock_current_domain();
ret = -EFAULT;
diff -r 31a145002453 -r 2c894340b16f xen/common/memory.c
--- a/xen/common/memory.c Fri Jan 11 16:38:07 2013 -0800
+++ b/xen/common/memory.c Fri Jan 11 16:43:02 2013 -0800
@@ -650,9 +650,12 @@ long do_memory_op(unsigned long cmd, XEN
case XENMEM_remove_from_physmap:
{
+ unsigned long argmfn, foreign_mfn = INVALID_MFN;
struct xen_remove_from_physmap xrfp;
struct page_info *page;
- struct domain *d;
+ struct domain *d, *foreign_dom = NULL;
+ p2m_type_t p2mt, tp;
+ int valid_pvh_pg, is_curr_pvh = is_pvh_vcpu(current);
if ( copy_from_guest(&xrfp, arg, 1) )
return -EFAULT;
@@ -669,14 +672,45 @@ long do_memory_op(unsigned long cmd, XEN
domain_lock(d);
- page = get_page_from_gfn(d, xrfp.gpfn, NULL, P2M_ALLOC);
- if ( page )
+ /* PVH note: if PVH, the gfn could be mapped to a mfn from foreign
+ * domain by the user space tool during domain creation. We need to
+ * check for that, free it up from the p2m, and release refcnt on it.
+ * In such a case, page would be NULL. */
+
+ page = get_page_from_gfn(d, xrfp.gpfn, &p2mt, P2M_ALLOC);
+ valid_pvh_pg = is_curr_pvh &&
+ (p2m_is_mmio(p2mt) || p2m_is_foreign(p2mt));
+
+ if ( page || valid_pvh_pg)
{
- guest_physmap_remove_page(d, xrfp.gpfn, page_to_mfn(page), 0);
- put_page(page);
+ argmfn = page ? page_to_mfn(page) : INVALID_MFN;
+
+ if ( is_curr_pvh && p2m_is_foreign(p2mt) )
+ {
+ foreign_mfn = mfn_x(get_gfn_query_unlocked(d, xrfp.gpfn, &tp));
+ foreign_dom = page_get_owner(mfn_to_page(foreign_mfn));
+ PVH_ASSERT(p2m_is_mmio(tp) || p2m_is_foreign(tp));
+ }
+
+ guest_physmap_remove_page(d, xrfp.gpfn, argmfn, 0);
+ if (page)
+ put_page(page);
+
+ /* if pages were mapped from foreign domain via
+ * xenmem_add_foreign_to_pmap(), we must drop a refcnt here */
+ if ( is_curr_pvh && p2m_is_foreign(p2mt) )
+ {
+ PVH_ASSERT( d != foreign_dom );
+ put_page(mfn_to_page(foreign_mfn));
+ }
}
else
+ {
+ if ( is_curr_pvh )
+ gdprintk(XENLOG_WARNING, "%s: Domain:%u gmfn:%lx invalid\n",
+ __func__, current->domain->domain_id, xrfp.gpfn);
rc = -ENOENT;
+ }
domain_unlock(d);
diff -r 31a145002453 -r 2c894340b16f xen/include/asm-x86/p2m.h
--- a/xen/include/asm-x86/p2m.h Fri Jan 11 16:38:07 2013 -0800
+++ b/xen/include/asm-x86/p2m.h Fri Jan 11 16:43:02 2013 -0800
@@ -70,6 +70,7 @@ typedef enum {
p2m_ram_paging_in = 11, /* Memory that is being paged in */
p2m_ram_shared = 12, /* Shared or sharable memory */
p2m_ram_broken = 13, /* Broken page, access cause domain crash */
+ p2m_map_foreign = 14, /* ram pages from foreign domain */
} p2m_type_t;
/*
@@ -180,6 +181,7 @@ typedef unsigned int p2m_query_t;
#define p2m_is_sharable(_t) (p2m_to_mask(_t) & P2M_SHARABLE_TYPES)
#define p2m_is_shared(_t) (p2m_to_mask(_t) & P2M_SHARED_TYPES)
#define p2m_is_broken(_t) (p2m_to_mask(_t) & P2M_BROKEN_TYPES)
+#define p2m_is_foreign(_t) (p2m_to_mask(_t) & p2m_to_mask(p2m_map_foreign))
/* Per-p2m-table state */
struct p2m_domain {
@@ -506,6 +508,8 @@ p2m_type_t p2m_change_type(struct domain
int set_mmio_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn);
int clear_mmio_p2m_entry(struct domain *d, unsigned long gfn);
+/* Set foreign mfn in the current guest's p2m table (for pvh dom0) */
+int set_foreign_p2m_entry(struct domain *domp, unsigned long gfn, mfn_t mfn);
/*
* Populate-on-demand
* Re: [RFC PATCH 13/16]: PVH xen: introduce p2m_map_foreign
From: Jan Beulich @ 2013-01-14 12:21 UTC
To: Mukesh Rathor; +Cc: xen-devel
>>> On 12.01.13 at 03:09, Mukesh Rathor <mukesh.rathor@oracle.com> wrote:
> --- a/xen/arch/x86/physdev.c Fri Jan 11 16:38:07 2013 -0800
> +++ b/xen/arch/x86/physdev.c Fri Jan 11 16:43:02 2013 -0800
> @@ -485,6 +485,7 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
>
> case PHYSDEVOP_set_iopl: {
> struct physdev_set_iopl set_iopl;
> + NO_PVH_ASSERT_VCPU(current);
> ret = -EFAULT;
> if ( copy_from_guest(&set_iopl, arg, 1) != 0 )
> break;
As in the previous patch - if execution can reach here for a PVH
guest, you must not assert this not being the case, but return
an error instead.
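Something along these lines would do (a sketch only; the exact errno,
-EINVAL here, is a judgement call):

    case PHYSDEVOP_set_iopl: {
        struct physdev_set_iopl set_iopl;

        /* Sketch: refuse the op for PVH vcpus instead of asserting. */
        if ( is_pvh_vcpu(current) )
        {
            ret = -EINVAL;
            break;
        }

        ret = -EFAULT;
        if ( copy_from_guest(&set_iopl, arg, 1) != 0 )
            break;
        /* ... rest of the case unchanged ... */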
Jan
* Re: [RFC PATCH 13/16]: PVH xen: introduce p2m_map_foreign
From: Tim Deegan @ 2013-01-24 17:18 UTC
To: Mukesh Rathor; +Cc: Xen-devel@lists.xensource.com
At 18:09 -0800 on 11 Jan (1357927784), Mukesh Rathor wrote:
> @@ -584,6 +584,11 @@ guest_physmap_add_entry(struct domain *d
> {
> ASSERT(mfn_valid(omfn));
> set_gpfn_from_mfn(mfn_x(omfn), INVALID_M2P_ENTRY);
> +
> + /* Because PVH domU uses kmalloc for grant pfn, we need to save
> + * and restore the old mfn */
> + if (is_pvh_domain(d) && p2m_is_grant(t))
> + free_domheap_page(mfn_to_page(omfn));
I think you'll need to explain this in more detail. The comment assumes
that the guest is running linux, which is worrying. And in any case you
can't just free_domheap_page() the guest's memory! What if another
domain has a reference to it?
> }
> else if ( ot == p2m_populate_on_demand )
> {
> @@ -715,7 +720,34 @@ void p2m_change_type_range(struct domain
> p2m_unlock(p2m);
> }
>
> +/* Returns: True for success. 0 for failure */
> +int set_foreign_p2m_entry(struct domain *dp, unsigned long gfn, mfn_t mfn)
> +{
> + int rc = 0;
> + p2m_type_t ot;
> + mfn_t omfn;
> + struct p2m_domain *p2m = p2m_get_hostp2m(dp);
>
> + if ( !paging_mode_translate(dp) )
> + return 0;
> +
> + omfn = get_gfn_query_unlocked(dp, gfn, &ot);
> + if (mfn_valid(omfn)) {
> + gdprintk(XENLOG_ERR, "Already mapped mfn %lx at gfn:%lx\n",
> + mfn_x(omfn), gfn);
Unless you hold a lock here, you can't rely on this check for safety;
two callers could be racing to set the same gfn.
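Something like the following, taking the lock before looking up the old
entry (a sketch following the set_mmio_p2m_entry() pattern; it assumes a
p2m_access_t local 'a'):

    p2m_lock(p2m);
    omfn = p2m->get_entry(p2m, gfn, &ot, &a, 0, NULL);
    if ( mfn_valid(omfn) )
    {
        gdprintk(XENLOG_ERR, "Already mapped mfn %lx at gfn:%lx\n",
                 mfn_x(omfn), gfn);
        set_gpfn_from_mfn(mfn_x(omfn), INVALID_M2P_ENTRY);
    }
    rc = set_p2m_entry(p2m, gfn, mfn, 0, p2m_map_foreign, p2m->default_access);
    p2m_unlock(p2m);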
> + set_gpfn_from_mfn(mfn_x(omfn), INVALID_M2P_ENTRY);
> + }
> +
> + P2M_DEBUG("set foreign %lx %lx\n", gfn, mfn_x(mfn));
> + p2m_lock(p2m);
> + rc = set_p2m_entry(p2m, gfn, mfn, 0, p2m_map_foreign, p2m->default_access);
> + p2m_unlock(p2m);
> + if ( rc == 0 )
> + gdprintk(XENLOG_ERR,
> + "set_foreign_p2m_entry: set_p2m_entry failed! gfn:%lx mfn=%08lx\n",
> + gfn, mfn_x(get_gfn_query(dp, gfn, &ot)));
> + return rc;
> +}
> diff -r 31a145002453 -r 2c894340b16f xen/arch/x86/physdev.c
> --- a/xen/arch/x86/physdev.c Fri Jan 11 16:38:07 2013 -0800
> +++ b/xen/arch/x86/physdev.c Fri Jan 11 16:43:02 2013 -0800
> @@ -485,6 +485,7 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
>
> case PHYSDEVOP_set_iopl: {
> struct physdev_set_iopl set_iopl;
> + NO_PVH_ASSERT_VCPU(current);
> ret = -EFAULT;
> if ( copy_from_guest(&set_iopl, arg, 1) != 0 )
> break;
> @@ -498,6 +499,7 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
>
> case PHYSDEVOP_set_iobitmap: {
> struct physdev_set_iobitmap set_iobitmap;
> + NO_PVH_ASSERT_VCPU(current);
> ret = -EFAULT;
> if ( copy_from_guest(&set_iobitmap, arg, 1) != 0 )
> break;
> @@ -738,7 +740,8 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
> struct domain *d = current->domain;
>
> ret = -EPERM;
> -
> + if ( !IS_PRIV(d) || !is_pvh_domain(d))
> + break;
That doesn't seem right! What constraints are you trying to implement
here? Mapping IO memory seems like it should be an IS_PRIV() thing
regardless of PVH-ness.
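In other words (sketch), keep it a pure privilege check:

    ret = -EPERM;
    /* Sketch: privileged callers only, regardless of PVH-ness. */
    if ( !IS_PRIV(d) )
        break;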
Tim.
* Re: [RFC PATCH 13/16]: PVH xen: introduce p2m_map_foreign
From: Mukesh Rathor @ 2013-02-01 2:38 UTC
To: Tim Deegan; +Cc: Xen-devel@lists.xensource.com
On Thu, 24 Jan 2013 17:18:15 +0000
Tim Deegan <tim@xen.org> wrote:
> At 18:09 -0800 on 11 Jan (1357927784), Mukesh Rathor wrote:
> > @@ -584,6 +584,11 @@ guest_physmap_add_entry(struct domain *d
> > {
> > ASSERT(mfn_valid(omfn));
> > set_gpfn_from_mfn(mfn_x(omfn), INVALID_M2P_ENTRY);
> > +
> > + /* Because PVH domU uses kmalloc for grant pfn, we need to save
> > + * and restore the old mfn */
> > + if (is_pvh_domain(d) && p2m_is_grant(t))
> > + free_domheap_page(mfn_to_page(omfn));
>
> I think you'll need to explain this in more detail. The comment
> assumes that the guest is running linux, which is worrying. And in
> any case you can't just free_domheap_page() the guest's memory! What
> if another domain has a reference to it?
OK, I fixed the Linux side so that instead of kmalloc it uses ballooning
to get pfn space for the grant table. That means there should not be
an omfn here. If there is, I think I should just fail the operation,
i.e. guest_physmap_add_entry(), right?
thanks,
Mukesh
* Re: [RFC PATCH 13/16]: PVH xen: introduce p2m_map_foreign
From: Tim Deegan @ 2013-02-07 10:56 UTC
To: Mukesh Rathor; +Cc: Xen-devel@lists.xensource.com
At 18:38 -0800 on 31 Jan (1359657504), Mukesh Rathor wrote:
> On Thu, 24 Jan 2013 17:18:15 +0000
> Tim Deegan <tim@xen.org> wrote:
>
> > At 18:09 -0800 on 11 Jan (1357927784), Mukesh Rathor wrote:
> > > @@ -584,6 +584,11 @@ guest_physmap_add_entry(struct domain *d
> > > {
> > > ASSERT(mfn_valid(omfn));
> > > set_gpfn_from_mfn(mfn_x(omfn), INVALID_M2P_ENTRY);
> > > +
> > > + /* Because PVH domU uses kmalloc for grant pfn, we need to save
> > > + * and restore the old mfn */
> > > + if (is_pvh_domain(d) && p2m_is_grant(t))
> > > + free_domheap_page(mfn_to_page(omfn));
> >
> > I think you'll need to explain this in more detail. The comment
> > assumes that the guest is running linux, which is worrying. And in
> > any case you can't just free_domheap_page() the guest's memory! What
> > if another domain has a reference to it?
>
> Ok, I fixed linux side so instead of kmalloc it uses ballooning to
> get pfn space for the grant table. That means there should not be
> an omfn here. If there is, I think I should just fail the operation,
> ie, guest_physmap_add_entry(), right?
Currently, guest_physmap_add_entry() just overwrites the old entry,
allowing the guest to leak it if it wants to. It's perhaps not the best
interface in the world, but I think PVH guests should get the same
treatment as HVM ones.
Tim.