xen-devel.lists.xenproject.org archive mirror
 help / color / mirror / Atom feed
From: Mukesh Rathor <mukesh.rathor@oracle.com>
To: "Roger Pau Monné" <roger.pau@citrix.com>
Cc: George.Dunlap@eu.citrix.com, xen-devel@lists.xenproject.org,
	keir.xen@gmail.com, tim@xen.org, JBeulich@suse.com
Subject: Re: [V8 PATCH 0/8] pvh dom0....
Date: Fri, 28 Mar 2014 12:48:22 -0700	[thread overview]
Message-ID: <20140328124822.5dacd217@mantra.us.oracle.com> (raw)
In-Reply-To: <5335B307.1070908@citrix.com>

[-- Attachment #1: Type: text/plain, Size: 4406 bytes --]

On Fri, 28 Mar 2014 18:36:07 +0100
Roger Pau Monné <roger.pau@citrix.com> wrote:

> On 22/03/14 02:39, Mukesh Rathor wrote:
> > Hi all,
> > 
> > Finally, please find V8 of dom0 PVH patches based on commit bc69aaf.
> > 
> >   git tree: git://oss.oracle.com/git/mrathor/xen.git  branch:
> > dom0pvh-v8
> 
> Hello Mukesh,
> 
> Thanks for the patches, do you have the Linux side of them? (I think 
> the only missing bit is the support for XENMEM_add_to_physmap_range).

Attached and inlined below is what I have in my tree...

> Also, while testing them I've found that from time to time I would
> hit the following ASSERT on shutdown:

Ok, I'll add that to my series.

thanks Roger,
Mukesh



diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 256282e..9063d0a 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -2511,6 +2511,91 @@ void __init xen_hvm_init_mmu_ops(void)
 }
 #endif
 
+/* 
+ * Map foreign gmfn, fgmfn, to local pfn, lpfn. This for the user space
+ * creating new guest on pvh dom0 and needing to map domU pages.
+ */
+static int autox_add_to_p2m(unsigned long lpfn, unsigned long fgmfn,
+			    unsigned int domid)
+{
+	int rc, err = 0;
+	xen_pfn_t gpfn = lpfn;
+	xen_ulong_t idx = fgmfn;
+
+	struct xen_add_to_physmap_range xatp = {
+		.domid = DOMID_SELF,
+		.foreign_domid = domid,
+		.size = 1,
+		.space = XENMAPSPACE_gmfn_foreign,
+	};
+	set_xen_guest_handle(xatp.idxs, &idx);
+	set_xen_guest_handle(xatp.gpfns, &gpfn);
+	set_xen_guest_handle(xatp.errs, &err);
+
+	rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap_range, &xatp);
+	return rc;
+}
+
+static int autox_remove_from_p2m(unsigned long spfn, int count)
+{
+	struct xen_remove_from_physmap xrp;
+	int i, rc;
+
+	for (i = 0; i < count; i++) {
+		xrp.domid = DOMID_SELF;
+		xrp.gpfn = spfn+i;
+		rc = HYPERVISOR_memory_op(XENMEM_remove_from_physmap, &xrp);
+		if (rc)
+			break;
+	}
+	return rc;
+}
+
+struct autox_remap_data {
+	unsigned long fgmfn; /* foreign domain's gmfn */
+	pgprot_t prot;
+	domid_t  domid;
+	int index;
+	struct page **pages;
+};
+
+static int autox_map_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr,
+			    void *data)
+{
+	int rc;
+	struct autox_remap_data *remap = data;
+	unsigned long pfn = page_to_pfn(remap->pages[remap->index++]);
+	pte_t pteval = pte_mkspecial(pfn_pte(pfn, remap->prot));
+
+	rc = autox_add_to_p2m(pfn, remap->fgmfn, remap->domid);
+	if (rc)
+		return rc;
+	native_set_pte(ptep, pteval);
+
+	return 0;
+}
+
+static int autox_remap_gmfn_range(struct vm_area_struct *vma,
+				  unsigned long addr, unsigned long mfn,
+				  int nr, pgprot_t prot, unsigned domid,
+				  struct page **pages)
+{
+	int err;
+	struct autox_remap_data pvhdata;
+
+	BUG_ON(!pages);
+
+	pvhdata.fgmfn = mfn;
+	pvhdata.prot = prot;
+	pvhdata.domid = domid;
+	pvhdata.index = 0;
+	pvhdata.pages = pages;
+	err = apply_to_page_range(vma->vm_mm, addr, nr << PAGE_SHIFT,
+				  autox_map_pte_fn, &pvhdata);
+	flush_tlb_all();
+	return err;
+}
+
 #define REMAP_BATCH_SIZE 16
 
 struct remap_data {
@@ -2545,13 +2630,16 @@ int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
 	unsigned long range;
 	int err = 0;
 
-	if (xen_feature(XENFEAT_auto_translated_physmap))
-		return -EINVAL;
-
 	prot = __pgprot(pgprot_val(prot) | _PAGE_IOMAP);
 
 	BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));
 
+	if (xen_feature(XENFEAT_auto_translated_physmap)) {
+		/* We need to update the local page tables and the xen HAP */
+		return autox_remap_gmfn_range(vma, addr, mfn, nr, prot,
+					      domid, pages);
+	}
+
 	rmd.mfn = mfn;
 	rmd.prot = prot;
 
@@ -2589,6 +2677,18 @@ int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
 	if (!pages || !xen_feature(XENFEAT_auto_translated_physmap))
 		return 0;
 
-	return -EINVAL;
+	while (numpgs--) {
+
+		/* The mmu has already cleaned up the process mmu resources at
+		 * this point (lookup_address will return NULL). */
+		unsigned long pfn = page_to_pfn(pages[numpgs]);
+
+		autox_remove_from_p2m(pfn, 1);
+	}
+	/* We don't need to flush tlbs because as part of autox_remove_from_p2m,
+	 * the hypervisor will do tlb flushes after removing the p2m entries
+	 * from the EPT/NPT */
+
+	return 0;
 }
 EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range);

[-- Attachment #2: linux.patch --]
[-- Type: text/x-patch, Size: 3554 bytes --]

diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 256282e..9063d0a 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -2511,6 +2511,91 @@ void __init xen_hvm_init_mmu_ops(void)
 }
 #endif
 
+/* 
+ * Map foreign gmfn, fgmfn, to local pfn, lpfn. This for the user space
+ * creating new guest on pvh dom0 and needing to map domU pages.
+ */
+static int autox_add_to_p2m(unsigned long lpfn, unsigned long fgmfn,
+			    unsigned int domid)
+{
+	int rc, err = 0;
+	xen_pfn_t gpfn = lpfn;
+	xen_ulong_t idx = fgmfn;
+
+	struct xen_add_to_physmap_range xatp = {
+		.domid = DOMID_SELF,
+		.foreign_domid = domid,
+		.size = 1,
+		.space = XENMAPSPACE_gmfn_foreign,
+	};
+	set_xen_guest_handle(xatp.idxs, &idx);
+	set_xen_guest_handle(xatp.gpfns, &gpfn);
+	set_xen_guest_handle(xatp.errs, &err);
+
+	rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap_range, &xatp);
+	return rc;
+}
+
+static int autox_remove_from_p2m(unsigned long spfn, int count)
+{
+	struct xen_remove_from_physmap xrp;
+	int i, rc;
+
+	for (i = 0; i < count; i++) {
+		xrp.domid = DOMID_SELF;
+		xrp.gpfn = spfn+i;
+		rc = HYPERVISOR_memory_op(XENMEM_remove_from_physmap, &xrp);
+		if (rc)
+			break;
+	}
+	return rc;
+}
+
+struct autox_remap_data {
+	unsigned long fgmfn; /* foreign domain's gmfn */
+	pgprot_t prot;
+	domid_t  domid;
+	int index;
+	struct page **pages;
+};
+
+static int autox_map_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr,
+			    void *data)
+{
+	int rc;
+	struct autox_remap_data *remap = data;
+	unsigned long pfn = page_to_pfn(remap->pages[remap->index++]);
+	pte_t pteval = pte_mkspecial(pfn_pte(pfn, remap->prot));
+
+	rc = autox_add_to_p2m(pfn, remap->fgmfn, remap->domid);
+	if (rc)
+		return rc;
+	native_set_pte(ptep, pteval);
+
+	return 0;
+}
+
+static int autox_remap_gmfn_range(struct vm_area_struct *vma,
+				  unsigned long addr, unsigned long mfn,
+				  int nr, pgprot_t prot, unsigned domid,
+				  struct page **pages)
+{
+	int err;
+	struct autox_remap_data pvhdata;
+
+	BUG_ON(!pages);
+
+	pvhdata.fgmfn = mfn;
+	pvhdata.prot = prot;
+	pvhdata.domid = domid;
+	pvhdata.index = 0;
+	pvhdata.pages = pages;
+	err = apply_to_page_range(vma->vm_mm, addr, nr << PAGE_SHIFT,
+				  autox_map_pte_fn, &pvhdata);
+	flush_tlb_all();
+	return err;
+}
+
 #define REMAP_BATCH_SIZE 16
 
 struct remap_data {
@@ -2545,13 +2630,16 @@ int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
 	unsigned long range;
 	int err = 0;
 
-	if (xen_feature(XENFEAT_auto_translated_physmap))
-		return -EINVAL;
-
 	prot = __pgprot(pgprot_val(prot) | _PAGE_IOMAP);
 
 	BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));
 
+	if (xen_feature(XENFEAT_auto_translated_physmap)) {
+		/* We need to update the local page tables and the xen HAP */
+		return autox_remap_gmfn_range(vma, addr, mfn, nr, prot,
+					      domid, pages);
+	}
+
 	rmd.mfn = mfn;
 	rmd.prot = prot;
 
@@ -2589,6 +2677,18 @@ int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
 	if (!pages || !xen_feature(XENFEAT_auto_translated_physmap))
 		return 0;
 
-	return -EINVAL;
+	while (numpgs--) {
+
+		/* The mmu has already cleaned up the process mmu resources at
+		 * this point (lookup_address will return NULL). */
+		unsigned long pfn = page_to_pfn(pages[numpgs]);
+
+		autox_remove_from_p2m(pfn, 1);
+	}
+	/* We don't need to flush tlbs because as part of autox_remove_from_p2m,
+	 * the hypervisor will do tlb flushes after removing the p2m entries
+	 * from the EPT/NPT */
+
+	return 0;
 }
 EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range);

[-- Attachment #3: Type: text/plain, Size: 126 bytes --]

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel

  reply	other threads:[~2014-03-28 19:48 UTC|newest]

Thread overview: 51+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2014-03-22  1:39 [V8 PATCH 0/8] pvh dom0 Mukesh Rathor
2014-03-22  1:39 ` [V8 PATCH 1/8] pvh dom0: move some pv specific code to static functions Mukesh Rathor
2014-03-22  1:39 ` [V8 PATCH 2/8] pvh dom0: construct_dom0 changes Mukesh Rathor
2014-03-26 19:05   ` George Dunlap
2014-03-27 10:14     ` Jan Beulich
2014-03-27 10:55       ` George Dunlap
2014-03-27 11:03         ` George Dunlap
2014-03-27 15:04         ` Jan Beulich
2014-03-27 15:30           ` Tim Deegan
2014-04-05  0:53             ` Mukesh Rathor
2014-04-07  7:30               ` Jan Beulich
2014-04-07  9:27               ` George Dunlap
2014-03-22  1:39 ` [V8 PATCH 3/8] pvh dom0: Introduce p2m_map_foreign Mukesh Rathor
2014-03-24  9:00   ` Jan Beulich
2014-03-27 12:29   ` George Dunlap
2014-04-05  0:57     ` Mukesh Rathor
2014-03-22  1:39 ` [V8 PATCH 4/8] pvh dom0: make xsm_map_gmfn_foreign available for x86 Mukesh Rathor
2014-03-25 17:53   ` Daniel De Graaf
2014-03-22  1:39 ` [V8 PATCH 5/8] pvh dom0: Add and remove foreign pages Mukesh Rathor
2014-03-24  9:26   ` Jan Beulich
2014-04-05  1:17     ` Mukesh Rathor
2014-04-07  6:57       ` Jan Beulich
2014-04-08  1:11         ` Mukesh Rathor
2014-04-08  7:36           ` Jan Beulich
2014-04-08 14:01             ` Tim Deegan
2014-04-08 14:07               ` Jan Beulich
2014-04-08 14:18                 ` Tim Deegan
2014-04-08 15:40                   ` George Dunlap
2014-04-11  1:33     ` Mukesh Rathor
2014-04-11  8:02       ` Jan Beulich
2014-03-22  1:39 ` [V8 PATCH 6/8] pvh dom0: allow get_pg_owner for translated domains Mukesh Rathor
2014-03-24  9:31   ` Jan Beulich
2014-04-01 14:31     ` George Dunlap
2014-04-05  0:59       ` Mukesh Rathor
2014-03-22  1:39 ` [V8 PATCH 7/8] pvh dom0: add check for pvh in vioapic_range Mukesh Rathor
2014-03-24  9:34   ` Jan Beulich
2014-04-01 14:40     ` George Dunlap
2014-04-01 15:09       ` Jan Beulich
2014-04-05  1:00         ` Mukesh Rathor
2014-04-07  6:59           ` Jan Beulich
2014-04-07  9:28             ` George Dunlap
2014-04-08  1:00               ` Mukesh Rathor
2014-04-08  8:21                 ` Jan Beulich
2014-03-22  1:39 ` [V8 PATCH 8/8] pvh dom0: add opt_dom0pvh to setup.c Mukesh Rathor
2014-03-24  9:35   ` Jan Beulich
2014-03-24  8:57 ` [V8 PATCH 0/8] pvh dom0 Jan Beulich
2014-03-24 21:36   ` Mukesh Rathor
2014-03-28 17:36 ` Roger Pau Monné
2014-03-28 19:48   ` Mukesh Rathor [this message]
2014-04-01 16:04 ` George Dunlap
2014-04-02  1:22   ` Mukesh Rathor

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20140328124822.5dacd217@mantra.us.oracle.com \
    --to=mukesh.rathor@oracle.com \
    --cc=George.Dunlap@eu.citrix.com \
    --cc=JBeulich@suse.com \
    --cc=keir.xen@gmail.com \
    --cc=roger.pau@citrix.com \
    --cc=tim@xen.org \
    --cc=xen-devel@lists.xenproject.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).