From: Mukesh Rathor
Subject: Re: [V8 PATCH 0/8] pvh dom0....
Date: Fri, 28 Mar 2014 12:48:22 -0700
Message-ID: <20140328124822.5dacd217@mantra.us.oracle.com>
In-Reply-To: <5335B307.1070908@citrix.com>
References: <1395452357-1598-1-git-send-email-mukesh.rathor@oracle.com> <5335B307.1070908@citrix.com>
To: Roger Pau Monné
Cc: George.Dunlap@eu.citrix.com, xen-devel@lists.xenproject.org, keir.xen@gmail.com, tim@xen.org, JBeulich@suse.com
List-Id: xen-devel@lists.xenproject.org

On Fri, 28 Mar 2014 18:36:07 +0100
Roger Pau Monné wrote:

> On 22/03/14 02:39, Mukesh Rathor wrote:
> > Hi all,
> >
> > Finally, please find V8 of dom0 PVH patches based on commit bc69aaf.
> >
> > git tree: git://oss.oracle.com/git/mrathor/xen.git branch:
> > dom0pvh-v8
>
> Hello Mukesh,
>
> Thanks for the patches, do you have the Linux side of them? (I think
> the only missing bit is the support for XENMEM_add_to_physmap_range).

Attached and inlined below is what I have in my tree...

> Also, while testing them I've found that from time to time I would
> hit the following ASSERT on shutdown:

OK, I'll add that to my series.

Thanks Roger,
Mukesh

diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 256282e..9063d0a 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -2511,6 +2511,91 @@ void __init xen_hvm_init_mmu_ops(void)
 }
 #endif
 
+/*
+ * Map foreign gmfn, fgmfn, to local pfn, lpfn. This is for user space
+ * creating a new guest on pvh dom0 and needing to map domU pages.
+ */
+static int autox_add_to_p2m(unsigned long lpfn, unsigned long fgmfn,
+			    unsigned int domid)
+{
+	int rc, err = 0;
+	xen_pfn_t gpfn = lpfn;
+	xen_ulong_t idx = fgmfn;
+
+	struct xen_add_to_physmap_range xatp = {
+		.domid = DOMID_SELF,
+		.foreign_domid = domid,
+		.size = 1,
+		.space = XENMAPSPACE_gmfn_foreign,
+	};
+	set_xen_guest_handle(xatp.idxs, &idx);
+	set_xen_guest_handle(xatp.gpfns, &gpfn);
+	set_xen_guest_handle(xatp.errs, &err);
+
+	rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap_range, &xatp);
+	return rc;
+}
+
+static int autox_remove_from_p2m(unsigned long spfn, int count)
+{
+	struct xen_remove_from_physmap xrp;
+	int i, rc;
+
+	for (i = 0; i < count; i++) {
+		xrp.domid = DOMID_SELF;
+		xrp.gpfn = spfn+i;
+		rc = HYPERVISOR_memory_op(XENMEM_remove_from_physmap, &xrp);
+		if (rc)
+			break;
+	}
+	return rc;
+}
+
+struct autox_remap_data {
+	unsigned long fgmfn;	/* foreign domain's gmfn */
+	pgprot_t prot;
+	domid_t domid;
+	int index;
+	struct page **pages;
+};
+
+static int autox_map_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr,
+			    void *data)
+{
+	int rc;
+	struct autox_remap_data *remap = data;
+	unsigned long pfn = page_to_pfn(remap->pages[remap->index++]);
+	pte_t pteval = pte_mkspecial(pfn_pte(pfn, remap->prot));
+
+	rc = autox_add_to_p2m(pfn, remap->fgmfn, remap->domid);
+	if (rc)
+		return rc;
+	native_set_pte(ptep, pteval);
+
+	return 0;
+}
+
+static int autox_remap_gmfn_range(struct vm_area_struct *vma,
+				  unsigned long addr, unsigned long mfn,
+				  int nr, pgprot_t prot, unsigned domid,
+				  struct page **pages)
+{
+	int err;
+	struct autox_remap_data pvhdata;
+
+	BUG_ON(!pages);
+
+	pvhdata.fgmfn = mfn;
+	pvhdata.prot = prot;
+	pvhdata.domid = domid;
+	pvhdata.index = 0;
+	pvhdata.pages = pages;
+	err = apply_to_page_range(vma->vm_mm, addr, nr << PAGE_SHIFT,
+				  autox_map_pte_fn, &pvhdata);
+	flush_tlb_all();
+	return err;
+}
+
 #define REMAP_BATCH_SIZE 16
 
 struct remap_data {
@@ -2545,13 +2630,16 @@ int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
 	unsigned long range;
 	int err = 0;
 
-	if (xen_feature(XENFEAT_auto_translated_physmap))
-		return -EINVAL;
-
 	prot = __pgprot(pgprot_val(prot) | _PAGE_IOMAP);
 
 	BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));
 
+	if (xen_feature(XENFEAT_auto_translated_physmap)) {
+		/* We need to update the local page tables and the xen HAP */
+		return autox_remap_gmfn_range(vma, addr, mfn, nr, prot,
+					      domid, pages);
+	}
+
 	rmd.mfn = mfn;
 	rmd.prot = prot;
 
@@ -2589,6 +2677,18 @@ int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
 	if (!pages || !xen_feature(XENFEAT_auto_translated_physmap))
 		return 0;
 
-	return -EINVAL;
+	while (numpgs--) {
+
+		/* The mmu has already cleaned up the process mmu resources at
+		 * this point (lookup_address will return NULL). */
+		unsigned long pfn = page_to_pfn(pages[numpgs]);
+
+		autox_remove_from_p2m(pfn, 1);
+	}
+	/* We don't need to flush tlbs because as part of autox_remove_from_p2m,
+	 * the hypervisor will do tlb flushes after removing the p2m entries
+	 * from the EPT/NPT */
+
+	return 0;
 }
 EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range);
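
A note on the mapping helper: autox_add_to_p2m() above issues one
XENMEM_add_to_physmap_range hypercall per frame (.size = 1), but the
interface itself is batched; the size field and the idxs/gpfns/errs
handles can describe an array of frames. A rough, untested sketch of a
batched variant (the helper name autox_add_range_to_p2m and the parallel
idxs[]/gpfns[]/errs[] arrays are illustrative only, not part of the
patch) would look like:

static int autox_add_range_to_p2m(xen_pfn_t *gpfns, xen_ulong_t *idxs,
				  int *errs, unsigned int count,
				  unsigned int domid)
{
	/* One hypercall covering 'count' foreign frames; per-entry
	 * results are returned in errs[] for the caller to inspect. */
	struct xen_add_to_physmap_range xatp = {
		.domid = DOMID_SELF,
		.foreign_domid = domid,
		.size = count,
		.space = XENMAPSPACE_gmfn_foreign,
	};

	set_xen_guest_handle(xatp.idxs, idxs);
	set_xen_guest_handle(xatp.gpfns, gpfns);
	set_xen_guest_handle(xatp.errs, errs);

	return HYPERVISOR_memory_op(XENMEM_add_to_physmap_range, &xatp);
}

The per-pte callback driven by apply_to_page_range() only sees one pfn at
a time, which is why the posted code uses the single-frame form.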