From mboxrd@z Thu Jan 1 00:00:00 1970 From: Jean Guyader Subject: [PATCH 4/6] mm: New XENMEM space, XENMAPSPACE_gmfn_range Date: Wed, 16 Nov 2011 19:25:06 +0000 Message-ID: <1321471508-31633-5-git-send-email-jean.guyader@eu.citrix.com> References: <1321471508-31633-1-git-send-email-jean.guyader@eu.citrix.com> <1321471508-31633-2-git-send-email-jean.guyader@eu.citrix.com> <1321471508-31633-3-git-send-email-jean.guyader@eu.citrix.com> <1321471508-31633-4-git-send-email-jean.guyader@eu.citrix.com> Mime-Version: 1.0 Content-Type: multipart/mixed; boundary="------------true" Return-path: In-Reply-To: <1321471508-31633-4-git-send-email-jean.guyader@eu.citrix.com> List-Unsubscribe: , List-Post: List-Help: List-Subscribe: , Sender: xen-devel-bounces@lists.xensource.com Errors-To: xen-devel-bounces@lists.xensource.com To: xen-devel@lists.xensource.com Cc: tim@xen.org, allen.m.kay@intel.com, keir@xen.org, Jean Guyader , JBeulich@suse.com List-Id: xen-devel@lists.xenproject.org --------------true Content-Type: text/plain; charset="UTF-8"; format=fixed Content-Transfer-Encoding: 8bit XENMAPSPACE_gmfn_range is like XENMAPSPACE_gmfn but it runs on a range of pages. The size of the range is defined in a new field. This new field .size is located in the 16 bits padding between .domid and .space in struct xen_add_to_physmap to stay compatible with older versions. 
Signed-off-by: Jean Guyader --- xen/arch/x86/mm.c | 55 ++++++++++++++++++++++++++++++++++++--- xen/arch/x86/x86_64/compat/mm.c | 15 ++++++++++ xen/include/public/memory.h | 4 +++ 3 files changed, 70 insertions(+), 4 deletions(-) --------------true Content-Type: text/x-patch; name="0004-mm-New-XENMEM-space-XENMAPSPACE_gmfn_range.patch" Content-Transfer-Encoding: 8bit Content-Disposition: attachment; filename="0004-mm-New-XENMEM-space-XENMAPSPACE_gmfn_range.patch" diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c index f093e93..44a444e 100644 --- a/xen/arch/x86/mm.c +++ b/xen/arch/x86/mm.c @@ -4677,8 +4677,8 @@ static int handle_iomem_range(unsigned long s, unsigned long e, void *p) return 0; } -static int xenmem_add_to_physmap(struct domain *d, - const struct xen_add_to_physmap *xatp) +static int xenmem_add_to_physmap_once(struct domain *d, + const struct xen_add_to_physmap *xatp) { struct page_info *page = NULL; unsigned long gfn = 0; /* gcc ... */ @@ -4717,6 +4717,7 @@ static int xenmem_add_to_physmap(struct domain *d, spin_unlock(&d->grant_table->lock); break; + case XENMAPSPACE_gmfn_range: case XENMAPSPACE_gmfn: { p2m_type_t p2mt; @@ -4744,7 +4745,8 @@ static int xenmem_add_to_physmap(struct domain *d, { if ( page ) put_page(page); - if ( xatp->space == XENMAPSPACE_gmfn ) + if ( xatp->space == XENMAPSPACE_gmfn || + xatp->space == XENMAPSPACE_gmfn_range ) put_gfn(d, gfn); rcu_unlock_domain(d); return -EINVAL; @@ -4779,7 +4781,8 @@ static int xenmem_add_to_physmap(struct domain *d, rc = guest_physmap_add_page(d, xatp->gpfn, mfn, PAGE_ORDER_4K); /* In the XENMAPSPACE_gmfn, we took a ref and locked the p2m at the top */ - if ( xatp->space == XENMAPSPACE_gmfn ) + if ( xatp->space == XENMAPSPACE_gmfn || + xatp->space == XENMAPSPACE_gmfn_range ) put_gfn(d, gfn); domain_unlock(d); @@ -4788,6 +4791,37 @@ static int xenmem_add_to_physmap(struct domain *d, return rc; } +static int xenmem_add_to_physmap(struct domain *d, + struct xen_add_to_physmap *xatp) +{ + int rc = 0; 
+ + if ( xatp->space == XENMAPSPACE_gmfn_range ) + { + while ( xatp->size > 0 ) + { + rc = xenmem_add_to_physmap_once(d, xatp); + if ( rc < 0 ) + return rc; + + xatp->idx++; + xatp->gpfn++; + xatp->size--; + + /* Check for continuation if it's not the last iteration */ + if ( xatp->size > 0 && hypercall_preempt_check() ) + { + rc = -EAGAIN; + break; + } + } + + return rc; + } + + return xenmem_add_to_physmap_once(d, xatp); +} + long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg) { int rc; @@ -4816,6 +4850,19 @@ long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg) rcu_unlock_domain(d); + if ( xatp.space == XENMAPSPACE_gmfn_range ) + { + if ( rc ) + { + if ( copy_to_guest(arg, &xatp, 1) ) + return -EFAULT; + } + + if ( rc == -EAGAIN ) + rc = hypercall_create_continuation( + __HYPERVISOR_memory_op, "ih", op, arg); + } + return rc; } diff --git a/xen/arch/x86/x86_64/compat/mm.c b/xen/arch/x86/x86_64/compat/mm.c index 3ef08a5..bea94fe 100644 --- a/xen/arch/x86/x86_64/compat/mm.c +++ b/xen/arch/x86/x86_64/compat/mm.c @@ -64,6 +64,21 @@ int compat_arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg) XLAT_add_to_physmap(nat, &cmp); rc = arch_memory_op(op, guest_handle_from_ptr(nat, void)); + if ( cmp.space == XENMAPSPACE_gmfn_range ) + { + if ( rc ) + { + XLAT_add_to_physmap(&cmp, nat); + if ( copy_to_guest(arg, &cmp, 1) ) + { + hypercall_cancel_continuation(); + return -EFAULT; + } + } + if ( rc == __HYPERVISOR_memory_op ) + hypercall_xlat_continuation(NULL, 0x2, nat, arg); + } + break; } diff --git a/xen/include/public/memory.h b/xen/include/public/memory.h index 08355e3..c5b78a8 100644 --- a/xen/include/public/memory.h +++ b/xen/include/public/memory.h @@ -208,10 +208,14 @@ struct xen_add_to_physmap { /* Which domain to change the mapping for. */ domid_t domid; + /* Number of pages to go through for gmfn_range */ + uint16_t size; + /* Source mapping space. 
*/ #define XENMAPSPACE_shared_info 0 /* shared info page */ #define XENMAPSPACE_grant_table 1 /* grant table page */ #define XENMAPSPACE_gmfn 2 /* GMFN */ +#define XENMAPSPACE_gmfn_range 3 /* GMFN range */ unsigned int space; #define XENMAPIDX_grant_table_status 0x80000000 --------------true Content-Type: text/plain; charset="us-ascii" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit Content-Disposition: inline _______________________________________________ Xen-devel mailing list Xen-devel@lists.xensource.com http://lists.xensource.com/xen-devel --------------true--