From: Tamas K Lengyel
To: xen-devel@lists.xen.org
Cc: ian.campbell@citrix.com, tim@xen.org, julien.grall@linaro.org,
    ian.jackson@eu.citrix.com, stefano.stabellini@citrix.com,
    andres@lagarcavilla.org, jbeulich@suse.com, dgdegra@tycho.nsa.gov,
    Tamas K Lengyel
Subject: [PATCH for-4.5 v10 15/19] xen/arm: Temporarily disable mem_access for hypervisor access
Date: Thu, 25 Sep 2014 13:56:48 +0200
Message-ID: <1411646212-17041-16-git-send-email-tklengyel@sec.in.tum.de>
In-Reply-To: <1411646212-17041-1-git-send-email-tklengyel@sec.in.tum.de>
References: <1411646212-17041-1-git-send-email-tklengyel@sec.in.tum.de>

The guestcopy helpers use the MMU to verify that the guest has read/write
access to a given page during hypercalls. As custom mem_access permissions
may be set on these pages, we temporarily disable those restrictions so that
Xen can finish the hypercall. This is permissible because mem_access events
are only reported when the guest itself directly accesses protected memory,
which matches the behaviour on x86.

Signed-off-by: Tamas K Lengyel
---
 xen/arch/arm/guestcopy.c | 61 ++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 61 insertions(+)

diff --git a/xen/arch/arm/guestcopy.c b/xen/arch/arm/guestcopy.c
index 0173597..4aa041f 100644
--- a/xen/arch/arm/guestcopy.c
+++ b/xen/arch/arm/guestcopy.c
@@ -6,6 +6,43 @@
 
 #include <asm/mm.h>
 #include <asm/guest_access.h>
+#include <asm/p2m.h>
+
+/*
+ * Temporarily disable mem_access permission restrictions.
+ * Note: In the future, events generated by the hypervisor accessing
+ * protected memory regions could be added here.
+ */
+static long temp_disable_mem_access(vaddr_t gva, unsigned long *gfn,
+                                    xenmem_access_t *xma)
+{
+    long rc;
+    paddr_t gpa;
+
+    rc = gva_to_ipa((vaddr_t) gva, &gpa);
+    if ( rc < 0 )
+        return rc;
+
+    *gfn = paddr_to_pfn(gpa);
+
+    rc = p2m_get_mem_access(current->domain, *gfn, xma);
+    if ( rc < 0 )
+        return rc;
+
+    if ( *xma != XENMEM_access_rwx )
+        rc = p2m_set_mem_access(current->domain, *gfn, 1, 0, ~0,
+                                XENMEM_access_rwx);
+
+    return rc;
+}
+
+/* Re-enable mem_access for this page (if in use). */
+static inline
+void temp_reenable_mem_access(unsigned long gfn, xenmem_access_t xma)
+{
+    if ( xma != XENMEM_access_rwx )
+        p2m_set_mem_access(current->domain, gfn, 1, 0, ~0, xma);
+}
 
 static unsigned long raw_copy_to_guest_helper(void *to, const void *from,
                                               unsigned len, int flush_dcache)
@@ -18,6 +55,11 @@ static unsigned long raw_copy_to_guest_helper(void *to, const void *from,
         void *p;
         unsigned size = min(len, (unsigned)PAGE_SIZE - offset);
         struct page_info *page;
+        unsigned long gfn;
+        xenmem_access_t xma;
+
+        if ( temp_disable_mem_access((vaddr_t) to, &gfn, &xma) < 0 )
+            return len;
 
         page = get_page_from_gva(current->domain, (vaddr_t) to, GV2M_WRITE);
         if ( page == NULL )
@@ -34,6 +76,9 @@ static unsigned long raw_copy_to_guest_helper(void *to, const void *from,
         len -= size;
         from += size;
         to += size;
+
+        temp_reenable_mem_access(gfn, xma);
+
         /*
          * After the first iteration, guest virtual address is correctly
          * aligned to PAGE_SIZE.
@@ -65,6 +110,11 @@ unsigned long raw_clear_guest(void *to, unsigned len)
         void *p;
         unsigned size = min(len, (unsigned)PAGE_SIZE - offset);
         struct page_info *page;
+        unsigned long gfn;
+        xenmem_access_t xma;
+
+        if ( temp_disable_mem_access((vaddr_t) to, &gfn, &xma) < 0 )
+            return len;
 
         page = get_page_from_gva(current->domain, (vaddr_t) to, GV2M_WRITE);
         if ( page == NULL )
@@ -78,6 +128,9 @@ unsigned long raw_clear_guest(void *to, unsigned len)
         put_page(page);
         len -= size;
         to += size;
+
+        temp_reenable_mem_access(gfn, xma);
+
         /*
          * After the first iteration, guest virtual address is correctly
          * aligned to PAGE_SIZE.
@@ -97,6 +150,11 @@ unsigned long raw_copy_from_guest(void *to, const void __user *from, unsigned le
         void *p;
         unsigned size = min(len, (unsigned)(PAGE_SIZE - offset));
         struct page_info *page;
+        unsigned long gfn;
+        xenmem_access_t xma;
+
+        if ( temp_disable_mem_access((vaddr_t) from, &gfn, &xma) < 0 )
+            return len;
 
         page = get_page_from_gva(current->domain, (vaddr_t) from, GV2M_READ);
         if ( page == NULL )
@@ -112,6 +170,9 @@ unsigned long raw_copy_from_guest(void *to, const void __user *from, unsigned le
         len -= size;
         from += size;
         to += size;
+
+        temp_reenable_mem_access(gfn, xma);
+
         /*
          * After the first iteration, guest virtual address is correctly
          * aligned to PAGE_SIZE.
-- 
2.1.0
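
For context on when the new path is exercised: the situation this patch guards
against arises when a mem_access listener has already restricted a guest frame
and the guest then hands Xen a hypercall buffer that lives in that frame. Below
is a rough, hypothetical toolstack-side sketch (not part of this patch); it
assumes the existing libxc wrappers xc_interface_open()/xc_set_mem_access() and
leaves out the mem_event ring setup normally done via xc_mem_access_enable():

#include <stdio.h>
#include <stdint.h>
#include <xenctrl.h>

/* Restrict a single guest frame to read-only via mem_access. */
static int restrict_gfn(domid_t domid, uint64_t gfn)
{
    xc_interface *xch = xc_interface_open(NULL, NULL, 0);
    int rc;

    if ( !xch )
        return -1;

    /* Writes to this frame will now raise mem_access events. */
    rc = xc_set_mem_access(xch, domid, XENMEM_access_r, gfn, 1);
    if ( rc < 0 )
        fprintf(stderr, "xc_set_mem_access failed: %d\n", rc);

    xc_interface_close(xch);
    return rc;
}

Without the temporary lift to XENMEM_access_rwx in the guestcopy helpers, a
hypercall buffer placed in such a frame would fail the access check even
though the restriction was only meant to trap the guest's own accesses.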