From: Julien Grall <julien.grall@linaro.org>
To: xen-devel@lists.xen.org
Cc: sstabellini@kernel.org, Julien Grall <julien.grall@linaro.org>,
	andre.przywara@linaro.org
Subject: [PATCH for-next 06/16] xen/arm: Extend copy_to_guest to support copying from/to guest physical address
Date: Thu, 23 Nov 2017 18:32:00 +0000
Message-ID: <20171123183210.12045-7-julien.grall@linaro.org>
In-Reply-To: <20171123183210.12045-1-julien.grall@linaro.org>

The only differences between copy_to_guest and access_guest_memory_by_ipa are:
    - The latter does not support copying data that crosses a page
    boundary.
    - The former copies from/to a guest virtual address, whilst the
    latter uses a guest physical address.

copy_to_guest can easily be extended to support copying from/to a guest
physical address. For that, a new flag bit is used to tell whether a
linear (virtual) address or an IPA is being used, as sketched below.
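
Concretely (the flag values and the example call are taken from the
diff below; the explanatory comment is editorial):

    /* Bit 2 of the flags selects the translation to use:
     *   COPY_ipa    (0U << 2)  - 'addr' is a guest physical address (IPA)
     *   COPY_linear (1U << 2)  - 'addr' is a guest linear address
     * The linear-address wrappers now pass the extra flag, e.g.: */
    return copy_guest((void *)from, (unsigned long)to, len,
                      current, COPY_to_guest | COPY_linear);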

Lastly, access_guest_memory_by_ipa is reimplemented using copy_guest.
This also has the benefit of widening its use: it is now possible to
copy data crossing a page boundary.
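
For reference, the heart of the rework, abridged from the diff below:

    /* Any vCPU of the domain will do: the P2M is shared between all
     * vCPUs, so the translation does not depend on which one is used. */
    left = copy_guest(buf, gpa, size, d->vcpu[0],
                      COPY_ipa | (is_write ? COPY_to_guest : COPY_from_guest));
    return left ? -EINVAL : 0;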

Signed-off-by: Julien Grall <julien.grall@linaro.org>
---
 xen/arch/arm/guestcopy.c | 86 ++++++++++++++++++++++--------------------------
 1 file changed, 39 insertions(+), 47 deletions(-)

diff --git a/xen/arch/arm/guestcopy.c b/xen/arch/arm/guestcopy.c
index 487f5ab82d..be53bee559 100644
--- a/xen/arch/arm/guestcopy.c
+++ b/xen/arch/arm/guestcopy.c
@@ -8,6 +8,31 @@
 #define COPY_flush_dcache   (1U << 0)
 #define COPY_from_guest     (0U << 1)
 #define COPY_to_guest       (1U << 1)
+#define COPY_ipa            (0U << 2)
+#define COPY_linear         (1U << 2)
+
+static struct page_info *translate_get_page(struct vcpu *v, paddr_t addr,
+                                            bool linear, bool write)
+{
+    p2m_type_t p2mt;
+    struct page_info *page;
+
+    if ( linear )
+        return get_page_from_gva(v, addr, write ? GV2M_WRITE : GV2M_READ);
+
+    page = get_page_from_gfn(v->domain, paddr_to_pfn(addr), &p2mt, P2M_ALLOC);
+
+    if ( !page )
+        return NULL;
+
+    if ( !p2m_is_ram(p2mt) )
+    {
+        put_page(page);
+        return NULL;
+    }
+
+    return page;
+}
 
 static unsigned long copy_guest(void *buf, paddr_t addr, unsigned int len,
                                 struct vcpu *v, unsigned int flags)
@@ -21,8 +46,8 @@ static unsigned long copy_guest(void *buf, paddr_t addr, unsigned int len,
         unsigned size = min(len, (unsigned)PAGE_SIZE - offset);
         struct page_info *page;
 
-        page = get_page_from_gva(v, addr,
-                                 (flags & COPY_to_guest) ? GV2M_WRITE : GV2M_READ);
+        page = translate_get_page(v, addr, flags & COPY_linear,
+                                  flags & COPY_to_guest);
         if ( page == NULL )
             return len;
 
@@ -63,73 +88,40 @@ static unsigned long copy_guest(void *buf, paddr_t addr, unsigned int len,
 unsigned long raw_copy_to_guest(void *to, const void *from, unsigned len)
 {
     return copy_guest((void *)from, (unsigned long)to, len,
-                      current, COPY_to_guest);
+                      current, COPY_to_guest | COPY_linear);
 }
 
 unsigned long raw_copy_to_guest_flush_dcache(void *to, const void *from,
                                              unsigned len)
 {
     return copy_guest((void *)from, (unsigned long)to, len,
-                      current, COPY_to_guest | COPY_flush_dcache);
+                      current, COPY_to_guest | COPY_flush_dcache | COPY_linear);
 }
 
 unsigned long raw_clear_guest(void *to, unsigned len)
 {
-    return copy_guest(NULL, (unsigned long)to, len, current, COPY_to_guest);
+    return copy_guest(NULL, (unsigned long)to, len, current,
+                      COPY_to_guest | COPY_linear);
 }
 
 unsigned long raw_copy_from_guest(void *to, const void __user *from, unsigned len)
 {
-    return copy_guest(to, (unsigned long)from, len, current, COPY_from_guest);
+    return copy_guest(to, (unsigned long)from, len, current,
+                      COPY_from_guest | COPY_linear);
 }
 
-/*
- * Temporarily map one physical guest page and copy data to or from it.
- * The data to be copied cannot cross a page boundary.
- */
 int access_guest_memory_by_ipa(struct domain *d, paddr_t gpa, void *buf,
                                uint32_t size, bool is_write)
 {
-    struct page_info *page;
-    uint64_t offset = gpa & ~PAGE_MASK;  /* Offset within the mapped page */
-    p2m_type_t p2mt;
-    void *p;
-
-    /* Do not cross a page boundary. */
-    if ( size > (PAGE_SIZE - offset) )
-    {
-        printk(XENLOG_G_ERR "d%d: guestcopy: memory access crosses page boundary.\n",
-               d->domain_id);
-        return -EINVAL;
-    }
-
-    page = get_page_from_gfn(d, paddr_to_pfn(gpa), &p2mt, P2M_ALLOC);
-    if ( !page )
-    {
-        printk(XENLOG_G_ERR "d%d: guestcopy: failed to get table entry.\n",
-               d->domain_id);
-        return -EINVAL;
-    }
-
-    if ( !p2m_is_ram(p2mt) )
-    {
-        put_page(page);
-        printk(XENLOG_G_ERR "d%d: guestcopy: guest memory should be RAM.\n",
-               d->domain_id);
-        return -EINVAL;
-    }
+    unsigned long left;
+    int flags = COPY_ipa;
 
-    p = __map_domain_page(page);
+    flags |= is_write ? COPY_to_guest : COPY_from_guest;
 
-    if ( is_write )
-        memcpy(p + offset, buf, size);
-    else
-        memcpy(buf, p + offset, size);
+    /* P2M is shared between all vCPUs, so the vcpu used does not matter. */
+    left = copy_guest(buf, gpa, size, d->vcpu[0], flags);
 
-    unmap_domain_page(p);
-    put_page(page);
-
-    return 0;
+    return (!left) ? 0 : -EINVAL;
 }
 
 /*
-- 
2.11.0
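
As a usage illustration (the variables around the call are hypothetical,
not part of this patch), a caller can now access a range straddling a
page boundary in a single call:

    /* Hypothetical caller: write 16 bytes at a guest physical address
     * that may straddle a page boundary. Before this patch, such an
     * access was rejected with -EINVAL. */
    uint8_t data[16] = { 0 };
    int rc = access_guest_memory_by_ipa(d, gpa, data, sizeof(data),
                                        true /* is_write */);
    if ( rc )
        return rc;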

