From: Jaeyong Yoo <jaeyong.yoo@samsung.com>
To: xen-devel@lists.xen.org
Cc: Evgeny Fedotov <e.fedotov@samsung.com>
Subject: [PATCH v3 03/10] xen/arm: Implement set_memory_map hypercall
Date: Thu, 01 Aug 2013 21:57:46 +0900
Message-ID: <1375361873-32145-4-git-send-email-jaeyong.yoo@samsung.com>
In-Reply-To: <1375361873-32145-1-git-send-email-jaeyong.yoo@samsung.com>
From: Evgeny Fedotov <e.fedotov@samsung.com>
When creating a domU, the toolstack passes the guest memory map to the
hypervisor, which stores it in arch_domain for later use.
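To illustrate the intended round trip through the two new hypercalls,
here is a minimal sketch of a hypothetical toolstack caller (not part
of this patch; the function name, domid, RAM base and sizes are all
made up for illustration):

    #include <inttypes.h>
    #include <stdio.h>
    #include <xenctrl.h>

    /* Illustrative only: set a two-bank map (RAM + magic pages) and
     * read it back. All addresses/sizes below are assumptions. */
    static int demo_memory_map(xc_interface *xch, uint32_t domid)
    {
        struct dt_mem_info map, readback;
        int i, rc;

        map.nr_banks = 2;
        map.bank[0].start = 0x80000000ULL;  /* assumed RAM base */
        map.bank[0].size  = 128ULL << 20;   /* assumed 128 MiB of RAM */
        map.bank[1].start = map.bank[0].start + map.bank[0].size;
        map.bank[1].size  = 4ULL << 12;     /* assumed 4 magic pages */

        rc = xc_domain_set_memory_map(xch, domid, &map);
        if ( rc )                           /* XENMEM_set_memory_map */
            return rc;

        rc = xc_domain_get_memory_map(xch, domid, &readback);
        if ( rc )                           /* XENMEM_memory_map */
            return rc;

        for ( i = 0; i < readback.nr_banks; i++ )
            printf("bank %d: 0x%" PRIx64 " + 0x%" PRIx64 "\n",
                   i, readback.bank[i].start, readback.bank[i].size);
        return 0;
    }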
Signed-off-by: Evgeny Fedotov <e.fedotov@samsung.com>
---
tools/libxc/xc_dom_arm.c | 12 +++++++-
tools/libxc/xc_domain.c | 44 ++++++++++++++++++++++++++++
tools/libxc/xenctrl.h | 23 +++++++++++++++
xen/arch/arm/domain.c | 3 ++
xen/arch/arm/mm.c | 68 ++++++++++++++++++++++++++++++++++++++++++++
xen/include/asm-arm/domain.h | 2 ++
xen/include/asm-arm/mm.h | 1 +
xen/include/public/memory.h | 15 ++++++++--
xen/include/xsm/dummy.h | 5 ++++
xen/include/xsm/xsm.h | 5 ++++
10 files changed, 175 insertions(+), 3 deletions(-)
diff --git a/tools/libxc/xc_dom_arm.c b/tools/libxc/xc_dom_arm.c
index df59ffb..20c9095 100644
--- a/tools/libxc/xc_dom_arm.c
+++ b/tools/libxc/xc_dom_arm.c
@@ -166,6 +166,7 @@ int arch_setup_meminit(struct xc_dom_image *dom)
{
int rc;
xen_pfn_t pfn, allocsz, i;
+ struct dt_mem_info memmap;
dom->shadow_enabled = 1;
@@ -191,7 +192,16 @@ int arch_setup_meminit(struct xc_dom_image *dom)
0, 0, &dom->p2m_host[i]);
}
- return 0;
+ /* setup guest memory map */
+ memmap.nr_banks = 2;
+ memmap.bank[0].start = (dom->rambase_pfn << PAGE_SHIFT_ARM);
+ memmap.bank[0].size = (dom->total_pages << PAGE_SHIFT_ARM);
+ /* The end of main memory: magic pages */
+ memmap.bank[1].start = memmap.bank[0].start + memmap.bank[0].size;
+ memmap.bank[1].size = NR_MAGIC_PAGES << PAGE_SHIFT_ARM;
+
+ return xc_domain_set_memory_map(dom->xch, dom->guest_domid, &memmap);
+
}
int arch_setup_bootearly(struct xc_dom_image *dom)
diff --git a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c
index 3257e2a..10627f7 100644
--- a/tools/libxc/xc_domain.c
+++ b/tools/libxc/xc_domain.c
@@ -644,7 +644,51 @@ int xc_domain_set_memmap_limit(xc_interface *xch,
return -1;
}
#endif
+#if defined(__arm__)
+int xc_domain_get_memory_map(xc_interface *xch,
+ uint32_t domid,
+ struct dt_mem_info *map)
+{
+ int rc;
+ struct xen_arm_memory_map fmap = {
+ .domid = domid
+ };
+
+ DECLARE_HYPERCALL_BOUNCE(map, sizeof(struct dt_mem_info),
+ XC_HYPERCALL_BUFFER_BOUNCE_OUT);
+
+ if ( !map || xc_hypercall_bounce_pre(xch, map) )
+ return -1;
+ set_xen_guest_handle(fmap.buffer, map);
+ rc = do_memory_op(xch, XENMEM_memory_map, &fmap, sizeof(fmap));
+
+ xc_hypercall_bounce_post(xch, map);
+
+ return rc;
+}
+
+int xc_domain_set_memory_map(xc_interface *xch,
+ uint32_t domid,
+ struct dt_mem_info *map)
+{
+ int rc;
+ struct xen_arm_memory_map fmap = {
+ .domid = domid
+ };
+ DECLARE_HYPERCALL_BOUNCE(map, sizeof(struct dt_mem_info),
+ XC_HYPERCALL_BUFFER_BOUNCE_IN);
+
+ if ( !map || xc_hypercall_bounce_pre(xch, map) )
+ return -1;
+ set_xen_guest_handle(fmap.buffer, map);
+
+ rc = do_memory_op(xch, XENMEM_set_memory_map, &fmap, sizeof(fmap));
+
+ xc_hypercall_bounce_post(xch, map);
+ return rc;
+}
+#endif
int xc_domain_set_time_offset(xc_interface *xch,
uint32_t domid,
int32_t time_offset_seconds)
diff --git a/tools/libxc/xenctrl.h b/tools/libxc/xenctrl.h
index 388a9c3..e12d49c 100644
--- a/tools/libxc/xenctrl.h
+++ b/tools/libxc/xenctrl.h
@@ -1110,6 +1110,29 @@ int xc_get_machine_memory_map(xc_interface *xch,
struct e820entry entries[],
uint32_t max_entries);
#endif
+
+#if defined(__arm__)
+#define NR_MEM_BANKS 8
+typedef uint64_t paddr_t;
+
+struct membank {
+ paddr_t start;
+ paddr_t size;
+};
+
+struct dt_mem_info {
+ int nr_banks;
+ struct membank bank[NR_MEM_BANKS];
+};
+
+int xc_domain_set_memory_map(xc_interface *xch,
+ uint32_t domid,
+ struct dt_mem_info *map);
+int xc_domain_get_memory_map(xc_interface *xch,
+ uint32_t domid,
+ struct dt_mem_info *map);
+#endif
+
int xc_domain_set_time_offset(xc_interface *xch,
uint32_t domid,
int32_t time_offset_seconds);
diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c
index 4fab443..e9cfc81 100644
--- a/xen/arch/arm/domain.c
+++ b/xen/arch/arm/domain.c
@@ -509,6 +509,9 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags)
/* Default the virtual ID to match the physical */
d->arch.vpidr = boot_cpu_data.midr.bits;
+ spin_lock_init(&d->arch.map_lock);
+ d->arch.map_domain.nr_banks = 0;
+
clear_page(d->shared_info);
share_xen_page_with_guest(
virt_to_page(d->shared_info), d, XENSHARE_writable);
diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index f301e65..3c83447 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -998,6 +998,74 @@ long arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
return rc;
}
+ case XENMEM_set_memory_map:
+ {
+ struct xen_arm_memory_map fmap;
+ struct domain *d;
+ struct dt_mem_info info;
+
+ if ( copy_from_guest(&fmap, arg, 1) )
+ return -EFAULT;
+
+ if ( copy_from_guest(&info, fmap.buffer, 1) )
+ {
+ return -EFAULT;
+ }
+
+ if ( info.nr_banks < 0 || info.nr_banks > NR_MEM_BANKS )
+ return -EINVAL;
+
+ d = rcu_lock_domain_by_any_id(fmap.domid);
+ if ( d == NULL )
+ return -ESRCH;
+
+ rc = xsm_domain_memory_map(XSM_TARGET, d);
+ if ( rc )
+ {
+ rcu_unlock_domain(d);
+ return rc;
+ }
+ spin_lock(&d->arch.map_lock);
+ d->arch.map_domain = info;
+ spin_unlock(&d->arch.map_lock);
+
+ rcu_unlock_domain(d);
+ return rc;
+ }
+
+ case XENMEM_memory_map:
+ {
+ /* get the domain's memory map as it was stored */
+ struct xen_arm_memory_map fmap;
+ struct domain *d;
+ struct dt_mem_info info;
+
+ if ( copy_from_guest(&fmap, arg, 1) )
+ return -EFAULT;
+
+ d = rcu_lock_domain_by_any_id(fmap.domid);
+ if ( d == NULL )
+ return -ESRCH;
+
+ spin_lock(&d->arch.map_lock);
+ info = d->arch.map_domain;
+ spin_unlock(&d->arch.map_lock);
+
+ if ( copy_to_guest(fmap.buffer, &info, 1) )
+ {
+ rcu_unlock_domain(d);
+ return -EFAULT;
+ }
+
+ if ( copy_to_guest(arg, &fmap, 1) )
+ {
+ rcu_unlock_domain(d);
+ return -EFAULT;
+ }
+
+ rcu_unlock_domain(d);
+ return 0;
+ }
/* XXX: memsharing not working yet */
case XENMEM_get_sharing_shared_pages:
case XENMEM_get_sharing_freed_pages:
diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
index 89f88f6..0c80c65 100644
--- a/xen/include/asm-arm/domain.h
+++ b/xen/include/asm-arm/domain.h
@@ -110,6 +110,8 @@ struct arch_domain
spinlock_t lock;
} uart0;
+ struct dt_mem_info map_domain;
+ spinlock_t map_lock;
} __cacheline_aligned;
struct arch_vcpu
diff --git a/xen/include/asm-arm/mm.h b/xen/include/asm-arm/mm.h
index 5e7c5a3..404ec4d 100644
--- a/xen/include/asm-arm/mm.h
+++ b/xen/include/asm-arm/mm.h
@@ -5,6 +5,7 @@
#include <xen/kernel.h>
#include <asm/page.h>
#include <public/xen.h>
+#include <xen/device_tree.h>
#if defined(CONFIG_ARM_32)
# include <asm/arm32/io.h>
diff --git a/xen/include/public/memory.h b/xen/include/public/memory.h
index 7a26dee..264fb8f 100644
--- a/xen/include/public/memory.h
+++ b/xen/include/public/memory.h
@@ -283,9 +283,12 @@ DEFINE_XEN_GUEST_HANDLE(xen_remove_from_physmap_t);
/*#define XENMEM_translate_gpfn_list 8*/
/*
- * Returns the pseudo-physical memory map as it was when the domain
+ * x86: returns the pseudo-physical memory map as it was when the domain
* was started (specified by XENMEM_set_memory_map).
* arg == addr of xen_memory_map_t.
+ * ARM: returns the pseudo-physical memory map as it was set
+ * (specified by XENMEM_set_memory_map).
+ * arg == addr of xen_arm_memory_map_t.
*/
#define XENMEM_memory_map 9
struct xen_memory_map {
@@ -315,7 +318,8 @@ DEFINE_XEN_GUEST_HANDLE(xen_memory_map_t);
/*
* Set the pseudo-physical memory map of a domain, as returned by
* XENMEM_memory_map.
- * arg == addr of xen_foreign_memory_map_t.
+ * x86: arg == addr of xen_foreign_memory_map_t.
+ * ARM: arg == addr of xen_arm_memory_map_t.
*/
#define XENMEM_set_memory_map 13
struct xen_foreign_memory_map {
@@ -325,6 +329,13 @@ struct xen_foreign_memory_map {
typedef struct xen_foreign_memory_map xen_foreign_memory_map_t;
DEFINE_XEN_GUEST_HANDLE(xen_foreign_memory_map_t);
+struct xen_arm_memory_map {
+ domid_t domid;
+ XEN_GUEST_HANDLE(void) buffer;
+};
+typedef struct xen_arm_memory_map xen_arm_memory_map_t;
+DEFINE_XEN_GUEST_HANDLE(xen_arm_memory_map_t);
+
#define XENMEM_set_pod_target 16
#define XENMEM_get_pod_target 17
struct xen_pod_target {
diff --git a/xen/include/xsm/dummy.h b/xen/include/xsm/dummy.h
index cc0a5a8..fef9904 100644
--- a/xen/include/xsm/dummy.h
+++ b/xen/include/xsm/dummy.h
@@ -626,4 +626,9 @@ static XSM_INLINE int xsm_map_gmfn_foreign(XSM_DEFAULT_ARG struct domain *d, str
XSM_ASSERT_ACTION(XSM_TARGET);
return xsm_default_action(action, d, t);
}
+static XSM_INLINE int xsm_domain_memory_map(XSM_DEFAULT_ARG struct domain *d)
+{
+ XSM_ASSERT_ACTION(XSM_TARGET);
+ return xsm_default_action(action, current->domain, d);
+}
#endif
diff --git a/xen/include/xsm/xsm.h b/xen/include/xsm/xsm.h
index 1939453..9764011 100644
--- a/xen/include/xsm/xsm.h
+++ b/xen/include/xsm/xsm.h
@@ -625,6 +625,11 @@ static inline int xsm_map_gmfn_foreign (struct domain *d, struct domain *t)
{
return xsm_ops->map_gmfn_foreign(d, t);
}
+static inline int xsm_domain_memory_map(xsm_default_t def, struct domain *d)
+{
+ return xsm_ops->domain_memory_map(d);
+}
+
#endif /* CONFIG_ARM */
#endif /* XSM_NO_WRAPPERS */
--
1.8.1.2