From: Wei Liu <wei.liu2@citrix.com>
To: xen-devel@lists.xen.org
Cc: Wei Liu <wei.liu2@citrix.com>,
ian.campbell@citrix.com, jbeulich@suse.com,
david.vrabel@citrix.com
Subject: [RFC PATCH V2 13/14] Infrastructure for manipulating 3-level event channel pages
Date: Mon, 21 Jan 2013 14:30:53 +0000 [thread overview]
Message-ID: <1358778654-29559-14-git-send-email-wei.liu2@citrix.com> (raw)
In-Reply-To: <1358778654-29559-1-git-send-email-wei.liu2@citrix.com>
NOTE: the registration call always fails because other parts of the code are
not yet completed.
Signed-off-by: Wei Liu <wei.liu2@citrix.com>
---
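For illustration only (not part of this patch): a guest would drive the new
interface roughly as in the sketch below. The structure and constant names
(struct evtchn_register_nlevel, struct evtchn_register_3level, EVTCHN_3_LEVEL,
EVTCHNOP_register_nlevel) come from the interface patches earlier in this
series; the hypercall wrapper HYPERVISOR_event_channel_op, the helper name and
the caller-supplied arrays are assumptions made for the sake of the example.
As noted above, at this point in the series the call still returns -EINVAL.

/* Guest-side sketch, not a definitive implementation: register the
 * pre-allocated 3-level bitmap pages and per-vCPU L2 selectors. */
static int register_3level_evtchn(xen_pfn_t *pending_gfns,
                                  xen_pfn_t *mask_gfns,
                                  uint32_t nr_pages,
                                  xen_pfn_t *l2sel_mfns,
                                  xen_pfn_t *l2sel_offsets,
                                  uint32_t nr_vcpus)
{
    struct evtchn_register_3level l3 = {
        .nr_pages = nr_pages,
        .nr_vcpus = nr_vcpus,
    };
    struct evtchn_register_nlevel reg = {
        .level = EVTCHN_3_LEVEL,
    };

    /* Hand the hypervisor the frames backing the pending/mask bitmaps
     * and the per-vCPU L2 selector locations. */
    set_xen_guest_handle(l3.evtchn_pending, pending_gfns);
    set_xen_guest_handle(l3.evtchn_mask, mask_gfns);
    set_xen_guest_handle(l3.l2sel_mfns, l2sel_mfns);
    set_xen_guest_handle(l3.l2sel_offsets, l2sel_offsets);
    set_xen_guest_handle(reg.u.l3, &l3);

    return HYPERVISOR_event_channel_op(EVTCHNOP_register_nlevel, &reg);
}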
xen/common/event_channel.c | 287 ++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 287 insertions(+)
diff --git a/xen/common/event_channel.c b/xen/common/event_channel.c
index e8faf7d..54a847e 100644
--- a/xen/common/event_channel.c
+++ b/xen/common/event_channel.c
@@ -26,6 +26,7 @@
 #include <xen/compat.h>
 #include <xen/guest_access.h>
 #include <xen/keyhandler.h>
+#include <xen/paging.h>
 #include <asm/current.h>
 
 #include <public/xen.h>
@@ -1008,6 +1009,267 @@ out:
 }
 
+static long __map_l3_arrays(struct domain *d, xen_pfn_t *pending,
+                            xen_pfn_t *mask, int nr_pages)
+{
+    int rc;
+    void *mapping;
+    struct page_info *pginfo;
+    unsigned long gfn;
+    int pending_count = 0, mask_count = 0;
+
+#define __MAP(src, dst, cnt)                                         \
+    for ( (cnt) = 0; (cnt) < nr_pages; (cnt)++ )                     \
+    {                                                                \
+        rc = -EINVAL;                                                \
+        gfn = (src)[(cnt)];                                          \
+        pginfo = get_page_from_gfn(d, gfn, NULL, P2M_ALLOC);         \
+        if ( !pginfo )                                               \
+            goto err;                                                \
+        if ( !get_page_type(pginfo, PGT_writable_page) )             \
+        {                                                            \
+            put_page(pginfo);                                        \
+            goto err;                                                \
+        }                                                            \
+        mapping = __map_domain_page_global(pginfo);                  \
+        if ( !mapping )                                              \
+        {                                                            \
+            put_page_and_type(pginfo);                               \
+            rc = -ENOMEM;                                            \
+            goto err;                                                \
+        }                                                            \
+        (dst)[(cnt)] = mapping;                                      \
+    }
+
+    __MAP(pending, d->evtchn_pending, pending_count)
+    __MAP(mask, d->evtchn_mask, mask_count)
+#undef __MAP
+
+    rc = 0;
+
+ err:
+    return rc;
+}
+
+static void __unmap_l3_arrays(struct domain *d)
+{
+    int i;
+    unsigned long mfn;
+
+    for ( i = 0; i < EVTCHN_MAX_L3_PAGES; i++ )
+    {
+        if ( d->evtchn_pending[i] != 0 )
+        {
+            mfn = domain_page_map_to_mfn(d->evtchn_pending[i]);
+            unmap_domain_page_global(d->evtchn_pending[i]);
+            put_page_and_type(mfn_to_page(mfn));
+            d->evtchn_pending[i] = 0;
+        }
+        if ( d->evtchn_mask[i] != 0 )
+        {
+            mfn = domain_page_map_to_mfn(d->evtchn_mask[i]);
+            unmap_domain_page_global(d->evtchn_mask[i]);
+            put_page_and_type(mfn_to_page(mfn));
+            d->evtchn_mask[i] = 0;
+        }
+    }
+}
+
+static long __map_l2_selector(struct vcpu *v, unsigned long gfn,
+                              unsigned long off)
+{
+    void *mapping;
+    int rc;
+    struct page_info *page;
+    struct domain *d = v->domain;
+
+    rc = -EINVAL; /* common errno for following operations */
+
+    /* Sanity check: L2 selector has maximum size of sizeof(unsigned
+     * long) * 8, this size is equal to the size of shared bitmap
+     * array of 2-level event channel. */
+    if ( off + sizeof(unsigned long) * 8 >= PAGE_SIZE )
+        goto out;
+
+    page = get_page_from_gfn(d, gfn, NULL, P2M_ALLOC);
+    if ( !page )
+        goto out;
+
+    if ( !get_page_type(page, PGT_writable_page) )
+    {
+        put_page(page);
+        goto out;
+    }
+
+    /* Use global mapping here, because we need to map selector for
+     * other vcpu (v != current). However this mapping is only used by
+     * v when guest is running. */
+    mapping = __map_domain_page_global(page);
+
+    if ( mapping == NULL )
+    {
+        put_page_and_type(page);
+        rc = -ENOMEM;
+        goto out;
+    }
+
+    v->evtchn_pending_sel_l2 = mapping + off;
+    rc = 0;
+
+ out:
+    return rc;
+}
+
+static void __unmap_l2_selector(struct vcpu *v)
+{
+    unsigned long mfn;
+
+    if ( v->evtchn_pending_sel_l2 )
+    {
+        mfn = domain_page_map_to_mfn(v->evtchn_pending_sel_l2);
+        unmap_domain_page_global(v->evtchn_pending_sel_l2);
+        put_page_and_type(mfn_to_page(mfn));
+        v->evtchn_pending_sel_l2 = NULL;
+    }
+}
+
+static void __evtchn_unmap_all_3level(struct domain *d)
+{
+    struct vcpu *v;
+    for_each_vcpu ( d, v )
+        __unmap_l2_selector(v);
+    __unmap_l3_arrays(d);
+}
+
+static void __evtchn_setup_bitmap_l3(struct domain *d)
+{
+    struct vcpu *v;
+
+    /* Easy way to setup 3-level bitmap, just move existing selector
+     * to next level then copy pending array and mask array */
+    for_each_vcpu ( d, v )
+    {
+        memcpy(&v->evtchn_pending_sel_l2[0],
+               &vcpu_info(v, evtchn_pending_sel),
+               sizeof(vcpu_info(v, evtchn_pending_sel)));
+        memset(&vcpu_info(v, evtchn_pending_sel), 0,
+               sizeof(vcpu_info(v, evtchn_pending_sel)));
+        set_bit(0, &vcpu_info(v, evtchn_pending_sel));
+    }
+
+    memcpy(d->evtchn_pending[0], &shared_info(d, evtchn_pending),
+           sizeof(shared_info(d, evtchn_pending)));
+    memcpy(d->evtchn_mask[0], &shared_info(d, evtchn_mask),
+           sizeof(shared_info(d, evtchn_mask)));
+}
+
+static long evtchn_register_3level(
+    XEN_GUEST_HANDLE_PARAM(evtchn_register_3level_t) arg)
+{
+    struct domain *d = current->domain;
+    struct evtchn_register_3level r;
+    struct vcpu *v;
+    int rc = 0;
+    xen_pfn_t *evtchn_pending = NULL;
+    xen_pfn_t *evtchn_mask = NULL;
+    xen_pfn_t *l2sel_mfns = NULL;
+    xen_pfn_t *l2sel_offsets = NULL;
+
+    if ( d->evtchn_level == EVTCHN_3_LEVEL )
+    {
+        rc = -EINVAL;
+        goto out;
+    }
+
+    if ( copy_from_guest(&r, arg, 1) )
+    {
+        rc = -EFAULT;
+        goto out;
+    }
+
+    if ( r.nr_vcpus > d->max_vcpus ||
+         r.nr_pages > EVTCHN_MAX_L3_PAGES )
+    {
+        rc = -EINVAL;
+        goto out;
+    }
+
+    evtchn_pending = xzalloc_array(xen_pfn_t, r.nr_pages);
+    evtchn_mask = xzalloc_array(xen_pfn_t, r.nr_pages);
+    l2sel_mfns = xzalloc_array(xen_pfn_t, r.nr_vcpus);
+    l2sel_offsets = xzalloc_array(xen_pfn_t, r.nr_vcpus);
+
+    if ( !evtchn_pending || !evtchn_mask ||
+         !l2sel_mfns || !l2sel_offsets )
+    {
+        rc = -ENOMEM;
+        goto out_free;
+    }
+
+#define __COPY_ARRAY(_d, _s, _nr)                       \
+    if ( copy_from_guest((_d), (_s), (_nr)) )           \
+    {                                                   \
+        rc = -EFAULT;                                   \
+        goto out_free;                                  \
+    }
+    __COPY_ARRAY(evtchn_pending, r.evtchn_pending, r.nr_pages)
+    __COPY_ARRAY(evtchn_mask, r.evtchn_mask, r.nr_pages)
+    __COPY_ARRAY(l2sel_mfns, r.l2sel_mfns, r.nr_vcpus)
+    __COPY_ARRAY(l2sel_offsets, r.l2sel_offsets, r.nr_vcpus)
+#undef __COPY_ARRAY
+
+    rc = __map_l3_arrays(d, evtchn_pending, evtchn_mask, r.nr_pages);
+    if ( rc )
+        goto out_free;
+
+    for_each_vcpu ( d, v )
+    {
+        if ( (rc = __map_l2_selector(v, l2sel_mfns[v->vcpu_id],
+                                     l2sel_offsets[v->vcpu_id])) )
+        {
+            __evtchn_unmap_all_3level(d);
+            goto out_free;
+        }
+    }
+
+    __evtchn_setup_bitmap_l3(d);
+
+    d->evtchn_level = EVTCHN_3_LEVEL;
+
+ out_free:
+    if ( evtchn_pending )
+        xfree(evtchn_pending);
+    if ( evtchn_mask )
+        xfree(evtchn_mask);
+    if ( l2sel_mfns )
+        xfree(l2sel_mfns);
+    if ( l2sel_offsets )
+        xfree(l2sel_offsets);
+ out:
+    return rc;
+}
+
+static long evtchn_register_nlevel(struct evtchn_register_nlevel *reg)
+{
+    struct domain *d = current->domain;
+    int rc;
+
+    spin_lock(&d->event_lock);
+
+    switch ( reg->level )
+    {
+    case EVTCHN_3_LEVEL:
+        rc = evtchn_register_3level(reg->u.l3);
+        break;
+    default:
+        rc = -EINVAL;
+    }
+
+    spin_unlock(&d->event_lock);
+
+    return rc;
+}
+
 long do_event_channel_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
 {
     long rc;
@@ -1116,6 +1378,18 @@ long do_event_channel_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
         break;
     }
 
+    case EVTCHNOP_register_nlevel: {
+        struct evtchn_register_nlevel reg;
+        if ( copy_from_guest(&reg, arg, 1) != 0 )
+            return -EFAULT;
+        rc = evtchn_register_nlevel(&reg);
+
+        /* XXX always fails this call because it is not yet completed */
+        rc = -EINVAL;
+
+        break;
+    }
+
     default:
         rc = -ENOSYS;
         break;
@@ -1245,6 +1519,17 @@ int evtchn_init(struct domain *d)
     return 0;
 }
 
+static void evtchn_unmap_nlevel(struct domain *d)
+{
+    switch ( d->evtchn_level )
+    {
+    case EVTCHN_3_LEVEL:
+        __evtchn_unmap_all_3level(d);
+        break;
+    default:
+        break;
+    }
+}
 
 void evtchn_destroy(struct domain *d)
 {
@@ -1273,6 +1558,8 @@ void evtchn_destroy(struct domain *d)
     clear_global_virq_handlers(d);
 
+    evtchn_unmap_nlevel(d);
+
     free_xenheap_page(d->evtchn);
 }
--
1.7.10.4
Thread overview: 25+ messages in thread
2013-01-21 14:30 [RFC PATCH V2] Implement 3-level event channel support in Xen Wei Liu
2013-01-21 14:30 ` [RFC PATCH V2 01/14] Remove trailing whitespaces in event_channel.c Wei Liu
2013-01-21 14:30 ` [RFC PATCH V2 02/14] Remove trailing whitespaces in sched.h Wei Liu
2013-01-21 14:30 ` [RFC PATCH V2 03/14] Add evtchn_level in struct domain Wei Liu
2013-01-21 14:30 ` [RFC PATCH V2 04/14] Dynamically allocate d->evtchn Wei Liu
2013-01-21 14:30 ` [RFC PATCH V2 05/14] Bump EVTCHNS_PER_BUCKET to 512 Wei Liu
2013-01-21 14:30 ` [RFC PATCH V2 06/14] Add evtchn_is_{pending, masked} and evtchn_clear_pending Wei Liu
2013-01-21 14:30 ` [RFC PATCH V2 07/14] Genneralized event channel operations Wei Liu
2013-01-21 16:36 ` Jan Beulich
2013-01-21 14:30 ` [RFC PATCH V2 08/14] Define N-level event channel registration interface Wei Liu
2013-01-21 16:38 ` Jan Beulich
2013-01-21 14:30 ` [RFC PATCH V2 09/14] Update Xen public header Wei Liu
2013-01-21 16:40 ` Jan Beulich
2013-01-21 14:30 ` [RFC PATCH V2 10/14] Add control structures for 3-level event channel Wei Liu
2013-01-21 14:30 ` [RFC PATCH V2 11/14] Introduce some macros for event channels Wei Liu
2013-01-21 16:46 ` Jan Beulich
2013-01-21 14:30 ` [RFC PATCH V2 12/14] Make NR_EVTCHN_BUCKETS 3-level ready Wei Liu
2013-01-21 14:30 ` Wei Liu [this message]
2013-01-21 16:50 ` [RFC PATCH V2 13/14] Infrastructure for manipulating 3-level event channel pages Jan Beulich
2013-01-28 17:21 ` Wei Liu
2013-01-29 8:43 ` Jan Beulich
2013-01-29 18:10 ` Wei Liu
2013-01-30 8:04 ` Jan Beulich
2013-01-21 14:30 ` [RFC PATCH V2 14/14] Implement 3-level event channel routines Wei Liu
2013-01-21 16:53 ` Jan Beulich