From: Wei Liu <wei.liu2@citrix.com>
To: xen-devel@lists.xen.org
Cc: Wei Liu <wei.liu2@citrix.com>,
ian.campbell@citrix.com, jbeulich@suse.com,
david.vrabel@citrix.com
Subject: [PATCH V2 12/15] Infrastructure for manipulating 3-level event channel pages
Date: Mon, 4 Feb 2013 17:23:55 +0000
Message-ID: <1359998638-16774-13-git-send-email-wei.liu2@citrix.com>
In-Reply-To: <1359998638-16774-1-git-send-email-wei.liu2@citrix.com>
NOTE: the registration call always fails because other parts of the code
are not yet complete.
Signed-off-by: Wei Liu <wei.liu2@citrix.com>
---
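For context (not part of the patch itself): a guest would invoke the new
registration op roughly as sketched below. This is only an illustrative
sketch against the interface defined in patch 08/15; fill_l3_frames() and
nr_vcpus_online() are hypothetical placeholders for guest-specific code,
and NR_CPUS stands in for the guest's vCPU count limit.

    /* Hypothetical guest-side registration of 3-level bitmaps. */
    xen_pfn_t pending_gfns[EVTCHN_MAX_L3_PAGES];
    xen_pfn_t mask_gfns[EVTCHN_MAX_L3_PAGES];
    xen_pfn_t sel_mfns[NR_CPUS], sel_offs[NR_CPUS];
    struct evtchn_register_nlevel reg = { .level = EVTCHN_3_LEVEL };
    int rc;

    /* Populate the arrays with the frames backing the new pending/mask
     * bitmaps and each vCPU's L2 selector location (guest-specific). */
    fill_l3_frames(pending_gfns, mask_gfns, sel_mfns, sel_offs);

    reg.u.l3.nr_pages = EVTCHN_MAX_L3_PAGES;
    reg.u.l3.nr_vcpus = nr_vcpus_online();
    set_xen_guest_handle(reg.u.l3.evtchn_pending, pending_gfns);
    set_xen_guest_handle(reg.u.l3.evtchn_mask, mask_gfns);
    set_xen_guest_handle(reg.u.l3.l2sel_mfns, sel_mfns);
    set_xen_guest_handle(reg.u.l3.l2sel_offsets, sel_offs);

    rc = HYPERVISOR_event_channel_op(EVTCHNOP_register_nlevel, &reg);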
xen/common/event_channel.c | 280 ++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 280 insertions(+)
diff --git a/xen/common/event_channel.c b/xen/common/event_channel.c
index 1ce97b0..411bef8 100644
--- a/xen/common/event_channel.c
+++ b/xen/common/event_channel.c
@@ -26,6 +26,7 @@
#include <xen/compat.h>
#include <xen/guest_access.h>
#include <xen/keyhandler.h>
+#include <xen/paging.h>
#include <asm/current.h>
#include <public/xen.h>
@@ -1024,6 +1025,260 @@ out:
}
+static long __map_l3_arrays(struct domain *d, xen_pfn_t *pending,
+ xen_pfn_t *mask, int nr_pages)
+{
+ int rc;
+ void *mapping;
+ struct page_info *pginfo;
+ unsigned long gfn;
+ int pending_count = 0, mask_count = 0;
+
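+ /*
+ * __MAP: map each guest frame listed in src[] writably into Xen's
+ * global mapping area and record the Xen-side virtual address in
+ * dst[]. On failure, any mappings already established are left in
+ * dst[] for the caller to tear down.
+ */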
+#define __MAP(src, dst, cnt) \
+ for ( (cnt) = 0; (cnt) < nr_pages; (cnt)++ ) \
+ { \
+ rc = -EINVAL; \
+ gfn = (src)[(cnt)]; \
+ pginfo = get_page_from_gfn(d, gfn, NULL, P2M_ALLOC); \
+ if ( !pginfo ) \
+ goto err; \
+ if ( !get_page_type(pginfo, PGT_writable_page) ) \
+ { \
+ put_page(pginfo); \
+ goto err; \
+ } \
+ mapping = __map_domain_page_global(pginfo); \
+ if ( !mapping ) \
+ { \
+ put_page_and_type(pginfo); \
+ rc = -ENOMEM; \
+ goto err; \
+ } \
+ (dst)[(cnt)] = mapping; \
+ }
+
+ __MAP(pending, d->evtchn_pending, pending_count)
+ __MAP(mask, d->evtchn_mask, mask_count)
+#undef __MAP
+
+ rc = 0;
+
+ err:
+ return rc;
+}
+
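+/* Tear down all global mappings established by __map_l3_arrays(). */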
+static void __unmap_l3_arrays(struct domain *d)
+{
+ int i;
+ unsigned long mfn;
+
+ for ( i = 0; i < EVTCHN_MAX_L3_PAGES; i++ )
+ {
+ if ( d->evtchn_pending[i] != 0 )
+ {
+ mfn = domain_page_map_to_mfn(d->evtchn_pending[i]);
+ unmap_domain_page_global(d->evtchn_pending[i]);
+ put_page_and_type(mfn_to_page(mfn));
+ d->evtchn_pending[i] = 0;
+ }
+ if ( d->evtchn_mask[i] != 0 )
+ {
+ mfn = domain_page_map_to_mfn(d->evtchn_mask[i]);
+ unmap_domain_page_global(d->evtchn_mask[i]);
+ put_page_and_type(mfn_to_page(mfn));
+ d->evtchn_mask[i] = 0;
+ }
+ }
+}
+
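+/* Map the guest page holding a vCPU's L2 selector and record a pointer
+ * to the selector at the given offset within that page. */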
+static long __map_l2_selector(struct vcpu *v, unsigned long gfn,
+ unsigned long off)
+{
+ void *mapping;
+ int rc;
+ struct page_info *page;
+ struct domain *d = v->domain;
+
+ rc = -EINVAL; /* common errno for following operations */
+
+ /* Sanity check: the L2 selector has a maximum size of
+ * sizeof(unsigned long) * 8, which equals the size of the shared
+ * bitmap array of the 2-level event channel, so it must fit
+ * entirely within the page at the given offset. */
+ if ( off + sizeof(unsigned long) * 8 >= PAGE_SIZE )
+ goto out;
+
+ page = get_page_from_gfn(d, gfn, NULL, P2M_ALLOC);
+ if ( !page )
+ goto out;
+
+ if ( !get_page_type(page, PGT_writable_page) )
+ {
+ put_page(page);
+ goto out;
+ }
+
+ /* Use a global mapping here, because these selectors are also
+ * accessed by other domains when marking inter-domain event
+ * channels pending.
+ */
+ mapping = __map_domain_page_global(page);
+
+ if ( mapping == NULL )
+ {
+ put_page_and_type(page);
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ v->evtchn_pending_sel_l2 = mapping + off;
+ rc = 0;
+
+ out:
+ return rc;
+}
+
+static void __unmap_l2_selector(struct vcpu *v)
+{
+ unsigned long mfn;
+
+ if ( v->evtchn_pending_sel_l2 )
+ {
+ mfn = domain_page_map_to_mfn(v->evtchn_pending_sel_l2);
+ unmap_domain_page_global(v->evtchn_pending_sel_l2);
+ put_page_and_type(mfn_to_page(mfn));
+ v->evtchn_pending_sel_l2 = NULL;
+ }
+}
+
+static void __evtchn_unmap_all_3level(struct domain *d)
+{
+ struct vcpu *v;
+ for_each_vcpu ( d, v )
+ __unmap_l2_selector(v);
+ __unmap_l3_arrays(d);
+}
+
+static void __evtchn_setup_bitmap_l3(struct domain *d)
+{
+ struct vcpu *v;
+
+ /* Setting up the 3-level bitmap is straightforward: move each
+ * existing per-vCPU selector down to the next level, then copy
+ * the shared pending and mask arrays. */
+ for_each_vcpu ( d, v )
+ {
+ memcpy(&v->evtchn_pending_sel_l2[0],
+ &vcpu_info(v, evtchn_pending_sel),
+ sizeof(vcpu_info(v, evtchn_pending_sel)));
+ memset(&vcpu_info(v, evtchn_pending_sel), 0,
+ sizeof(vcpu_info(v, evtchn_pending_sel)));
+ set_bit(0, &vcpu_info(v, evtchn_pending_sel));
+ }
+
+ memcpy(d->evtchn_pending[0], &shared_info(d, evtchn_pending),
+ sizeof(shared_info(d, evtchn_pending)));
+ memcpy(d->evtchn_mask[0], &shared_info(d, evtchn_mask),
+ sizeof(shared_info(d, evtchn_mask)));
+}
+
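+/* Caller must hold d->event_lock (taken in evtchn_register_nlevel()). */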
+static long evtchn_register_3level(evtchn_register_3level_t *arg)
+{
+ struct domain *d = current->domain;
+ struct vcpu *v;
+ int rc = 0;
+ xen_pfn_t evtchn_pending[EVTCHN_MAX_L3_PAGES];
+ xen_pfn_t evtchn_mask[EVTCHN_MAX_L3_PAGES];
+ xen_pfn_t l2sel_mfn = 0;
+ xen_pfn_t l2sel_offset = 0;
+
+ if ( d->evtchn_level == EVTCHN_3_LEVEL )
+ {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if ( arg->nr_vcpus > d->max_vcpus ||
+ arg->nr_pages > EVTCHN_MAX_L3_PAGES )
+ {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ memset(evtchn_pending, 0, sizeof(xen_pfn_t) * EVTCHN_MAX_L3_PAGES);
+ memset(evtchn_mask, 0, sizeof(xen_pfn_t) * EVTCHN_MAX_L3_PAGES);
+
+ rc = -EFAULT; /* common error code for following operations */
+ if ( copy_from_guest(evtchn_pending, arg->evtchn_pending, arg->nr_pages) )
+ goto out;
+ if ( copy_from_guest(evtchn_mask, arg->evtchn_mask, arg->nr_pages) )
+ goto out;
+
+ rc = __map_l3_arrays(d, evtchn_pending, evtchn_mask, arg->nr_pages);
+ if ( rc )
+ {
+ /* Undo any mappings established before the failure. */
+ __unmap_l3_arrays(d);
+ goto out;
+ }
+
+ for_each_vcpu ( d, v )
+ {
+ int vcpu_id = v->vcpu_id;
+
+ rc = -EFAULT; /* common error code for following operations */
+ if ( unlikely(copy_from_guest_offset(&l2sel_mfn, arg->l2sel_mfns,
+ vcpu_id, 1)) )
+ {
+ __evtchn_unmap_all_3level(d);
+ goto out;
+ }
+ if ( unlikely(copy_from_guest_offset(&l2sel_offset, arg->l2sel_offsets,
+ vcpu_id, 1)) )
+ {
+ __evtchn_unmap_all_3level(d);
+ goto out;
+ }
+
+ if ( (rc = __map_l2_selector(v, l2sel_mfn, l2sel_offset)) )
+ {
+ __evtchn_unmap_all_3level(d);
+ goto out;
+ }
+ }
+
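+ /* All pages are mapped; migrate the existing 2-level state into
+ * the new bitmaps and switch the domain over. */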
+ __evtchn_setup_bitmap_l3(d);
+
+ d->evtchn_level = EVTCHN_3_LEVEL;
+
+ rc = 0;
+
+ out:
+ return rc;
+}
+
+/*
+ * NOTE to N-level event channel users:
+ * N-level channels are likely to consume a large amount of global
+ * mapping area in Xen. For example, a 3-level event channel consumes
+ * 16 + nr_vcpus pages of global mapping area: two bitmaps of
+ * EVTCHN_MAX_L3_PAGES pages each, plus one L2 selector page per
+ * vCPU. So *ONLY* enable N-level event channels for Dom0 or driver
+ * domains.
+ */
+static long evtchn_register_nlevel(struct evtchn_register_nlevel *reg)
+{
+ struct domain *d = current->domain;
+ int rc;
+
+ spin_lock(&d->event_lock);
+
+ switch ( reg->level )
+ {
+ case EVTCHN_3_LEVEL:
+ rc = evtchn_register_3level(&reg->u.l3);
+ break;
+ default:
+ rc = -EINVAL;
+ }
+
+ spin_unlock(&d->event_lock);
+
+ return rc;
+}
+
long do_event_channel_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
{
long rc;
@@ -1132,6 +1387,18 @@ long do_event_channel_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
break;
}
+ case EVTCHNOP_register_nlevel: {
+ struct evtchn_register_nlevel reg;
+ if ( copy_from_guest(&reg, arg, 1) != 0 )
+ return -EFAULT;
+ rc = evtchn_register_nlevel(&reg);
+
+ /* XXX: always fail this call for now, since the rest of the
+ * implementation is not yet complete. */
+ rc = -EINVAL;
+
+ break;
+ }
+
default:
rc = -ENOSYS;
break;
@@ -1258,6 +1525,17 @@ int evtchn_init(struct domain *d)
return 0;
}
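+/* Tear down any per-level mappings when the domain is destroyed. */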
+static void evtchn_unmap_nlevel(struct domain *d)
+{
+ switch ( d->evtchn_level )
+ {
+ case EVTCHN_3_LEVEL:
+ __evtchn_unmap_all_3level(d);
+ break;
+ default:
+ break;
+ }
+}
void evtchn_destroy(struct domain *d)
{
@@ -1286,6 +1564,8 @@ void evtchn_destroy(struct domain *d)
clear_global_virq_handlers(d);
+ evtchn_unmap_nlevel(d);
+
free_xenheap_page(d->evtchn);
}
--
1.7.10.4