From: Wei Liu <wei.liu2@citrix.com>
To: xen-devel@lists.xen.org
Cc: Wei Liu <wei.liu2@citrix.com>,
ian.campbell@citrix.com, jbeulich@suse.com,
david.vrabel@citrix.com
Subject: [RFC PATCH V2 07/14] Generalized event channel operations
Date: Mon, 21 Jan 2013 14:30:47 +0000
Message-ID: <1358778654-29559-8-git-send-email-wei.liu2@citrix.com>
In-Reply-To: <1358778654-29559-1-git-send-email-wei.liu2@citrix.com>
Add a struct xen_evtchn_ops *eops pointer to struct domain, referencing the
domain's current set of event channel operations.
When a domain is built, the default operation set is the 2-level one.
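For illustration only (this is not Xen code; struct domain, struct vcpu and
the main() driver below are simplified stand-ins, and is_masked/unmask are
omitted for brevity), the following sketch shows the dispatch pattern this
patch introduces: callers go through the per-domain eops pointer instead of
calling the 2-level helpers directly, so a different (e.g. 3-level) operation
set can later be installed per domain without touching the call sites:

  /*
   * Illustrative sketch only -- not Xen code.  The struct and function
   * names mirror the patch; struct domain, struct vcpu and main() are
   * simplified stand-ins for the real Xen types.
   */
  #include <stdio.h>

  struct domain;
  struct vcpu { struct domain *domain; };

  /* Per-domain table of event channel operations. */
  struct xen_evtchn_ops {
      void (*set_pending)(struct vcpu *v, int port);
      void (*clear_pending)(struct domain *d, int port);
      int  (*is_pending)(struct domain *d, int port);
  };

  struct domain {
      struct xen_evtchn_ops *eops;   /* current operation set */
      unsigned long pending;         /* toy pending-bit map */
      struct vcpu vcpu0;
  };

  /* Stand-ins for the 2-level implementation. */
  static void set_pending_l2(struct vcpu *v, int port)
  {
      v->domain->pending |= 1UL << port;
  }

  static void clear_pending_l2(struct domain *d, int port)
  {
      d->pending &= ~(1UL << port);
  }

  static int is_pending_l2(struct domain *d, int port)
  {
      return !!(d->pending & (1UL << port));
  }

  static struct xen_evtchn_ops xen_evtchn_ops_l2 = {
      .set_pending   = set_pending_l2,
      .clear_pending = clear_pending_l2,
      .is_pending    = is_pending_l2,
  };

  int main(void)
  {
      struct domain d = { .eops = &xen_evtchn_ops_l2 };
      d.vcpu0.domain = &d;

      /* Callers dispatch through d.eops, so a 3-level operation set
       * could be swapped in without changing these call sites. */
      d.eops->set_pending(&d.vcpu0, 3);
      printf("port 3 pending? %d\n", d.eops->is_pending(&d, 3));
      d.eops->clear_pending(&d, 3);
      printf("port 3 pending? %d\n", d.eops->is_pending(&d, 3));
      return 0;
  }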
Signed-off-by: Wei Liu <wei.liu2@citrix.com>
---
xen/common/event_channel.c | 64 ++++++++++++++++++++++++++++++++------------
xen/include/xen/sched.h    |  2 ++
2 files changed, 49 insertions(+), 17 deletions(-)
diff --git a/xen/common/event_channel.c b/xen/common/event_channel.c
index 1df2b76..e8faf7d 100644
--- a/xen/common/event_channel.c
+++ b/xen/common/event_channel.c
@@ -51,6 +51,15 @@
#define consumer_is_xen(e) (!!(e)->xen_consumer)
+/* An N-level event channel should implement the following operations */
+struct xen_evtchn_ops {
+ void (*set_pending)(struct vcpu *v, int port);
+ void (*clear_pending)(struct domain *d, int port);
+ int (*unmask)(unsigned int port);
+ int (*is_pending)(struct domain *d, int port);
+ int (*is_masked)(struct domain *d, int port);
+};
+
/*
* The function alloc_unbound_xen_event_channel() allows an arbitrary
* notifier function to be specified. However, very few unique functions
@@ -94,9 +103,6 @@ static uint8_t get_xen_consumer(xen_event_channel_notification_t fn)
/* Get the notification function for a given Xen-bound event channel. */
#define xen_notification_fn(e) (xen_consumers[(e)->xen_consumer-1])
-static void evtchn_set_pending(struct vcpu *v, int port);
-static void evtchn_clear_pending(struct domain *d, int port);
-
static int virq_is_global(uint32_t virq)
{
int rc;
@@ -157,16 +163,25 @@ static int get_free_port(struct domain *d)
return port;
}
-int evtchn_is_pending(struct domain *d, int port)
+static int evtchn_is_pending_l2(struct domain *d, int port)
{
return test_bit(port, &shared_info(d, evtchn_pending));
}
-int evtchn_is_masked(struct domain *d, int port)
+static int evtchn_is_masked_l2(struct domain *d, int port)
{
return test_bit(port, &shared_info(d, evtchn_mask));
}
+int evtchn_is_pending(struct domain *d, int port)
+{
+ return d->eops->is_pending(d, port);
+}
+
+int evtchn_is_masked(struct domain *d, int port)
+{
+ return d->eops->is_masked(d, port);
+}
static long evtchn_alloc_unbound(evtchn_alloc_unbound_t *alloc)
{
@@ -258,7 +273,7 @@ static long evtchn_bind_interdomain(evtchn_bind_interdomain_t *bind)
* We may have lost notifications on the remote unbound port. Fix that up
* here by conservatively always setting a notification on the local port.
*/
- evtchn_set_pending(ld->vcpu[lchn->notify_vcpu_id], lport);
+ ld->eops->set_pending(ld->vcpu[lchn->notify_vcpu_id], lport);
bind->local_port = lport;
@@ -540,7 +555,7 @@ static long __evtchn_close(struct domain *d1, int port1)
}
/* Clear pending event to avoid unexpected behavior on re-bind. */
- evtchn_clear_pending(d1, port1);
+ d1->eops->clear_pending(d1, port1);
/* Reset binding to vcpu0 when the channel is freed. */
chn1->state = ECS_FREE;
@@ -605,10 +620,10 @@ int evtchn_send(struct domain *d, unsigned int lport)
if ( consumer_is_xen(rchn) )
(*xen_notification_fn(rchn))(rvcpu, rport);
else
- evtchn_set_pending(rvcpu, rport);
+ rd->eops->set_pending(rvcpu, rport);
break;
case ECS_IPI:
- evtchn_set_pending(ld->vcpu[lchn->notify_vcpu_id], lport);
+ ld->eops->set_pending(ld->vcpu[lchn->notify_vcpu_id], lport);
break;
case ECS_UNBOUND:
/* silently drop the notification */
@@ -623,7 +638,7 @@ out:
return ret;
}
-static void evtchn_set_pending(struct vcpu *v, int port)
+static void evtchn_set_pending_l2(struct vcpu *v, int port)
{
struct domain *d = v->domain;
int vcpuid;
@@ -664,7 +679,7 @@ static void evtchn_set_pending(struct vcpu *v, int port)
}
}
-static void evtchn_clear_pending(struct domain *d, int port)
+static void evtchn_clear_pending_l2(struct domain *d, int port)
{
clear_bit(port, &shared_info(d, evtchn_pending));
}
@@ -678,6 +693,7 @@ void send_guest_vcpu_virq(struct vcpu *v, uint32_t virq)
{
unsigned long flags;
int port;
+ struct domain *d = v->domain;
ASSERT(!virq_is_global(virq));
@@ -687,7 +703,7 @@ void send_guest_vcpu_virq(struct vcpu *v, uint32_t virq)
if ( unlikely(port == 0) )
goto out;
- evtchn_set_pending(v, port);
+ d->eops->set_pending(v, port);
out:
spin_unlock_irqrestore(&v->virq_lock, flags);
@@ -716,7 +732,7 @@ static void send_guest_global_virq(struct domain *d, uint32_t virq)
goto out;
chn = evtchn_from_port(d, port);
- evtchn_set_pending(d->vcpu[chn->notify_vcpu_id], port);
+ d->eops->set_pending(d->vcpu[chn->notify_vcpu_id], port);
out:
spin_unlock_irqrestore(&v->virq_lock, flags);
@@ -740,7 +756,7 @@ void send_guest_pirq(struct domain *d, const struct pirq *pirq)
}
chn = evtchn_from_port(d, port);
- evtchn_set_pending(d->vcpu[chn->notify_vcpu_id], port);
+ d->eops->set_pending(d->vcpu[chn->notify_vcpu_id], port);
}
static struct domain *global_virq_handlers[NR_VIRQS] __read_mostly;
@@ -932,7 +948,7 @@ long evtchn_bind_vcpu(unsigned int port, unsigned int vcpu_id)
}
-int evtchn_unmask(unsigned int port)
+static int evtchn_unmask_l2(unsigned int port)
{
struct domain *d = current->domain;
struct vcpu *v;
@@ -959,6 +975,12 @@ int evtchn_unmask(unsigned int port)
return 0;
}
+int evtchn_unmask(unsigned int port)
+{
+ struct domain *d = current->domain;
+ return d->eops->unmask(port);
+}
+
static long evtchn_reset(evtchn_reset_t *r)
{
@@ -1179,12 +1201,19 @@ void notify_via_xen_event_channel(struct domain *ld, int lport)
rd = lchn->u.interdomain.remote_dom;
rport = lchn->u.interdomain.remote_port;
rchn = evtchn_from_port(rd, rport);
- evtchn_set_pending(rd->vcpu[rchn->notify_vcpu_id], rport);
+ rd->eops->set_pending(rd->vcpu[rchn->notify_vcpu_id], rport);
}
spin_unlock(&ld->event_lock);
}
+static struct xen_evtchn_ops __read_mostly xen_evtchn_ops_l2 = {
+ .set_pending = evtchn_set_pending_l2,
+ .clear_pending = evtchn_clear_pending_l2,
+ .unmask = evtchn_unmask_l2,
+ .is_pending = evtchn_is_pending_l2,
+ .is_masked = evtchn_is_masked_l2,
+};
int evtchn_init(struct domain *d)
{
@@ -1197,6 +1226,7 @@ int evtchn_init(struct domain *d)
spin_lock_init(&d->event_lock);
d->evtchn_level = EVTCHN_DEFAULT_LEVEL; /* = 2 */
+ d->eops = &xen_evtchn_ops_l2;
if ( get_free_port(d) != 0 ) {
free_xenheap_page(d->evtchn);
return -EINVAL;
@@ -1272,7 +1302,6 @@ void evtchn_move_pirqs(struct vcpu *v)
spin_unlock(&d->event_lock);
}
-
static void domain_dump_evtchn_info(struct domain *d)
{
unsigned int port;
@@ -1334,6 +1363,7 @@ static void domain_dump_evtchn_info(struct domain *d)
spin_unlock(&d->event_lock);
}
+
static void dump_evtchn_info(unsigned char key)
{
struct domain *d;
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index eae9baf..df3b877 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -69,6 +69,7 @@ extern struct domain *dom0;
#define EVTCHNS_PER_BUCKET 512
#define NR_EVTCHN_BUCKETS (NR_EVENT_CHANNELS / EVTCHNS_PER_BUCKET)
+struct xen_evtchn_ops;
struct evtchn
{
@@ -279,6 +280,7 @@ struct domain
struct evtchn **evtchn;
spinlock_t event_lock;
unsigned int evtchn_level;
+ struct xen_evtchn_ops *eops;
struct grant_table *grant_table;
--
1.7.10.4