From: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
To: xen-devel@lists.xensource.com
Cc: Tim.Deegan@citrix.com,
Stefano Stabellini <stefano.stabellini@eu.citrix.com>,
Ian.Campbell@citrix.com, david.vrabel@citrix.com
Subject: [PATCH v4 6/6] arm: implement event injection
Date: Fri, 25 May 2012 17:23:14 +0100 [thread overview]
Message-ID: <1337962994-23573-6-git-send-email-stefano.stabellini@eu.citrix.com> (raw)
In-Reply-To: <alpine.DEB.2.00.1205251712480.26786@kaball-desktop>
Implement vcpu_mark_events_pending using the vgic to inject PPI 31, which
we reserve for Xen usage.
In the future the interrupt used for event injection might be dynamic
and could be written into the device tree.
Alternatively it could be an SGI chosen by the guest and passed to Xen
through a hypercall.
Considering that:
- it is easy to determine whether an event notification
interrupt has already been EOI'd by the guest just by looking at the
evtchn_upcall_pending bit in the shared_info page;
- we can safely assume that there is at most one event notification
interrupt pending in any set of LR registers at any time, because we
never inject more than a single event notification interrupt into a vcpu
(see vcpu_mark_events_pending);
we can avoid requesting maintenance interrupts for
VGIC_IRQ_EVTCHN_CALLBACK, provided that we check for event notification
interrupts that need to be cleared in the following places:
- maintenance interrupt entry;
- gic_set_guest_irq;
that is, every time we are about to write to an LR.
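Roughly, the bookkeeping works as in the following stand-alone model.
This is only an illustrative sketch: lr_regs[], upcall_pending and
NR_LRS are hypothetical stand-ins for the GICH list registers, the
guest's evtchn_upcall_pending bit and the number of LRs; the real
logic is in the gic.c hunks below.

    /* User-space model of the lr_mask/event_mask bookkeeping in this
     * patch.  Illustrative only, not the Xen code. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NR_LRS 4
    #define VGIC_IRQ_EVTCHN_CALLBACK 31

    static uint32_t lr_regs[NR_LRS];      /* stand-in for GICH[GICH_LR + i] */
    static uint64_t lr_mask, event_mask;
    static bool upcall_pending;           /* stand-in for evtchn_upcall_pending */

    static void set_lr(int i, unsigned int virq)
    {
        lr_regs[i] = virq;
        lr_mask |= 1ULL << i;
        if ( virq == VGIC_IRQ_EVTCHN_CALLBACK )
            event_mask |= 1ULL << i;      /* remember which LR holds the callback */
    }

    /* Called before writing an LR: if the guest has already cleared
     * evtchn_upcall_pending, any callback interrupt still sitting in an
     * LR is stale and the LR can be recycled, without ever requesting a
     * maintenance interrupt for it. */
    static void events_maintenance(void)
    {
        if ( !upcall_pending && event_mask != 0 )
            for ( int i = 0; i < NR_LRS; i++ )
                if ( event_mask & (1ULL << i) )
                {
                    lr_regs[i] = 0;
                    lr_mask &= ~(1ULL << i);
                    event_mask &= ~(1ULL << i);
                }
    }

    int main(void)
    {
        upcall_pending = true;                /* vcpu_mark_events_pending */
        set_lr(0, VGIC_IRQ_EVTCHN_CALLBACK);  /* inject the callback IRQ  */
        upcall_pending = false;               /* guest handles and EOIs   */
        events_maintenance();                 /* LR 0 is free again       */
        printf("lr_mask=%#llx event_mask=%#llx\n",
               (unsigned long long)lr_mask, (unsigned long long)event_mask);
        return 0;
    }

Since at most one callback interrupt is in flight per vcpu, running this
check on maintenance interrupt entry and in gic_set_guest_irq covers
every LR write, which is why GICH_LR_MAINTENANCE_IRQ can be dropped for
this interrupt.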
Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
---
xen/arch/arm/domain.c | 11 +++++++++++
xen/arch/arm/dummy.S | 1 -
xen/arch/arm/gic.c | 40 +++++++++++++++++++++++++++++++++++++++-
xen/arch/arm/gic.h | 3 +++
4 files changed, 53 insertions(+), 2 deletions(-)
diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c
index 3a726c8..5702399 100644
--- a/xen/arch/arm/domain.c
+++ b/xen/arch/arm/domain.c
@@ -232,6 +232,17 @@ void arch_dump_vcpu_info(struct vcpu *v)
{
}
+void vcpu_mark_events_pending(struct vcpu *v)
+{
+ int already_pending = test_and_set_bit(
+ 0, (unsigned long *)&vcpu_info(v, evtchn_upcall_pending));
+
+ if ( already_pending )
+ return;
+
+ vgic_vcpu_inject_irq(v, VGIC_IRQ_EVTCHN_CALLBACK, 1);
+}
+
/*
* Local variables:
* mode: C
diff --git a/xen/arch/arm/dummy.S b/xen/arch/arm/dummy.S
index 8c6151c..016340c 100644
--- a/xen/arch/arm/dummy.S
+++ b/xen/arch/arm/dummy.S
@@ -27,7 +27,6 @@ DUMMY(arch_vcpu_reset);
DUMMY(free_vcpu_guest_context);
DUMMY(sync_vcpu_execstate);
NOP(update_vcpu_system_time);
-DUMMY(vcpu_mark_events_pending);
DUMMY(vcpu_show_execution_state);
/* Page Reference & Type Maintenance */
diff --git a/xen/arch/arm/gic.c b/xen/arch/arm/gic.c
index cdb4e4a..cc9d37b 100644
--- a/xen/arch/arm/gic.c
+++ b/xen/arch/arm/gic.c
@@ -37,6 +37,7 @@
+ (GIC_CR_OFFSET & 0xfff)))
#define GICH ((volatile uint32_t *) (FIXMAP_ADDR(FIXMAP_GICH) \
+ (GIC_HR_OFFSET & 0xfff)))
+static void events_maintenance(struct vcpu *v);
/* Global state */
static struct {
@@ -46,6 +47,7 @@ static struct {
unsigned int lines;
unsigned int cpus;
spinlock_t lock;
+ uint64_t event_mask;
uint64_t lr_mask;
/* lr_pending is used to queue IRQs (struct pending_irq) that the
* vgic tried to inject in the guest (calling gic_set_guest_irq) but
@@ -293,6 +295,7 @@ int __init gic_init(void)
gic_hyp_init();
gic.lr_mask = 0ULL;
+ gic.event_mask = 0ULL;
INIT_LIST_HEAD(&gic.lr_pending);
spin_unlock(&gic.lock);
@@ -392,9 +395,15 @@ int __init setup_irq(unsigned int irq, struct irqaction *new)
static inline void gic_set_lr(int lr, unsigned int virtual_irq,
unsigned int state, unsigned int priority)
{
+ int maintenance_int = GICH_LR_MAINTENANCE_IRQ;
+
BUG_ON(lr > nr_lrs);
+
+ if (virtual_irq == VGIC_IRQ_EVTCHN_CALLBACK && nr_lrs > 1)
+ maintenance_int = 0;
+
GICH[GICH_LR + lr] = state |
- GICH_LR_MAINTENANCE_IRQ |
+ maintenance_int |
((priority >> 3) << GICH_LR_PRIORITY_SHIFT) |
((virtual_irq & GICH_LR_VIRTUAL_MASK) << GICH_LR_VIRTUAL_SHIFT);
}
@@ -405,6 +414,8 @@ void gic_set_guest_irq(unsigned int virtual_irq,
int i;
struct pending_irq *iter, *n;
+ events_maintenance(current);
+
spin_lock(&gic.lock);
if ( list_empty(&gic.lr_pending) )
@@ -412,6 +423,8 @@ void gic_set_guest_irq(unsigned int virtual_irq,
i = find_first_zero_bit(&gic.lr_mask, nr_lrs);
if (i < nr_lrs) {
set_bit(i, &gic.lr_mask);
+ if ( virtual_irq == VGIC_IRQ_EVTCHN_CALLBACK )
+ set_bit(i, &gic.event_mask);
gic_set_lr(i, virtual_irq, state, priority);
goto out;
}
@@ -515,12 +528,35 @@ void gicv_setup(struct domain *d)
GIC_BASE_ADDRESS + GIC_VR_OFFSET);
}
+static void events_maintenance(struct vcpu *v)
+{
+ int i = 0;
+ int already_pending = test_bit(0,
+ (unsigned long *)&vcpu_info(v, evtchn_upcall_pending));
+
+ if (!already_pending && gic.event_mask != 0) {
+ spin_lock(&gic.lock);
+ while ((i = find_next_bit((const long unsigned int *) &gic.event_mask,
+ sizeof(uint64_t), i)) < sizeof(uint64_t)) {
+
+ GICH[GICH_LR + i] = 0;
+ clear_bit(i, &gic.lr_mask);
+ clear_bit(i, &gic.event_mask);
+
+ i++;
+ }
+ spin_unlock(&gic.lock);
+ }
+}
+
static void maintenance_interrupt(int irq, void *dev_id, struct cpu_user_regs *regs)
{
int i = 0, virq;
uint32_t lr;
uint64_t eisr = GICH[GICH_EISR0] | (((uint64_t) GICH[GICH_EISR1]) << 32);
+ events_maintenance(current);
+
while ((i = find_next_bit((const long unsigned int *) &eisr,
sizeof(eisr), i)) < sizeof(eisr)) {
struct pending_irq *p;
@@ -536,6 +572,8 @@ static void maintenance_interrupt(int irq, void *dev_id, struct cpu_user_regs *r
gic_set_lr(i, p->irq, GICH_LR_PENDING, p->priority);
list_del_init(&p->lr_queue);
set_bit(i, &gic.lr_mask);
+ if ( p->irq == VGIC_IRQ_EVTCHN_CALLBACK )
+ set_bit(i, &gic.event_mask);
} else {
gic_inject_irq_stop();
}
diff --git a/xen/arch/arm/gic.h b/xen/arch/arm/gic.h
index 2c5922e..ff8d0a2 100644
--- a/xen/arch/arm/gic.h
+++ b/xen/arch/arm/gic.h
@@ -121,6 +121,9 @@
#define GICH_LR_CPUID_SHIFT 9
#define GICH_VTR_NRLRGS 0x3f
+/* XXX: write this into the DT */
+#define VGIC_IRQ_EVTCHN_CALLBACK 31
+
extern int domain_vgic_init(struct domain *d);
extern int vcpu_vgic_init(struct vcpu *v);
extern void vgic_vcpu_inject_irq(struct vcpu *v, unsigned int irq,int virtual);
--
1.7.2.5