From: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
To: xen-devel@lists.xen.org
Cc: julien.grall@linaro.org, ian.campbell@citrix.com,
Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Subject: [PATCH 2/6] xen/arm: track the state of guest IRQs
Date: Fri, 6 Dec 2013 17:26:44 +0000 [thread overview]
Message-ID: <1386350808-13648-2-git-send-email-stefano.stabellini@eu.citrix.com> (raw)
In-Reply-To: <alpine.DEB.2.02.1312061715200.7093@kaball.uk.xensource.com>
Introduce a status field in struct pending_irq. The valid states are
GUEST_PENDING and GUEST_VISIBLE; they are not mutually exclusive.
See the in-code comment for an explanation of the states and how they
are used. Protect pending_irq state manipulations with the vgic lock.
The main effect of this patch is that an IRQ can be set to GUEST_PENDING
while it is still being serviced by the guest. In maintenance_interrupt,
we check whether GUEST_PENDING is set and, if it is, we reinject the IRQ
one more time.
Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
---
xen/arch/arm/gic.c | 49 ++++++++++++++++++++++++++++++------------
xen/arch/arm/vgic.c | 17 +++++++++++++--
xen/include/asm-arm/domain.h | 29 +++++++++++++++++++++++++
3 files changed, 79 insertions(+), 16 deletions(-)
diff --git a/xen/arch/arm/gic.c b/xen/arch/arm/gic.c
index 7e87acb..a8a5c2a 100644
--- a/xen/arch/arm/gic.c
+++ b/xen/arch/arm/gic.c
@@ -626,6 +626,7 @@ static inline void gic_set_lr(int lr, unsigned int virtual_irq,
((virtual_irq & GICH_LR_VIRTUAL_MASK) << GICH_LR_VIRTUAL_SHIFT);
}
+/* needs to be called with vgic lock held */
void gic_set_guest_irq(struct vcpu *v, unsigned int virtual_irq,
unsigned int state, unsigned int priority)
{
@@ -635,17 +636,20 @@ void gic_set_guest_irq(struct vcpu *v, unsigned int virtual_irq,
spin_lock_irqsave(&gic.lock, flags);
+ n = irq_to_pending(v, virtual_irq);
+
if ( v == current && list_empty(&v->arch.vgic.lr_pending) )
{
i = find_first_zero_bit(&this_cpu(lr_mask), nr_lrs);
if (i < nr_lrs) {
set_bit(i, &this_cpu(lr_mask));
gic_set_lr(i, virtual_irq, state, priority);
+ n->status |= GIC_IRQ_GUEST_VISIBLE;
+ n->status &= ~GIC_IRQ_GUEST_PENDING;
goto out;
}
}
- n = irq_to_pending(v, virtual_irq);
if ( !list_empty(&n->lr_queue) )
goto out;
@@ -679,6 +683,8 @@ static void gic_restore_pending_irqs(struct vcpu *v)
gic_set_lr(i, p->irq, GICH_LR_PENDING, p->priority);
list_del_init(&p->lr_queue);
set_bit(i, &this_cpu(lr_mask));
+ p->status |= GIC_IRQ_GUEST_VISIBLE;
+ p->status &= ~GIC_IRQ_GUEST_PENDING;
spin_unlock_irqrestore(&gic.lock, flags);
}
@@ -884,6 +890,7 @@ static void maintenance_interrupt(int irq, void *dev_id, struct cpu_user_regs *r
uint32_t lr;
struct vcpu *v = current;
uint64_t eisr = GICH[GICH_EISR0] | (((uint64_t) GICH[GICH_EISR1]) << 32);
+ unsigned long flags;
while ((i = find_next_bit((const long unsigned int *) &eisr,
64, i)) < 64) {
@@ -893,23 +900,13 @@ static void maintenance_interrupt(int irq, void *dev_id, struct cpu_user_regs *r
cpu = -1;
eoi = 0;
- spin_lock_irq(&gic.lock);
+ spin_lock(&v->arch.vgic.lock);
+ spin_lock_irqsave(&gic.lock, flags);
lr = GICH[GICH_LR + i];
virq = lr & GICH_LR_VIRTUAL_MASK;
GICH[GICH_LR + i] = 0;
clear_bit(i, &this_cpu(lr_mask));
- if ( !list_empty(&v->arch.vgic.lr_pending) ) {
- p = list_entry(v->arch.vgic.lr_pending.next, typeof(*p), lr_queue);
- gic_set_lr(i, p->irq, GICH_LR_PENDING, p->priority);
- list_del_init(&p->lr_queue);
- set_bit(i, &this_cpu(lr_mask));
- } else {
- gic_inject_irq_stop();
- }
- spin_unlock_irq(&gic.lock);
-
- spin_lock_irq(&v->arch.vgic.lock);
p = irq_to_pending(v, virq);
if ( p->desc != NULL ) {
p->desc->status &= ~IRQ_INPROGRESS;
@@ -918,8 +915,32 @@ static void maintenance_interrupt(int irq, void *dev_id, struct cpu_user_regs *r
eoi = 1;
pirq = p->desc->irq;
}
+ if ( p->status & GIC_IRQ_GUEST_PENDING )
+ {
+ gic_set_lr(i, p->irq, GICH_LR_PENDING, p->priority);
+ p->status &= ~GIC_IRQ_GUEST_PENDING;
+ i++;
+ spin_unlock_irqrestore(&gic.lock, flags);
+ spin_unlock(&v->arch.vgic.lock);
+ continue;
+ }
+
+ p->status &= ~GIC_IRQ_GUEST_VISIBLE;
list_del_init(&p->inflight);
- spin_unlock_irq(&v->arch.vgic.lock);
+
+ if ( !list_empty(&v->arch.vgic.lr_pending) ) {
+ p = list_entry(v->arch.vgic.lr_pending.next, typeof(*p), lr_queue);
+ gic_set_lr(i, p->irq, GICH_LR_PENDING, p->priority);
+ p->status |= GIC_IRQ_GUEST_VISIBLE;
+ p->status &= ~GIC_IRQ_GUEST_PENDING;
+ list_del_init(&p->lr_queue);
+ set_bit(i, &this_cpu(lr_mask));
+ } else {
+ gic_inject_irq_stop();
+ }
+
+ spin_unlock_irqrestore(&gic.lock, flags);
+ spin_unlock(&v->arch.vgic.lock);
if ( eoi ) {
/* this is not racy because we can't receive another irq of the
diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
index 2e4b11f..c71db37 100644
--- a/xen/arch/arm/vgic.c
+++ b/xen/arch/arm/vgic.c
@@ -365,6 +365,9 @@ static void vgic_enable_irqs(struct vcpu *v, uint32_t r, int n)
struct pending_irq *p;
unsigned int irq;
int i = 0;
+ unsigned long f;
+
+ spin_lock_irqsave(&v->arch.vgic.lock, f);
while ( (i = find_next_bit((const long unsigned int *) &r, 32, i)) < 32 ) {
irq = i + (32 * n);
@@ -373,6 +376,8 @@ static void vgic_enable_irqs(struct vcpu *v, uint32_t r, int n)
gic_set_guest_irq(v, irq, GICH_LR_PENDING, p->priority);
i++;
}
+
+ spin_unlock_irqrestore(&v->arch.vgic.lock, f);
}
static inline int is_vcpu_running(struct domain *d, int vcpuid)
@@ -672,8 +677,15 @@ void vgic_vcpu_inject_irq(struct vcpu *v, unsigned int irq, int virtual)
spin_lock_irqsave(&v->arch.vgic.lock, flags);
- /* vcpu offline or irq already pending */
- if (test_bit(_VPF_down, &v->pause_flags) || !list_empty(&n->inflight))
+ if ( !list_empty(&n->inflight) )
+ {
+ n->status |= GIC_IRQ_GUEST_PENDING;
+ spin_unlock_irqrestore(&v->arch.vgic.lock, flags);
+ return;
+ }
+
+ /* vcpu offline */
+ if ( test_bit(_VPF_down, &v->pause_flags) )
{
spin_unlock_irqrestore(&v->arch.vgic.lock, flags);
return;
@@ -682,6 +694,7 @@ void vgic_vcpu_inject_irq(struct vcpu *v, unsigned int irq, int virtual)
priority = byte_read(rank->ipriority[REG_RANK_INDEX(8, idx)], 0, byte);
n->irq = irq;
+ n->status = GIC_IRQ_GUEST_PENDING;
n->priority = priority;
if (!virtual)
n->desc = irq_to_desc(irq);
diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
index d5cae2e..5e7bb58 100644
--- a/xen/include/asm-arm/domain.h
+++ b/xen/include/asm-arm/domain.h
@@ -22,6 +22,35 @@ struct vgic_irq_rank {
struct pending_irq
{
int irq;
+ /*
+ * The following two states track the lifecycle of the guest irq.
+ * However because we are not sure and we don't want to track
+ * whether an irq added to an LR register is PENDING or ACTIVE, the
+ * following states are just an approximation.
+ *
+ * GIC_IRQ_GUEST_PENDING: the irq is asserted
+ *
+ * GIC_IRQ_GUEST_VISIBLE: the irq has been added to an LR register,
+ * therefore the guest is aware of it. From the guest point of view
+ * the irq can be pending (if the guest has not acked the irq yet)
+ * or active (after acking the irq).
+ *
+ * In order for the state machine to be fully accurate, for level
+ * interrupts, we should keep the GIC_IRQ_GUEST_PENDING state until
+ * the guest deactivates the irq. However because we are not sure
+ * when that happens, we simply remove the GIC_IRQ_GUEST_PENDING
+ * state when we add the irq to an LR register. We add it back when
+ * we receive another interrupt notification.
+ * Therefore it is possible to set GIC_IRQ_GUEST_PENDING while the
+ * irq is GIC_IRQ_GUEST_VISIBLE. We could also change the state of
+ * the guest irq in the LR register from active to active and
+ * pending, but for simplicity we simply inject a second irq after
+ * the guest EOIs the first one.
+ *
+ */
+#define GIC_IRQ_GUEST_PENDING (1<<1)
+#define GIC_IRQ_GUEST_VISIBLE (1<<2)
+ int status;
struct irq_desc *desc; /* only set it the irq corresponds to a physical irq */
uint8_t priority;
/* inflight is used to append instances of pending_irq to
--
1.7.10.4
next prev parent reply other threads:[~2013-12-06 17:26 UTC|newest]
Thread overview: 20+ messages / expand[flat|nested] mbox.gz Atom feed top
2013-12-06 17:26 [PATCH 0/6] interrupt handling fixes Stefano Stabellini
2013-12-06 17:26 ` [PATCH 1/6] xen/arm: Physical IRQ is not always equal to virtual IRQ Stefano Stabellini
2013-12-09 17:10 ` Ian Campbell
2013-12-06 17:26 ` Stefano Stabellini [this message]
2013-12-09 17:38 ` [PATCH 2/6] xen/arm: track the state of guest IRQs Ian Campbell
2013-12-10 13:02 ` Stefano Stabellini
2013-12-06 17:26 ` [PATCH 3/6] xen/arm: do not add a second irq to the LRs if one is already present Stefano Stabellini
2013-12-06 17:26 ` [PATCH 4/6] xen/arm: implement gic_irq_enable and gic_irq_disable Stefano Stabellini
2013-12-06 18:29 ` Julien Grall
2013-12-09 17:42 ` Ian Campbell
2013-12-06 17:26 ` [PATCH 5/6] xen/arm: Only enable physical IRQs when the guest asks Stefano Stabellini
2013-12-06 18:28 ` Julien Grall
2013-12-09 12:26 ` Stefano Stabellini
2013-12-09 17:44 ` Ian Campbell
2013-12-06 17:26 ` [PATCH 6/6] xen/arm: disable a physical IRQ when the guest disables the corresponding IRQ Stefano Stabellini
2013-12-06 17:56 ` Julien Grall
2013-12-09 12:37 ` Stefano Stabellini
2013-12-09 12:48 ` Ian Campbell
2013-12-06 18:24 ` Julien Grall
2013-12-09 12:50 ` Ian Campbell
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1386350808-13648-2-git-send-email-stefano.stabellini@eu.citrix.com \
--to=stefano.stabellini@eu.citrix.com \
--cc=ian.campbell@citrix.com \
--cc=julien.grall@linaro.org \
--cc=xen-devel@lists.xen.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).