* [PATCH v4 1/2] arm: support fewer LR registers than virtual irqs
@ 2012-02-23 17:00 Stefano Stabellini
2012-02-23 17:00 ` [PATCH v4 2/2] arm: replace list_del and INIT_LIST_HEAD with list_del_init Stefano Stabellini
2012-02-28 11:24 ` [PATCH v4 1/2] arm: support fewer LR registers than virtual irqs Ian Campbell
0 siblings, 2 replies; 6+ messages in thread
From: Stefano Stabellini @ 2012-02-23 17:00 UTC (permalink / raw)
To: xen-devel; +Cc: Stefano Stabellini, david.vrabel, Ian.Campbell
If the vgic needs to inject a virtual irq into the guest, but no free
LR registers are available, add the irq to a list and return.
Whenever an LR register becomes available we add the queued irq to it
and remove it from the list.
We use the gic lock to protect the list and the bitmask.
Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
---
xen/arch/arm/gic.c | 95 ++++++++++++++++++++++++++++++++----------
xen/include/asm-arm/domain.h | 1 +
2 files changed, 74 insertions(+), 22 deletions(-)
diff --git a/xen/arch/arm/gic.c b/xen/arch/arm/gic.c
index adc10bb..2ff7bce 100644
--- a/xen/arch/arm/gic.c
+++ b/xen/arch/arm/gic.c
@@ -25,6 +25,7 @@
#include <xen/sched.h>
#include <xen/errno.h>
#include <xen/softirq.h>
+#include <xen/list.h>
#include <asm/p2m.h>
#include <asm/domain.h>
@@ -45,6 +46,8 @@ static struct {
unsigned int lines;
unsigned int cpus;
spinlock_t lock;
+ uint64_t lr_mask;
+ struct list_head lr_pending;
} gic;
irq_desc_t irq_desc[NR_IRQS];
@@ -247,6 +250,8 @@ static void __cpuinit gic_hyp_init(void)
GICH[GICH_HCR] = GICH_HCR_EN;
GICH[GICH_MISR] = GICH_MISR_EOI;
+ gic.lr_mask = 0ULL;
+ INIT_LIST_HEAD(&gic.lr_pending);
}
/* Set up the GIC */
@@ -345,16 +350,50 @@ int __init setup_irq(unsigned int irq, struct irqaction *new)
return rc;
}
-void gic_set_guest_irq(unsigned int virtual_irq,
+static inline void gic_set_lr(int lr, unsigned int virtual_irq,
unsigned int state, unsigned int priority)
{
- BUG_ON(virtual_irq > nr_lrs);
- GICH[GICH_LR + virtual_irq] = state |
+ BUG_ON(lr > nr_lrs);
+ GICH[GICH_LR + lr] = state |
GICH_LR_MAINTENANCE_IRQ |
((priority >> 3) << GICH_LR_PRIORITY_SHIFT) |
((virtual_irq & GICH_LR_VIRTUAL_MASK) << GICH_LR_VIRTUAL_SHIFT);
}
+void gic_set_guest_irq(unsigned int virtual_irq,
+ unsigned int state, unsigned int priority)
+{
+ int i;
+ struct pending_irq *iter, *n;
+
+ spin_lock(&gic.lock);
+
+ if ( list_empty(&gic.lr_pending) )
+ {
+ i = find_first_zero_bit(&gic.lr_mask, sizeof(uint64_t));
+ if (i < sizeof(uint64_t)) {
+ set_bit(i, &gic.lr_mask);
+ gic_set_lr(i, virtual_irq, state, priority);
+ goto out;
+ }
+ }
+
+ n = irq_to_pending(current, virtual_irq);
+ list_for_each_entry ( iter, &gic.lr_pending, lr_link )
+ {
+ if ( iter->priority < priority )
+ {
+ list_add_tail(&n->lr_link, &iter->lr_link);
+ goto out;
+ }
+ }
+ list_add(&n->lr_link, &gic.lr_pending);
+
+out:
+ spin_unlock(&gic.lock);
+ return;
+}
+
void gic_inject_irq_start(void)
{
uint32_t hcr;
@@ -431,30 +470,42 @@ void gicv_setup(struct domain *d)
static void maintenance_interrupt(int irq, void *dev_id, struct cpu_user_regs *regs)
{
- int i, virq;
+ int i = 0, virq;
uint32_t lr;
uint64_t eisr = GICH[GICH_EISR0] | (((uint64_t) GICH[GICH_EISR1]) << 32);
- for ( i = 0; i < 64; i++ ) {
- if ( eisr & ((uint64_t)1 << i) ) {
- struct pending_irq *p;
-
- lr = GICH[GICH_LR + i];
- virq = lr & GICH_LR_VIRTUAL_MASK;
- GICH[GICH_LR + i] = 0;
-
- spin_lock(&current->arch.vgic.lock);
- p = irq_to_pending(current, virq);
- if ( p->desc != NULL ) {
- p->desc->status &= ~IRQ_INPROGRESS;
- GICC[GICC_DIR] = virq;
- }
+ while ((i = find_next_bit((const long unsigned int *) &eisr,
+ sizeof(eisr), i)) < sizeof(eisr)) {
+ struct pending_irq *p;
+
+ spin_lock(&gic.lock);
+ lr = GICH[GICH_LR + i];
+ virq = lr & GICH_LR_VIRTUAL_MASK;
+ GICH[GICH_LR + i] = 0;
+ clear_bit(i, &gic.lr_mask);
+
+ if ( !list_empty(gic.lr_pending.next) ) {
+ p = list_entry(gic.lr_pending.next, typeof(*p), lr_link);
+ gic_set_lr(i, p->irq, GICH_LR_PENDING, p->priority);
+ list_del_init(&p->lr_link);
+ set_bit(i, &gic.lr_mask);
+ } else {
gic_inject_irq_stop();
- list_del(&p->link);
- INIT_LIST_HEAD(&p->link);
- cpu_raise_softirq(current->processor, VGIC_SOFTIRQ);
- spin_unlock(&current->arch.vgic.lock);
}
+ spin_unlock(&gic.lock);
+
+ spin_lock(&current->arch.vgic.lock);
+ p = irq_to_pending(current, virq);
+ if ( p->desc != NULL ) {
+ p->desc->status &= ~IRQ_INPROGRESS;
+ GICC[GICC_DIR] = virq;
+ }
+ list_del(&p->link);
+ INIT_LIST_HEAD(&p->link);
+ cpu_raise_softirq(current->processor, VGIC_SOFTIRQ);
+ spin_unlock(&current->arch.vgic.lock);
+
+ i++;
}
}
diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
index 3372d14..75095ff 100644
--- a/xen/include/asm-arm/domain.h
+++ b/xen/include/asm-arm/domain.h
@@ -21,6 +21,7 @@ struct pending_irq
struct irq_desc *desc; /* only set it the irq corresponds to a physical irq */
uint8_t priority;
struct list_head link;
+ struct list_head lr_link;
};
struct arch_domain
--
1.7.2.5
^ permalink raw reply related [flat|nested] 6+ messages in thread
* [PATCH v4 2/2] arm: replace list_del and INIT_LIST_HEAD with list_del_init
2012-02-23 17:00 [PATCH v4 1/2] arm: support fewer LR registers than virtual irqs Stefano Stabellini
@ 2012-02-23 17:00 ` Stefano Stabellini
2012-02-28 11:24 ` [PATCH v4 1/2] arm: support fewer LR registers than virtual irqs Ian Campbell
1 sibling, 0 replies; 6+ messages in thread
From: Stefano Stabellini @ 2012-02-23 17:00 UTC (permalink / raw)
To: xen-devel; +Cc: Stefano Stabellini, david.vrabel, Ian.Campbell
Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
---
xen/arch/arm/gic.c | 3 +--
1 files changed, 1 insertions(+), 2 deletions(-)
diff --git a/xen/arch/arm/gic.c b/xen/arch/arm/gic.c
index 2ff7bce..397a148 100644
--- a/xen/arch/arm/gic.c
+++ b/xen/arch/arm/gic.c
@@ -500,8 +500,7 @@ static void maintenance_interrupt(int irq, void *dev_id, struct cpu_user_regs *r
p->desc->status &= ~IRQ_INPROGRESS;
GICC[GICC_DIR] = virq;
}
- list_del(&p->link);
- INIT_LIST_HEAD(&p->link);
+ list_del_init(&p->link);
cpu_raise_softirq(current->processor, VGIC_SOFTIRQ);
spin_unlock(&current->arch.vgic.lock);
--
1.7.2.5
^ permalink raw reply related [flat|nested] 6+ messages in thread
* Re: [PATCH v4 1/2] arm: support fewer LR registers than virtual irqs
2012-02-23 17:00 [PATCH v4 1/2] arm: support fewer LR registers than virtual irqs Stefano Stabellini
2012-02-23 17:00 ` [PATCH v4 2/2] arm: replace list_del and INIT_LIST_HEAD with list_del_init Stefano Stabellini
@ 2012-02-28 11:24 ` Ian Campbell
2012-02-29 18:34 ` Stefano Stabellini
1 sibling, 1 reply; 6+ messages in thread
From: Ian Campbell @ 2012-02-28 11:24 UTC (permalink / raw)
To: Stefano Stabellini; +Cc: xen-devel@lists.xensource.com, David Vrabel
On Thu, 2012-02-23 at 17:00 +0000, Stefano Stabellini wrote:
> If the vgic needs to inject a virtual irq into the guest, but no free
> LR registers are available, add the irq to a list and return.
There is an ordering constraint on this list. It should be mentioned
somewhere in a comment.
> Whenever an LR register becomes available we add the queued irq to it
> and remove it from the list.
> We use the gic lock to protect the list and the bitmask.
>
> Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
> ---
> xen/arch/arm/gic.c | 95 ++++++++++++++++++++++++++++++++----------
> xen/include/asm-arm/domain.h | 1 +
> 2 files changed, 74 insertions(+), 22 deletions(-)
>
> diff --git a/xen/arch/arm/gic.c b/xen/arch/arm/gic.c
> index adc10bb..2ff7bce 100644
> --- a/xen/arch/arm/gic.c
> +++ b/xen/arch/arm/gic.c
> @@ -25,6 +25,7 @@
> #include <xen/sched.h>
> #include <xen/errno.h>
> #include <xen/softirq.h>
> +#include <xen/list.h>
> #include <asm/p2m.h>
> #include <asm/domain.h>
>
> @@ -45,6 +46,8 @@ static struct {
> unsigned int lines;
> unsigned int cpus;
> spinlock_t lock;
> + uint64_t lr_mask;
> + struct list_head lr_pending;
> } gic;
>
> irq_desc_t irq_desc[NR_IRQS];
> @@ -247,6 +250,8 @@ static void __cpuinit gic_hyp_init(void)
>
> GICH[GICH_HCR] = GICH_HCR_EN;
> GICH[GICH_MISR] = GICH_MISR_EOI;
> + gic.lr_mask = 0ULL;
> + INIT_LIST_HEAD(&gic.lr_pending);
> }
>
> /* Set up the GIC */
> @@ -345,16 +350,50 @@ int __init setup_irq(unsigned int irq, struct irqaction *new)
> return rc;
> }
>
> -void gic_set_guest_irq(unsigned int virtual_irq,
> +static inline void gic_set_lr(int lr, unsigned int virtual_irq,
> unsigned int state, unsigned int priority)
> {
> - BUG_ON(virtual_irq > nr_lrs);
> - GICH[GICH_LR + virtual_irq] = state |
> + BUG_ON(lr > nr_lrs);
> + GICH[GICH_LR + lr] = state |
> GICH_LR_MAINTENANCE_IRQ |
> ((priority >> 3) << GICH_LR_PRIORITY_SHIFT) |
> ((virtual_irq & GICH_LR_VIRTUAL_MASK) << GICH_LR_VIRTUAL_SHIFT);
> }
>
> +void gic_set_guest_irq(unsigned int virtual_irq,
> + unsigned int state, unsigned int priority)
> +{
> + int i;
> + struct pending_irq *iter, *n;
> +
> + spin_lock(&gic.lock);
> +
> + if ( list_empty(&gic.lr_pending) )
This could really do with some comments.
The logic here is that if the "lr_pending" list has nothing in it then
there are no other IRQs queued waiting for an LR to become available and
therefore we can take a fast path and inject this IRQ directly?
> + {
> + i = find_first_zero_bit(&gic.lr_mask, sizeof(uint64_t));
> + if (i < sizeof(uint64_t)) {
What does find_first_zero_bit(0xffff.fff, 64) return?
> + set_bit(i, &gic.lr_mask);
> + gic_set_lr(i, virtual_irq, state, priority);
> + goto out;
> + }
> + }
> +
> + n = irq_to_pending(current, virtual_irq);
> + list_for_each_entry ( iter, &gic.lr_pending, lr_link )
> + {
> + if ( iter->priority < priority )
> + {
> + list_add_tail(&n->lr_link, &iter->lr_link);
> + goto out;
> + }
> + }
> + list_add(&n->lr_link, &gic.lr_pending);
Should this be list_add_tail?
Lower numbers are higher priority and therefore the list should be
ordered from low->high, since we want to pull the next interrupt off the
front of the list. I don't think the above implements that though.
If we imagine a list containing priorities [2,4,6] into which we are
inserting a priority 5 interrupt.
On the first iteration of the loop iter->priority == 2 so "if
(iter->priority < priority)" is true and we insert 5 after 2 in the list
resulting in [2,5,4,6].
> +
> +out:
> + spin_unlock(&gic.lock);
> + return;
> +}
> +
> void gic_inject_irq_start(void)
> {
> uint32_t hcr;
> @@ -431,30 +470,42 @@ void gicv_setup(struct domain *d)
>
> static void maintenance_interrupt(int irq, void *dev_id, struct cpu_user_regs *regs)
> {
> - int i, virq;
> + int i = 0, virq;
> uint32_t lr;
> uint64_t eisr = GICH[GICH_EISR0] | (((uint64_t) GICH[GICH_EISR1]) << 32);
>
> - for ( i = 0; i < 64; i++ ) {
> - if ( eisr & ((uint64_t)1 << i) ) {
> - struct pending_irq *p;
> -
> - lr = GICH[GICH_LR + i];
> - virq = lr & GICH_LR_VIRTUAL_MASK;
> - GICH[GICH_LR + i] = 0;
> -
> - spin_lock(&current->arch.vgic.lock);
> - p = irq_to_pending(current, virq);
> - if ( p->desc != NULL ) {
> - p->desc->status &= ~IRQ_INPROGRESS;
> - GICC[GICC_DIR] = virq;
> - }
> + while ((i = find_next_bit((const long unsigned int *) &eisr,
> + sizeof(eisr), i)) < sizeof(eisr)) {
> + struct pending_irq *p;
> +
> + spin_lock(&gic.lock);
> + lr = GICH[GICH_LR + i];
> + virq = lr & GICH_LR_VIRTUAL_MASK;
> + GICH[GICH_LR + i] = 0;
> + clear_bit(i, &gic.lr_mask);
> +
> + if ( !list_empty(gic.lr_pending.next) ) {
This seems odd, why not "list_empty(gic.lr_pending)"?
Otherwise won't you fail to inject anything if there is exactly one
entry on lr_pending?
> + p = list_entry(gic.lr_pending.next, typeof(*p), lr_link);
> + gic_set_lr(i, p->irq, GICH_LR_PENDING, p->priority);
> + list_del_init(&p->lr_link);
> + set_bit(i, &gic.lr_mask);
> + } else {
> gic_inject_irq_stop();
> - list_del(&p->link);
> - INIT_LIST_HEAD(&p->link);
> - cpu_raise_softirq(current->processor, VGIC_SOFTIRQ);
> - spin_unlock(&current->arch.vgic.lock);
> }
> + spin_unlock(&gic.lock);
> +
> + spin_lock(&current->arch.vgic.lock);
> + p = irq_to_pending(current, virq);
> + if ( p->desc != NULL ) {
> + p->desc->status &= ~IRQ_INPROGRESS;
> + GICC[GICC_DIR] = virq;
> + }
> + list_del(&p->link);
> + INIT_LIST_HEAD(&p->link);
> + cpu_raise_softirq(current->processor, VGIC_SOFTIRQ);
> + spin_unlock(&current->arch.vgic.lock);
> +
> + i++;
Does find_next_bit include or exclude the bit which you give it as the
3rd argument?
> }
> }
>
> diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
> index 3372d14..75095ff 100644
> --- a/xen/include/asm-arm/domain.h
> +++ b/xen/include/asm-arm/domain.h
> @@ -21,6 +21,7 @@ struct pending_irq
> struct irq_desc *desc; /* only set it the irq corresponds to a physical irq */
> uint8_t priority;
> struct list_head link;
> + struct list_head lr_link;
Calling this "link" or "lr_link" when it is used in the context of link
registers (e.g. link-register_link) just adds to the confusion around
these two lists IMHO, as does having one just called link and the other
prefix_link. Calling them foo_list, both with a descriptive prefix,
would be better, e.g. active_list and queued_list (or whatever you deem
appropriate to their semantics)
Even better would be if the invariant "always on either active or
pending lists, never both" were true -- in which case only one list_head
would be needed here.
What do we actually use "link" for? It is chained off vgic.inflight_irqs
but we seem to only use it for anything other than manipulating itself
in vgic_softirq where it is used as a binary "something to inject" flag
-- we could just as well maintain a nr_inflight variable if that were
the case, couldn't we?
Ian.
> };
>
> struct arch_domain
^ permalink raw reply [flat|nested] 6+ messages in thread
* Re: [PATCH v4 1/2] arm: support fewer LR registers than virtual irqs
2012-02-28 11:24 ` [PATCH v4 1/2] arm: support fewer LR registers than virtual irqs Ian Campbell
@ 2012-02-29 18:34 ` Stefano Stabellini
2012-03-01 8:55 ` Ian Campbell
0 siblings, 1 reply; 6+ messages in thread
From: Stefano Stabellini @ 2012-02-29 18:34 UTC (permalink / raw)
To: Ian Campbell
Cc: xen-devel@lists.xensource.com, David Vrabel, Stefano Stabellini
On Tue, 28 Feb 2012, Ian Campbell wrote:
> On Thu, 2012-02-23 at 17:00 +0000, Stefano Stabellini wrote:
> > If the vgic needs to inject a virtual irq into the guest, but no free
> > LR registers are available, add the irq to a list and return.
>
> There is an ordering constraint on this list. It should be mentioned
> somewhere in a comment.
right
> > Whenever an LR register becomes available we add the queued irq to it
> > and remove it from the list.
> > We use the gic lock to protect the list and the bitmask.
> >
> > Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
> > ---
> > xen/arch/arm/gic.c | 95 ++++++++++++++++++++++++++++++++----------
> > xen/include/asm-arm/domain.h | 1 +
> > 2 files changed, 74 insertions(+), 22 deletions(-)
> >
> > diff --git a/xen/arch/arm/gic.c b/xen/arch/arm/gic.c
> > index adc10bb..2ff7bce 100644
> > --- a/xen/arch/arm/gic.c
> > +++ b/xen/arch/arm/gic.c
> > @@ -25,6 +25,7 @@
> > #include <xen/sched.h>
> > #include <xen/errno.h>
> > #include <xen/softirq.h>
> > +#include <xen/list.h>
> > #include <asm/p2m.h>
> > #include <asm/domain.h>
> >
> > @@ -45,6 +46,8 @@ static struct {
> > unsigned int lines;
> > unsigned int cpus;
> > spinlock_t lock;
> > + uint64_t lr_mask;
> > + struct list_head lr_pending;
> > } gic;
> >
> > irq_desc_t irq_desc[NR_IRQS];
> > @@ -247,6 +250,8 @@ static void __cpuinit gic_hyp_init(void)
> >
> > GICH[GICH_HCR] = GICH_HCR_EN;
> > GICH[GICH_MISR] = GICH_MISR_EOI;
> > + gic.lr_mask = 0ULL;
> > + INIT_LIST_HEAD(&gic.lr_pending);
> > }
> >
> > /* Set up the GIC */
> > @@ -345,16 +350,50 @@ int __init setup_irq(unsigned int irq, struct irqaction *new)
> > return rc;
> > }
> >
> > -void gic_set_guest_irq(unsigned int virtual_irq,
> > +static inline void gic_set_lr(int lr, unsigned int virtual_irq,
> > unsigned int state, unsigned int priority)
> > {
> > - BUG_ON(virtual_irq > nr_lrs);
> > - GICH[GICH_LR + virtual_irq] = state |
> > + BUG_ON(lr > nr_lrs);
> > + GICH[GICH_LR + lr] = state |
> > GICH_LR_MAINTENANCE_IRQ |
> > ((priority >> 3) << GICH_LR_PRIORITY_SHIFT) |
> > ((virtual_irq & GICH_LR_VIRTUAL_MASK) << GICH_LR_VIRTUAL_SHIFT);
> > }
> >
> > +void gic_set_guest_irq(unsigned int virtual_irq,
> > + unsigned int state, unsigned int priority)
> > +{
> > + int i;
> > + struct pending_irq *iter, *n;
> > +
> > + spin_lock(&gic.lock);
> > +
> > + if ( list_empty(&gic.lr_pending) )
>
> This could really do with some comments.
>
> The logic here is that if the "lr_pending" list has nothing in it then
> there are no other IRQs queued waiting for an LR to become available and
> therefore we can take a fast path and inject this IRQ directly?
Yes: if there are no IRQs waiting for an LR it means that an LR might
actually be free. If we have IRQs queued in lr_pending then there is no
point in checking for a free LR.
> > + {
> > + i = find_first_zero_bit(&gic.lr_mask, sizeof(uint64_t));
> > + if (i < sizeof(uint64_t)) {
>
> What does find_first_zero_bit(0xffff.fff, 64) return?
0
> > + set_bit(i, &gic.lr_mask);
> > + gic_set_lr(i, virtual_irq, state, priority);
> > + goto out;
> > + }
> > + }
> > +
> > + n = irq_to_pending(current, virtual_irq);
> > + list_for_each_entry ( iter, &gic.lr_pending, lr_link )
> > + {
> > + if ( iter->priority < priority )
> > + {
> > + list_add_tail(&n->lr_link, &iter->lr_link);
> > + goto out;
> > + }
> > + }
> > + list_add(&n->lr_link, &gic.lr_pending);
>
> Should this be list_add_tail?
yes
> Lower numbers are higher priority and therefore the list should be
> ordered from low->high, since we want to pull the next interrupt off the
> front of the list. I don't think the above implements that though.
Yes, the priority test is inverted.
I think we want:
iter->priority > priority
> If we imagine a list containing priorities [2,4,6] into which we are
> inserting a priority 5 interrupt.
>
> On the first iteration of the loop iter->priority == 2 so "if
> (iter->priority < priority)" is true and we insert 5 after 2 in the list
> resulting in [2,5,4,6].
Actually list_add_tail adds the entry before the head, not after.
So if we have the right priority check and the list is [2,4,6], adding 5
should result in [2,4,5,6].
> > +
> > +out:
> > + spin_unlock(&gic.lock);
> > + return;
> > +}
> > +
> > void gic_inject_irq_start(void)
> > {
> > uint32_t hcr;
> > @@ -431,30 +470,42 @@ void gicv_setup(struct domain *d)
> >
> > static void maintenance_interrupt(int irq, void *dev_id, struct cpu_user_regs *regs)
> > {
> > - int i, virq;
> > + int i = 0, virq;
> > uint32_t lr;
> > uint64_t eisr = GICH[GICH_EISR0] | (((uint64_t) GICH[GICH_EISR1]) << 32);
> >
> > - for ( i = 0; i < 64; i++ ) {
> > - if ( eisr & ((uint64_t)1 << i) ) {
> > - struct pending_irq *p;
> > -
> > - lr = GICH[GICH_LR + i];
> > - virq = lr & GICH_LR_VIRTUAL_MASK;
> > - GICH[GICH_LR + i] = 0;
> > -
> > - spin_lock(&current->arch.vgic.lock);
> > - p = irq_to_pending(current, virq);
> > - if ( p->desc != NULL ) {
> > - p->desc->status &= ~IRQ_INPROGRESS;
> > - GICC[GICC_DIR] = virq;
> > - }
> > + while ((i = find_next_bit((const long unsigned int *) &eisr,
> > + sizeof(eisr), i)) < sizeof(eisr)) {
>
> > + struct pending_irq *p;
> > +
> > + spin_lock(&gic.lock);
> > + lr = GICH[GICH_LR + i];
> > + virq = lr & GICH_LR_VIRTUAL_MASK;
> > + GICH[GICH_LR + i] = 0;
> > + clear_bit(i, &gic.lr_mask);
> > +
> > + if ( !list_empty(gic.lr_pending.next) ) {
>
> This seems odd, why not "list_empty(gic.lr_pending)"?
>
> Otherwise won't you fail to inject anything if there is exactly one
> entry on lr_pending?
You are correct, that is a mistake.
> > + p = list_entry(gic.lr_pending.next, typeof(*p), lr_link);
> > + gic_set_lr(i, p->irq, GICH_LR_PENDING, p->priority);
> > + list_del_init(&p->lr_link);
> > + set_bit(i, &gic.lr_mask);
> > + } else {
> > gic_inject_irq_stop();
> > - list_del(&p->link);
> > - INIT_LIST_HEAD(&p->link);
> > - cpu_raise_softirq(current->processor, VGIC_SOFTIRQ);
> > - spin_unlock(&current->arch.vgic.lock);
> > }
> > + spin_unlock(&gic.lock);
> > +
> > + spin_lock(&current->arch.vgic.lock);
> > + p = irq_to_pending(current, virq);
> > + if ( p->desc != NULL ) {
> > + p->desc->status &= ~IRQ_INPROGRESS;
> > + GICC[GICC_DIR] = virq;
> > + }
> > + list_del(&p->link);
> > + INIT_LIST_HEAD(&p->link);
> > + cpu_raise_softirq(current->processor, VGIC_SOFTIRQ);
> > + spin_unlock(&current->arch.vgic.lock);
> > +
> > + i++;
>
> Does find_next_bit include or exclude the bit which you give it as the
> 3rd argument?
include
> > }
> > }
> >
> > diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
> > index 3372d14..75095ff 100644
> > --- a/xen/include/asm-arm/domain.h
> > +++ b/xen/include/asm-arm/domain.h
> > @@ -21,6 +21,7 @@ struct pending_irq
> > struct irq_desc *desc; /* only set it the irq corresponds to a physical irq */
> > uint8_t priority;
> > struct list_head link;
> > + struct list_head lr_link;
>
> Calling this "link" or "lr_link" when it is used in the context of link
> registers (e.g. link-register_link) just adds to the confusion around
> these two lists IMHO, as does having one just called link and the other
> prefix_link. Calling them foo_list, both with a descriptive prefix,
> would be better, e.g. active_list and queued_list (or whatever you deem
> appropriate to their semantics)
>
> Even better would be if the invariant "always on either active or
> pending lists, never both" were true -- in which case only one list_head
> would be needed here.
I'll try to come up with better descriptive names and comments.
> What do we actually use "link" for? It is chained off vgic.inflight_irqs
> but we seem to only use it for anything other than manipulating itself
> in vgic_softirq where it is used as a binary "something to inject" flag
> -- we could just as well maintain a nr_inflight variable if that were
> the case, couldn't we?
It is used by the vgic to keep track of what IRQs have been injected
from its point of view. These IRQs might have been injected and
currently resident in an LR register, or they might be queued in
lr_pending. I'll write a comment to better explain the life cycle of
an IRQ.
^ permalink raw reply [flat|nested] 6+ messages in thread
* Re: [PATCH v4 1/2] arm: support fewer LR registers than virtual irqs
2012-02-29 18:34 ` Stefano Stabellini
@ 2012-03-01 8:55 ` Ian Campbell
2012-03-01 11:57 ` Stefano Stabellini
0 siblings, 1 reply; 6+ messages in thread
From: Ian Campbell @ 2012-03-01 8:55 UTC (permalink / raw)
To: Stefano Stabellini; +Cc: xen-devel@lists.xensource.com, David Vrabel
On Wed, 2012-02-29 at 18:34 +0000, Stefano Stabellini wrote:
> > > + {
> > > + i = find_first_zero_bit(&gic.lr_mask, sizeof(uint64_t));
> > > + if (i < sizeof(uint64_t)) {
> >
> > What does find_first_zero_bit(0xffff.fff, 64) return?
>
> 0
So the if is wrong?
What does it return for 0x0? I'd have expected it to return 0 too (the
zeroth bit). I guess the response must be 1-based? In which case don't
you need to subtract 1 somewhere so that bit 1 being clear leads you to
use LR0?
Also, it occurs to me that you need to check i against the number of LRs
too -- otherwise if you have 4 LRs all in use then mask is 0xF but
find_first_zero_bit(0xF, sizeof(...) will return 5 (or is it six?) and
you will try and deploy the non-existent 5th LR.
I'm a bit concerned that the #irqs > #LRs code paths probably haven't
been run, even though you tested on a system, with only 4 LRs. Perhaps
you could artificially inject a bunch of unused/spurious interrupts at
the same time e.g. from a keyhandler?
Also there is an option in the model (at build time for sure, perhaps at
runtime too via -C parameters) to reduce the number of LRs, perhaps
setting it to 1 or 2 would help exercise these code paths a bit more
than 4? We are unlikely to be hitting 5 concurrent pending interrupts
with our current uses.
> > If we imagine a list containing priorities [2,4,6] into which we are
> > inserting a priority 5 interrupt.
> >
> > On the first iteration of the loop iter->priority == 2 so "if
> > (iter->priority < priority)" is true and we insert 5 after 2 in the list
> > resulting in [2,5,4,6].
>
> Actually list_add_tail adds the entry before the head, not after.
Oh, yes, it treats the thing you give it as the head and therefore the
tail is == prev because the list is circular. Subtle!
> So if we have the right priority check and the list is [2,4,6], adding 5
> should result in [2,4,5,6].
Indeed it _should_ ;-)
[...]
> > Calling this "link" or "lr_link" when it is used in the context of link
> > registers (e.g. link-register_link) just adds to the confusion around
> > these two lists IMHO, as does having one just called link and the other
> > prefix_link. Calling them foo_list, both with a descriptive prefix,
> > would be better, e.g. active_list and queued_list (or whatever you deem
> > appropriate to their semantics)
> >
> > Even better would be if the invariant "always on either active or
> > pending lists, never both" were true -- in which case only one list_head
> > would be needed here.
>
> I'll try to come up with better descriptive names and comments.
Thanks.
> > What do we actually use "link" for? It is chained off vgic.inflight_irqs
> > but we seem to only use it for anything other than manipulating itself
> > in vgic_softirq where it is used as a binary "something to inject" flag
> > -- we could just as well maintain a nr_inflight variable if that were
> > the case, couldn't we?
>
> It is used by the vgic to keep track of what IRQs have been injected
> from its point of view. These IRQs might have been injected and
> currently resident in an LR register, or they might be queued in
> lr_pending. I'll write a comment to better explain the life cycle of
> an IRQ.
Awesome, cheers!
Ian.
^ permalink raw reply [flat|nested] 6+ messages in thread
* Re: [PATCH v4 1/2] arm: support fewer LR registers than virtual irqs
2012-03-01 8:55 ` Ian Campbell
@ 2012-03-01 11:57 ` Stefano Stabellini
0 siblings, 0 replies; 6+ messages in thread
From: Stefano Stabellini @ 2012-03-01 11:57 UTC (permalink / raw)
To: Ian Campbell
Cc: xen-devel@lists.xensource.com, David Vrabel, Stefano Stabellini
On Thu, 1 Mar 2012, Ian Campbell wrote:
> On Wed, 2012-02-29 at 18:34 +0000, Stefano Stabellini wrote:
>
> > > > + {
> > > > + i = find_first_zero_bit(&gic.lr_mask, sizeof(uint64_t));
> > > > + if (i < sizeof(uint64_t)) {
> > >
> > > What does find_first_zero_bit(0xffff.fff, 64) return?
> >
> > 0
>
> So the if is wrong?
>
> What does it return for 0x0? I'd have expected it to return 0 too (the
> zeroth bit). I guess the response must be 1-based? In which case don't
> you need to subtract 1 somewhere so that bit 1 being clear leads you to
> use LR0?
Oops, sorry, I misread your previous comment. It would return
sizeof(uint64_t), hence the if should work as expected.
> Also, it occurs to me that you need to check i against the number of LRs
> too -- otherwise if you have 4 LRs all in use then mask is 0xF but
> find_first_zero_bit(0xF, sizeof(...) will return 5 (or is it six?) and
> you will try and deploy the non-existent 5th LR.
Yes, I realized that yesterday while I was reading through the code again.
We need to pass nr_lrs instead of sizeof(uint64_t) and check against it.
> I'm a bit concerned that the #irqs > #LRs code paths probably haven't
> been run, even though you tested on a system, with only 4 LRs. Perhaps
> you could artificially inject a bunch of unused/spurious interrupts at
> the same time e.g. from a keyhandler?
Actually after the few fixes discussed in this email thread I limited
the number of LRs to 1, and I can see IRQs being added/removed from
pending_irq the way they are supposed to.
> Also there is an option in the model (at build time for sure, perhaps at
> runtime too via -C parameters) to reduce the number of LRs, perhaps
> setting it to 1 or 2 would help exercise these code paths a bit more
> than 4? We are unlikely to be hitting 5 concurrent pending interrupts
> with our current uses.
indeed
^ permalink raw reply [flat|nested] 6+ messages in thread
end of thread, other threads:[~2012-03-01 11:57 UTC | newest]
Thread overview: 6+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2012-02-23 17:00 [PATCH v4 1/2] arm: support fewer LR registers than virtual irqs Stefano Stabellini
2012-02-23 17:00 ` [PATCH v4 2/2] arm: replace list_del and INIT_LIST_HEAD with list_del_init Stefano Stabellini
2012-02-28 11:24 ` [PATCH v4 1/2] arm: support fewer LR registers than virtual irqs Ian Campbell
2012-02-29 18:34 ` Stefano Stabellini
2012-03-01 8:55 ` Ian Campbell
2012-03-01 11:57 ` Stefano Stabellini
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).