[PATCH] x86/HPET: deal with event having expired while interrupt was masked
From: Jan Beulich @ 2013-03-20 14:45 UTC
  To: xen-devel; +Cc: Sander Eikelenboom, Keir Fraser

Commit 2d8a282 ("x86/HPET: fix FSB interrupt masking") may cause the
HPET event to occur while its interrupt is masked. In that case we need
to "manually" deliver the event.

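To illustrate the fix (a minimal sketch of the pattern, mirroring the
check added to set_channel_irq_affinity() in the hunks below, not code
to apply verbatim):

    /* While the MSI is masked, an expired comparator raises no
     * interrupt, so check for - and deliver - a lost event by hand. */
    hpet_msi_mask(desc);
    hpet_msi_set_affinity(desc, cpumask_of(ch->cpu));
    hpet_msi_unmask(desc);
    ...
    if ( ch->event_handler && ch->next_event < NOW() )
        ch->event_handler(ch);
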
Unfortunately this requires the locking to be changed: For one, it was
always bogus for handle_hpet_broadcast() to use spin_unlock_irq() - the
function is called from an interrupt handler, and hence must not
blindly re-enable interrupts (that is the job of the generic interrupt
handling code). And with the event handler wanting to acquire the lock
for two of its code regions, we must not enter it with the lock already
held. Hence move the locking into hpet_{attach,detach}_channel(),
permitting the lock to be dropped by set_channel_irq_affinity() (which
both of those functions tail-call).

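For reference, the pattern being switched to (a generic sketch, not one
of the hunks below): code that may run with interrupts already disabled
saves and restores the interrupt state instead of forcing it back on:

    unsigned long flags;

    spin_lock_irqsave(&ch->lock, flags);      /* records prior IRQ state */
    /* ... critical section ... */
    spin_unlock_irqrestore(&ch->lock, flags); /* restores it; unlike
                                               * spin_unlock_irq(), this
                                               * never enables IRQs that
                                               * were disabled on entry */
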
Reported-by: Sander Eikelenboom <linux@eikelenboom.it>
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Tested-by: Sander Eikelenboom <linux@eikelenboom.it>

--- a/xen/arch/x86/hpet.c
+++ b/xen/arch/x86/hpet.c
@@ -171,13 +171,14 @@ static void handle_hpet_broadcast(struct
     cpumask_t mask;
     s_time_t now, next_event;
     unsigned int cpu;
+    unsigned long flags;
 
-    spin_lock_irq(&ch->lock);
+    spin_lock_irqsave(&ch->lock, flags);
 
 again:
     ch->next_event = STIME_MAX;
 
-    spin_unlock_irq(&ch->lock);
+    spin_unlock_irqrestore(&ch->lock, flags);
 
     next_event = STIME_MAX;
     cpumask_clear(&mask);
@@ -205,13 +206,13 @@ again:
 
     if ( next_event != STIME_MAX )
     {
-        spin_lock_irq(&ch->lock);
+        spin_lock_irqsave(&ch->lock, flags);
 
         if ( next_event < ch->next_event &&
              reprogram_hpet_evt_channel(ch, next_event, now, 0) )
             goto again;
 
-        spin_unlock_irq(&ch->lock);
+        spin_unlock_irqrestore(&ch->lock, flags);
     }
 }
 
@@ -460,7 +461,7 @@ static struct hpet_event_channel *hpet_g
     return ch;
 }
 
-static void set_channel_irq_affinity(const struct hpet_event_channel *ch)
+static void set_channel_irq_affinity(struct hpet_event_channel *ch)
 {
     struct irq_desc *desc = irq_to_desc(ch->msi.irq);
 
@@ -470,12 +471,19 @@ static void set_channel_irq_affinity(con
     hpet_msi_set_affinity(desc, cpumask_of(ch->cpu));
     hpet_msi_unmask(desc);
     spin_unlock(&desc->lock);
+
+    spin_unlock(&ch->lock);
+
+    /* We may have missed an interrupt due to the temporary masking. */
+    if ( ch->event_handler && ch->next_event < NOW() )
+        ch->event_handler(ch);
 }
 
 static void hpet_attach_channel(unsigned int cpu,
                                 struct hpet_event_channel *ch)
 {
-    ASSERT(spin_is_locked(&ch->lock));
+    ASSERT(!local_irq_is_enabled());
+    spin_lock(&ch->lock);
 
     per_cpu(cpu_bc_channel, cpu) = ch;
 
@@ -484,31 +492,34 @@ static void hpet_attach_channel(unsigned
         ch->cpu = cpu;
 
     if ( ch->cpu != cpu )
-        return;
-
-    set_channel_irq_affinity(ch);
+        spin_unlock(&ch->lock);
+    else
+        set_channel_irq_affinity(ch);
 }
 
 static void hpet_detach_channel(unsigned int cpu,
                                 struct hpet_event_channel *ch)
 {
-    ASSERT(spin_is_locked(&ch->lock));
+    spin_lock_irq(&ch->lock);
+
     ASSERT(ch == per_cpu(cpu_bc_channel, cpu));
 
     per_cpu(cpu_bc_channel, cpu) = NULL;
 
     if ( cpu != ch->cpu )
-        return;
-
-    if ( cpumask_empty(ch->cpumask) )
+        spin_unlock_irq(&ch->lock);
+    else if ( cpumask_empty(ch->cpumask) )
     {
         ch->cpu = -1;
         clear_bit(HPET_EVT_USED_BIT, &ch->flags);
-        return;
+        spin_unlock_irq(&ch->lock);
+    }
+    else
+    {
+        ch->cpu = cpumask_first(ch->cpumask);
+        set_channel_irq_affinity(ch);
+        local_irq_enable();
     }
-
-    ch->cpu = cpumask_first(ch->cpumask);
-    set_channel_irq_affinity(ch);
 }
 
 #include <asm/mc146818rtc.h>
@@ -686,11 +697,7 @@ void hpet_broadcast_enter(void)
     ASSERT(!local_irq_is_enabled());
 
     if ( !(ch->flags & HPET_EVT_LEGACY) )
-    {
-        spin_lock(&ch->lock);
         hpet_attach_channel(cpu, ch);
-        spin_unlock(&ch->lock);
-    }
 
     /* Disable LAPIC timer interrupts. */
     disable_APIC_timer();
@@ -722,11 +729,7 @@ void hpet_broadcast_exit(void)
     cpumask_clear_cpu(cpu, ch->cpumask);
 
     if ( !(ch->flags & HPET_EVT_LEGACY) )
-    {
-        spin_lock_irq(&ch->lock);
         hpet_detach_channel(cpu, ch);
-        spin_unlock_irq(&ch->lock);
-    }
 }
 
 int hpet_broadcast_is_available(void)

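The resulting lock discipline, condensed into an illustrative sketch of
the attach path above (not an additional change): the caller takes
ch->lock, and on the path that re-targets the MSI the lock is handed off
to set_channel_irq_affinity(), which drops it before invoking the event
handler:

    spin_lock(&ch->lock);
    /* ... update per-CPU broadcast bookkeeping ... */
    if ( ch->cpu != cpu )
        spin_unlock(&ch->lock);       /* no MSI re-targeting needed */
    else
        set_channel_irq_affinity(ch); /* re-targets the MSI, drops
                                       * ch->lock, then delivers any
                                       * event missed while masked */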

