xen-devel.lists.xenproject.org archive mirror
* [PATCHv3] Move IOMMU faults handling into softirq for AMD-Vi.
@ 2012-01-18 15:56 Dario Faggioli
From: Dario Faggioli @ 2012-01-18 15:56 UTC
  To: Keir Fraser
  Cc: Wei Wang, allen.m.kay@intel.com, xen-devel, Tim Deegan,
	Jan Beulich


Dealing with interrupts from AMD-Vi IOMMU(s) is deferred to a softirq-tasklet,
raised by the actual IRQ handler. To avoid more interrupts being generated
(because of further faults), they must be masked in the IOMMU within the
low-level IRQ handler and re-enabled in the tasklet body. Note that this may
cause the log to overflow, but none of the existing entries will be overwritten.
                                 
Signed-off-by: Dario Faggioli <dario.faggioli@citrix.com>

diff -r 15ab61865ecb xen/drivers/passthrough/amd/iommu_init.c
--- a/xen/drivers/passthrough/amd/iommu_init.c	Tue Jan 17 12:40:52 2012 +0000
+++ b/xen/drivers/passthrough/amd/iommu_init.c	Wed Jan 18 13:01:23 2012 +0100
@@ -32,6 +32,8 @@
 
 static int __initdata nr_amd_iommus;
 
+static struct tasklet amd_iommu_irq_tasklet;
+
 unsigned short ivrs_bdf_entries;
 static struct radix_tree_root ivrs_maps;
 struct list_head amd_iommu_head;
@@ -689,14 +691,48 @@ static void iommu_check_ppr_log(struct a
     spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
+static void do_amd_iommu_irq(unsigned long data)
+{
+    struct amd_iommu *iommu;
+
+    if ( !iommu_found() )
+    {
+        AMD_IOMMU_DEBUG("no device found, something must be very wrong!\n");
+        return;
+    }
+
+    /*
+     * No matter where the interrupt came from, check all the IOMMUs
+     * present in the system. This allows for having just one tasklet
+     * (instead of one per IOMMU).
+     */
+    for_each_amd_iommu ( iommu ) {
+        iommu_check_event_log(iommu);
+
+        if ( iommu->ppr_log.buffer != NULL )
+            iommu_check_ppr_log(iommu);
+    }
+}
+
 static void iommu_interrupt_handler(int irq, void *dev_id,
                                     struct cpu_user_regs *regs)
 {
+    u32 entry;
+    unsigned long flags;
     struct amd_iommu *iommu = dev_id;
-    iommu_check_event_log(iommu);
 
-    if ( iommu->ppr_log.buffer != NULL )
-        iommu_check_ppr_log(iommu);
+    spin_lock_irqsave(&iommu->lock, flags);
+
+    /* Silence interrupts from both event and PPR logging */
+    entry = readl(iommu->mmio_base + IOMMU_STATUS_MMIO_OFFSET);
+    iommu_clear_bit(&entry, IOMMU_STATUS_EVENT_LOG_INT_SHIFT);
+    iommu_clear_bit(&entry, IOMMU_STATUS_PPR_LOG_INT_SHIFT);
+    writel(entry, iommu->mmio_base + IOMMU_STATUS_MMIO_OFFSET);
+
+    spin_unlock_irqrestore(&iommu->lock, flags);
+
+    /* It is the tasklet that will clear the logs and re-enable interrupts */
+    tasklet_schedule(&amd_iommu_irq_tasklet);
 }
 
 static int __init set_iommu_interrupt_handler(struct amd_iommu *iommu)
@@ -876,6 +912,8 @@ static int __init amd_iommu_init_one(str
     printk("AMD-Vi: IOMMU %d Enabled.\n", nr_amd_iommus );
     nr_amd_iommus++;
 
+    softirq_tasklet_init(&amd_iommu_irq_tasklet, do_amd_iommu_irq, 0);
+
     return 0;
 
 error_out:
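
The re-enable side is not visible in the hunk above: it is left to
iommu_check_event_log() / iommu_check_ppr_log(), called from the tasklet, to
drain their log and turn the corresponding interrupt source back on. Just to
make the pairing with the masking done in iommu_interrupt_handler() explicit,
here is a minimal sketch of that step; the helper name, and the assumption
that an iommu_set_bit() counterpart to the iommu_clear_bit() used above is
available, are illustrative only and not part of this patch:

/*
 * Illustrative only, not part of the patch: re-enable one log's
 * interrupt source once the tasklet has drained that log.
 */
static void iommu_reenable_log_interrupt(struct amd_iommu *iommu,
                                         unsigned int int_shift)
{
    u32 entry;
    unsigned long flags;

    spin_lock_irqsave(&iommu->lock, flags);

    /* Set back the bit cleared in iommu_interrupt_handler(). */
    entry = readl(iommu->mmio_base + IOMMU_STATUS_MMIO_OFFSET);
    iommu_set_bit(&entry, int_shift);
    writel(entry, iommu->mmio_base + IOMMU_STATUS_MMIO_OFFSET);

    spin_unlock_irqrestore(&iommu->lock, flags);
}

Where exactly this happens is an implementation detail of the two check
functions; what matters is that the bit is only set back after the log has
been processed, so a full log cannot keep re-raising the interrupt while the
tasklet is still pending.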

-- 
<<This happens because I choose it to happen!>> (Raistlin Majere)
-------------------------------------------------------------------
Dario Faggioli, http://retis.sssup.it/people/faggioli
Senior Software Engineer, Citrix Systems R&D Ltd., Cambridge (UK)
PhD Candidate, ReTiS Lab, Scuola Superiore Sant'Anna, Pisa (Italy)


* Re: [PATCHv2 2 of 2] Move IOMMU faults handling into softirq for AMD-Vi.
@ 2012-01-17 11:17 Keir Fraser
From: Keir Fraser @ 2012-01-17 11:17 UTC
  To: Dario Faggioli, xen-devel
  Cc: Wei Wang2, allen.m.kay@intel.com, Tim Deegan, Jan Beulich

On 05/01/2012 15:27, "Dario Faggioli" <raistlin@linux.it> wrote:

> Dealing with interrupts from AMD-Vi IOMMU(s) is deferred to a softirq-tasklet,
> raised by the actual IRQ handler. To avoid more interrupts being generated
> (because of further faults), they must be masked in the IOMMU within the
> low-level IRQ handler and re-enabled in the tasklet body. Note that this may
> cause the log to overflow, but none of the existing entries will be overwritten.
> 
> Signed-off-by: Dario Faggioli <dario.faggioli@citrix.com>

This patch needs fixing to apply to xen-unstable tip. Please do that and
resubmit.

 -- Keir

> diff -r 3cb587bb34d0 xen/drivers/passthrough/amd/iommu_init.c
> --- a/xen/drivers/passthrough/amd/iommu_init.c Thu Jan 05 15:12:35 2012 +0100
> +++ b/xen/drivers/passthrough/amd/iommu_init.c Thu Jan 05 15:14:03 2012 +0100
> @@ -32,6 +32,8 @@
>  
>  static int __initdata nr_amd_iommus;
>  
> +static struct tasklet amd_iommu_fault_tasklet;
> +
>  unsigned short ivrs_bdf_entries;
>  static struct radix_tree_root ivrs_maps;
>  struct list_head amd_iommu_head;
> @@ -522,12 +524,10 @@ static void parse_event_log_entry(struct
>      }
>  }
>  
> -static void amd_iommu_page_fault(int irq, void *dev_id,
> -                             struct cpu_user_regs *regs)
> +static void __do_amd_iommu_page_fault(struct amd_iommu *iommu)
>  {
>      u32 entry;
>      unsigned long flags;
> -    struct amd_iommu *iommu = dev_id;
>  
>      spin_lock_irqsave(&iommu->lock, flags);
>      amd_iommu_read_event_log(iommu);
> @@ -546,6 +546,45 @@ static void amd_iommu_page_fault(int irq
>      spin_unlock_irqrestore(&iommu->lock, flags);
>  }
>  
> +static void do_amd_iommu_page_fault(unsigned long data)
> +{
> +    struct amd_iommu *iommu;
> +
> +    if ( !iommu_found() )
> +    {
> +        AMD_IOMMU_DEBUG("no device found, something must be very wrong!\n");
> +        return;
> +    }
> +
> +    /*
> +     * No matter where the interrupt came from, check all the
> +     * IOMMUs present in the system. This allows for having just one
> +     * tasklet (instead of one per IOMMU) and should be more than
> +     * fine, considering how rare faults should be.
> +     */
> +    for_each_amd_iommu ( iommu )
> +        __do_amd_iommu_page_fault(iommu);
> +}
> +
> +static void amd_iommu_page_fault(int irq, void *dev_id,
> +                             struct cpu_user_regs *regs)
> +{
> +    u32 entry;
> +    unsigned long flags;
> +    struct amd_iommu *iommu = dev_id;
> +
> +    /* Silence interrupts; the tasklet will re-enable them. */
> +    spin_lock_irqsave(&iommu->lock, flags);
> +    entry = readl(iommu->mmio_base + IOMMU_STATUS_MMIO_OFFSET);
> +    iommu_clear_bit(&entry, IOMMU_STATUS_EVENT_LOG_INT_SHIFT);
> +    writel(entry, iommu->mmio_base+IOMMU_STATUS_MMIO_OFFSET);
> +    spin_unlock_irqrestore(&iommu->lock, flags);
> +
> +    /* Flag the tasklet as runnable so that it can execute, clear
> +     * the log and re-enable interrupts. */
> +    tasklet_schedule(&amd_iommu_fault_tasklet);
> +}
> +
>  static int __init set_iommu_interrupt_handler(struct amd_iommu *iommu)
>  {
>      int irq, ret;
> @@ -884,6 +923,8 @@ int __init amd_iommu_init(void)
>          if ( amd_iommu_init_one(iommu) != 0 )
>              goto error_out;
>  
> +    softirq_tasklet_init(&amd_iommu_fault_tasklet, do_amd_iommu_page_fault, 0);
> +
>      return 0;
>  
>  error_out:

