xen-devel.lists.xenproject.org archive mirror
 help / color / mirror / Atom feed
* [PATCH] AMD IOMMU: Enable HPET broadcast msi remapping
@ 2012-10-23 13:56 Wei Wang
  2012-10-24 13:11 ` Jan Beulich
  0 siblings, 1 reply; 2+ messages in thread
From: Wei Wang @ 2012-10-23 13:56 UTC (permalink / raw)
  To: Jan Beulich; +Cc: xen-devel@lists.xensource.com

[-- Attachment #1: Type: text/plain, Size: 89 bytes --]

Hi Jan,

This patch enables HPET MSI remapping for the AMD IOMMU. Please review.
Thanks,
Wei


[-- Attachment #2: 0001-AMD-IOMMU-Enable-HPET-broadcast-msi-remapping.patch --]
[-- Type: text/plain, Size: 6036 bytes --]

>From 79dc3ed42e64c66fb261a5bad2834718330fe767 Mon Sep 17 00:00:00 2001
From: Wei Wang <wei.wang2@amd.com>
Date: Tue, 23 Oct 2012 15:52:34 +0200
Subject: [PATCH] AMD IOMMU: Enable HPET broadcast msi remapping

This patch enables hpet msi remapping for amd iommu.

Signed-off-by: Wei Wang <wei.wang2@amd.com>
---
 xen/drivers/passthrough/amd/iommu_acpi.c      |   20 +++++++++++++-
 xen/drivers/passthrough/amd/iommu_intr.c      |   34 +++++++++++++++++++------
 xen/drivers/passthrough/amd/pci_amd_iommu.c   |    1 +
 xen/include/asm-x86/hvm/svm/amd-iommu-proto.h |    6 ++++
 4 files changed, 51 insertions(+), 10 deletions(-)

diff --git a/xen/drivers/passthrough/amd/iommu_acpi.c b/xen/drivers/passthrough/amd/iommu_acpi.c
index e217d45..6606014 100644
--- a/xen/drivers/passthrough/amd/iommu_acpi.c
+++ b/xen/drivers/passthrough/amd/iommu_acpi.c
@@ -653,9 +653,25 @@ static u16 __init parse_ivhd_device_special(
     }
 
     add_ivrs_mapping_entry(bdf, bdf, special->header.data_setting, iommu);
+    
+    switch ( special->variety )
+    {
+    case 1:
     /* set device id of ioapic */
-    ioapic_sbdf[special->handle].bdf = bdf;
-    ioapic_sbdf[special->handle].seg = seg;
+        ioapic_sbdf[special->handle].bdf = bdf;
+        ioapic_sbdf[special->handle].seg = seg;
+        break;
+    case 2:
+        /* set device id of hpet */
+        hpet_sbdf.id = special->handle;
+        hpet_sbdf.bdf = bdf;
+        hpet_sbdf.seg = seg;
+        hpet_sbdf.iommu = iommu;
+        break;            
+    default:
+        return 0;
+    }
+        
     return dev_length;
 }
 
diff --git a/xen/drivers/passthrough/amd/iommu_intr.c b/xen/drivers/passthrough/amd/iommu_intr.c
index 23cb1ea..26b3465 100644
--- a/xen/drivers/passthrough/amd/iommu_intr.c
+++ b/xen/drivers/passthrough/amd/iommu_intr.c
@@ -28,6 +28,7 @@
 #define INTREMAP_ENTRIES (1 << INTREMAP_LENGTH)
 
 struct ioapic_sbdf ioapic_sbdf[MAX_IO_APICS];
+struct hpet_sbdf hpet_sbdf;
 void *shared_intremap_table;
 static DEFINE_SPINLOCK(shared_intremap_lock);
 
@@ -267,14 +268,16 @@ static void update_intremap_entry_from_msi_msg(
 {
     unsigned long flags;
     u32* entry;
-    u16 bdf, req_id, alias_id;
+    u16 seg, bdf, req_id, alias_id;
     u8 delivery_mode, dest, vector, dest_mode;
     spinlock_t *lock;
     int offset;
 
-    bdf = PCI_BDF2(pdev->bus, pdev->devfn);
-    req_id = get_dma_requestor_id(pdev->seg, bdf);
-    alias_id = get_intremap_requestor_id(pdev->seg, bdf);
+    bdf = pdev ? PCI_BDF2(pdev->bus, pdev->devfn) : hpet_sbdf.bdf;
+    seg = pdev ? pdev->seg : hpet_sbdf.seg;
+    
+    req_id = get_dma_requestor_id(seg, bdf);
+    alias_id = get_intremap_requestor_id(seg, bdf);
 
     if ( msg == NULL )
     {
@@ -284,7 +287,7 @@ static void update_intremap_entry_from_msi_msg(
         spin_unlock_irqrestore(lock, flags);
 
         if ( ( req_id != alias_id ) &&
-             get_ivrs_mappings(pdev->seg)[alias_id].intremap_table != NULL )
+             get_ivrs_mappings(seg)[alias_id].intremap_table != NULL )
         {
             lock = get_intremap_lock(iommu->seg, alias_id);
             spin_lock_irqsave(lock, flags);
@@ -317,7 +320,7 @@ static void update_intremap_entry_from_msi_msg(
 
     lock = get_intremap_lock(iommu->seg, alias_id);
     if ( ( req_id != alias_id ) &&
-         get_ivrs_mappings(pdev->seg)[alias_id].intremap_table != NULL )
+         get_ivrs_mappings(seg)[alias_id].intremap_table != NULL )
     {
         spin_lock_irqsave(lock, flags);
         entry = (u32*)get_intremap_entry(iommu->seg, alias_id, offset);
@@ -340,13 +343,16 @@ void amd_iommu_msi_msg_update_ire(
     struct msi_desc *msi_desc, struct msi_msg *msg)
 {
     struct pci_dev *pdev = msi_desc->dev;
-    int bdf = PCI_BDF2(pdev->bus, pdev->devfn);
+    int bdf, seg; 
     struct amd_iommu *iommu;
 
     if ( !iommu_intremap )
         return;
 
-    iommu = find_iommu_for_device(pdev->seg, bdf);
+    bdf = pdev ? PCI_BDF2(pdev->bus, pdev->devfn) : hpet_sbdf.bdf;
+    seg = pdev ? pdev->seg : hpet_sbdf.seg;
+    
+    iommu = find_iommu_for_device(seg, bdf);
     if ( !iommu )
     {
         AMD_IOMMU_DEBUG("Fail to find iommu for MSI device id = %#x\n", bdf);
@@ -383,3 +389,15 @@ void* __init amd_iommu_alloc_intremap_table(void)
     memset(tb, 0, PAGE_SIZE * (1UL << INTREMAP_TABLE_ORDER));
     return tb;
 }
+
+/*
+ * Validate that HPET broadcast MSI can be remapped through the AMD IOMMU.
+ * Succeeds only when the IVRS special-device entry recorded an HPET whose
+ * handle matches this HPET's id and an owning IOMMU was found.
+ * Returns 0 on success, 1 if remapping cannot be set up.
+ */
+int __init amd_setup_hpet_msi(struct msi_desc *msi_desc)
+{
+    /* Note: compare the id itself, not its logical negation. */
+    if ( (msi_desc->hpet_id != hpet_sbdf.id) ||
+         (hpet_sbdf.iommu == NULL) )
+    {
+        AMD_IOMMU_DEBUG("Fail to setup HPET MSI remapping\n");
+        return 1;
+    }
+
+    return 0;
+}
diff --git a/xen/drivers/passthrough/amd/pci_amd_iommu.c b/xen/drivers/passthrough/amd/pci_amd_iommu.c
index b633f1c..7e37348 100644
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c
@@ -595,6 +595,7 @@ const struct iommu_ops amd_iommu_ops = {
     .update_ire_from_msi = amd_iommu_msi_msg_update_ire,
     .read_apic_from_ire = __io_apic_read,
     .read_msi_from_ire = amd_iommu_read_msi_from_ire,
+    .setup_hpet_msi = amd_setup_hpet_msi,
     .suspend = amd_iommu_suspend,
     .resume = amd_iommu_resume,
     .share_p2m = amd_iommu_share_p2m,
diff --git a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
index 8c425a3..7bef915 100644
--- a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
@@ -97,12 +97,18 @@ void amd_iommu_msi_msg_update_ire(
     struct msi_desc *msi_desc, struct msi_msg *msg);
 void amd_iommu_read_msi_from_ire(
     struct msi_desc *msi_desc, struct msi_msg *msg);
+int __init amd_setup_hpet_msi(struct msi_desc *msi_desc);
 
 extern struct ioapic_sbdf {
     u16 bdf, seg;
 } ioapic_sbdf[MAX_IO_APICS];
 extern void *shared_intremap_table;
 
+extern struct hpet_sbdf {
+    u16 bdf, seg, id;
+    struct amd_iommu *iommu;
+} hpet_sbdf;
+
 /* power management support */
 void amd_iommu_resume(void);
 void amd_iommu_suspend(void);
-- 
1.7.4


[-- Attachment #3: Type: text/plain, Size: 126 bytes --]

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel

^ permalink raw reply related	[flat|nested] 2+ messages in thread

* Re: [PATCH] AMD IOMMU: Enable HPET broadcast msi remapping
  2012-10-23 13:56 [PATCH] AMD IOMMU: Enable HPET broadcast msi remapping Wei Wang
@ 2012-10-24 13:11 ` Jan Beulich
  0 siblings, 0 replies; 2+ messages in thread
From: Jan Beulich @ 2012-10-24 13:11 UTC (permalink / raw)
  To: Wei Wang; +Cc: xen-devel@lists.xensource.com

>>> On 23.10.12 at 15:56, Wei Wang <wei.wang2@amd.com> wrote:
>--- a/xen/drivers/passthrough/amd/iommu_acpi.c
>+++ b/xen/drivers/passthrough/amd/iommu_acpi.c
>@@ -653,9 +653,25 @@ static u16 __init parse_ivhd_device_special(
>     }
> 
>     add_ivrs_mapping_entry(bdf, bdf, special->header.data_setting, iommu);
>+    
>+    switch ( special->variety )
>+    {
>+    case 1:

Please use the existing #define-s for this and the HPET one below.

>     /* set device id of ioapic */
>-    ioapic_sbdf[special->handle].bdf = bdf;
>-    ioapic_sbdf[special->handle].seg = seg;
>+        ioapic_sbdf[special->handle].bdf = bdf;
>+        ioapic_sbdf[special->handle].seg = seg;
>+        break;
>+    case 2:
>+        /* set device id of hpet */
>+        hpet_sbdf.id = special->handle;
>+        hpet_sbdf.bdf = bdf;
>+        hpet_sbdf.seg = seg;
>+        hpet_sbdf.iommu = iommu;

So there can only ever be a single HPET? Is that written down in
the spec somewhere? Otherwise I would at least want a warning
to be issued if we unexpectedly find a second one (and probably
avoid clobbering the already saved data).

>@@ -267,14 +268,16 @@ static void update_intremap_entry_from_msi_msg(
> {
>     unsigned long flags;
>     u32* entry;
>-    u16 bdf, req_id, alias_id;
>+    u16 seg, bdf, req_id, alias_id;
>     u8 delivery_mode, dest, vector, dest_mode;
>     spinlock_t *lock;
>     int offset;
> 
>-    bdf = PCI_BDF2(pdev->bus, pdev->devfn);
>-    req_id = get_dma_requestor_id(pdev->seg, bdf);
>-    alias_id = get_intremap_requestor_id(pdev->seg, bdf);
>+    bdf = pdev ? PCI_BDF2(pdev->bus, pdev->devfn) : hpet_sbdf.bdf;
>+    seg = pdev ? pdev->seg : hpet_sbdf.seg;
>+    
>+    req_id = get_dma_requestor_id(seg, bdf);
>+    alias_id = get_intremap_requestor_id(seg, bdf);
> 
>     if ( msg == NULL )
>     {

As you're replacing all further uses of pdev in this function anyway,
and the single caller already determines seg and bdf, please replace
the passing of "struct pci_dev *" by passing "bdf" and using
"iommu->seg". That'll also make obvious that no left over unchecked
use of "pdev" exists here (and avoids any getting re-added later).

>--- a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
>+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
>@@ -97,12 +97,18 @@ void amd_iommu_msi_msg_update_ire(
>     struct msi_desc *msi_desc, struct msi_msg *msg);
> void amd_iommu_read_msi_from_ire(
>     struct msi_desc *msi_desc, struct msi_msg *msg);
>+int __init amd_setup_hpet_msi(struct msi_desc *msi_desc);

No __init on a declaration please.

Jan

^ permalink raw reply	[flat|nested] 2+ messages in thread

end of thread, other threads:[~2012-10-24 13:11 UTC | newest]

Thread overview: 2+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2012-10-23 13:56 [PATCH] AMD IOMMU: Enable HPET broadcast msi remapping Wei Wang
2012-10-24 13:11 ` Jan Beulich

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).