* [RFC PATCH 2/3] AMD IOMMU: Implement p2m sharing
@ 2011-03-25 10:32 Wei Wang
  2011-04-04 11:05 ` Tim Deegan
From: Wei Wang @ 2011-03-25 10:32 UTC (permalink / raw)
  To: xen-devel@lists.xensource.com

[-- Attachment #2: p2m_2.patch --]

# HG changeset patch
# User Wei Wang <wei.wang2@amd.com>
# Date 1300980869 -3600
# Node ID 48052c25abfd39b5c1b96481bb908bcedf1af7d5
# Parent  6827fd35cf583353a7cb05946cdc5c0f13930f39
Implement p2m table sharing with the AMD IOMMU.

Signed-off-by: Wei Wang <wei.wang2@amd.com>

diff -r 6827fd35cf58 -r 48052c25abfd xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c	Thu Mar 24 16:18:28 2011 +0100
+++ b/xen/arch/x86/mm/p2m.c	Thu Mar 24 16:34:29 2011 +0100
@@ -34,6 +34,7 @@
 #include <public/mem_event.h>
 #include <asm/mem_sharing.h>
 #include <xen/event.h>
+#include <asm/hvm/svm/amd-iommu-proto.h>
 
 /* Debugging and auditing of the P2M code? */
 #define P2M_AUDIT     0
@@ -1454,15 +1455,23 @@
          && (gfn + (1UL << page_order) - 1 > p2m->max_mapped_pfn) )
         p2m->max_mapped_pfn = gfn + (1UL << page_order) - 1;
 
+    if ( (p2mt == p2m_ram_rw) && iommu_hap_pt_share )
+        amd_iommu_set_p2m_next_level(p2m, gfn, mfn_x(mfn), page_order);
+
     if ( iommu_enabled && need_iommu(p2m->domain) )
     {
-        if ( p2mt == p2m_ram_rw )
-            for ( i = 0; i < (1UL << page_order); i++ )
-                iommu_map_page(p2m->domain, gfn+i, mfn_x(mfn)+i,
-                               IOMMUF_readable|IOMMUF_writable);
+        if ( iommu_hap_pt_share )
+            amd_iommu_invalidate_pages(p2m->domain, gfn, page_order);
         else
-            for ( int i = 0; i < (1UL << page_order); i++ )
-                iommu_unmap_page(p2m->domain, gfn+i);
+        {
+            if ( p2mt == p2m_ram_rw )
+                for ( i = 0; i < (1UL << page_order); i++ )
+                    iommu_map_page(p2m->domain, gfn+i, mfn_x(mfn)+i,
+                                   IOMMUF_readable|IOMMUF_writable);
+            else
+                for ( int i = 0; i < (1UL << page_order); i++ )
+                    iommu_unmap_page(p2m->domain, gfn+i);
+        }
     }
 
     /* Success */
diff -r 6827fd35cf58 -r 48052c25abfd xen/drivers/passthrough/amd/iommu_map.c
--- a/xen/drivers/passthrough/amd/iommu_map.c	Thu Mar 24 16:18:28 2011 +0100
+++ b/xen/drivers/passthrough/amd/iommu_map.c	Thu Mar 24 16:34:29 2011 +0100
@@ -19,6 +19,7 @@
  */
 
 #include <xen/sched.h>
+#include <asm/p2m.h>
 #include <xen/hvm/iommu.h>
 #include <asm/amd-iommu.h>
 #include <asm/hvm/svm/amd-iommu-proto.h>
@@ -563,6 +564,9 @@
 
     BUG_ON( !hd->root_table );
 
+    if ( iommu_hap_pt_share && is_hvm_domain(d) )
+        return 0;
+
     spin_lock(&hd->mapping_lock);
 
     /* Since HVM domain is initialized with 2 level IO page table,
@@ -603,6 +607,9 @@
 
     BUG_ON( !hd->root_table );
 
+    if ( iommu_hap_pt_share && is_hvm_domain(d) )
+        return 0;
+
     spin_lock(&hd->mapping_lock);
 
     /* Since HVM domain is initialized with 2 level IO page table,
@@ -680,3 +687,87 @@
         spin_unlock_irqrestore(&iommu->lock, flags);
     }
 }
+
+/* Convert next level and r/w bits into 24-bit p2m flags */
+#define next_level_to_flags(nl) (((nl) << 9 ) | (3ULL << 21))
+
+/* Flush IOMMU TLB after p2m changes */
+int amd_iommu_invalidate_pages(struct domain *d, unsigned long gfn,
+                               unsigned int order)
+{
+    unsigned long flags;
+    struct amd_iommu *iommu;
+    struct hvm_iommu *hd = domain_hvm_iommu(d);
+
+    BUG_ON( !hd->root_table );
+
+    if ( !iommu_hap_pt_share && is_hvm_domain(d) )
+        return 0;
+
+    /* send INVALIDATE_IOMMU_PAGES command */
+    for_each_amd_iommu ( iommu )
+    {
+        spin_lock_irqsave(&iommu->lock, flags);
+        invalidate_iommu_pages(iommu, gfn, hd->domain_id, order);
+        flush_command_buffer(iommu);
+        spin_unlock_irqrestore(&iommu->lock, flags);
+    }
+
+    return 0;
+}
+
+/* Share p2m table with iommu */
+void amd_iommu_set_p2m_table(struct domain *d)
+{
+    struct hvm_iommu *hd  = domain_hvm_iommu(d);
+    mfn_t pgd_mfn;
+
+    ASSERT( is_hvm_domain(d) && d->arch.hvm_domain.hap_enabled );
+
+    if ( hd->root_table != NULL )
+        free_amd_iommu_pgtable(hd->root_table);
+
+    pgd_mfn = pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d)));
+    hd->root_table = mfn_to_page(mfn_x(pgd_mfn));
+
+    iommu_hap_pt_share = 1;
+    AMD_IOMMU_DEBUG("Share p2m table with iommu: p2m table = 0x%lx\n",
+                    mfn_x(pgd_mfn));
+}
+
+/* Fix next level fields in p2m entries for iommu use */
+void amd_iommu_set_p2m_next_level(struct p2m_domain *p2m, unsigned long gfn,
+                                  unsigned long mfn, unsigned int page_order)
+{
+#if CONFIG_PAGING_LEVELS >= 4
+
+    mfn_t table_mfn = pagetable_get_mfn(p2m_get_pagetable(p2m));
+    void *table = NULL;
+    unsigned int level = CONFIG_PAGING_LEVELS;
+    unsigned int lowest = page_order / PTE_PER_TABLE_SHIFT + 1;
+    l1_pgentry_t *p2m_entry;
+
+    if ( !mfn_valid(mfn) )
+        return;
+
+    while ( level >= lowest )
+    {
+        u32 next_level;
+        u32 offset = gfn >> ((PTE_PER_TABLE_SHIFT *
+                             (level - IOMMU_PAGING_MODE_LEVEL_1)));
+        offset &= ~PTE_PER_TABLE_MASK;
+
+        table = map_domain_page(mfn_x(table_mfn));
+
+        p2m_entry = (l1_pgentry_t *)table + offset;
+        next_level = (level == lowest) ? 0 : level - 1;
+        l1e_add_flags(*p2m_entry,  next_level_to_flags(next_level));
+        table_mfn = _mfn(l1e_get_pfn(*p2m_entry));
+
+        unmap_domain_page(table);
+
+        level--;
+    }
+
+#endif
+}
diff -r 6827fd35cf58 -r 48052c25abfd xen/drivers/passthrough/amd/pci_amd_iommu.c
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c	Thu Mar 24 16:18:28 2011 +0100
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c	Thu Mar 24 16:34:29 2011 +0100
@@ -362,6 +362,9 @@
 {
     struct hvm_iommu *hd  = domain_hvm_iommu(d);
 
+    if ( iommu_hap_pt_share )
+        return;
+
     spin_lock(&hd->mapping_lock);
     if ( hd->root_table )
     {
diff -r 6827fd35cf58 -r 48052c25abfd xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
--- a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h	Thu Mar 24 16:18:28 2011 +0100
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h	Thu Mar 24 16:34:29 2011 +0100
@@ -24,6 +24,7 @@
 #include <xen/sched.h>
 #include <asm/amd-iommu.h>
 #include <xen/domain_page.h>
+#include <asm/p2m.h>
 
 #define for_each_amd_iommu(amd_iommu) \
     list_for_each_entry(amd_iommu, \
@@ -92,6 +93,13 @@
 void amd_iommu_resume(void);
 void amd_iommu_suspend(void);
 
+/* Share p2m table with iommu */
+void amd_iommu_set_p2m_table(struct domain *d);
+void amd_iommu_set_p2m_next_level(struct p2m_domain *p2m, unsigned long gfn, 
+                                  unsigned long mfn, unsigned int page_order);
+int amd_iommu_invalidate_pages(struct domain *d, unsigned long gfn, 
+                               unsigned int order);
+
 static inline u32 get_field_from_reg_u32(u32 reg_value, u32 mask, u32 shift)
 {
     u32 field;
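
Note that amd_iommu_set_p2m_table() is introduced above but never called
within this patch; presumably the call site is added elsewhere in the
series.  A hypothetical example of such a call site, using only identifiers
that already appear in the patch, could be:

    /* Hypothetical (not from this patch): hand the HAP p2m root table to
     * the IOMMU once, for a HAP-enabled HVM domain with an IOMMU present. */
    if ( iommu_enabled && is_hvm_domain(d) && d->arch.hvm_domain.hap_enabled )
        amd_iommu_set_p2m_table(d);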



* Re: [RFC PATCH 2/3] AMD IOMMU: Implement p2m sharing
  2011-03-25 10:32 [RFC PATCH 2/3] AMD IOMMU: Implement p2m sharing Wei Wang
@ 2011-04-04 11:05 ` Tim Deegan
  2011-04-04 12:14   ` Wei Wang2
From: Tim Deegan @ 2011-04-04 11:05 UTC (permalink / raw)
  To: Wei Wang; +Cc: xen-devel@lists.xensource.com

At 10:32 +0000 on 25 Mar (1301049120), Wei Wang wrote:
> diff -r 6827fd35cf58 -r 48052c25abfd xen/arch/x86/mm/p2m.c
> --- a/xen/arch/x86/mm/p2m.c	Thu Mar 24 16:18:28 2011 +0100
> +++ b/xen/arch/x86/mm/p2m.c	Thu Mar 24 16:34:29 2011 +0100
> @@ -34,6 +34,7 @@
>  #include <public/mem_event.h>
>  #include <asm/mem_sharing.h>
>  #include <xen/event.h>
> +#include <asm/hvm/svm/amd-iommu-proto.h>
>  
>  /* Debugging and auditing of the P2M code? */
>  #define P2M_AUDIT     0
> @@ -1454,15 +1455,23 @@
>           && (gfn + (1UL << page_order) - 1 > p2m->max_mapped_pfn) )
>          p2m->max_mapped_pfn = gfn + (1UL << page_order) - 1;
>  
> +    if ( (p2mt == p2m_ram_rw) && iommu_hap_pt_share )
> +        amd_iommu_set_p2m_next_level(p2m, gfn, mfn_x(mfn), page_order);
> +

It looks like this function maps the p2m table and walks it to fix up
bits 9-11 of the entries, but you're calling it from a function that has
just done the same map-and-walk.  Couldn't you fold the
next-level-setting code into p2m_set_entry?

Cheers,

Tim.
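
A minimal sketch of the folding Tim suggests, for illustration only: the
helper name p2m_add_iommu_flags and its call sites are assumptions invented
for this sketch, not code from the patch or from either mail.

    /* Sketch only: set the IOMMU next-level field (bits 9-11) and r/w bits
     * on a p2m entry at the point p2m_set_entry() builds it, instead of
     * re-walking the table afterwards.  Mirrors next_level_to_flags() and
     * iommu_hap_pt_share from the patch above. */
    static inline void p2m_add_iommu_flags(l1_pgentry_t *p2m_entry,
                                           unsigned int next_level)
    {
        if ( iommu_hap_pt_share )
            l1e_add_flags(*p2m_entry,
                          (next_level << 9) | (3ULL << 21));
    }

    /* p2m_set_entry() would then call, just before each write_p2m_entry():
     *     p2m_add_iommu_flags(&new_entry, level - 1);   for table entries
     *     p2m_add_iommu_flags(&new_entry, 0);           for leaf entries
     * so amd_iommu_set_p2m_next_level() and its second walk become
     * unnecessary. */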

-- 
Tim Deegan <Tim.Deegan@citrix.com>
Principal Software Engineer, Xen Platform Team
Citrix Systems UK Ltd.  (Company #02937203, SL9 0BG)


* Re: [RFC PATCH 2/3] AMD IOMMU: Implement p2m sharing
  2011-04-04 11:05 ` Tim Deegan
@ 2011-04-04 12:14   ` Wei Wang2
From: Wei Wang2 @ 2011-04-04 12:14 UTC (permalink / raw)
  To: Tim Deegan; +Cc: xen-devel@lists.xensource.com

On Monday 04 April 2011 13:05:43 Tim Deegan wrote:
> At 10:32 +0000 on 25 Mar (1301049120), Wei Wang wrote:
> > diff -r 6827fd35cf58 -r 48052c25abfd xen/arch/x86/mm/p2m.c
> > --- a/xen/arch/x86/mm/p2m.c	Thu Mar 24 16:18:28 2011 +0100
> > +++ b/xen/arch/x86/mm/p2m.c	Thu Mar 24 16:34:29 2011 +0100
> > @@ -34,6 +34,7 @@
> >  #include <public/mem_event.h>
> >  #include <asm/mem_sharing.h>
> >  #include <xen/event.h>
> > +#include <asm/hvm/svm/amd-iommu-proto.h>
> >
> >  /* Debugging and auditing of the P2M code? */
> >  #define P2M_AUDIT     0
> > @@ -1454,15 +1455,23 @@
> >           && (gfn + (1UL << page_order) - 1 > p2m->max_mapped_pfn) )
> >          p2m->max_mapped_pfn = gfn + (1UL << page_order) - 1;
> >
> > +    if ( (p2mt == p2m_ram_rw) && iommu_hap_pt_share )
> > +        amd_iommu_set_p2m_next_level(p2m, gfn, mfn_x(mfn), page_order);
> > +
>
> It looks like this function maps the p2m table and walks it to fix up
> bits 9-11 of the entries, but you're calling it from a function that has
> just done the same map-and-walk.  Couldn't you fold the
> next-level-setting code into p2m_set_entry?
Yeah, that is also possible. Actually I did it this way in my testing code, so it
should be no problem to bring it back. I will send a new patch this week.
Thanks,
Wei


