From: Wei Wang <wei.wang2@amd.com>
To: "xen-devel@lists.xensource.com" <xen-devel@lists.xensource.com>
Subject: [RFC PATCH 2/3] AMD IOMMU: Implement p2m sharing
Date: Fri, 25 Mar 2011 11:32:00 +0100
Message-ID: <4D8C6F20.6010005@amd.com>
# HG changeset patch
# User Wei Wang <wei.wang2@amd.com>
# Date 1300980869 -3600
# Node ID 48052c25abfd39b5c1b96481bb908bcedf1af7d5
# Parent 6827fd35cf583353a7cb05946cdc5c0f13930f39
Implement p2m table sharing with the AMD IOMMU. When iommu_hap_pt_share
is enabled, the IOMMU walks the host p2m table directly instead of
maintaining a separate I/O page table: the p2m code fixes up the
next-level bits the IOMMU expects in shared entries and flushes the
IOMMU TLB after p2m updates.
Signed-off-by: Wei Wang <wei.wang2@amd.com>
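---
For review convenience: amd_iommu_set_p2m_table() points hd->root_table
at the p2m top-level page, amd_iommu_set_p2m_next_level() fixes up the
entries on a gfn's walk path, and amd_iommu_invalidate_pages() flushes
the IOMMU TLB in place of the per-page iommu_map_page/iommu_unmap_page
calls.

As an illustrative arithmetic check (not part of the patch), the flag
encoding for an entry pointing at a level-1 table works out to:

    next_level_to_flags(1) == (1 << 9) | (3UL << 21) == 0x600200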
diff -r 6827fd35cf58 -r 48052c25abfd xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c Thu Mar 24 16:18:28 2011 +0100
+++ b/xen/arch/x86/mm/p2m.c Thu Mar 24 16:34:29 2011 +0100
@@ -34,6 +34,7 @@
#include <public/mem_event.h>
#include <asm/mem_sharing.h>
#include <xen/event.h>
+#include <asm/hvm/svm/amd-iommu-proto.h>
/* Debugging and auditing of the P2M code? */
#define P2M_AUDIT 0
@@ -1454,15 +1455,23 @@
&& (gfn + (1UL << page_order) - 1 > p2m->max_mapped_pfn) )
p2m->max_mapped_pfn = gfn + (1UL << page_order) - 1;
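+ /*
+  * With pt sharing the IOMMU walks this p2m directly, so fix up the
+  * next-level bits it expects on the entries along gfn's walk path.
+  */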
+ if ( (p2mt == p2m_ram_rw) && iommu_hap_pt_share )
+ amd_iommu_set_p2m_next_level(p2m, gfn, mfn_x(mfn), page_order);
+
if ( iommu_enabled && need_iommu(p2m->domain) )
{
- if ( p2mt == p2m_ram_rw )
- for ( i = 0; i < (1UL << page_order); i++ )
- iommu_map_page(p2m->domain, gfn+i, mfn_x(mfn)+i,
- IOMMUF_readable|IOMMUF_writable);
+ if ( iommu_hap_pt_share )
+ amd_iommu_invalidate_pages(p2m->domain, gfn, page_order);
else
- for ( int i = 0; i < (1UL << page_order); i++ )
- iommu_unmap_page(p2m->domain, gfn+i);
+ {
+ if ( p2mt == p2m_ram_rw )
+ for ( i = 0; i < (1UL << page_order); i++ )
+ iommu_map_page(p2m->domain, gfn+i, mfn_x(mfn)+i,
+ IOMMUF_readable|IOMMUF_writable);
+ else
+ for ( int i = 0; i < (1UL << page_order); i++ )
+ iommu_unmap_page(p2m->domain, gfn+i);
+ }
}
/* Success */
diff -r 6827fd35cf58 -r 48052c25abfd xen/drivers/passthrough/amd/iommu_map.c
--- a/xen/drivers/passthrough/amd/iommu_map.c Thu Mar 24 16:18:28 2011 +0100
+++ b/xen/drivers/passthrough/amd/iommu_map.c Thu Mar 24 16:34:29 2011 +0100
@@ -19,6 +19,7 @@
*/
#include <xen/sched.h>
+#include <asm/p2m.h>
#include <xen/hvm/iommu.h>
#include <asm/amd-iommu.h>
#include <asm/hvm/svm/amd-iommu-proto.h>
@@ -563,6 +564,9 @@
BUG_ON( !hd->root_table );
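+ /* When the table is shared, the p2m code maintains all mappings. */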
+ if ( iommu_hap_pt_share && is_hvm_domain(d) )
+ return 0;
+
spin_lock(&hd->mapping_lock);
/* Since HVM domain is initialized with 2 level IO page table,
@@ -603,6 +607,9 @@
BUG_ON( !hd->root_table );
+ if ( iommu_hap_pt_share && is_hvm_domain(d) )
+ return 0;
+
spin_lock(&hd->mapping_lock);
/* Since HVM domain is initialized with 2 level IO page table,
@@ -680,3 +687,87 @@
spin_unlock_irqrestore(&iommu->lock, flags);
}
}
+
+/* Convert next level and r/w bits into 24-bit p2m flags */
+#define next_level_to_flags(nl) (((nl) << 9 ) | (3ULL << 21))
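+
+/*
+ * Note: Xen's 24-bit pte flags pack PTE bits 0-11 and 52-63, so in flag
+ * space bits 9-11 hold the AMD IOMMU next-level field and bits 21-22 map
+ * to PTE bits 61-62, the IOMMU IR/IW (read/write) permission bits.
+ */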
+
+/* Flush IOMMU TLB after p2m changes */
+int amd_iommu_invalidate_pages(struct domain *d, unsigned long gfn,
+ unsigned int order)
+{
+ unsigned long flags;
+ struct amd_iommu *iommu;
+ struct hvm_iommu *hd = domain_hvm_iommu(d);
+
+ BUG_ON( !hd->root_table );
+
+ if ( !iommu_hap_pt_share && is_hvm_domain(d) )
+ return 0;
+
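+ /*
+  * A shared table needs no entry updates here, but the IOMMU TLBs may
+  * still hold stale translations, so ask every IOMMU in the system to
+  * invalidate the affected gfn range.
+  */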
+ /* send INVALIDATE_IOMMU_PAGES command */
+ for_each_amd_iommu ( iommu )
+ {
+ spin_lock_irqsave(&iommu->lock, flags);
+ invalidate_iommu_pages(iommu, gfn, hd->domain_id, order);
+ flush_command_buffer(iommu);
+ spin_unlock_irqrestore(&iommu->lock, flags);
+ }
+
+ return 0;
+}
+
+/* Share p2m table with iommu */
+void amd_iommu_set_p2m_table(struct domain *d)
+{
+ struct hvm_iommu *hd = domain_hvm_iommu(d);
+ mfn_t pgd_mfn;
+
+ ASSERT( is_hvm_domain(d) && d->arch.hvm_domain.hap_enabled );
+
+ if ( hd->root_table != NULL )
+ free_amd_iommu_pgtable(hd->root_table);
+
+ pgd_mfn = pagetable_get_mfn(p2m_get_pagetable(p2m_get_hostp2m(d)));
+ hd->root_table = mfn_to_page(mfn_x(pgd_mfn));
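+
+ /* hd->root_table now aliases the p2m top level; the IOMMU walks the
+  * p2m directly from here on. */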
+
+ iommu_hap_pt_share = 1;
+ AMD_IOMMU_DEBUG("Share p2m table with iommu: p2m table = 0x%lx\n",
+ mfn_x(pgd_mfn));
+}
+
+/* Fix next level fields in p2m entries for iommu use */
+void amd_iommu_set_p2m_next_level(struct p2m_domain *p2m, unsigned long gfn,
+ unsigned long mfn, unsigned int page_order)
+{
+#if CONFIG_PAGING_LEVELS >= 4
+
+ mfn_t table_mfn = pagetable_get_mfn(p2m_get_pagetable(p2m));
+ void *table = NULL;
+ unsigned int level = CONFIG_PAGING_LEVELS;
+ unsigned int lowest = page_order / PTE_PER_TABLE_SHIFT + 1;
+ l1_pgentry_t *p2m_entry;
+
+ if ( !mfn_valid(mfn) )
+ return;
+
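+ /*
+  * Walk from the top level down to the lowest table this mapping
+  * order touches, fixing up the entry on gfn's path at each level.
+  */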
+ while ( level >= lowest )
+ {
+ u32 next_level;
+ u32 offset = gfn >> ((PTE_PER_TABLE_SHIFT *
+ (level - IOMMU_PAGING_MODE_LEVEL_1)));
+ offset &= ~PTE_PER_TABLE_MASK;
+
+ table = map_domain_page(mfn_x(table_mfn));
+
+ p2m_entry = (l1_pgentry_t *)table + offset;
+ next_level = (level == lowest) ? 0 : level - 1;
+ l1e_add_flags(*p2m_entry, next_level_to_flags(next_level));
+ table_mfn = _mfn(l1e_get_pfn(*p2m_entry));
+
+ unmap_domain_page(table);
+
+ level--;
+ }
+
+#endif
+}
diff -r 6827fd35cf58 -r 48052c25abfd xen/drivers/passthrough/amd/pci_amd_iommu.c
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c Thu Mar 24 16:18:28 2011 +0100
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c Thu Mar 24 16:34:29 2011 +0100
@@ -362,6 +362,9 @@
{
struct hvm_iommu *hd = domain_hvm_iommu(d);
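+ /* The shared table belongs to the p2m code; do not free it here. */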
+ if ( iommu_hap_pt_share )
+ return;
+
spin_lock(&hd->mapping_lock);
if ( hd->root_table )
{
diff -r 6827fd35cf58 -r 48052c25abfd xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
--- a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h Thu Mar 24 16:18:28 2011 +0100
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h Thu Mar 24 16:34:29 2011 +0100
@@ -24,6 +24,7 @@
#include <xen/sched.h>
#include <asm/amd-iommu.h>
#include <xen/domain_page.h>
+#include <asm/p2m.h>
#define for_each_amd_iommu(amd_iommu) \
list_for_each_entry(amd_iommu, \
@@ -92,6 +93,13 @@
void amd_iommu_resume(void);
void amd_iommu_suspend(void);
+/* Share p2m table with iommu */
+void amd_iommu_set_p2m_table(struct domain *d);
+void amd_iommu_set_p2m_next_level(struct p2m_domain *p2m, unsigned long gfn,
+ unsigned long mfn, unsigned int page_order);
+int amd_iommu_invalidate_pages(struct domain *d, unsigned long gfn,
+ unsigned int order);
+
static inline u32 get_field_from_reg_u32(u32 reg_value, u32 mask, u32 shift)
{
u32 field;