From: Jean Guyader
Subject: [PATCH 6/6] Introduce per-cpu flag (iommu_dont_flush_iotlb) to avoid unnecessary iotlb flush
Date: Thu, 10 Nov 2011 08:44:04 +0000
Message-ID: <1320914644-4357-7-git-send-email-jean.guyader@eu.citrix.com>
In-Reply-To: <1320914644-4357-6-git-send-email-jean.guyader@eu.citrix.com>
References: <1320914644-4357-1-git-send-email-jean.guyader@eu.citrix.com>
 <1320914644-4357-2-git-send-email-jean.guyader@eu.citrix.com>
 <1320914644-4357-3-git-send-email-jean.guyader@eu.citrix.com>
 <1320914644-4357-4-git-send-email-jean.guyader@eu.citrix.com>
 <1320914644-4357-5-git-send-email-jean.guyader@eu.citrix.com>
 <1320914644-4357-6-git-send-email-jean.guyader@eu.citrix.com>
To: xen-devel@lists.xensource.com
Cc: tim@xen.org, allen.m.kay@intel.com, keir@xen.org, Jean Guyader, JBeulich@suse.com

Add a per-cpu flag that the low-level IOMMU code checks in order to
skip iotlb flushes. While the flag is set, the caller is responsible
for invoking iommu_iotlb_flush explicitly.

Signed-off-by: Jean Guyader
Acked-by: Allen M Kay
---
 xen/arch/x86/mm.c                   |   14 ++++++++++++++
 xen/drivers/passthrough/iommu.c     |    5 +++++
 xen/drivers/passthrough/vtd/iommu.c |    6 ++++--
 xen/include/xen/iommu.h             |   12 ++++++++++++
 4 files changed, 35 insertions(+), 2 deletions(-)
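The caller-side pattern this flag enables: set the per-cpu flag,
perform a batch of IOMMU map/unmap operations (which then skip the
per-page IOTLB flush), clear the flag, and issue one explicit range
flush. A minimal sketch of such a caller, using the names introduced
by this patch; map_batch() and map_one_page() are hypothetical
stand-ins for illustration, not functions added by the patch:

/*
 * Hypothetical batching caller. map_one_page() stands in for any path
 * that ends up in iommu_map_page(). Note the flag is per-cpu, so it
 * must be set and cleared on the same CPU without rescheduling in
 * between.
 */
static int map_batch(struct domain *d, unsigned long gpfn,
                     unsigned long mfn, unsigned int count)
{
    unsigned int i;
    int rc = 0;

    if ( need_iommu(d) )
        this_cpu(iommu_dont_flush_iotlb) = 1;    /* defer IOTLB flushes */

    for ( i = 0; i < count && rc == 0; i++ )
        rc = map_one_page(d, gpfn + i, mfn + i); /* no per-page flush */

    if ( need_iommu(d) )
    {
        this_cpu(iommu_dont_flush_iotlb) = 0;
        iommu_iotlb_flush(d, gpfn, i);           /* one flush, whole range */
    }

    return rc;
}

The xenmem_add_to_physmap() change below applies the same pattern to
XENMAPSPACE_gmfn_range, flushing both the source (idx) and destination
(gpfn) ranges once the loop has finished.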
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 2417fe9..17a8504 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -4594,13 +4594,20 @@ static int handle_iomem_range(unsigned long s, unsigned long e, void *p)
 
 static int xenmem_add_to_physmap(struct domain *d, struct xen_add_to_physmap xatp)
 {
+    struct xen_add_to_physmap start_xatp;
     struct page_info *page = NULL;
     unsigned long prev_mfn, mfn = 0, gpfn;
     int rc;
 
     if ( xatp.space != XENMAPSPACE_gmfn_range )
         xatp.size = 1;
+    else
+    {
+        if ( need_iommu(d) )
+            this_cpu(iommu_dont_flush_iotlb) = 1;
+    }
 
+    start_xatp = xatp;
     while ( xatp.size > 0 )
     {
         if ( hypercall_preempt_check() )
@@ -4705,6 +4712,13 @@ static int xenmem_add_to_physmap(struct domain *d, struct xen_add_to_physmap xatp)
         break;
     }
 
+    if ( xatp.space == XENMAPSPACE_gmfn_range && need_iommu(d) )
+    {
+        this_cpu(iommu_dont_flush_iotlb) = 0;
+        iommu_iotlb_flush(d, start_xatp.idx, start_xatp.size - xatp.size);
+        iommu_iotlb_flush(d, start_xatp.gpfn, start_xatp.size - xatp.size);
+    }
+
     return rc;
 }
 
diff --git a/xen/drivers/passthrough/iommu.c b/xen/drivers/passthrough/iommu.c
index ca7b37b..bacca11 100644
--- a/xen/drivers/passthrough/iommu.c
+++ b/xen/drivers/passthrough/iommu.c
@@ -52,6 +52,8 @@ bool_t __read_mostly iommu_hap_pt_share = 1;
 bool_t __read_mostly iommu_debug;
 bool_t __read_mostly amd_iommu_perdev_intremap;
 
+DEFINE_PER_CPU(bool_t, iommu_dont_flush_iotlb);
+
 static void __init parse_iommu_param(char *s)
 {
     char *ss;
@@ -227,6 +229,7 @@ static int iommu_populate_page_table(struct domain *d)
 
     spin_lock(&d->page_alloc_lock);
 
+    this_cpu(iommu_dont_flush_iotlb) = 1;
     page_list_for_each ( page, &d->page_list )
     {
         if ( is_hvm_domain(d) ||
@@ -244,6 +247,8 @@ static int iommu_populate_page_table(struct domain *d)
             }
         }
     }
+    this_cpu(iommu_dont_flush_iotlb) = 0;
+    iommu_iotlb_flush_all(d);
     spin_unlock(&d->page_alloc_lock);
     return 0;
 }
diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
index 7ec9541..a3dd018 100644
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -660,7 +660,8 @@ static void dma_pte_clear_one(struct domain *domain, u64 addr)
     spin_unlock(&hd->mapping_lock);
     iommu_flush_cache_entry(pte, sizeof(struct dma_pte));
 
-    __intel_iommu_iotlb_flush(domain, addr >> PAGE_SHIFT_4K , 0, 1);
+    if ( !this_cpu(iommu_dont_flush_iotlb) )
+        __intel_iommu_iotlb_flush(domain, addr >> PAGE_SHIFT_4K , 0, 1);
 
     unmap_vtd_domain_page(page);
 }
@@ -1753,7 +1754,8 @@ static int intel_iommu_map_page(
     spin_unlock(&hd->mapping_lock);
     unmap_vtd_domain_page(page);
 
-    __intel_iommu_iotlb_flush(d, gfn, dma_pte_present(old), 1);
+    if ( !this_cpu(iommu_dont_flush_iotlb) )
+        __intel_iommu_iotlb_flush(d, gfn, dma_pte_present(old), 1);
 
     return 0;
 }
diff --git a/xen/include/xen/iommu.h b/xen/include/xen/iommu.h
index a1034df..6f7fbf7 100644
--- a/xen/include/xen/iommu.h
+++ b/xen/include/xen/iommu.h
@@ -160,4 +160,16 @@ int iommu_do_domctl(struct xen_domctl *, XEN_GUEST_HANDLE(xen_domctl_t));
 void iommu_iotlb_flush(struct domain *d, unsigned long gfn, unsigned int page_count);
 void iommu_iotlb_flush_all(struct domain *d);
 
+/*
+ * The purpose of the optional iommu_dont_flush_iotlb per-cpu flag is
+ * to avoid unnecessary iotlb flushes in the low-level IOMMU code.
+ *
+ * iommu_map_page/iommu_unmap_page must flush the iotlb, but sometimes
+ * this operation can be really expensive. This flag will be set by the
+ * caller to notify the low-level IOMMU code to skip the iotlb flushes.
+ * iommu_iotlb_flush/iommu_iotlb_flush_all will then be called
+ * explicitly by the caller.
+ */
+DECLARE_PER_CPU(bool_t, iommu_dont_flush_iotlb);
+
 #endif /* _IOMMU_H_ */