From: Matthias Brugger <mbrugger@suse.com>
To: pbonzini@redhat.com, rkrcmar@redhat.com,
	christoffer.dall@linaro.org, marc.zyngier@arm.com,
	linux@armlinux.org.uk, catalin.marinas@arm.com,
	will.deacon@arm.com
Cc: suzuki.poulose@arm.com, james.morse@arm.com,
	david.daney@cavium.com, rrichter@cavium.com, agraf@suse.de,
	mbrugger@suse.com, mark.rutland@arm.com,
	lorenzo.pieralisi@arm.com, dave.long@linaro.org,
	ard.biesheuvel@linaro.org, zlim.lnx@gmail.com,
	kvm@vger.kernel.org, linux-arm-kernel@lists.infradead.org,
	kvmarm@lists.cs.columbia.edu, linux-kernel@vger.kernel.org
Subject: [PATCH 2/4] arm64: Implement IPI based TLB invalidation
Date: Thu,  4 Aug 2016 11:15:15 +0200
Message-ID: <1470302117-32296-3-git-send-email-mbrugger@suse.com>
In-Reply-To: <1470302117-32296-1-git-send-email-mbrugger@suse.com>

Hardware may lack a sane implementation of TLB invalidation
using the broadcast TLBI instructions. Add a CPU capability,
ARM64_HAS_NO_BCAST_TLBI, that switches TLB invalidation over to
IPIs: every online CPU is interrupted and performs the
invalidation locally.

Signed-off-by: David Daney <david.daney@cavium.com>
Signed-off-by: Robert Richter <rrichter@cavium.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Matthias Brugger <mbrugger@suse.com>
---
 arch/arm64/include/asm/cpufeature.h |  3 +-
 arch/arm64/include/asm/tlbflush.h   | 94 ++++++++++++++++++++++++++++++++-----
 arch/arm64/mm/flush.c               | 46 ++++++++++++++++++
 3 files changed, 129 insertions(+), 14 deletions(-)
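
Note: the hunks below only add the capability bit and the IPI plumbing;
how the bit actually gets set is out of scope here. Purely as a
hypothetical illustration (placeholder MIDR range, illustrative table
name -- none of this is taken from the series), an entry in the
arch/arm64/kernel/cpu_errata.c capability table enabling it could look
like this, following the style of the existing
ARM64_WORKAROUND_CAVIUM_27456 entry:

	/* Hypothetical illustration only -- the MIDR range is a
	 * placeholder, not a claim about affected hardware. */
	static const struct arm64_cpu_capabilities illustrative_caps[] = {
		{
			.desc = "broadcast TLBI unreliable, fall back to IPIs",
			.capability = ARM64_HAS_NO_BCAST_TLBI,
			MIDR_RANGE(MIDR_THUNDERX, 0x00, 0x01),
		},
		{ /* sentinel */ }
	};

Gating on a capability bit keeps the fast broadcast path the default:
cpus_have_cap() is a cheap bitmap test, so unaffected hardware pays
almost nothing for the new branch in each flush helper.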

diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 49dd1bd..c4bf72b 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -36,8 +36,9 @@
 #define ARM64_HAS_VIRT_HOST_EXTN		11
 #define ARM64_WORKAROUND_CAVIUM_27456		12
 #define ARM64_HAS_32BIT_EL0			13
+#define ARM64_HAS_NO_BCAST_TLBI			14
 
-#define ARM64_NCAPS				14
+#define ARM64_NCAPS				15
 
 #ifndef __ASSEMBLY__
 
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index b460ae2..edc5495 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -71,7 +71,10 @@ static inline void local_flush_tlb_all(void)
 	isb();
 }
 
-static inline void flush_tlb_all(void)
+void __flush_tlb_all_ipi(void);
+void __flush_tlb_mm_ipi(struct mm_struct *mm);
+
+static inline void __flush_tlb_all_tlbi(void)
 {
 	dsb(ishst);
 	asm("tlbi	vmalle1is");
@@ -79,7 +82,17 @@ static inline void flush_tlb_all(void)
 	isb();
 }
 
-static inline void flush_tlb_mm(struct mm_struct *mm)
+static inline void flush_tlb_all(void)
+{
+	if (cpus_have_cap(ARM64_HAS_NO_BCAST_TLBI)) {
+		__flush_tlb_all_ipi();
+		return;
+	}
+
+	__flush_tlb_all_tlbi();
+}
+
+static inline void __flush_tlb_mm_tlbi(struct mm_struct *mm)
 {
 	unsigned long asid = ASID(mm) << 48;
 
@@ -88,8 +101,18 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
 	dsb(ish);
 }
 
-static inline void flush_tlb_page(struct vm_area_struct *vma,
-				  unsigned long uaddr)
+static inline void flush_tlb_mm(struct mm_struct *mm)
+{
+	if (cpus_have_cap(ARM64_HAS_NO_BCAST_TLBI)) {
+		__flush_tlb_mm_ipi(mm);
+		return;
+	}
+
+	__flush_tlb_mm_tlbi(mm);
+}
+
+static inline void __flush_tlb_page_tlbi(struct vm_area_struct *vma,
+		unsigned long uaddr)
 {
 	unsigned long addr = uaddr >> 12 | (ASID(vma->vm_mm) << 48);
 
@@ -98,15 +121,26 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
 	dsb(ish);
 }
 
+static inline void flush_tlb_page(struct vm_area_struct *vma,
+		unsigned long uaddr)
+{
+	if (cpus_have_cap(ARM64_HAS_NO_BCAST_TLBI)) {
+		__flush_tlb_mm_ipi(vma->vm_mm);
+		return;
+	}
+
+	__flush_tlb_page_tlbi(vma, uaddr);
+}
+
 /*
  * This is meant to avoid soft lock-ups on large TLB flushing ranges and not
  * necessarily a performance improvement.
  */
 #define MAX_TLB_RANGE	(1024UL << PAGE_SHIFT)
 
-static inline void __flush_tlb_range(struct vm_area_struct *vma,
-				     unsigned long start, unsigned long end,
-				     bool last_level)
+static inline void __flush_tlb_range_tlbi(struct vm_area_struct *vma,
+		unsigned long start, unsigned long end,
+		bool last_level)
 {
 	unsigned long asid = ASID(vma->vm_mm) << 48;
 	unsigned long addr;
@@ -129,13 +163,26 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
 	dsb(ish);
 }
 
+static inline void __flush_tlb_range(struct vm_area_struct *vma,
+		unsigned long start, unsigned long end,
+		bool last_level)
+{
+	if (cpus_have_cap(ARM64_HAS_NO_BCAST_TLBI)) {
+		__flush_tlb_mm_ipi(vma->vm_mm);
+		return;
+	}
+
+	__flush_tlb_range_tlbi(vma, start, end, last_level);
+}
+
 static inline void flush_tlb_range(struct vm_area_struct *vma,
-				   unsigned long start, unsigned long end)
+		unsigned long start, unsigned long end)
 {
 	__flush_tlb_range(vma, start, end, false);
 }
 
-static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+static inline void __flush_tlb_kernel_range_tlbi(unsigned long start,
+		unsigned long end)
 {
 	unsigned long addr;
 
@@ -154,17 +201,38 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end
 	isb();
 }
 
+static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+	if (cpus_have_cap(ARM64_HAS_NO_BCAST_TLBI)) {
+		__flush_tlb_all_ipi();
+		return;
+	}
+
+	__flush_tlb_kernel_range_tlbi(start, end);
+}
+
+static inline void __flush_tlb_pgtable_tlbi(struct mm_struct *mm,
+		unsigned long uaddr)
+{
+	unsigned long addr = uaddr >> 12 | (ASID(mm) << 48);
+
+	asm("tlbi	vae1is, %0" : : "r" (addr));
+	dsb(ish);
+}
+
 /*
  * Used to invalidate the TLB (walk caches) corresponding to intermediate page
  * table levels (pgd/pud/pmd).
  */
 static inline void __flush_tlb_pgtable(struct mm_struct *mm,
-				       unsigned long uaddr)
+		unsigned long uaddr)
 {
-	unsigned long addr = uaddr >> 12 | (ASID(mm) << 48);
+	if (cpus_have_cap(ARM64_HAS_NO_BCAST_TLBI)) {
+		__flush_tlb_mm_ipi(mm);
+		return;
+	}
 
-	asm("tlbi	vae1is, %0" : : "r" (addr));
-	dsb(ish);
+	__flush_tlb_pgtable_tlbi(mm, uaddr);
 }
 
 #endif
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index 43a76b0..402036a 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -27,6 +27,24 @@
 
 #include "mm.h"
 
+static void flush_tlb_local(void *info)
+{
+	local_flush_tlb_all();
+}
+
+static void flush_tlb_mm_local(void *info)
+{
+	unsigned long asid = (unsigned long)info;
+
+	asm volatile("\n"
+	"  dsb     nshst\n"
+	"  tlbi    aside1, %0\n"
+	"  dsb     nsh\n"
+	"  isb     sy"
+	: : "r" (asid)
+	);
+}
+
 void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
 		       unsigned long end)
 {
@@ -90,6 +108,34 @@ void flush_dcache_page(struct page *page)
 }
 EXPORT_SYMBOL(flush_dcache_page);
 
+void __flush_tlb_mm_ipi(struct mm_struct *mm)
+{
+	unsigned long asid;
+
+	if (!mm) {
+		flush_tlb_all();
+	} else {
+		asid = ASID(mm) << 48;
+		/* Make sure page table modifications are visible. */
+		dsb(ishst);
+		/* IPI to all CPUs to do local flush. */
+		on_each_cpu(flush_tlb_mm_local, (void *)asid, 1);
+	}
+}
+EXPORT_SYMBOL(__flush_tlb_mm_ipi);
+
+void __flush_tlb_all_ipi(void)
+{
+	/* Make sure page table modifications are visible. */
+	dsb(ishst);
+	if (num_online_cpus() <= 1)
+		local_flush_tlb_all();
+	else
+		/* IPI to all CPUs to do local flush. */
+		on_each_cpu(flush_tlb_local, NULL, 1);
+}
+EXPORT_SYMBOL(__flush_tlb_all_ipi);
+
 /*
  * Additional functions defined in assembly.
  */
-- 
2.6.6
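
For context, the IPI path introduced above reduces to the following
pattern. This is a hedged sketch, not code from the series: the two
function names are illustrative, kernel context is assumed, and only
on_each_cpu(), local_flush_tlb_all() and dsb() are real kernel
interfaces.

	#include <linux/smp.h>
	#include <asm/tlbflush.h>

	/* Illustrative callback name. on_each_cpu() sends an IPI to
	 * every online CPU and, with the final argument set to 1,
	 * waits until each CPU has run the callback with interrupts
	 * disabled. Each CPU invalidates only its own TLB, so the
	 * non-shareable barriers inside local_flush_tlb_all() suffice
	 * and no hardware broadcast is needed. */
	static void sketch_local_flush(void *unused)
	{
		local_flush_tlb_all();	/* tlbi vmalle1; dsb nsh; isb */
	}

	static void sketch_flush_everywhere(void)
	{
		dsb(ishst);	/* publish page-table updates first */
		on_each_cpu(sketch_local_flush, NULL, 1);
	}

The per-mm variant works the same way, except the callback issues
"tlbi aside1" with the ASID placed in bits [63:48] of the operand
register, which is why the patch computes ASID(mm) << 48 before
handing it to on_each_cpu().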

Thread overview: 11+ messages
2016-08-04  9:15 arm64: Implement IPI based TLB invalidation Matthias Brugger
2016-08-04  9:15 ` [PATCH 1/4] arm64: insn: Do not disable irqs during patching Matthias Brugger
2016-08-04  9:15 ` Matthias Brugger [this message]
2016-08-04  9:15 ` [PATCH 3/4] KVM: arm/arm64: Check for broadcast TLBI support Matthias Brugger
2016-08-06  7:22   ` kbuild test robot
2016-08-04  9:15 ` [PATCH 4/4] arm64: hibernate: " Matthias Brugger
2016-08-04 20:57 ` [PATCH] arm64: Add workaround for Cavium erratum 26026 Robert Richter
2016-08-04 21:40   ` David Daney
2016-08-05  7:00     ` Robert Richter
2016-08-05 16:37       ` David Daney
2016-08-05  2:35   ` kbuild test robot
