From: Catalin Marinas <catalin.marinas@arm.com>
To: linux-arch@vger.kernel.org, linux-arm-kernel@lists.infradead.org
Cc: Will Deacon <will.deacon@arm.com>,
	linux-kernel@vger.kernel.org, Arnd Bergmann <arnd@arndb.de>
Subject: [PATCH v2 10/31] arm64: TLB maintenance functionality
Date: Tue, 14 Aug 2012 18:52:11 +0100
Message-ID: <1344966752-16102-11-git-send-email-catalin.marinas@arm.com>
In-Reply-To: <1344966752-16102-1-git-send-email-catalin.marinas@arm.com>

This patch adds the TLB maintenance functions. No distinction is made
between the I and D TLBs. TLB maintenance operations are automatically
broadcast between CPUs in hardware, and the inner-shareable variants are
always available, even on UP systems.

NOTE: A large part of this patch will be dropped once Peter Zijlstra's
generic mmu_gather patches are merged.

Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
---
 arch/arm64/include/asm/tlb.h      |  190 +++++++++++++++++++++++++++++++++++++
 arch/arm64/include/asm/tlbflush.h |  123 ++++++++++++++++++++++++
 arch/arm64/mm/tlb.S               |   71 ++++++++++++++
 3 files changed, 384 insertions(+), 0 deletions(-)
 create mode 100644 arch/arm64/include/asm/tlb.h
 create mode 100644 arch/arm64/include/asm/tlbflush.h
 create mode 100644 arch/arm64/mm/tlb.S

diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
new file mode 100644
index 0000000..654f096
--- /dev/null
+++ b/arch/arm64/include/asm/tlb.h
@@ -0,0 +1,190 @@
+/*
+ * Based on arch/arm/include/asm/tlb.h
+ *
+ * Copyright (C) 2002 Russell King
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_TLB_H
+#define __ASM_TLB_H
+
+#include <linux/pagemap.h>
+#include <linux/swap.h>
+
+#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+
+#define MMU_GATHER_BUNDLE	8
+
+/*
+ * TLB handling.  This allows us to remove pages from the page
+ * tables, and efficiently handle the TLB issues.
+ */
+struct mmu_gather {
+	struct mm_struct	*mm;
+	unsigned int		fullmm;
+	struct vm_area_struct	*vma;
+	unsigned long		range_start;
+	unsigned long		range_end;
+	unsigned int		nr;
+	unsigned int		max;
+	struct page		**pages;
+	struct page		*local[MMU_GATHER_BUNDLE];
+};
+
+/*
+ * This is unnecessarily complex.  There are three ways the TLB shootdown
+ * code is used:
+ *  1. Unmapping a range of vmas.  See zap_page_range(), unmap_region().
+ *     tlb->fullmm = 0, and tlb_start_vma/tlb_end_vma will be called.
+ *     tlb->vma will be non-NULL.
+ *  2. Unmapping all vmas.  See exit_mmap().
+ *     tlb->fullmm = 1, and tlb_start_vma/tlb_end_vma will be called.
+ *     tlb->vma will be non-NULL.  Additionally, page tables will be freed.
+ *  3. Unmapping argument pages.  See shift_arg_pages().
+ *     tlb->fullmm = 0, but tlb_start_vma/tlb_end_vma will not be called.
+ *     tlb->vma will be NULL.
+ */
+static inline void tlb_flush(struct mmu_gather *tlb)
+{
+	if (tlb->fullmm || !tlb->vma)
+		flush_tlb_mm(tlb->mm);
+	else if (tlb->range_end > 0) {
+		flush_tlb_range(tlb->vma, tlb->range_start, tlb->range_end);
+		tlb->range_start = TASK_SIZE;
+		tlb->range_end = 0;
+	}
+}
+
+static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr)
+{
+	if (!tlb->fullmm) {
+		if (addr < tlb->range_start)
+			tlb->range_start = addr;
+		if (addr + PAGE_SIZE > tlb->range_end)
+			tlb->range_end = addr + PAGE_SIZE;
+	}
+}
+
+static inline void __tlb_alloc_page(struct mmu_gather *tlb)
+{
+	unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
+
+	if (addr) {
+		tlb->pages = (void *)addr;
+		tlb->max = PAGE_SIZE / sizeof(struct page *);
+	}
+}
+
+static inline void tlb_flush_mmu(struct mmu_gather *tlb)
+{
+	tlb_flush(tlb);
+	free_pages_and_swap_cache(tlb->pages, tlb->nr);
+	tlb->nr = 0;
+	if (tlb->pages == tlb->local)
+		__tlb_alloc_page(tlb);
+}
+
+static inline void
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int fullmm)
+{
+	tlb->mm = mm;
+	tlb->fullmm = fullmm;
+	tlb->vma = NULL;
+	tlb->max = ARRAY_SIZE(tlb->local);
+	tlb->pages = tlb->local;
+	tlb->nr = 0;
+	__tlb_alloc_page(tlb);
+}
+
+static inline void
+tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+{
+	tlb_flush_mmu(tlb);
+
+	/* keep the page table cache within bounds */
+	check_pgt_cache();
+
+	if (tlb->pages != tlb->local)
+		free_pages((unsigned long)tlb->pages, 0);
+}
+
+/*
+ * Memorize the range for the TLB flush.
+ */
+static inline void
+tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr)
+{
+	tlb_add_flush(tlb, addr);
+}
+
+/*
+ * In the case of tlb vma handling, we can optimise these away in the
+ * case where we're doing a full MM flush.  When we're doing a munmap,
+ * the vmas are adjusted to only cover the region to be torn down.
+ */
+static inline void
+tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
+{
+	if (!tlb->fullmm) {
+		tlb->vma = vma;
+		tlb->range_start = TASK_SIZE;
+		tlb->range_end = 0;
+	}
+}
+
+static inline void
+tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
+{
+	if (!tlb->fullmm)
+		tlb_flush(tlb);
+}
+
+static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+{
+	tlb->pages[tlb->nr++] = page;
+	VM_BUG_ON(tlb->nr > tlb->max);
+	return tlb->max - tlb->nr;
+}
+
+static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+{
+	if (!__tlb_remove_page(tlb, page))
+		tlb_flush_mmu(tlb);
+}
+
+static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
+	unsigned long addr)
+{
+	pgtable_page_dtor(pte);
+	tlb_add_flush(tlb, addr);
+	tlb_remove_page(tlb, pte);
+}
+
+#ifndef CONFIG_ARM64_64K_PAGES
+static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
+				  unsigned long addr)
+{
+	tlb_add_flush(tlb, addr);
+	tlb_remove_page(tlb, virt_to_page(pmdp));
+}
+#endif
+
+#define pte_free_tlb(tlb, ptep, addr)	__pte_free_tlb(tlb, ptep, addr)
+#define pmd_free_tlb(tlb, pmdp, addr)	__pmd_free_tlb(tlb, pmdp, addr)
+#define pud_free_tlb(tlb, pudp, addr)	pud_free((tlb)->mm, pudp)
+
+#define tlb_migrate_finish(mm)		do { } while (0)
+
+#endif
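
A note on the gather logic above: tlb_add_flush() only ever widens a single
[range_start, range_end) window, so a later tlb_flush() can cover every page
removed since tlb_start_vma() reset the window with one flush_tlb_range()
call. Below is a minimal, self-contained sketch of that accumulation (plain
userspace C, illustration only; the names are hypothetical and not part of
the patch):

    #include <stdio.h>

    #define PAGE_SIZE	4096UL
    #define TASK_SIZE	(1UL << 39)	/* stand-in for the user VA limit */

    struct range { unsigned long start, end; };

    /* Mirrors tlb_add_flush(): widen the pending-flush window. */
    static void range_add(struct range *r, unsigned long addr)
    {
    	if (addr < r->start)
    		r->start = addr;
    	if (addr + PAGE_SIZE > r->end)
    		r->end = addr + PAGE_SIZE;
    }

    int main(void)
    {
    	struct range r = { TASK_SIZE, 0 };	/* "empty": start > end */

    	range_add(&r, 0x12000);
    	range_add(&r, 0x15000);
    	/* one flush now covers both pages: [0x12000, 0x16000) */
    	printf("flush [%#lx, %#lx)\n", r.start, r.end);
    	return 0;
    }
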
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
new file mode 100644
index 0000000..615d131
--- /dev/null
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -0,0 +1,123 @@
+/*
+ * Based on arch/arm/include/asm/tlbflush.h
+ *
+ * Copyright (C) 1999-2003 Russell King
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_TLBFLUSH_H
+#define __ASM_TLBFLUSH_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/sched.h>
+#include <asm/cputype.h>
+
+extern void __cpu_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);
+extern void __cpu_flush_kern_tlb_range(unsigned long, unsigned long);
+
+extern struct cpu_tlb_fns cpu_tlb;
+
+/*
+ *	TLB Management
+ *	==============
+ *
+ *	The arch/arm64/mm/tlb.S file implements these methods.
+ *
+ *	The TLB specific code is expected to perform whatever tests it
+ *	needs to determine if it should invalidate the TLB for each
+ *	call.  Start addresses are inclusive and end addresses are
+ *	exclusive; it is safe to round these addresses down.
+ *
+ *	flush_tlb_all()
+ *
+ *		Invalidate the entire TLB.
+ *
+ *	flush_tlb_mm(mm)
+ *
+ *		Invalidate all TLB entries in a particular address
+ *		space.
+ *		- mm	- mm_struct describing address space
+ *
+ *	flush_tlb_range(vma,start,end)
+ *
+ *		Invalidate a range of TLB entries in the specified
+ *		address space.
+ *		- vma	- vma_struct describing address range
+ *		- start - start address (may not be aligned)
+ *		- end	- end address (exclusive, may not be aligned)
+ *
+ *	flush_tlb_page(vma,vaddr)
+ *
+ *		Invalidate the specified page in the specified address space.
+ *		- vma	- vma_struct describing address range
+ *		- vaddr - virtual address (may not be aligned)
+ *
+ *	flush_kern_tlb_page(kaddr)
+ *
+ *		Invalidate the TLB entry for the specified page.  The address
+ *		will be in the kernel's virtual memory space.  Current uses
+ *		only require the D-TLB to be invalidated.
+ *		- kaddr - Kernel virtual memory address
+ */
+static inline void flush_tlb_all(void)
+{
+	dsb();
+	asm("tlbi	vmalle1is");
+	dsb();
+	isb();
+}
+
+static inline void flush_tlb_mm(struct mm_struct *mm)
+{
+	unsigned long asid = (unsigned long)ASID(mm) << 48;
+
+	dsb();
+	asm("tlbi	aside1is, %0" : : "r" (asid));
+	dsb();
+}
+
+static inline void flush_tlb_page(struct vm_area_struct *vma,
+				  unsigned long uaddr)
+{
+	unsigned long addr = uaddr >> 12 |
+		((unsigned long)ASID(vma->vm_mm) << 48);
+
+	dsb();
+	asm("tlbi	vae1is, %0" : : "r" (addr));
+	dsb();
+}
+
+/*
+ * Convert calls to our calling convention.
+ */
+#define flush_tlb_range(vma,start,end)	__cpu_flush_user_tlb_range(start,end,vma)
+#define flush_tlb_kernel_range(s,e)	__cpu_flush_kern_tlb_range(s,e)
+
+/*
+ * On AArch64, the cache coherency is handled via the set_pte_at() function.
+ */
+static inline void update_mmu_cache(struct vm_area_struct *vma,
+				    unsigned long addr, pte_t *ptep)
+{
+	/*
+	 * set_pte() does not have a DSB, so make sure that the page table
+	 * write is visible.
+	 */
+	dsb();
+}
+
+#endif
+
+#endif
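
The inline asm above encodes its TLBI operand by packing the virtual page
number (VA >> 12) into the low bits and the 16-bit ASID into bits [63:48],
which is the layout the "tlbi vae1is" operation expects. Here is a
userspace-runnable sketch of just that encoding (the helper name is
hypothetical, for illustration only):

    #include <stdint.h>
    #include <stdio.h>

    /* Same packing as flush_tlb_page(): VA >> 12 in the low bits,
     * the ASID in bits [63:48]. */
    static uint64_t tlbi_vae1is_operand(uint64_t vaddr, uint16_t asid)
    {
    	return (vaddr >> 12) | ((uint64_t)asid << 48);
    }

    int main(void)
    {
    	/* page at VA 0x400000 in ASID 5 -> 0x5000000000400 */
    	printf("%#llx\n",
    	       (unsigned long long)tlbi_vae1is_operand(0x400000, 5));
    	return 0;
    }
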
diff --git a/arch/arm64/mm/tlb.S b/arch/arm64/mm/tlb.S
new file mode 100644
index 0000000..8ae80a1
--- /dev/null
+++ b/arch/arm64/mm/tlb.S
@@ -0,0 +1,71 @@
+/*
+ * Based on arch/arm/mm/tlb.S
+ *
+ * Copyright (C) 1997-2002 Russell King
+ * Copyright (C) 2012 ARM Ltd.
+ * Written by Catalin Marinas <catalin.marinas@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include <asm/asm-offsets.h>
+#include <asm/page.h>
+#include <asm/tlbflush.h>
+#include "proc-macros.S"
+
+/*
+ *	__cpu_flush_user_tlb_range(start, end, vma)
+ *
+ *	Invalidate a range of TLB entries in the specified address space.
+ *
+ *	- start - start address (may not be aligned)
+ *	- end   - end address (exclusive, may not be aligned)
+ *	- vma   - vma_struct describing address range
+ */
+ENTRY(__cpu_flush_user_tlb_range)
+	vma_vm_mm x3, x2			// get vma->vm_mm
+	mmid	x3, x3				// get vm_mm->context.id
+	dsb	sy
+	lsr	x0, x0, #12			// align address
+	lsr	x1, x1, #12
+	bfi	x0, x3, #48, #16		// start VA and ASID
+	bfi	x1, x3, #48, #16		// end VA and ASID
+1:	tlbi	vae1is, x0			// TLB invalidate by address and ASID
+	add	x0, x0, #1
+	cmp	x0, x1
+	b.lo	1b
+	dsb	sy
+	ret
+ENDPROC(__cpu_flush_user_tlb_range)
+
+/*
+ *	__cpu_flush_kern_tlb_range(start,end)
+ *
+ *	Invalidate a range of kernel TLB entries.
+ *
+ *	- start - start address (may not be aligned)
+ *	- end   - end address (exclusive, may not be aligned)
+ */
+ENTRY(__cpu_flush_kern_tlb_range)
+	dsb	sy
+	lsr	x0, x0, #12			// align address
+	lsr	x1, x1, #12
+1:	tlbi	vaae1is, x0			// TLB invalidate by address
+	add	x0, x0, #1
+	cmp	x0, x1
+	b.lo	1b
+	dsb	sy
+	isb
+	ret
+ENDPROC(__cpu_flush_kern_tlb_range)
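
For readers less fluent in AArch64 assembly, here is a C-level sketch of
__cpu_flush_kern_tlb_range() above (an illustrative rewrite assuming 4KB
pages, not part of the patch; TLBI is a privileged instruction, so this
only makes sense as EL1/kernel code):

    static void flush_kern_tlb_range_sketch(unsigned long start,
    					    unsigned long end)
    {
    	unsigned long addr;

    	/* make prior page table updates visible before invalidating */
    	asm volatile("dsb sy" : : : "memory");

    	/* one invalidate per page; the TLBI operand is the VA >> 12 */
    	for (addr = start >> 12; addr < end >> 12; addr++)
    		asm volatile("tlbi vaae1is, %0" : : "r" (addr));

    	/* wait for completion everywhere, then resync instruction fetch */
    	asm volatile("dsb sy" : : : "memory");
    	asm volatile("isb" : : : "memory");
    }

The b.lo branch in the assembly is an unsigned less-than comparison, which
is what the loop condition above mirrors.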
