From: Peter Zijlstra <a.p.zijlstra@chello.nl>
To: Andrea Arcangeli <aarcange@redhat.com>,
Avi Kivity <avi@redhat.com>, Thomas Gleixner <tglx@linutronix.de>,
Rik van Riel <riel@redhat.com>, Ingo Molnar <mingo@elte.hu>,
	akpm@linux-foundation.org,
	Linus Torvalds <torvalds@linux-foundation.org>
Cc: linux-kernel@vger.kernel.org, linux-arch@vger.kernel.org,
Benjamin Herrenschmidt <benh@kernel.crashing.org>,
David Miller <davem@davemloft.net>,
Hugh Dickins <hugh.dickins@tiscali.co.uk>,
Mel Gorman <mel@csn.ul.ie>, Nick Piggin <npiggin@suse.de>,
Peter Zijlstra <a.p.zijlstra@chello.nl>,
"Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Subject: [PATCH 13/28] sparc: Preemptible mmu_gather
Date: Mon, 07 Jun 2010 13:07:07 +0200
Message-ID: <20100607111407.584761191@chello.nl>
In-Reply-To: 20100607110654.606530953@chello.nl
[-- Attachment #1: mm-preempt-tlb-gather-sparc.patch --]
[-- Type: text/plain, Size: 8844 bytes --]
Rework the sparc mmu_gather usage to conform to the new world order :-)
Sparc mmu_gather does two things:
- tracks vaddrs to unhash
- tracks pages to free
Split these two things, as powerpc has done: keep the vaddrs in
per-cpu data structures and flush them on context switch.
The remaining bits can then use the generic mmu_gather.
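To make the intent of the diff easier to follow, here is a rough sketch
(not part of the patch) of the per-cpu vaddr batch that replaces the old
sparc mmu_gather fields. It is simplified from the hunks below: locking of
the mm context, the TSB flush and the SMP cross-call are elided, and the
helper name batch_add_sketch is made up for illustration; the real function
is tlb_batch_add() in the arch/sparc/mm/tlb.c hunk.

	struct tlb_batch {
		struct mm_struct *mm;
		unsigned long tlb_nr;
		unsigned long vaddrs[TLB_BATCH_NR];	/* TLB_BATCH_NR == 192 */
	};

	static DEFINE_PER_CPU(struct tlb_batch, tlb_batch);

	/* Called from the pte update paths: queue a user vaddr to unhash. */
	static void batch_add_sketch(struct mm_struct *mm, unsigned long vaddr)
	{
		struct tlb_batch *tb = &get_cpu_var(tlb_batch);

		if (tb->tlb_nr && tb->mm != mm)
			flush_tlb_pending();	/* different mm: drain first */
		if (!tb->tlb_nr)
			tb->mm = mm;
		tb->vaddrs[tb->tlb_nr++] = vaddr & PAGE_MASK;
		if (tb->tlb_nr >= TLB_BATCH_NR)
			flush_tlb_pending();	/* batch full: drain */
		put_cpu_var(tlb_batch);
	}

Page freeing no longer appears here at all; the asm-generic mmu_gather
handles it, and tlb_flush() in tlb_64.h simply maps to flush_tlb_pending().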
Cc: David Miller <davem@davemloft.net>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
---
arch/sparc/include/asm/pgalloc_64.h | 3 +
arch/sparc/include/asm/tlb_64.h | 91 ++---------------------------------
arch/sparc/include/asm/tlbflush_64.h | 12 +++-
arch/sparc/mm/tlb.c | 42 +++++++++-------
arch/sparc/mm/tsb.c | 15 +++--
5 files changed, 51 insertions(+), 112 deletions(-)
Index: linux-2.6/arch/sparc/include/asm/pgalloc_64.h
===================================================================
--- linux-2.6.orig/arch/sparc/include/asm/pgalloc_64.h
+++ linux-2.6/arch/sparc/include/asm/pgalloc_64.h
@@ -78,4 +78,7 @@ static inline void check_pgt_cache(void)
quicklist_trim(0, NULL, 25, 16);
}
+#define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, pte)
+#define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd)
+
#endif /* _SPARC64_PGALLOC_H */
Index: linux-2.6/arch/sparc/include/asm/tlb_64.h
===================================================================
--- linux-2.6.orig/arch/sparc/include/asm/tlb_64.h
+++ linux-2.6/arch/sparc/include/asm/tlb_64.h
@@ -7,66 +7,11 @@
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
-#define TLB_BATCH_NR 192
-
-/*
- * For UP we don't need to worry about TLB flush
- * and page free order so much..
- */
-#ifdef CONFIG_SMP
- #define FREE_PTE_NR 506
- #define tlb_fast_mode(bp) ((bp)->pages_nr == ~0U)
-#else
- #define FREE_PTE_NR 1
- #define tlb_fast_mode(bp) 1
-#endif
-
-struct mmu_gather {
- struct mm_struct *mm;
- unsigned int pages_nr;
- unsigned int need_flush;
- unsigned int fullmm;
- unsigned int tlb_nr;
- unsigned long vaddrs[TLB_BATCH_NR];
- struct page *pages[FREE_PTE_NR];
-};
-
-DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
-
#ifdef CONFIG_SMP
extern void smp_flush_tlb_pending(struct mm_struct *,
unsigned long, unsigned long *);
#endif
-extern void __flush_tlb_pending(unsigned long, unsigned long, unsigned long *);
-extern void flush_tlb_pending(void);
-
-static inline struct mmu_gather *tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
-{
- struct mmu_gather *mp = &get_cpu_var(mmu_gathers);
-
- BUG_ON(mp->tlb_nr);
-
- mp->mm = mm;
- mp->pages_nr = num_online_cpus() > 1 ? 0U : ~0U;
- mp->fullmm = full_mm_flush;
-
- return mp;
-}
-
-
-static inline void tlb_flush_mmu(struct mmu_gather *mp)
-{
- if (!mp->fullmm)
- flush_tlb_pending();
- if (mp->need_flush) {
- free_pages_and_swap_cache(mp->pages, mp->pages_nr);
- mp->pages_nr = 0;
- mp->need_flush = 0;
- }
-
-}
-
#ifdef CONFIG_SMP
extern void smp_flush_tlb_mm(struct mm_struct *mm);
#define do_flush_tlb_mm(mm) smp_flush_tlb_mm(mm)
@@ -74,38 +19,14 @@ extern void smp_flush_tlb_mm(struct mm_s
#define do_flush_tlb_mm(mm) __flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT)
#endif
-static inline void tlb_finish_mmu(struct mmu_gather *mp, unsigned long start, unsigned long end)
-{
- tlb_flush_mmu(mp);
-
- if (mp->fullmm)
- mp->fullmm = 0;
-
- /* keep the page table cache within bounds */
- check_pgt_cache();
-
- put_cpu_var(mmu_gathers);
-}
-
-static inline void tlb_remove_page(struct mmu_gather *mp, struct page *page)
-{
- if (tlb_fast_mode(mp)) {
- free_page_and_swap_cache(page);
- return;
- }
- mp->need_flush = 1;
- mp->pages[mp->pages_nr++] = page;
- if (mp->pages_nr >= FREE_PTE_NR)
- tlb_flush_mmu(mp);
-}
-
-#define tlb_remove_tlb_entry(mp,ptep,addr) do { } while (0)
-#define pte_free_tlb(mp, ptepage, addr) pte_free((mp)->mm, ptepage)
-#define pmd_free_tlb(mp, pmdp, addr) pmd_free((mp)->mm, pmdp)
-#define pud_free_tlb(tlb,pudp, addr) __pud_free_tlb(tlb,pudp,addr)
+extern void __flush_tlb_pending(unsigned long, unsigned long, unsigned long *);
+extern void flush_tlb_pending(void);
-#define tlb_migrate_finish(mm) do { } while (0)
#define tlb_start_vma(tlb, vma) do { } while (0)
#define tlb_end_vma(tlb, vma) do { } while (0)
+#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
+#define tlb_flush(tlb) flush_tlb_pending()
+
+#include <asm-generic/tlb.h>
#endif /* _SPARC64_TLB_H */
Index: linux-2.6/arch/sparc/include/asm/tlbflush_64.h
===================================================================
--- linux-2.6.orig/arch/sparc/include/asm/tlbflush_64.h
+++ linux-2.6/arch/sparc/include/asm/tlbflush_64.h
@@ -5,9 +5,17 @@
#include <asm/mmu_context.h>
/* TSB flush operations. */
-struct mmu_gather;
+
+#define TLB_BATCH_NR 192
+
+struct tlb_batch {
+ struct mm_struct *mm;
+ unsigned long tlb_nr;
+ unsigned long vaddrs[TLB_BATCH_NR];
+};
+
extern void flush_tsb_kernel_range(unsigned long start, unsigned long end);
-extern void flush_tsb_user(struct mmu_gather *mp);
+extern void flush_tsb_user(struct tlb_batch *tb);
/* TLB flush operations. */
Index: linux-2.6/arch/sparc/mm/tlb.c
===================================================================
--- linux-2.6.orig/arch/sparc/mm/tlb.c
+++ linux-2.6/arch/sparc/mm/tlb.c
@@ -19,33 +19,33 @@
/* Heavily inspired by the ppc64 code. */
-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
+static DEFINE_PER_CPU(struct tlb_batch, tlb_batch);
void flush_tlb_pending(void)
{
- struct mmu_gather *mp = &get_cpu_var(mmu_gathers);
+ struct tlb_batch *tb = &get_cpu_var(tlb_batch);
- if (mp->tlb_nr) {
- flush_tsb_user(mp);
+ if (tb->tlb_nr) {
+ flush_tsb_user(tb);
- if (CTX_VALID(mp->mm->context)) {
+ if (CTX_VALID(tb->mm->context)) {
#ifdef CONFIG_SMP
- smp_flush_tlb_pending(mp->mm, mp->tlb_nr,
- &mp->vaddrs[0]);
+ smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
+ &tb->vaddrs[0]);
#else
- __flush_tlb_pending(CTX_HWBITS(mp->mm->context),
- mp->tlb_nr, &mp->vaddrs[0]);
+ __flush_tlb_pending(CTX_HWBITS(tb->mm->context),
+ tb->tlb_nr, &tb->vaddrs[0]);
#endif
}
- mp->tlb_nr = 0;
+ tb->tlb_nr = 0;
}
- put_cpu_var(mmu_gathers);
+ put_cpu_var(tlb_batch);
}
void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t orig)
{
- struct mmu_gather *mp = &__get_cpu_var(mmu_gathers);
+ struct tlb_batch *tb = &get_cpu_var(tlb_batch);
unsigned long nr;
vaddr &= PAGE_MASK;
@@ -77,21 +77,27 @@ void tlb_batch_add(struct mm_struct *mm,
no_cache_flush:
- if (mp->fullmm)
+ /*
+ if (tb->fullmm) {
+ put_cpu_var(tlb_batch);
return;
+ }
+ */
- nr = mp->tlb_nr;
+ nr = tb->tlb_nr;
- if (unlikely(nr != 0 && mm != mp->mm)) {
+ if (unlikely(nr != 0 && mm != tb->mm)) {
flush_tlb_pending();
nr = 0;
}
if (nr == 0)
- mp->mm = mm;
+ tb->mm = mm;
- mp->vaddrs[nr] = vaddr;
- mp->tlb_nr = ++nr;
+ tb->vaddrs[nr] = vaddr;
+ tb->tlb_nr = ++nr;
if (nr >= TLB_BATCH_NR)
flush_tlb_pending();
+
+ put_cpu_var(tlb_batch);
}
Index: linux-2.6/arch/sparc/mm/tsb.c
===================================================================
--- linux-2.6.orig/arch/sparc/mm/tsb.c
+++ linux-2.6/arch/sparc/mm/tsb.c
@@ -47,12 +47,13 @@ void flush_tsb_kernel_range(unsigned lon
}
}
-static void __flush_tsb_one(struct mmu_gather *mp, unsigned long hash_shift, unsigned long tsb, unsigned long nentries)
+static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift,
+ unsigned long tsb, unsigned long nentries)
{
unsigned long i;
- for (i = 0; i < mp->tlb_nr; i++) {
- unsigned long v = mp->vaddrs[i];
+ for (i = 0; i < tb->tlb_nr; i++) {
+ unsigned long v = tb->vaddrs[i];
unsigned long tag, ent, hash;
v &= ~0x1UL;
@@ -65,9 +66,9 @@ static void __flush_tsb_one(struct mmu_g
}
}
-void flush_tsb_user(struct mmu_gather *mp)
+void flush_tsb_user(struct tlb_batch *tb)
{
- struct mm_struct *mm = mp->mm;
+ struct mm_struct *mm = tb->mm;
unsigned long nentries, base, flags;
spin_lock_irqsave(&mm->context.lock, flags);
@@ -76,7 +77,7 @@ void flush_tsb_user(struct mmu_gather *m
nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
if (tlb_type == cheetah_plus || tlb_type == hypervisor)
base = __pa(base);
- __flush_tsb_one(mp, PAGE_SHIFT, base, nentries);
+ __flush_tsb_one(tb, PAGE_SHIFT, base, nentries);
#ifdef CONFIG_HUGETLB_PAGE
if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
@@ -84,7 +85,7 @@ void flush_tsb_user(struct mmu_gather *m
nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
if (tlb_type == cheetah_plus || tlb_type == hypervisor)
base = __pa(base);
- __flush_tsb_one(mp, HPAGE_SHIFT, base, nentries);
+ __flush_tsb_one(tb, HPAGE_SHIFT, base, nentries);
}
#endif
spin_unlock_irqrestore(&mm->context.lock, flags);