From: Peter Zijlstra <a.p.zijlstra@chello.nl>
To: Andrea Arcangeli <aarcange@redhat.com>, Avi Kivity <avi@redhat.com>,
	Thomas Gleixner <tglx@linutronix.de>, Rik van Riel <riel@redhat.com>,
	Ingo Molnar <mingo@elte.hu>, akpm@linux-foundation.org,
	Linus Torvalds <torvalds@linux-foundation.org>
Cc: linux-kernel@vger.kernel.org, linux-arch@vger.kernel.org,
	Benjamin Herrenschmidt <benh@kernel.crashing.org>,
	David Miller <davem@davemloft.net>,
	Hugh Dickins <hugh.dickins@tiscali.co.uk>, Mel Gorman <mel@csn.ul.ie>,
	Nick Piggin <npiggin@suse.de>, Peter Zijlstra <a.p.zijlstra@chello.nl>,
	Paul McKenney <paulmck@linux.vnet.ibm.com>,
	Yanmin Zhang <yanmin_zhang@linux.intel.com>,
	Stephen Rothwell <sfr@canb.auug.org.au>
Subject: [PATCH 09/20] sparc: Preemptible mmu_gather
Date: Sat, 28 Aug 2010 16:16:46 +0200
Message-ID: <20100828142456.032152056@chello.nl>
In-Reply-To: <20100828141637.421594670@chello.nl>

[-- Attachment #1: mm-preempt-tlb-gather-sparc.patch --]
[-- Type: text/plain, Size: 8844 bytes --]

Rework the sparc mmu_gather usage to conform to the new world order :-)

Sparc mmu_gather does two things:
 - tracks vaddrs to unhash
 - tracks pages to free

Split these two things like powerpc has done and keep the vaddrs
in per-cpu data structures and flush them on context switch.

The remaining bits can then use the generic mmu_gather.

Cc: David Miller <davem@davemloft.net>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
---
 arch/sparc/include/asm/pgalloc_64.h  |    3 +
 arch/sparc/include/asm/tlb_64.h      |   91 ++---------------------------------
 arch/sparc/include/asm/tlbflush_64.h |   12 +++-
 arch/sparc/mm/tlb.c                  |   42 +++++++++-------
 arch/sparc/mm/tsb.c                  |   15 +++--
 5 files changed, 51 insertions(+), 112 deletions(-)

Index: linux-2.6/arch/sparc/include/asm/pgalloc_64.h
===================================================================
--- linux-2.6.orig/arch/sparc/include/asm/pgalloc_64.h
+++ linux-2.6/arch/sparc/include/asm/pgalloc_64.h
@@ -78,4 +78,7 @@ static inline void check_pgt_cache(void)
 	quicklist_trim(0, NULL, 25, 16);
 }
 
+#define __pte_free_tlb(tlb, pte, addr)	pte_free((tlb)->mm, pte)
+#define __pmd_free_tlb(tlb, pmd, addr)	pmd_free((tlb)->mm, pmd)
+
 #endif /* _SPARC64_PGALLOC_H */
Index: linux-2.6/arch/sparc/include/asm/tlb_64.h
===================================================================
--- linux-2.6.orig/arch/sparc/include/asm/tlb_64.h
+++ linux-2.6/arch/sparc/include/asm/tlb_64.h
@@ -7,66 +7,11 @@
 #include <asm/tlbflush.h>
 #include <asm/mmu_context.h>
 
-#define TLB_BATCH_NR	192
-
-/*
- * For UP we don't need to worry about TLB flush
- * and page free order so much..
- */
-#ifdef CONFIG_SMP
-  #define FREE_PTE_NR	506
-  #define tlb_fast_mode(bp) ((bp)->pages_nr == ~0U)
-#else
-  #define FREE_PTE_NR	1
-  #define tlb_fast_mode(bp) 1
-#endif
-
-struct mmu_gather {
-	struct mm_struct *mm;
-	unsigned int pages_nr;
-	unsigned int need_flush;
-	unsigned int fullmm;
-	unsigned int tlb_nr;
-	unsigned long vaddrs[TLB_BATCH_NR];
-	struct page *pages[FREE_PTE_NR];
-};
-
-DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
-
 #ifdef CONFIG_SMP
 extern void smp_flush_tlb_pending(struct mm_struct *,
 				  unsigned long, unsigned long *);
 #endif
 
-extern void __flush_tlb_pending(unsigned long, unsigned long, unsigned long *);
-extern void flush_tlb_pending(void);
-
-static inline struct mmu_gather *tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
-{
-	struct mmu_gather *mp = &get_cpu_var(mmu_gathers);
-
-	BUG_ON(mp->tlb_nr);
-
-	mp->mm = mm;
-	mp->pages_nr = num_online_cpus() > 1 ? 0U : ~0U;
-	mp->fullmm = full_mm_flush;
-
-	return mp;
-}
-
-
-static inline void tlb_flush_mmu(struct mmu_gather *mp)
-{
-	if (!mp->fullmm)
-		flush_tlb_pending();
-	if (mp->need_flush) {
-		free_pages_and_swap_cache(mp->pages, mp->pages_nr);
-		mp->pages_nr = 0;
-		mp->need_flush = 0;
-	}
-
-}
-
 #ifdef CONFIG_SMP
 extern void smp_flush_tlb_mm(struct mm_struct *mm);
 #define do_flush_tlb_mm(mm) smp_flush_tlb_mm(mm)
@@ -74,38 +19,14 @@ extern void smp_flush_tlb_mm(struct mm_s
 #define do_flush_tlb_mm(mm) __flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT)
 #endif
 
-static inline void tlb_finish_mmu(struct mmu_gather *mp, unsigned long start, unsigned long end)
-{
-	tlb_flush_mmu(mp);
-
-	if (mp->fullmm)
-		mp->fullmm = 0;
-
-	/* keep the page table cache within bounds */
-	check_pgt_cache();
-
-	put_cpu_var(mmu_gathers);
-}
-
-static inline void tlb_remove_page(struct mmu_gather *mp, struct page *page)
-{
-	if (tlb_fast_mode(mp)) {
-		free_page_and_swap_cache(page);
-		return;
-	}
-	mp->need_flush = 1;
-	mp->pages[mp->pages_nr++] = page;
-	if (mp->pages_nr >= FREE_PTE_NR)
-		tlb_flush_mmu(mp);
-}
-
-#define tlb_remove_tlb_entry(mp,ptep,addr) do { } while (0)
-#define pte_free_tlb(mp, ptepage, addr) pte_free((mp)->mm, ptepage)
-#define pmd_free_tlb(mp, pmdp, addr) pmd_free((mp)->mm, pmdp)
-#define pud_free_tlb(tlb,pudp, addr) __pud_free_tlb(tlb,pudp,addr)
+extern void __flush_tlb_pending(unsigned long, unsigned long, unsigned long *);
+extern void flush_tlb_pending(void);
 
-#define tlb_migrate_finish(mm) do { } while (0)
 #define tlb_start_vma(tlb, vma) do { } while (0)
 #define tlb_end_vma(tlb, vma) do { } while (0)
+#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
+#define tlb_flush(tlb)	flush_tlb_pending()
+
+#include <asm-generic/tlb.h>
 
 #endif /* _SPARC64_TLB_H */
Index: linux-2.6/arch/sparc/include/asm/tlbflush_64.h
===================================================================
--- linux-2.6.orig/arch/sparc/include/asm/tlbflush_64.h
+++ linux-2.6/arch/sparc/include/asm/tlbflush_64.h
@@ -5,9 +5,17 @@
 #include <asm/mmu_context.h>
 
 /* TSB flush operations. */
-struct mmu_gather;
+
+#define TLB_BATCH_NR	192
+
+struct tlb_batch {
+	struct mm_struct *mm;
+	unsigned long tlb_nr;
+	unsigned long vaddrs[TLB_BATCH_NR];
+};
+
 extern void flush_tsb_kernel_range(unsigned long start, unsigned long end);
-extern void flush_tsb_user(struct mmu_gather *mp);
+extern void flush_tsb_user(struct tlb_batch *tb);
 
 /* TLB flush operations. */
 
Index: linux-2.6/arch/sparc/mm/tlb.c
===================================================================
--- linux-2.6.orig/arch/sparc/mm/tlb.c
+++ linux-2.6/arch/sparc/mm/tlb.c
@@ -19,33 +19,33 @@
 
 /* Heavily inspired by the ppc64 code.  */
 
-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
+static DEFINE_PER_CPU(struct tlb_batch, tlb_batch);
 
 void flush_tlb_pending(void)
 {
-	struct mmu_gather *mp = &get_cpu_var(mmu_gathers);
+	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
 
-	if (mp->tlb_nr) {
-		flush_tsb_user(mp);
+	if (tb->tlb_nr) {
+		flush_tsb_user(tb);
 
-		if (CTX_VALID(mp->mm->context)) {
+		if (CTX_VALID(tb->mm->context)) {
 #ifdef CONFIG_SMP
-			smp_flush_tlb_pending(mp->mm, mp->tlb_nr,
-					      &mp->vaddrs[0]);
+			smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
+					      &tb->vaddrs[0]);
 #else
-			__flush_tlb_pending(CTX_HWBITS(mp->mm->context),
-					    mp->tlb_nr, &mp->vaddrs[0]);
+			__flush_tlb_pending(CTX_HWBITS(tb->mm->context),
+					    tb->tlb_nr, &tb->vaddrs[0]);
 #endif
 		}
-		mp->tlb_nr = 0;
+		tb->tlb_nr = 0;
 	}
 
-	put_cpu_var(mmu_gathers);
+	put_cpu_var(tlb_batch);
 }
 
 void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t orig)
 {
-	struct mmu_gather *mp = &__get_cpu_var(mmu_gathers);
+	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
 	unsigned long nr;
 
 	vaddr &= PAGE_MASK;
@@ -77,21 +77,27 @@ void tlb_batch_add(struct mm_struct *mm,
 
 no_cache_flush:
 
-	if (mp->fullmm)
+	/*
+	if (tb->fullmm) {
+		put_cpu_var(tlb_batch);
 		return;
+	}
+	*/
 
-	nr = mp->tlb_nr;
+	nr = tb->tlb_nr;
 
-	if (unlikely(nr != 0 && mm != mp->mm)) {
+	if (unlikely(nr != 0 && mm != tb->mm)) {
 		flush_tlb_pending();
 		nr = 0;
 	}
 
 	if (nr == 0)
-		mp->mm = mm;
+		tb->mm = mm;
 
-	mp->vaddrs[nr] = vaddr;
-	mp->tlb_nr = ++nr;
+	tb->vaddrs[nr] = vaddr;
+	tb->tlb_nr = ++nr;
 	if (nr >= TLB_BATCH_NR)
 		flush_tlb_pending();
+
+	put_cpu_var(tlb_batch);
 }
Index: linux-2.6/arch/sparc/mm/tsb.c
===================================================================
--- linux-2.6.orig/arch/sparc/mm/tsb.c
+++ linux-2.6/arch/sparc/mm/tsb.c
@@ -47,12 +47,13 @@ void flush_tsb_kernel_range(unsigned lon
 	}
 }
 
-static void __flush_tsb_one(struct mmu_gather *mp, unsigned long hash_shift, unsigned long tsb, unsigned long nentries)
+static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift,
+			    unsigned long tsb, unsigned long nentries)
 {
 	unsigned long i;
 
-	for (i = 0; i < mp->tlb_nr; i++) {
-		unsigned long v = mp->vaddrs[i];
+	for (i = 0; i < tb->tlb_nr; i++) {
+		unsigned long v = tb->vaddrs[i];
 		unsigned long tag, ent, hash;
 
 		v &= ~0x1UL;
@@ -65,9 +66,9 @@ static void __flush_tsb_one(struct mmu_g
 	}
 }
 
-void flush_tsb_user(struct mmu_gather *mp)
+void flush_tsb_user(struct tlb_batch *tb)
 {
-	struct mm_struct *mm = mp->mm;
+	struct mm_struct *mm = tb->mm;
 	unsigned long nentries, base, flags;
 
 	spin_lock_irqsave(&mm->context.lock, flags);
@@ -76,7 +77,7 @@ void flush_tsb_user(struct mmu_gather *m
 	nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
 	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
 		base = __pa(base);
-	__flush_tsb_one(mp, PAGE_SHIFT, base, nentries);
+	__flush_tsb_one(tb, PAGE_SHIFT, base, nentries);
 
 #ifdef CONFIG_HUGETLB_PAGE
 	if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
@@ -84,7 +85,7 @@ void flush_tsb_user(struct mmu_gather *m
 		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
 		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
 			base = __pa(base);
-		__flush_tsb_one(mp, HPAGE_SHIFT, base, nentries);
+		__flush_tsb_one(tb, HPAGE_SHIFT, base, nentries);
 	}
 #endif
 	spin_unlock_irqrestore(&mm->context.lock, flags);
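
For reference, the batching pattern kept on the sparc side (collect vaddrs
per cpu and drain the batch when it fills, when the mm changes, or at a
flush point such as a context switch) can be sketched as stand-alone C.
The sketch below is illustrative only: the demo_* names are hypothetical
stand-ins rather than kernel API, and the real code uses the
get_cpu_var()/put_cpu_var() per-cpu accessors and IPI-backed flushes such
as smp_flush_tlb_pending().

#include <stdio.h>

#define TLB_BATCH_NR 192

struct demo_mm { int id; };	/* stand-in for struct mm_struct */

struct demo_tlb_batch {
	struct demo_mm *mm;		/* address space the vaddrs belong to */
	unsigned long tlb_nr;		/* number of queued addresses */
	unsigned long vaddrs[TLB_BATCH_NR];
};

/* The kernel keeps one of these per CPU; a single instance suffices here. */
static struct demo_tlb_batch batch;

/* Drain the batch: the kernel would flush the TSB and TLB entries here. */
static void demo_flush_tlb_pending(void)
{
	unsigned long i;

	for (i = 0; i < batch.tlb_nr; i++)
		printf("flush mm %d vaddr %#lx\n", batch.mm->id, batch.vaddrs[i]);
	batch.tlb_nr = 0;
}

/* Queue one address; drain early if the mm changes or the batch fills. */
static void demo_tlb_batch_add(struct demo_mm *mm, unsigned long vaddr)
{
	unsigned long nr = batch.tlb_nr;

	if (nr != 0 && mm != batch.mm) {
		demo_flush_tlb_pending();
		nr = 0;
	}
	if (nr == 0)
		batch.mm = mm;

	batch.vaddrs[nr] = vaddr;
	batch.tlb_nr = ++nr;
	if (nr >= TLB_BATCH_NR)
		demo_flush_tlb_pending();
}

int main(void)
{
	struct demo_mm mm_a = { 1 }, mm_b = { 2 };

	demo_tlb_batch_add(&mm_a, 0x1000);
	demo_tlb_batch_add(&mm_a, 0x2000);
	demo_tlb_batch_add(&mm_b, 0x3000);	/* mm change forces a drain */
	demo_flush_tlb_pending();		/* e.g. at a context switch */
	return 0;
}

Draining whenever the mm changes keeps each batch homogeneous, so a flush
only ever targets a single address space; that is the same invariant
tlb_batch_add() maintains above.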