From: Peter Zijlstra <a.p.zijlstra@chello.nl>
To: Andrea Arcangeli <aarcange@redhat.com>, Avi Kivity <avi@redhat.com>,
	Thomas Gleixner <tglx@linutronix.de>, Rik van Riel <riel@redhat.com>,
	Ingo Molnar <mingo@elte.hu>, akpm@linux-foundation.org,
	Linus Torvalds <torvalds@linux-foundation.org>
Cc: linux-kernel@vger.kernel.org, linux-arch@vger.kernel.org,
	Benjamin Herrenschmidt <benh@kernel.crashing.org>,
	David Miller <davem@davemloft.net>,
	Hugh Dickins <hugh.dickins@tiscali.co.uk>,
	Mel Gorman <mel@csn.ul.ie>, Nick Piggin <npiggin@suse.de>,
	Peter Zijlstra <a.p.zijlstra@chello.nl>,
	Paul McKenney <paulmck@linux.vnet.ibm.com>,
	Yanmin Zhang <yanmin_zhang@linux.intel.com>,
	Stephen Rothwell <sfr@canb.auug.org.au>
Subject: [PATCH 08/20] powerpc: Preemptible mmu_gather
Date: Sat, 28 Aug 2010 16:16:45 +0200
Message-ID: <20100828142455.960494507@chello.nl>
In-Reply-To: <20100828141637.421594670@chello.nl>

[-- Attachment #1: mm-preempt-tlb-gather-power.patch --]
[-- Type: text/plain, Size: 8654 bytes --]

Fix up powerpc to the new mmu_gather stuff.

PPC has an extra batching queue to RCU free the actual pagetable
allocations, use the ARCH extensions for that for now.

For the ppc64_tlb_batch, which tracks the vaddrs to unhash from the
hardware hash-table, keep using per-cpu arrays but flush on context
switch and use a TLF bit to track the lazy_mmu state.

Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
---
 arch/powerpc/include/asm/pgalloc.h     |    4 ++--
 arch/powerpc/include/asm/thread_info.h |    2 ++
 arch/powerpc/include/asm/tlb.h         |   10 ++++++++++
 arch/powerpc/kernel/process.c          |   19 +++++++++++++++++++
 arch/powerpc/mm/pgtable.c              |   14 ++++----------
 arch/powerpc/mm/tlb_hash32.c           |    2 +-
 arch/powerpc/mm/tlb_hash64.c           |   12 +++++++-----
 arch/powerpc/mm/tlb_nohash.c           |    2 +-
 8 files changed, 46 insertions(+), 19 deletions(-)

Index: linux-2.6/arch/powerpc/include/asm/tlb.h
===================================================================
--- linux-2.6.orig/arch/powerpc/include/asm/tlb.h
+++ linux-2.6/arch/powerpc/include/asm/tlb.h
@@ -28,6 +28,16 @@
 #define tlb_start_vma(tlb, vma)	do { } while (0)
 #define tlb_end_vma(tlb, vma)	do { } while (0)
 
+#define HAVE_ARCH_MMU_GATHER 1
+
+struct pte_freelist_batch;
+
+struct arch_mmu_gather {
+	struct pte_freelist_batch *batch;
+};
+
+#define ARCH_MMU_GATHER_INIT (struct arch_mmu_gather){ .batch = NULL, }
+
 extern void tlb_flush(struct mmu_gather *tlb);
 
 /* Get the generic bits... */

Index: linux-2.6/arch/powerpc/kernel/process.c
===================================================================
--- linux-2.6.orig/arch/powerpc/kernel/process.c
+++ linux-2.6/arch/powerpc/kernel/process.c
@@ -393,6 +393,9 @@ struct task_struct *__switch_to(struct t
 	struct thread_struct *new_thread, *old_thread;
 	unsigned long flags;
 	struct task_struct *last;
+#ifdef CONFIG_PPC64
+	struct ppc64_tlb_batch *batch;
+#endif
 
 #ifdef CONFIG_SMP
 	/* avoid complexity of lazy save/restore of fpu
@@ -511,6 +514,14 @@ struct task_struct *__switch_to(struct t
 		old_thread->accum_tb += (current_tb - start_tb);
 		new_thread->start_tb = current_tb;
 	}
+
+	batch = &__get_cpu_var(ppc64_tlb_batch);
+	if (batch->active) {
+		task_thread_info(prev)->local_flags |= _TLF_LAZY_MMU;
+		if (batch->index)
+			__flush_tlb_pending(batch);
+		batch->active = 0;
+	}
 #endif
 
 	local_irq_save(flags);
@@ -527,6 +538,14 @@ struct task_struct *__switch_to(struct t
 	hard_irq_disable();
 	last = _switch(old_thread, new_thread);
 
+#ifdef CONFIG_PPC64
+	if (task_thread_info(new)->local_flags & _TLF_LAZY_MMU) {
+		task_thread_info(new)->local_flags &= ~_TLF_LAZY_MMU;
+		batch = &__get_cpu_var(ppc64_tlb_batch);
+		batch->active = 1;
+	}
+#endif
+
 	local_irq_restore(flags);
 
 	return last;

Index: linux-2.6/arch/powerpc/mm/pgtable.c
===================================================================
--- linux-2.6.orig/arch/powerpc/mm/pgtable.c
+++ linux-2.6/arch/powerpc/mm/pgtable.c
@@ -33,8 +33,6 @@
 
 #include "mmu_decl.h"
 
-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-
 #ifdef CONFIG_SMP
 
 /*
@@ -43,7 +41,6 @@ DEFINE_PER_CPU(struct mmu_ga
  * freeing a page table page that is being walked without locks
  */
 
-static DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
 static unsigned long pte_freelist_forced_free;
 
 struct pte_freelist_batch
@@ -97,12 +94,10 @@ static void pte_free_submit(struct pte_f
 
 void pgtable_free_tlb(struct mmu_gather *tlb, void *table, unsigned shift)
 {
-	/* This is safe since tlb_gather_mmu has disabled preemption */
-	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
+	struct pte_freelist_batch **batchp = &tlb->arch.batch;
 	unsigned long pgf;
 
-	if (atomic_read(&tlb->mm->mm_users) < 2 ||
-	    cpumask_equal(mm_cpumask(tlb->mm), cpumask_of(smp_processor_id()))){
+	if (atomic_read(&tlb->mm->mm_users) < 2) {
 		pgtable_free(table, shift);
 		return;
 	}
@@ -124,10 +119,9 @@ void pgtable_free_tlb(struct mmu_gather
 	}
 }
 
-void pte_free_finish(void)
+void pte_free_finish(struct mmu_gather *tlb)
 {
-	/* This is safe since tlb_gather_mmu has disabled preemption */
-	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
+	struct pte_freelist_batch **batchp = &tlb->arch.batch;
 
 	if (*batchp == NULL)
 		return;

Index: linux-2.6/arch/powerpc/mm/tlb_hash64.c
===================================================================
--- linux-2.6.orig/arch/powerpc/mm/tlb_hash64.c
+++ linux-2.6/arch/powerpc/mm/tlb_hash64.c
@@ -38,13 +38,11 @@ DEFINE_PER_CPU(struct ppc64_tlb_batch, p
 * neesd to be flushed. This function will either perform the flush
 * immediately or will batch it up if the current CPU has an active
 * batch on it.
- *
- * Must be called from within some kind of spinlock/non-preempt region...
 */
void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, unsigned long pte, int huge)
{
-	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
+	struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
	unsigned long vsid, vaddr;
	unsigned int psize;
	int ssize;
@@ -99,6 +97,7 @@ void hpte_need_flush(struct mm_struct *m
	 */
	if (!batch->active) {
		flush_hash_page(vaddr, rpte, psize, ssize, 0);
+		put_cpu_var(ppc64_tlb_batch);
		return;
	}

@@ -127,6 +126,7 @@ void hpte_need_flush(struct mm_struct *m
	batch->index = ++i;
	if (i >= PPC64_TLB_BATCH_NR)
		__flush_tlb_pending(batch);
+	put_cpu_var(ppc64_tlb_batch);
}

/*
@@ -155,7 +155,7 @@ void __flush_tlb_pending(struct ppc64_tl

void tlb_flush(struct mmu_gather *tlb)
{
-	struct ppc64_tlb_batch *tlbbatch = &__get_cpu_var(ppc64_tlb_batch);
+	struct ppc64_tlb_batch *tlbbatch = &get_cpu_var(ppc64_tlb_batch);

	/* If there's a TLB batch pending, then we must flush it because the
	 * pages are going to be freed and we really don't want to have a CPU
@@ -164,8 +164,10 @@ void tlb_flush(struct mmu_gather *tlb)
	if (tlbbatch->index)
		__flush_tlb_pending(tlbbatch);

+	put_cpu_var(ppc64_tlb_batch);
+
	/* Push out batch of freed page tables */
-	pte_free_finish();
+	pte_free_finish(tlb);
}

/**

Index: linux-2.6/arch/powerpc/include/asm/thread_info.h
===================================================================
--- linux-2.6.orig/arch/powerpc/include/asm/thread_info.h
+++ linux-2.6/arch/powerpc/include/asm/thread_info.h
@@ -139,10 +139,12 @@ static inline struct thread_info *curren
 #define TLF_NAPPING		0	/* idle thread enabled NAP mode */
 #define TLF_SLEEPING		1	/* suspend code enabled SLEEP mode */
 #define TLF_RESTORE_SIGMASK	2	/* Restore signal mask in do_signal */
+#define TLF_LAZY_MMU		3	/* tlb_batch is active */
 
 #define _TLF_NAPPING		(1 << TLF_NAPPING)
 #define _TLF_SLEEPING		(1 << TLF_SLEEPING)
 #define _TLF_RESTORE_SIGMASK	(1 << TLF_RESTORE_SIGMASK)
+#define _TLF_LAZY_MMU		(1 << TLF_LAZY_MMU)
 
 #ifndef __ASSEMBLY__
 #define HAVE_SET_RESTORE_SIGMASK	1

Index: linux-2.6/arch/powerpc/include/asm/pgalloc.h
===================================================================
--- linux-2.6.orig/arch/powerpc/include/asm/pgalloc.h
+++ linux-2.6/arch/powerpc/include/asm/pgalloc.h
@@ -32,13 +32,13 @@ static inline void pte_free(struct mm_st
 #ifdef CONFIG_SMP
 extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, unsigned shift);
-extern void pte_free_finish(void);
+extern void pte_free_finish(struct mmu_gather *tlb);
 #else /* CONFIG_SMP */
 static inline void pgtable_free_tlb(struct mmu_gather *tlb,
				    void *table, unsigned shift)
 {
	pgtable_free(table, shift);
 }
-static inline void pte_free_finish(void) { }
+static inline void pte_free_finish(struct mmu_gather *tlb) { }
 #endif /* !CONFIG_SMP */
 
 static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *ptepage,

Index: linux-2.6/arch/powerpc/mm/tlb_hash32.c
===================================================================
--- linux-2.6.orig/arch/powerpc/mm/tlb_hash32.c
+++ linux-2.6/arch/powerpc/mm/tlb_hash32.c
@@ -73,7 +73,7 @@ void tlb_flush(struct mmu_gather *tlb)
	}

	/* Push out batch of freed page tables */
-	pte_free_finish();
+	pte_free_finish(tlb);
 }

/*

Index: linux-2.6/arch/powerpc/mm/tlb_nohash.c
===================================================================
--- linux-2.6.orig/arch/powerpc/mm/tlb_nohash.c
+++ linux-2.6/arch/powerpc/mm/tlb_nohash.c
@@ -301,7 +301,7 @@ void tlb_flush(struct mmu_gather *tlb)
	flush_tlb_mm(tlb->mm);

	/* Push out batch of freed page tables */
-	pte_free_finish();
+	pte_free_finish(tlb);
 }

/*
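The HAVE_ARCH_MMU_GATHER hook added to tlb.h above is what lets the powerpc
freelist pointer ride along inside each gather. The generic side of that hook
is introduced in patch 07/20 of this series and is not shown here; the
following is only a minimal sketch of how generic code would be expected to
consume these definitions (the struct layout and the example_tlb_gather_mmu()
helper are illustrative assumptions, not the series' actual generic code):

/*
 * Illustrative sketch: a generic mmu_gather embedding and initializing
 * the arch-private state defined in asm/tlb.h above.
 */
struct mmu_gather {
	struct mm_struct	*mm;
	/* ... generic batching fields ... */
#ifdef HAVE_ARCH_MMU_GATHER
	struct arch_mmu_gather	arch;	/* powerpc: the pte_freelist_batch pointer */
#endif
};

static inline void example_tlb_gather_mmu(struct mmu_gather *tlb,
					  struct mm_struct *mm)
{
	tlb->mm = mm;
	/* ... set up generic fields ... */
#ifdef HAVE_ARCH_MMU_GATHER
	tlb->arch = ARCH_MMU_GATHER_INIT;	/* { .batch = NULL } on powerpc */
#endif
}

With the batch hanging off the gather instead of a per-cpu variable,
pgtable_free_tlb() and pte_free_finish() no longer rely on preemption being
disabled, which is the point of the pgtable.c hunks above.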
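The subtle piece of the patch is the hand-off in __switch_to(): once the
mmu_gather is preemptible, a task can be switched out while the per-cpu
ppc64_tlb_batch is active, and that batch must not follow the task to another
CPU. The outgoing path flushes and deactivates the batch and records the
lazy-MMU state in the new _TLF_LAZY_MMU thread-local flag; the incoming path
re-arms batching on whichever CPU the task resumes on. Condensed into two
helpers for illustration (the logic is lifted from the CONFIG_PPC64 hunks
above, but the lazy_mmu_sched_out()/lazy_mmu_sched_in() names are
hypothetical, not part of the patch):

/* Outgoing task: park an active batch before switching away. */
static inline void lazy_mmu_sched_out(struct task_struct *prev)
{
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);

	if (batch->active) {
		/* remember that prev was inside a lazy-MMU section */
		task_thread_info(prev)->local_flags |= _TLF_LAZY_MMU;
		if (batch->index)
			__flush_tlb_pending(batch);	/* don't leave stale hashed entries */
		batch->active = 0;
	}
}

/* Incoming task: re-arm batching if it was parked at switch-out. */
static inline void lazy_mmu_sched_in(struct task_struct *new)
{
	if (task_thread_info(new)->local_flags & _TLF_LAZY_MMU) {
		task_thread_info(new)->local_flags &= ~_TLF_LAZY_MMU;
		__get_cpu_var(ppc64_tlb_batch).active = 1;
	}
}

Both sides run in the context-switch path with preemption already disabled, so
the bare __get_cpu_var() access is safe there; hpte_need_flush() and
tlb_flush(), which can now be reached with preemption enabled, instead switch
to the get_cpu_var()/put_cpu_var() pair as the tlb_hash64.c hunks show.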