From: Peter Zijlstra <a.p.zijlstra@chello.nl>
To: Andrea Arcangeli <aarcange@redhat.com>,
Avi Kivity <avi@redhat.com>, Thomas Gleixner <tglx@linutronix.de>,
Rik van Riel <riel@redhat.com>, Ingo Molnar <mingo@elte.hu>,
akpm@linux-foundation.org,
Linus Torvalds <torvalds@linux-foundation.org>
Cc: linux-kernel@vger.kernel.org, linux-arch@vger.kernel.org,
linux-mm@kvack.org,
Benjamin Herrenschmidt <benh@kernel.crashing.org>,
David Miller <davem@davemloft.net>,
Hugh Dickins <hugh.dickins@tiscali.co.uk>,
Mel Gorman <mel@csn.ul.ie>, Nick Piggin <npiggin@kernel.dk>,
Peter Zijlstra <a.p.zijlstra@chello.nl>,
Paul McKenney <paulmck@linux.vnet.ibm.com>,
Yanmin Zhang <yanmin_zhang@linux.intel.com>,
Stephen Rothwell <sfr@canb.auug.org.au>,
Tony Luck <tony.luck@intel.com>
Subject: [PATCH 15/21] ia64: Preemptible mmu_gather
Date: Fri, 26 Nov 2010 15:38:58 +0100
Message-ID: <20101126145411.104660318@chello.nl>
In-Reply-To: <20101126143843.801484792@chello.nl>
[-- Attachment #1: mm-preempt-tlb-gather-ia64.patch --]
[-- Type: text/plain, Size: 3079 bytes --]
Fix up the ia64 mmu_gather code to conform to the new preemptible mmu_gather
API: the caller now provides the mmu_gather structure instead of obtaining a
per-CPU instance, and pages are batched either in a small array embedded in
the structure or in a single dynamically allocated page of pointers.
Cc: Tony Luck <tony.luck@intel.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
---
arch/ia64/include/asm/tlb.h | 39 ++++++++++++++++++++++++---------------
1 file changed, 24 insertions(+), 15 deletions(-)
Index: linux-2.6/arch/ia64/include/asm/tlb.h
===================================================================
--- linux-2.6.orig/arch/ia64/include/asm/tlb.h
+++ linux-2.6/arch/ia64/include/asm/tlb.h
@@ -47,21 +47,21 @@
#include <asm/machvec.h>
#ifdef CONFIG_SMP
-# define FREE_PTE_NR 2048
# define tlb_fast_mode(tlb) ((tlb)->nr == ~0U)
#else
-# define FREE_PTE_NR 0
# define tlb_fast_mode(tlb) (1)
#endif
struct mmu_gather {
struct mm_struct *mm;
unsigned int nr; /* == ~0U => fast mode */
+ unsigned int max;
unsigned char fullmm; /* non-zero means full mm flush */
unsigned char need_flush; /* really unmapped some PTEs? */
unsigned long start_addr;
unsigned long end_addr;
- struct page *pages[FREE_PTE_NR];
+ struct page **pages;
+ struct page *local[8];
};
struct ia64_tr_entry {
@@ -90,9 +90,6 @@ extern struct ia64_tr_entry *ia64_idtrs[
#define RR_RID_MASK 0x00000000ffffff00L
#define RR_TO_RID(val) ((val >> 8) & 0xffffff)
-/* Users of the generic TLB shootdown code must declare this storage space. */
-DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
-
/*
* Flush the TLB for address range START to END and, if not in fast mode, release the
* freed pages that were gathered up to this point.
@@ -147,15 +144,23 @@ ia64_tlb_flush_mmu (struct mmu_gather *t
}
}
-/*
- * Return a pointer to an initialized struct mmu_gather.
- */
-static inline struct mmu_gather *
-tlb_gather_mmu (struct mm_struct *mm, unsigned int full_mm_flush)
+static inline void __tlb_alloc_pages(struct mmu_gather *tlb)
{
- struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);
+ unsigned long addr = __get_free_pages(GFP_ATOMIC, 0);
+
+ if (addr) {
+ tlb->pages = (void *)addr;
+ tlb->max = PAGE_SIZE / sizeof(void *);
+ }
+}
+
+static inline void
+tlb_gather_mmu (struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
+{
tlb->mm = mm;
+ tlb->max = ARRAY_SIZE(tlb->local);
+ tlb->pages = tlb->local;
/*
* Use fast mode if only 1 CPU is online.
*
@@ -172,7 +177,6 @@ tlb_gather_mmu (struct mm_struct *mm, un
tlb->nr = (num_online_cpus() == 1) ? ~0U : 0;
tlb->fullmm = full_mm_flush;
tlb->start_addr = ~0UL;
- return tlb;
}
/*
@@ -191,7 +195,8 @@ tlb_finish_mmu (struct mmu_gather *tlb,
/* keep the page table cache within bounds */
check_pgt_cache();
- put_cpu_var(mmu_gathers);
+ if (tlb->pages != tlb->local)
+ free_pages((unsigned long)tlb->pages, 0);
}
/*
@@ -208,8 +213,12 @@ tlb_remove_page (struct mmu_gather *tlb,
free_page_and_swap_cache(page);
return;
}
+
+ if (!tlb->nr && tlb->pages == tlb->local)
+ __tlb_alloc_pages(tlb);
+
tlb->pages[tlb->nr++] = page;
- if (tlb->nr >= FREE_PTE_NR)
+ if (tlb->nr >= tlb->max)
ia64_tlb_flush_mmu(tlb, tlb->start_addr, tlb->end_addr);
}
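
For readers unfamiliar with the new API, here is a minimal stand-alone sketch
(user-space C, not kernel code) of the batching scheme the patch adopts: start
with a small array embedded in the gather structure, opportunistically upgrade
to a heap-allocated page of pointers on first use, and flush whenever the
batch fills. The mock_* names are invented for illustration; malloc()/free()
stand in for __get_free_pages()/free_pages(), printf() stands in for
free_page_and_swap_cache(), and the SMP fast-mode path (nr == ~0U) is omitted.

#include <stdio.h>
#include <stdlib.h>

#define LOCAL_NR   8        /* mirrors tlb->local[8] in the patch */
#define PAGE_SIZE  4096

struct mock_gather {
	unsigned int nr;        /* pages currently batched */
	unsigned int max;       /* capacity of 'pages' */
	void **pages;           /* points at 'local' or a heap block */
	void *local[LOCAL_NR];  /* always-available fallback batch */
};

static void mock_flush(struct mock_gather *tlb)
{
	/* stand-in for ia64_tlb_flush_mmu(): release the batched pages */
	for (unsigned int i = 0; i < tlb->nr; i++)
		printf("freeing page %p\n", tlb->pages[i]);
	tlb->nr = 0;
}

static void mock_alloc_batch(struct mock_gather *tlb)
{
	/* mirrors __tlb_alloc_pages(): failure is fine, we keep 'local' */
	void **p = malloc(PAGE_SIZE);

	if (p) {
		tlb->pages = p;
		tlb->max = PAGE_SIZE / sizeof(void *);
	}
}

static void mock_gather_init(struct mock_gather *tlb)
{
	/* mirrors tlb_gather_mmu(): caller-provided struct, local batch */
	tlb->nr = 0;
	tlb->max = LOCAL_NR;
	tlb->pages = tlb->local;
}

static void mock_remove_page(struct mock_gather *tlb, void *page)
{
	/* upgrade to the big batch on first use, as in tlb_remove_page() */
	if (!tlb->nr && tlb->pages == tlb->local)
		mock_alloc_batch(tlb);

	tlb->pages[tlb->nr++] = page;
	if (tlb->nr >= tlb->max)
		mock_flush(tlb);
}

static void mock_gather_finish(struct mock_gather *tlb)
{
	/* mirrors tlb_finish_mmu(): flush leftovers, free the spill block */
	mock_flush(tlb);
	if (tlb->pages != tlb->local)
		free(tlb->pages);
}

int main(void)
{
	struct mock_gather tlb; /* on the caller's stack: the point of the patch */
	char pages[20];

	mock_gather_init(&tlb);
	for (int i = 0; i < 20; i++)
		mock_remove_page(&tlb, &pages[i]);
	mock_gather_finish(&tlb);
	return 0;
}

The design choice this illustrates: because the gather now lives on the
caller's stack rather than in per-CPU storage, the section no longer needs to
disable preemption, and because the GFP_ATOMIC batch allocation is allowed to
fail, the 8-entry local array bounds the worst case; the code then simply
flushes more often instead of failing.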