linux-arch.vger.kernel.org archive mirror
From: Peter Zijlstra <a.p.zijlstra@chello.nl>
To: Andrea Arcangeli <aarcange@redhat.com>,
	Avi Kivity <avi@redhat.com>, Thomas Gleixner <tglx@linutronix.de>,
	Rik van Riel <riel@redhat.com>, Ingo Molnar <mingo@elte.hu>,
	akpm@linux-foundation.org,
	Linus Torvalds <torvalds@linux-foundation.org>
Cc: linux-kernel@vger.kernel.org, linux-arch@vger.kernel.org,
	linux-mm@kvack.org,
	Benjamin Herrenschmidt <benh@kernel.crashing.org>,
	David Miller <davem@davemloft.net>,
	Hugh Dickins <hugh.dickins@tiscali.co.uk>,
	Mel Gorman <mel@csn.ul.ie>, Nick Piggin <npiggin@kernel.dk>,
	Peter Zijlstra <a.p.zijlstra@chello.nl>,
	Paul McKenney <paulmck@linux.vnet.ibm.com>,
	Yanmin Zhang <yanmin_zhang@linux.intel.com>,
	Tony Luck <tony.luck@intel.com>
Subject: [PATCH 07/20] ia64: mmu_gather rework
Date: Fri, 01 Apr 2011 14:13:05 +0200	[thread overview]
Message-ID: <20110401121725.653631940@chello.nl> (raw)
In-Reply-To: <20110401121258.211963744@chello.nl>

[-- Attachment #1: peter_zijlstra-ia64-preemptible_mmu_gather.patch --]
[-- Type: text/plain, Size: 4373 bytes --]

Fix up the ia64 mmu_gather code to conform to the new API.
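
For illustration only (not part of the patch): a minimal caller-side sketch of
the reworked API, modeled on the generic unmap path. The function name
unmap_range_sketch and its argument list are hypothetical; the point is that
the mmu_gather now lives on the caller's stack rather than in the old per-CPU
mmu_gathers storage, which is what allows the gather to run preemptibly.

	/*
	 * Hypothetical caller, sketching the new calling convention: the
	 * caller supplies the mmu_gather storage and drives the
	 * batch-full -> flush cycle off __tlb_remove_page()'s return value.
	 */
	static void
	unmap_range_sketch(struct mm_struct *mm, struct page **pages, int npages,
			   unsigned long start, unsigned long end)
	{
		struct mmu_gather tlb;
		int i;

		tlb_gather_mmu(&tlb, mm, 0);		/* 0 => not a full-mm flush */
		for (i = 0; i < npages; i++) {
			/* returns remaining batch slots; 0 means the batch is full */
			if (!__tlb_remove_page(&tlb, pages[i]))
				tlb_flush_mmu(&tlb);	/* flush TLB, free gathered pages */
		}
		tlb_finish_mmu(&tlb, start, end);	/* final flush and cleanup */
	}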

Acked-by: Tony Luck <tony.luck@intel.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
---
 arch/ia64/include/asm/tlb.h |   66 ++++++++++++++++++++++++++++++--------------
 1 file changed, 46 insertions(+), 20 deletions(-)

Index: linux-2.6/arch/ia64/include/asm/tlb.h
===================================================================
--- linux-2.6.orig/arch/ia64/include/asm/tlb.h
+++ linux-2.6/arch/ia64/include/asm/tlb.h
@@ -47,21 +47,27 @@
 #include <asm/machvec.h>
 
 #ifdef CONFIG_SMP
-# define FREE_PTE_NR		2048
 # define tlb_fast_mode(tlb)	((tlb)->nr == ~0U)
 #else
-# define FREE_PTE_NR		0
 # define tlb_fast_mode(tlb)	(1)
 #endif
 
+/*
+ * If we can't allocate a page to make a big batch of page pointers
+ * to work on, then just handle a few from the on-stack structure.
+ */
+#define	IA64_GATHER_BUNDLE	8
+
 struct mmu_gather {
 	struct mm_struct	*mm;
 	unsigned int		nr;		/* == ~0U => fast mode */
+	unsigned int		max;
 	unsigned char		fullmm;		/* non-zero means full mm flush */
 	unsigned char		need_flush;	/* really unmapped some PTEs? */
 	unsigned long		start_addr;
 	unsigned long		end_addr;
-	struct page 		*pages[FREE_PTE_NR];
+	struct page		**pages;
+	struct page		*local[IA64_GATHER_BUNDLE];
 };
 
 struct ia64_tr_entry {
@@ -90,9 +96,6 @@ extern struct ia64_tr_entry *ia64_idtrs[
 #define RR_RID_MASK	0x00000000ffffff00L
 #define RR_TO_RID(val) 	((val >> 8) & 0xffffff)
 
-/* Users of the generic TLB shootdown code must declare this storage space. */
-DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
-
 /*
  * Flush the TLB for address range START to END and, if not in fast mode, release the
 * freed pages that were gathered up to this point.
@@ -147,15 +150,23 @@ ia64_tlb_flush_mmu (struct mmu_gather *t
 	}
 }
 
-/*
- * Return a pointer to an initialized struct mmu_gather.
- */
-static inline struct mmu_gather *
-tlb_gather_mmu (struct mm_struct *mm, unsigned int full_mm_flush)
+static inline void __tlb_alloc_page(struct mmu_gather *tlb)
 {
-	struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);
+	unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
 
+	if (addr) {
+		tlb->pages = (void *)addr;
+		tlb->max = PAGE_SIZE / sizeof(void *);
+	}
+}
+
+
+static inline void
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
+{
 	tlb->mm = mm;
+	tlb->max = ARRAY_SIZE(tlb->local);
+	tlb->pages = tlb->local;
 	/*
 	 * Use fast mode if only 1 CPU is online.
 	 *
@@ -172,7 +183,6 @@ tlb_gather_mmu (struct mm_struct *mm, un
 	tlb->nr = (num_online_cpus() == 1) ? ~0U : 0;
 	tlb->fullmm = full_mm_flush;
 	tlb->start_addr = ~0UL;
-	return tlb;
 }
 
 /*
@@ -180,7 +190,7 @@ tlb_gather_mmu (struct mm_struct *mm, un
  * collected.
  */
 static inline void
-tlb_finish_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
+tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
 {
 	/*
 	 * Note: tlb->nr may be 0 at this point, so we can't rely on tlb->start_addr and
@@ -191,7 +201,8 @@ tlb_finish_mmu (struct mmu_gather *tlb, 
 	/* keep the page table cache within bounds */
 	check_pgt_cache();
 
-	put_cpu_var(mmu_gathers);
+	if (tlb->pages != tlb->local)
+		free_pages((unsigned long)tlb->pages, 0);
 }
 
 /*
@@ -199,18 +210,33 @@ tlb_finish_mmu (struct mmu_gather *tlb, 
  * must be delayed until after the TLB has been flushed (see comments at the beginning of
  * this file).
  */
-static inline void
-tlb_remove_page (struct mmu_gather *tlb, struct page *page)
+static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 {
 	tlb->need_flush = 1;
 
 	if (tlb_fast_mode(tlb)) {
 		free_page_and_swap_cache(page);
-		return;
+		return 1; /* avoid calling tlb_flush_mmu */
 	}
+
+	if (!tlb->nr && tlb->pages == tlb->local)
+		__tlb_alloc_page(tlb);
+
 	tlb->pages[tlb->nr++] = page;
-	if (tlb->nr >= FREE_PTE_NR)
-		ia64_tlb_flush_mmu(tlb, tlb->start_addr, tlb->end_addr);
+	VM_BUG_ON(tlb->nr > tlb->max);
+
+	return tlb->max - tlb->nr;
+}
+
+static inline void tlb_flush_mmu(struct mmu_gather *tlb)
+{
+	ia64_tlb_flush_mmu(tlb, tlb->start_addr, tlb->end_addr);
+}
+
+static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+{
+	if (!__tlb_remove_page(tlb, page))
+		tlb_flush_mmu(tlb);
 }
 
 /*

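A quick sanity check on the new batch sizing (illustrative arithmetic, not
part of the patch): with the common ia64 configuration of 16KB pages and
8-byte pointers, a successful __tlb_alloc_page() yields
tlb->max = PAGE_SIZE / sizeof(void *) = 16384 / 8 = 2048 entries, the same
batch size as the old FREE_PTE_NR. If the GFP_NOWAIT allocation fails, the
gather degrades to the 8-entry on-stack local[] array and simply flushes
more often, which is correct, just slower.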

Thread overview: 75+ messages

2011-04-01 12:12 [PATCH 00/20] mm: Preemptibility -v10 Peter Zijlstra
2011-04-01 12:12 ` [PATCH 01/20] mm: mmu_gather rework Peter Zijlstra
2011-04-19 20:06   ` Andrew Morton
2011-04-20  8:47     ` Peter Zijlstra
2011-04-20  9:10       ` Peter Zijlstra
2011-04-01 12:13 ` [PATCH 02/20] powerpc: " Peter Zijlstra
2011-04-01 12:13 ` [PATCH 03/20] sparc: " Peter Zijlstra
2011-04-01 12:13 ` [PATCH 04/20] s390: " Peter Zijlstra
2011-04-01 12:13 ` [PATCH 05/20] arm: " Peter Zijlstra
2011-04-01 12:13 ` [PATCH 06/20] sh: " Peter Zijlstra
2011-04-01 12:13 ` [PATCH 07/20] ia64: " Peter Zijlstra  [this message]
2011-04-01 12:13 ` [PATCH 08/20] um: " Peter Zijlstra
2011-04-01 12:13 ` [PATCH 09/20] mm: Now that all old mmu_gather code is gone, remove the storage Peter Zijlstra
2011-04-01 12:13 ` [PATCH 10/20] mm, powerpc: Move the RCU page-table freeing into generic code Peter Zijlstra
2011-04-01 12:13 ` [PATCH 11/20] s390: use generic RCP page-table freeing Peter Zijlstra
2011-04-01 12:13 ` [PATCH 12/20] mm: Extended batches for generic mmu_gather Peter Zijlstra
2011-04-19 20:06   ` Andrew Morton
2011-04-20 10:40     ` Peter Zijlstra
2011-04-01 12:13 ` [PATCH 13/20] lockdep, mutex: Provide mutex_lock_nest_lock Peter Zijlstra
2011-04-19 20:06   ` Andrew Morton
2011-04-20 11:03     ` Peter Zijlstra
2011-04-01 12:13 ` [PATCH 14/20] mm: Remove i_mmap_lock lockbreak Peter Zijlstra
2011-04-19 20:07   ` Andrew Morton
2011-04-21 13:32     ` Peter Zijlstra
2011-04-01 12:13 ` [PATCH 15/20] mm: Convert i_mmap_lock to a mutex Peter Zijlstra
2011-04-19 20:07   ` Andrew Morton
2011-04-21 13:28     ` Peter Zijlstra
2011-04-01 12:13 ` [PATCH 16/20] mm: Revert page_lock_anon_vma() lock annotation Peter Zijlstra
2011-04-01 12:13 ` [PATCH 17/20] mm: Improve page_lock_anon_vma() comment Peter Zijlstra
2011-04-01 12:13 ` [PATCH 18/20] mm: Use refcounts for page_lock_anon_vma() Peter Zijlstra
2011-04-01 12:13 ` [PATCH 19/20] mm: Convert anon_vma->lock to a mutex Peter Zijlstra
2011-04-19 20:07   ` Andrew Morton
2011-04-21 13:28     ` Peter Zijlstra
2011-04-01 12:13 ` [PATCH 20/20] mm: Optimize page_lock_anon_vma() fast-path Peter Zijlstra
2011-04-19 20:08   ` Andrew Morton
2011-04-20 12:38     ` Peter Zijlstra
2011-04-20 15:00       ` Peter Zijlstra
2011-04-01 13:51 ` [PATCH 00/20] mm: Preemptibility -v10 Peter Zijlstra