linux-mm.kvack.org archive mirror
 help / color / mirror / Atom feed
From: Vaidyanathan Srinivasan <svaidy@linux.vnet.ibm.com>
To: linux-mm@kvack.org
Cc: Alexis Bruemmer <alexisb@us.ibm.com>,
	Peter Zijlstra <a.p.zijlstra@chello.nl>
Subject: [PATCH/RFC 9/9] mm: nr_ptes needs to be atomic
Date: Mon, 22 Oct 2007 16:15:28 +0530	[thread overview]
Message-ID: <20071022104531.716194505@linux.vnet.ibm.com> (raw)
In-Reply-To: <20071022104518.985992030@linux.vnet.ibm.com>

[-- Attachment #1: 9_nr_ptes.patch --]
[-- Type: text/plain, Size: 3692 bytes --]

---
 arch/powerpc/mm/fault.c   |    6 ++++++
 arch/sh/mm/cache-sh4.c    |    2 +-
 arch/um/kernel/skas/mmu.c |    2 +-
 fs/proc/task_mmu.c        |    2 +-
 include/linux/sched.h     |    4 +++-
 kernel/fork.c             |    2 +-
 mm/memory.c               |    4 ++--
 7 files changed, 15 insertions(+), 7 deletions(-)

--- linux-2.6.23-rc8.orig/arch/powerpc/mm/fault.c
+++ linux-2.6.23-rc8/arch/powerpc/mm/fault.c
@@ -235,6 +235,12 @@ again:
 	if (!(vma->vm_flags & VM_GROWSDOWN))
 		goto bad_area;
 
+	if (!locked) {
+		put_vma(vma);
+		locked = 1;
+		goto again;
+	}
+
 	/*
 	 * N.B. The POWER/Open ABI allows programs to access up to
 	 * 288 bytes below the stack pointer.
--- linux-2.6.23-rc8.orig/arch/sh/mm/cache-sh4.c
+++ linux-2.6.23-rc8/arch/sh/mm/cache-sh4.c
@@ -373,7 +373,7 @@ void flush_cache_mm(struct mm_struct *mm
 	 * Don't bother groveling around the dcache for the VMA ranges
 	 * if there are too many PTEs to make it worthwhile.
 	 */
-	if (mm->nr_ptes >= MAX_DCACHE_PAGES)
+	if (atomic_long_read(&mm->nr_ptes) >= MAX_DCACHE_PAGES)
 		flush_dcache_all();
 	else {
 		struct vm_area_struct *vma;
--- linux-2.6.23-rc8.orig/arch/um/kernel/skas/mmu.c
+++ linux-2.6.23-rc8/arch/um/kernel/skas/mmu.c
@@ -98,7 +98,7 @@ int init_new_context_skas(struct task_st
 		if(ret)
 			goto out_free;
 
-		mm->nr_ptes--;
+		atomic_long_dec(&mm->nr_ptes);
 	}
 
 	to_mm->id.stack = stack;
--- linux-2.6.23-rc8.orig/fs/proc/task_mmu.c
+++ linux-2.6.23-rc8/fs/proc/task_mmu.c
@@ -52,7 +52,7 @@ char *task_mem(struct mm_struct *mm, cha
 		total_rss << (PAGE_SHIFT-10),
 		data << (PAGE_SHIFT-10),
 		mm->stack_vm << (PAGE_SHIFT-10), text, lib,
-		(PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
+		(PTRS_PER_PTE*sizeof(pte_t)*atomic_long_read(&mm->nr_ptes)) >> 10);
 	return buffer;
 }
 
--- linux-2.6.23-rc8.orig/include/linux/sched.h
+++ linux-2.6.23-rc8/include/linux/sched.h
@@ -400,11 +400,13 @@ struct mm_struct {
 	unsigned long hiwater_vm;	/* High-water virtual memory usage */
 
 	unsigned long total_vm, locked_vm, shared_vm, exec_vm;
-	unsigned long stack_vm, reserved_vm, def_flags, nr_ptes;
+	unsigned long stack_vm, reserved_vm, def_flags;
 	unsigned long start_code, end_code, start_data, end_data;
 	unsigned long start_brk, brk, start_stack;
 	unsigned long arg_start, arg_end, env_start, env_end;
 
+	atomic_long_t nr_ptes;
+
 	unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
 
 	cpumask_t cpu_vm_mask;
--- linux-2.6.23-rc8.orig/kernel/fork.c
+++ linux-2.6.23-rc8/kernel/fork.c
@@ -335,7 +335,7 @@ static struct mm_struct * mm_init(struct
 	mm->flags = (current->mm) ? current->mm->flags
 				  : MMF_DUMP_FILTER_DEFAULT;
 	mm->core_waiters = 0;
-	mm->nr_ptes = 0;
+	atomic_long_set(&mm->nr_ptes, 0);
 	set_mm_counter(mm, file_rss, 0);
 	set_mm_counter(mm, anon_rss, 0);
 	spin_lock_init(&mm->page_table_lock);
--- linux-2.6.23-rc8.orig/mm/memory.c
+++ linux-2.6.23-rc8/mm/memory.c
@@ -127,7 +127,7 @@ static void free_pte_range(struct mmu_ga
 	pte_lock_deinit(page);
 	pte_free_tlb(tlb, page);
 	dec_zone_page_state(page, NR_PAGETABLE);
-	tlb->mm->nr_ptes--;
+	atomic_long_dec(&tlb->mm->nr_ptes);
 }
 
 static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
@@ -310,7 +310,7 @@ int __pte_alloc(struct mm_struct *mm, pm
 		pte_lock_deinit(new);
 		pte_free(new);
 	} else {
-		mm->nr_ptes++;
+		atomic_long_inc(&mm->nr_ptes);
 		inc_zone_page_state(new, NR_PAGETABLE);
 		pmd_populate(mm, pmd, new);
 	}

-- 

--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org.  For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href="mailto:dont@kvack.org">email@kvack.org</a>

      parent reply	other threads:[~2007-10-22 10:43 UTC|newest]

Thread overview: 10+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2007-10-22 10:45 [PATCH/RFC 0/9] VMA lookup with RCU Vaidyanathan Srinivasan
2007-10-22 10:45 ` [PATCH/RFC 1/9] Data structure changes Vaidyanathan Srinivasan
2007-10-22 10:45 ` [PATCH/RFC 2/9] lib: RCU friendly B+tree Vaidyanathan Srinivasan
2007-10-22 10:45 ` [PATCH/RFC 3/9] mm: use the B+tree for vma lookups Vaidyanathan Srinivasan
2007-10-22 10:45 ` [PATCH/RFC 4/9] mm: RCU " Vaidyanathan Srinivasan
2007-10-22 10:45 ` [PATCH/RFC 5/9] i386: rcu vma lookups for faults Vaidyanathan Srinivasan
2007-10-22 10:45 ` [PATCH/RFC 6/9] x86_64: " Vaidyanathan Srinivasan
2007-10-22 10:45 ` [PATCH/RFC 7/9] Add page fault code for PPC64 path Vaidyanathan Srinivasan
2007-10-22 10:45 ` [PATCH/RFC 8/9] debug: instrument the fault path Vaidyanathan Srinivasan
2007-10-22 10:45 ` Vaidyanathan Srinivasan [this message]

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20071022104531.716194505@linux.vnet.ibm.com \
    --to=svaidy@linux.vnet.ibm.com \
    --cc=a.p.zijlstra@chello.nl \
    --cc=alexisb@us.ibm.com \
    --cc=linux-mm@kvack.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).