From: Nick Piggin <nickpiggin@yahoo.com.au>
To: Linux Kernel Mailing List <Linux-Kernel@vger.kernel.org>
Subject: [patch 4/14] mm: rmap opt
Date: Sun, 06 Nov 2005 19:22:09 +1100 [thread overview]
Message-ID: <436DBD31.8060801@yahoo.com.au> (raw)
In-Reply-To: <436DBD11.8010600@yahoo.com.au>
[-- Attachment #1: Type: text/plain, Size: 34 bytes --]
4/14
--
SUSE Labs, Novell Inc.
[-- Attachment #2: mm-rmap-opt.patch --]
[-- Type: text/plain, Size: 6853 bytes --]
Slightly optimise rmap functions by minimising atomic operations when
we know there will be no concurrent modifications.
Index: linux-2.6/include/linux/rmap.h
===================================================================
--- linux-2.6.orig/include/linux/rmap.h
+++ linux-2.6/include/linux/rmap.h
@@ -71,6 +71,7 @@ void __anon_vma_link(struct vm_area_stru
* rmap interfaces called when adding or removing pte of page
*/
void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
+void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_file_rmap(struct page *);
void page_remove_rmap(struct page *);
Index: linux-2.6/mm/memory.c
===================================================================
--- linux-2.6.orig/mm/memory.c
+++ linux-2.6/mm/memory.c
@@ -1337,14 +1337,15 @@ static int do_wp_page(struct mm_struct *
inc_mm_counter(mm, anon_rss);
dec_mm_counter(mm, file_rss);
}
+
flush_cache_page(vma, address, pfn);
entry = mk_pte(new_page, vma->vm_page_prot);
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
ptep_establish(vma, address, page_table, entry);
update_mmu_cache(vma, address, entry);
lazy_mmu_prot_update(entry);
+ page_add_new_anon_rmap(new_page, vma, address);
lru_cache_add_active(new_page);
- page_add_anon_rmap(new_page, vma, address);
/* Free the old page.. */
new_page = old_page;
@@ -1796,9 +1797,8 @@ static int do_anonymous_page(struct mm_s
if (!pte_none(*page_table))
goto release;
inc_mm_counter(mm, anon_rss);
+ page_add_new_anon_rmap(page, vma, address);
lru_cache_add_active(page);
- SetPageReferenced(page);
- page_add_anon_rmap(page, vma, address);
} else {
/* Map the ZERO_PAGE - vm_page_prot is readonly */
page = ZERO_PAGE(address);
@@ -1924,11 +1924,10 @@ retry:
entry = mk_pte(new_page, vma->vm_page_prot);
if (write_access)
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
- set_pte_at(mm, address, page_table, entry);
if (anon) {
inc_mm_counter(mm, anon_rss);
+ page_add_new_anon_rmap(new_page, vma, address);
lru_cache_add_active(new_page);
- page_add_anon_rmap(new_page, vma, address);
} else if (!(vma->vm_flags & VM_RESERVED)) {
inc_mm_counter(mm, file_rss);
page_add_file_rmap(new_page);
@@ -1939,6 +1938,7 @@ retry:
goto unlock;
}
+ set_pte_at(mm, address, page_table, entry);
/* no need to invalidate: a not-present page shouldn't be cached */
update_mmu_cache(vma, address, entry);
lazy_mmu_prot_update(entry);
Index: linux-2.6/mm/rmap.c
===================================================================
--- linux-2.6.orig/mm/rmap.c
+++ linux-2.6/mm/rmap.c
@@ -440,6 +440,26 @@ int page_referenced(struct page *page, i
}
/**
+ * __page_set_anon_rmap - setup new anonymous rmap
+ * @page: the page to add the mapping to
+ * @vma: the vm area in which the mapping is added
+ * @address: the user virtual address mapped
+ */
+static void __page_set_anon_rmap(struct page *page,
+ struct vm_area_struct *vma, unsigned long address)
+{
+ struct anon_vma *anon_vma = vma->anon_vma;
+
+ BUG_ON(!anon_vma);
+ anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
+ page->mapping = (struct address_space *) anon_vma;
+
+ page->index = linear_page_index(vma, address);
+
+ inc_page_state(nr_mapped);
+}
+
+/**
* page_add_anon_rmap - add pte mapping to an anonymous page
* @page: the page to add the mapping to
* @vma: the vm area in which the mapping is added
@@ -450,21 +470,28 @@ int page_referenced(struct page *page, i
void page_add_anon_rmap(struct page *page,
struct vm_area_struct *vma, unsigned long address)
{
- if (atomic_inc_and_test(&page->_mapcount)) {
- struct anon_vma *anon_vma = vma->anon_vma;
-
- BUG_ON(!anon_vma);
- anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
- page->mapping = (struct address_space *) anon_vma;
-
- page->index = linear_page_index(vma, address);
-
- inc_page_state(nr_mapped);
- }
+ if (atomic_inc_and_test(&page->_mapcount))
+ __page_set_anon_rmap(page, vma, address);
/* else checking page index and mapping is racy */
}
/**
+ * page_add_new_anon_rmap - add pte mapping to a new anonymous page
+ * @page: the page to add the mapping to
+ * @vma: the vm area in which the mapping is added
+ * @address: the user virtual address mapped
+ *
+ * Same as page_add_anon_rmap, but must only be called on *new* pages.
+ */
+void page_add_new_anon_rmap(struct page *page,
+ struct vm_area_struct *vma, unsigned long address)
+{
+ atomic_set(&page->_mapcount, 0); /* elevate count by 1 (starts at -1) */
+ __page_set_anon_rmap(page, vma, address);
+}
+
+
+/**
* page_add_file_rmap - add pte mapping to a file page
* @page: the page to add the mapping to
*
@@ -487,21 +514,28 @@ void page_add_file_rmap(struct page *pag
*/
void page_remove_rmap(struct page *page)
{
- if (atomic_add_negative(-1, &page->_mapcount)) {
+ int fast = (page_mapcount(page) == 1) &
+ PageAnon(page) & (!PageSwapCache(page));
+
+ /* fast page may become SwapCache here, but nothing new will map it. */
+ if (fast)
+ reset_page_mapcount(page);
+ else if (atomic_add_negative(-1, &page->_mapcount))
BUG_ON(page_mapcount(page) < 0);
- /*
- * It would be tidy to reset the PageAnon mapping here,
- * but that might overwrite a racing page_add_anon_rmap
- * which increments mapcount after us but sets mapping
- * before us: so leave the reset to free_hot_cold_page,
- * and remember that it's only reliable while mapped.
- * Leaving it set also helps swapoff to reinstate ptes
- * faster for those pages still in swapcache.
- */
if (page_test_and_clear_dirty(page))
set_page_dirty(page);
- dec_page_state(nr_mapped);
- }
+ else
+ return; /* non zero mapcount */
+ /*
+ * It would be tidy to reset the PageAnon mapping here,
+ * but that might overwrite a racing page_add_anon_rmap
+ * which increments mapcount after us but sets mapping
+ * before us: so leave the reset to free_hot_cold_page,
+ * and remember that it's only reliable while mapped.
+ * Leaving it set also helps swapoff to reinstate ptes
+ * faster for those pages still in swapcache.
+ */
+ dec_page_state(nr_mapped);
}
/*
Index: linux-2.6/include/linux/page-flags.h
===================================================================
--- linux-2.6.orig/include/linux/page-flags.h
+++ linux-2.6/include/linux/page-flags.h
@@ -182,6 +182,7 @@ extern void __mod_page_state(unsigned lo
#define PageReferenced(page) test_bit(PG_referenced, &(page)->flags)
#define SetPageReferenced(page) set_bit(PG_referenced, &(page)->flags)
+#define __SetPageReferenced(page) __set_bit(PG_referenced, &(page)->flags)
#define ClearPageReferenced(page) clear_bit(PG_referenced, &(page)->flags)
#define TestClearPageReferenced(page) test_and_clear_bit(PG_referenced, &(page)->flags)
next prev parent reply other threads:[~2005-11-06 8:20 UTC|newest]
Thread overview: 33+ messages / expand[flat|nested] mbox.gz Atom feed top
2005-11-06 8:11 [rfc][patch 0/14] mm: performance improvements Nick Piggin
2005-11-06 8:20 ` [patch 1/14] mm: opt rmqueue Nick Piggin
2005-11-06 8:20 ` [patch 2/14] mm: Nick Piggin
2005-11-06 8:20 ` [patch 2/14] mm: pte prefetch Nick Piggin
2005-11-06 8:21 ` [patch 3/14] mm: release opt Nick Piggin
2005-11-06 8:22 ` Nick Piggin [this message]
2005-11-06 8:23 ` [patch 5/14] mm: set_page_refs opt Nick Piggin
2005-11-06 8:24 ` [patch 6/14] mm: microopt conditions Nick Piggin
2005-11-06 8:24 ` [patch 7/14] mm: remove bad_range Nick Piggin
2005-11-06 8:25 ` [patch 8/14] mm: remove pcp_low Nick Piggin
2005-11-06 8:25 ` [patch 9/14] mm: page_state opt Nick Piggin
2005-11-06 8:26 ` [patch 10/14] mm: single pcp list Nick Piggin
2005-11-06 8:26 ` [patch 11/14] mm: increase pcp size Nick Piggin
2005-11-06 8:27 ` [patch 12/14] mm: variable " Nick Piggin
2005-11-06 8:27 ` [patch 13/14] mm: cleanup zone_pcp Nick Piggin
2005-11-06 8:28 ` [patch 14/14] mm: page_alloc cleanups Nick Piggin
2005-11-13 2:38 ` [patch 9/14] mm: page_state opt Andi Kleen
2005-11-06 17:37 ` [patch 7/14] mm: remove bad_range Bob Picco
2005-11-07 0:58 ` Nick Piggin
2005-11-07 3:00 ` Bob Picco
2005-11-07 3:05 ` Nick Piggin
2005-11-07 1:40 ` [patch 5/14] mm: set_page_refs opt Christoph Hellwig
2005-11-07 1:45 ` Nick Piggin
2005-11-06 8:35 ` [patch 2/14] mm: pte prefetch Arjan van de Ven
2005-11-06 8:51 ` Nick Piggin
2005-11-06 17:37 ` [patch 1/14] mm: opt rmqueue Andi Kleen
2005-11-07 1:06 ` Nick Piggin
2005-11-07 3:23 ` Andi Kleen
2005-11-07 3:43 ` Nick Piggin
2005-11-07 1:39 ` [rfc][patch 0/14] mm: performance improvements Christoph Hellwig
2005-11-07 1:51 ` Nick Piggin
2005-11-07 3:57 ` Paul Jackson
2005-11-07 4:51 ` Nick Piggin
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=436DBD31.8060801@yahoo.com.au \
--to=nickpiggin@yahoo.com.au \
--cc=Linux-Kernel@vger.kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox