From: Matthew Wilcox <willy@infradead.org>
To: Alexander Duyck <alexander.duyck@gmail.com>
Cc: Matthew Wilcox <mawilcox@microsoft.com>,
netdev@vger.kernel.org, linux-mm@kvack.org,
Jesper Dangaard Brouer <brouer@redhat.com>,
Eric Dumazet <eric.dumazet@gmail.com>
Subject: [PATCH v2 2/8] page_frag_cache: Move slowpath code from page_frag_alloc
Date: Thu, 22 Mar 2018 08:31:51 -0700
Message-ID: <20180322153157.10447-3-willy@infradead.org>
In-Reply-To: <20180322153157.10447-1-willy@infradead.org>
From: Matthew Wilcox <mawilcox@microsoft.com>
Move all of the unlikely code into __page_frag_cache_refill() so that the
fast path in page_frag_alloc() is more obvious.
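To make the intended fast path concrete, here is a minimal user-space
sketch of the same scheme.  It is not the kernel code: every identifier
below (frag_cache, chunk, CHUNK_ALIGN and friends) is invented for this
illustration, and the PFC_MEMALLOC bookkeeping from patch 1/8 is left out.
The point is that allocation is only a compare, a subtraction and a
decrement, while all of the reuse-or-reallocate logic lives in the refill
helper, which is what this patch does to __page_frag_cache_refill():

/* Toy model only: none of these names exist in the kernel. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdlib.h>

#define CHUNK_ALIGN	65536u	/* power of two so a fragment maps back to its chunk */
#define CHUNK_BYTES	(CHUNK_ALIGN - sizeof(atomic_uint))

struct chunk {
	atomic_uint refcount;		/* plays the role of the struct page refcount */
	unsigned char data[CHUNK_BYTES];
};

struct frag_cache {
	struct chunk *chunk;
	unsigned int offset;		/* bytes still unallocated at the front of data[] */
	unsigned int bias;		/* references charged up front but not yet handed out */
};

/* Slow path: reuse the current chunk if every fragment handed out from it
 * has already been freed, otherwise allocate a fresh one.  Either way the
 * offset and the bias are re-armed here, not in the fast path. */
static struct chunk *frag_cache_refill(struct frag_cache *fc)
{
	struct chunk *c = fc->chunk;

	if (c && atomic_fetch_sub(&c->refcount, fc->bias) == fc->bias) {
		/* refcount hit zero: no fragment is still live, reuse the chunk */
		atomic_store(&c->refcount, CHUNK_BYTES);
	} else {
		/* the old chunk (if any) is released by the last frag_free() */
		c = aligned_alloc(CHUNK_ALIGN, CHUNK_ALIGN);
		if (!c) {
			fc->chunk = NULL;
			return NULL;
		}
		atomic_init(&c->refcount, CHUNK_BYTES);
		fc->chunk = c;
	}
	fc->bias = CHUNK_BYTES;
	fc->offset = CHUNK_BYTES;
	return c;
}

/* Fast path: a compare, a subtraction and a decrement (fragsz must not
 * exceed CHUNK_BYTES, just as in the kernel). */
static void *frag_alloc(struct frag_cache *fc, unsigned int fragsz)
{
	if (!fc->chunk || fc->offset < fragsz) {
		if (!frag_cache_refill(fc))
			return NULL;
	}
	fc->offset -= fragsz;
	fc->bias--;
	return fc->chunk->data + fc->offset;
}

/* Fragments are freed one by one; the last free releases the chunk. */
static void frag_free(void *p)
{
	struct chunk *c = (void *)((uintptr_t)p & ~(uintptr_t)(CHUNK_ALIGN - 1));

	if (atomic_fetch_sub(&c->refcount, 1) == 1)
		free(c);
}

The bias means the fast path never touches the atomic refcount; the
refcount is only reconciled against the bias when the chunk is exhausted,
and that reconciliation is exactly the slow-path work being concentrated
in __page_frag_cache_refill() below.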
Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
---
mm/page_alloc.c | 70 ++++++++++++++++++++++++++++-----------------------------
1 file changed, 34 insertions(+), 36 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 61366f23e8c8..6d2c106f4e5d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4339,20 +4339,50 @@ EXPORT_SYMBOL(free_pages);
static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
gfp_t gfp_mask)
{
+ unsigned int size = PAGE_SIZE;
struct page *page = NULL;
+ struct page *old = nc->va ? virt_to_page(nc->va) : NULL;
gfp_t gfp = gfp_mask;
+ unsigned int pagecnt_bias = nc->pagecnt_bias & ~PFC_MEMALLOC;
+
+ /* If all allocations have been freed, we can reuse this page */
+ if (old && page_ref_sub_and_test(old, pagecnt_bias)) {
+ page = old;
+#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
+ /* if size can vary use size else just use PAGE_SIZE */
+ size = nc->size;
+#endif
+ /* Page count is 0, we can safely set it */
+ set_page_count(page, size);
+ goto reset;
+ }
#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
__GFP_NOMEMALLOC;
page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
PAGE_FRAG_CACHE_MAX_ORDER);
- nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE;
+ if (page)
+ size = PAGE_FRAG_CACHE_MAX_SIZE;
+ nc->size = size;
#endif
if (unlikely(!page))
page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
+ if (!page) {
+ nc->va = NULL;
+ return NULL;
+ }
+
+ nc->va = page_address(page);
- nc->va = page ? page_address(page) : NULL;
+ /* Using atomic_set() would break get_page_unless_zero() users. */
+ page_ref_add(page, size - 1);
+reset:
+ /* reset page count bias and offset to start of new frag */
+ nc->pagecnt_bias = size;
+ if (page_is_pfmemalloc(page))
+ nc->pagecnt_bias |= PFC_MEMALLOC;
+ nc->offset = size;
return page;
}
@@ -4375,7 +4405,6 @@ EXPORT_SYMBOL(__page_frag_cache_drain);
void *page_frag_alloc(struct page_frag_cache *nc,
unsigned int fragsz, gfp_t gfp_mask)
{
- unsigned int size = PAGE_SIZE;
struct page *page;
int offset;
@@ -4384,42 +4413,11 @@ void *page_frag_alloc(struct page_frag_cache *nc,
page = __page_frag_cache_refill(nc, gfp_mask);
if (!page)
return NULL;
-
-#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
- /* if size can vary use size else just use PAGE_SIZE */
- size = nc->size;
-#endif
- /* Even if we own the page, we do not use atomic_set().
- * This would break get_page_unless_zero() users.
- */
- page_ref_add(page, size - 1);
-
- /* reset page count bias and offset to start of new frag */
- nc->pagecnt_bias = size;
- if (page_is_pfmemalloc(page))
- nc->pagecnt_bias |= PFC_MEMALLOC;
- nc->offset = size;
}
offset = nc->offset - fragsz;
- if (unlikely(offset < 0)) {
- unsigned int pagecnt_bias = nc->pagecnt_bias & ~PFC_MEMALLOC;
- page = virt_to_page(nc->va);
-
- if (!page_ref_sub_and_test(page, pagecnt_bias))
- goto refill;
-
-#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
- /* if size can vary use size else just use PAGE_SIZE */
- size = nc->size;
-#endif
- /* OK, page count is 0, we can safely set it */
- set_page_count(page, size);
-
- /* reset page count bias and offset to start of new frag */
- nc->pagecnt_bias = size | (nc->pagecnt_bias - pagecnt_bias);
- offset = size - fragsz;
- }
+ if (unlikely(offset < 0))
+ goto refill;
nc->pagecnt_bias--;
nc->offset = offset;
--
2.16.2
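For reference, once the hunks above are applied page_frag_alloc() reads
roughly as follows.  The refill: label and the final return statement are
unchanged context that the diff does not show, so treat this as a
reconstruction of the surrounding code rather than a verbatim copy of
mm/page_alloc.c:

void *page_frag_alloc(struct page_frag_cache *nc,
		      unsigned int fragsz, gfp_t gfp_mask)
{
	struct page *page;
	int offset;

	if (unlikely(!nc->va)) {
refill:
		page = __page_frag_cache_refill(nc, gfp_mask);
		if (!page)
			return NULL;
	}

	offset = nc->offset - fragsz;
	if (unlikely(offset < 0))
		goto refill;

	nc->pagecnt_bias--;
	nc->offset = offset;

	return nc->va + offset;
}

The only work left on the common path is the offset subtraction and the
pagecnt_bias decrement; everything unlikely, including page reuse and the
PFC_MEMALLOC handling, now lives in __page_frag_cache_refill().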
Thread overview: 15+ messages
2018-03-22 15:31 [PATCH v2 0/8] page_frag_cache improvements Matthew Wilcox
2018-03-22 15:31 ` [PATCH v2 1/8] page_frag_cache: Remove pfmemalloc bool Matthew Wilcox
2018-03-22 16:39 ` Alexander Duyck
2018-03-22 17:08 ` Matthew Wilcox
2018-03-22 15:31 ` Matthew Wilcox [this message]
2018-03-22 15:31 ` [PATCH v2 3/8] page_frag_cache: Rename 'nc' to 'pfc' Matthew Wilcox
2018-03-22 15:31 ` [PATCH v2 4/8] page_frag_cache: Rename fragsz to size Matthew Wilcox
2018-03-22 15:31 ` [PATCH v2 5/8] page_frag_cache: Save memory on small machines Matthew Wilcox
2018-03-22 15:31 ` [PATCH v2 6/8] page_frag_cache: Use a mask instead of offset Matthew Wilcox
2018-03-22 16:22 ` Alexander Duyck
2018-03-22 16:41 ` Matthew Wilcox
2018-03-22 17:31 ` Alexander Duyck
2018-03-22 17:34 ` Matthew Wilcox
2018-03-22 15:31 ` [PATCH v2 7/8] page_frag: Update documentation Matthew Wilcox
2018-03-22 15:31 ` [PATCH v2 8/8] page_frag: Account allocations Matthew Wilcox