From: David Howells <dhowells@redhat.com>
To: Matthew Wilcox <willy@infradead.org>,
	"David S. Miller" <davem@davemloft.net>,
	Eric Dumazet <edumazet@google.com>,
	Jakub Kicinski <kuba@kernel.org>, Paolo Abeni <pabeni@redhat.com>
Cc: David Howells <dhowells@redhat.com>,
	Al Viro <viro@zeniv.linux.org.uk>,
	Christoph Hellwig <hch@infradead.org>,
	Jens Axboe <axboe@kernel.dk>, Jeff Layton <jlayton@kernel.org>,
	Christian Brauner <brauner@kernel.org>,
	Chuck Lever III <chuck.lever@oracle.com>,
	Linus Torvalds <torvalds@linux-foundation.org>,
	netdev@vger.kernel.org, linux-fsdevel@vger.kernel.org,
	linux-kernel@vger.kernel.org, linux-mm@kvack.org
Subject: [RFC PATCH v2 06/48] mm: Make the page_frag_cache allocator use multipage folios
Date: Wed, 29 Mar 2023 15:13:12 +0100
Message-ID: <20230329141354.516864-7-dhowells@redhat.com>
In-Reply-To: <20230329141354.516864-1-dhowells@redhat.com>

Change the page_frag_cache allocator to use multipage folios rather than
groups of pages.  This reduces page_frag_free() to just a folio_put() or
put_page().
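
For illustration, use of the API by a consumer is unchanged; a minimal
sketch (the fragment size and error handling here are arbitrary, not
taken from this patch):

	struct page_frag_cache nc = {};
	void *frag;

	frag = page_frag_alloc(&nc, 256, GFP_KERNEL);
	if (frag) {
		/* ... fill in and hand off the fragment ... */
		page_frag_free(frag);	/* now just a folio_put()/put_page() */
	}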

Signed-off-by: David Howells <dhowells@redhat.com>
cc: "David S. Miller" <davem@davemloft.net>
cc: Eric Dumazet <edumazet@google.com>
cc: Jakub Kicinski <kuba@kernel.org>
cc: Paolo Abeni <pabeni@redhat.com>
cc: Jens Axboe <axboe@kernel.dk>
cc: Matthew Wilcox <willy@infradead.org>
cc: netdev@vger.kernel.org
---
 include/linux/mm_types.h | 13 ++----
 mm/page_frag_alloc.c     | 88 +++++++++++++++++++---------------------
 2 files changed, 45 insertions(+), 56 deletions(-)
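
Note on the refcount bias (explanatory sketch only, not part of the
applied diff): on refill, the cache takes PAGE_FRAG_CACHE_MAX_SIZE extra
references on the folio and sets pagecnt_bias to
PAGE_FRAG_CACHE_MAX_SIZE + 1, so each fragment allocation only
decrements the local bias and never dirties the cache line holding the
folio's refcount.  When the folio is exhausted,
folio_ref_sub_and_test(folio, nc->pagecnt_bias) drops the cache's
remaining references in one step and reaches zero exactly when every
fragment handed out has been freed, in which case the folio is recycled
with folio_set_count() rather than reallocated.  For example, allocating
three fragments drops the bias by 3 while the refcount stays put; once
those three fragments are freed, the refcount equals the bias again, so
the sub-and-test hits zero and the folio is reused.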

diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 0722859c3647..49a70b3f44a9 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -420,18 +420,13 @@ static inline void *folio_get_private(struct folio *folio)
 }
 
 struct page_frag_cache {
-	void * va;
-#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
-	__u16 offset;
-	__u16 size;
-#else
-	__u32 offset;
-#endif
+	struct folio	*folio;
+	unsigned int	offset;
 	/* we maintain a pagecount bias, so that we dont dirty cache line
 	 * containing page->_refcount every time we allocate a fragment.
 	 */
-	unsigned int		pagecnt_bias;
-	bool pfmemalloc;
+	unsigned int	pagecnt_bias;
+	bool		pfmemalloc;
 };
 
 typedef unsigned long vm_flags_t;
diff --git a/mm/page_frag_alloc.c b/mm/page_frag_alloc.c
index bee95824ef8f..c3792b68ce32 100644
--- a/mm/page_frag_alloc.c
+++ b/mm/page_frag_alloc.c
@@ -16,33 +16,34 @@
 #include <linux/init.h>
 #include <linux/mm.h>
 
-static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
-					     gfp_t gfp_mask)
+/*
+ * Allocate a new folio for the frag cache.
+ */
+static struct folio *page_frag_cache_refill(struct page_frag_cache *nc,
+					    gfp_t gfp_mask)
 {
-	struct page *page = NULL;
+	struct folio *folio = NULL;
 	gfp_t gfp = gfp_mask;
 
 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
-	gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
-		    __GFP_NOMEMALLOC;
-	page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
-				PAGE_FRAG_CACHE_MAX_ORDER);
-	nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE;
+	gfp_mask |= __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC;
+	folio = folio_alloc(gfp_mask, PAGE_FRAG_CACHE_MAX_ORDER);
 #endif
-	if (unlikely(!page))
-		page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
-
-	nc->va = page ? page_address(page) : NULL;
+	if (unlikely(!folio))
+		folio = folio_alloc(gfp, 0);
 
-	return page;
+	if (folio)
+		nc->folio = folio;
+	return folio;
 }
 
 void __page_frag_cache_drain(struct page *page, unsigned int count)
 {
-	VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
+	struct folio *folio = page_folio(page);
 
-	if (page_ref_sub_and_test(page, count - 1))
-		__free_pages(page, compound_order(page));
+	VM_BUG_ON_FOLIO(folio_ref_count(folio) == 0, folio);
+
+	folio_put_refs(folio, count);
 }
 EXPORT_SYMBOL(__page_frag_cache_drain);
 
@@ -50,54 +51,47 @@ void *page_frag_alloc_align(struct page_frag_cache *nc,
 		      unsigned int fragsz, gfp_t gfp_mask,
 		      unsigned int align_mask)
 {
-	unsigned int size = PAGE_SIZE;
-	struct page *page;
-	int offset;
+	struct folio *folio = nc->folio;
+	size_t offset;
 
-	if (unlikely(!nc->va)) {
+	if (unlikely(!folio)) {
 refill:
-		page = __page_frag_cache_refill(nc, gfp_mask);
-		if (!page)
+		folio = page_frag_cache_refill(nc, gfp_mask);
+		if (!folio)
 			return NULL;
 
-#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
-		/* if size can vary use size else just use PAGE_SIZE */
-		size = nc->size;
-#endif
 		/* Even if we own the page, we do not use atomic_set().
 		 * This would break get_page_unless_zero() users.
 		 */
-		page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE);
+		folio_ref_add(folio, PAGE_FRAG_CACHE_MAX_SIZE);
 
 		/* reset page count bias and offset to start of new frag */
-		nc->pfmemalloc = page_is_pfmemalloc(page);
+		nc->pfmemalloc = folio_is_pfmemalloc(folio);
 		nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
-		nc->offset = size;
+		nc->offset = folio_size(folio);
 	}
 
-	offset = nc->offset - fragsz;
-	if (unlikely(offset < 0)) {
-		page = virt_to_page(nc->va);
-
-		if (page_ref_count(page) != nc->pagecnt_bias)
+	offset = nc->offset;
+	if (unlikely(fragsz > offset)) {
+		/* Reuse the folio if everyone we gave it to has finished with it. */
+		if (!folio_ref_sub_and_test(folio, nc->pagecnt_bias)) {
+			nc->folio = NULL;
 			goto refill;
+		}
+
 		if (unlikely(nc->pfmemalloc)) {
-			page_ref_sub(page, nc->pagecnt_bias - 1);
-			__free_pages(page, compound_order(page));
+			__folio_put(folio);
+			nc->folio = NULL;
 			goto refill;
 		}
 
-#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
-		/* if size can vary use size else just use PAGE_SIZE */
-		size = nc->size;
-#endif
 		/* OK, page count is 0, we can safely set it */
-		set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1);
+		folio_set_count(folio, PAGE_FRAG_CACHE_MAX_SIZE + 1);
 
 		/* reset page count bias and offset to start of new frag */
 		nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
-		offset = size - fragsz;
-		if (unlikely(offset < 0)) {
+		offset = folio_size(folio);
+		if (unlikely(fragsz > offset)) {
 			/*
 			 * The caller is trying to allocate a fragment
 			 * with fragsz > PAGE_SIZE but the cache isn't big
@@ -107,15 +101,17 @@ void *page_frag_alloc_align(struct page_frag_cache *nc,
 			 * it could make memory pressure worse
 			 * so we simply return NULL here.
 			 */
+			nc->offset = offset;
 			return NULL;
 		}
 	}
 
 	nc->pagecnt_bias--;
+	offset -= fragsz;
 	offset &= align_mask;
 	nc->offset = offset;
 
-	return nc->va + offset;
+	return folio_address(folio) + offset;
 }
 EXPORT_SYMBOL(page_frag_alloc_align);
 
@@ -124,8 +120,6 @@ EXPORT_SYMBOL(page_frag_alloc_align);
  */
 void page_frag_free(void *addr)
 {
-	struct page *page = virt_to_head_page(addr);
-
-	__free_pages(page, compound_order(page));
+	folio_put(virt_to_folio(addr));
 }
 EXPORT_SYMBOL(page_frag_free);



