* [RFC net-next] net: veth: reduce page_pool memory footprint using half page per-buffer
@ 2023-05-12 13:08 Lorenzo Bianconi
  2023-05-12 13:43 ` Alexander Lobakin
                   ` (2 more replies)
  0 siblings, 3 replies; 14+ messages in thread
From: Lorenzo Bianconi @ 2023-05-12 13:08 UTC
  To: netdev
  Cc: lorenzo.bianconi, bpf, davem, edumazet, kuba, pabeni, ast, daniel,
	hawk, john.fastabend, linyunsheng

In order to reduce the page_pool memory footprint, rely on the
page_pool_dev_alloc_frag() routine and shrink the per-buffer size
(VETH_PAGE_POOL_FRAG_SIZE) to 2048B, half of a 4KB page, so that a
single page serves two 1500B frames. Reduce VETH_XDP_PACKET_HEADROOM
from 256 (XDP_PACKET_HEADROOM) to 192 so that max_head_size still fits
in VETH_PAGE_POOL_FRAG_SIZE. Please note that, with the default
CONFIG_MAX_SKB_FRAGS=17, the maximum supported MTU is now reduced to
36350B.
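
For reference, the 36350B figure follows from the new drop check in
veth_convert_skb_to_xdp_buff(), assuming NET_IP_ALIGN == 2 and
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) == 320 (64-bit build,
17 frags):

  max_head_size = SKB_WITH_OVERHEAD(2048 - (192 + 2))
                = 1854 - 320
                = 1534
  max skb->len  = VETH_PAGE_POOL_FRAG_SIZE * MAX_SKB_FRAGS + max_head_size
                = 2048 * 17 + 1534
                = 36350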

Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
---
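
For reference (not part of the patch), a minimal sketch of the sub-page
allocation pattern the diff below relies on. It mirrors
veth_create_page_pool() and veth_convert_skb_to_xdp_buff(); error paths
are trimmed to the essentials and the snippet is assumed to sit in a
function with a struct veth_rq pointer (rq) in scope:

	struct page_pool_params pp_params = {
		.order		= 0,
		.pool_size	= VETH_RING_SIZE,
		.flags		= PP_FLAG_PAGE_FRAG,	/* enable sub-page (frag) allocations */
		.nid		= NUMA_NO_NODE,
		.dev		= &rq->dev->dev,
	};
	struct page_pool *pool;
	struct sk_buff *nskb;
	struct page *page;
	unsigned int pp_off;

	pool = page_pool_create(&pp_params);
	if (IS_ERR(pool))
		return PTR_ERR(pool);

	/* Each call hands out a 2048B slice; on a 4KB-page system two
	 * consecutive slices normally share a single page (offsets 0
	 * and 2048), which is where the memory saving comes from.
	 */
	page = page_pool_dev_alloc_frag(pool, &pp_off,
					VETH_PAGE_POOL_FRAG_SIZE);
	if (!page)
		return -ENOMEM;

	/* Build the skb head at the frag offset, as the diff does. */
	nskb = napi_build_skb(page_address(page) + pp_off,
			      VETH_PAGE_POOL_FRAG_SIZE);
	if (!nskb) {
		page_pool_put_full_page(pool, page, true);
		return -ENOMEM;
	}
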
 drivers/net/veth.c | 39 +++++++++++++++++++++++++--------------
 1 file changed, 25 insertions(+), 14 deletions(-)

diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 614f3e3efab0..0e648703cccf 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -31,9 +31,12 @@
 #define DRV_NAME	"veth"
 #define DRV_VERSION	"1.0"
 
-#define VETH_XDP_FLAG		BIT(0)
-#define VETH_RING_SIZE		256
-#define VETH_XDP_HEADROOM	(XDP_PACKET_HEADROOM + NET_IP_ALIGN)
+#define VETH_XDP_FLAG			BIT(0)
+#define VETH_RING_SIZE			256
+#define VETH_XDP_PACKET_HEADROOM	192
+#define VETH_XDP_HEADROOM		(VETH_XDP_PACKET_HEADROOM + \
+					 NET_IP_ALIGN)
+#define VETH_PAGE_POOL_FRAG_SIZE	2048
 
 #define VETH_XDP_TX_BULK_SIZE	16
 #define VETH_XDP_BATCH		16
@@ -736,7 +739,7 @@ static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq,
 	if (skb_shared(skb) || skb_head_is_locked(skb) ||
 	    skb_shinfo(skb)->nr_frags ||
 	    skb_headroom(skb) < XDP_PACKET_HEADROOM) {
-		u32 size, len, max_head_size, off;
+		u32 size, len, max_head_size, off, pp_off;
 		struct sk_buff *nskb;
 		struct page *page;
 		int i, head_off;
@@ -747,17 +750,20 @@ static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq,
 		 *
 		 * Make sure we have enough space for linear and paged area
 		 */
-		max_head_size = SKB_WITH_OVERHEAD(PAGE_SIZE -
+		max_head_size = SKB_WITH_OVERHEAD(VETH_PAGE_POOL_FRAG_SIZE -
 						  VETH_XDP_HEADROOM);
-		if (skb->len > PAGE_SIZE * MAX_SKB_FRAGS + max_head_size)
+		if (skb->len >
+		    VETH_PAGE_POOL_FRAG_SIZE * MAX_SKB_FRAGS + max_head_size)
 			goto drop;
 
 		/* Allocate skb head */
-		page = page_pool_dev_alloc_pages(rq->page_pool);
+		page = page_pool_dev_alloc_frag(rq->page_pool, &pp_off,
+						VETH_PAGE_POOL_FRAG_SIZE);
 		if (!page)
 			goto drop;
 
-		nskb = napi_build_skb(page_address(page), PAGE_SIZE);
+		nskb = napi_build_skb(page_address(page) + pp_off,
+				      VETH_PAGE_POOL_FRAG_SIZE);
 		if (!nskb) {
 			page_pool_put_full_page(rq->page_pool, page, true);
 			goto drop;
@@ -782,15 +788,18 @@ static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq,
 		len = skb->len - off;
 
 		for (i = 0; i < MAX_SKB_FRAGS && off < skb->len; i++) {
-			page = page_pool_dev_alloc_pages(rq->page_pool);
+			page = page_pool_dev_alloc_frag(rq->page_pool, &pp_off,
+							VETH_PAGE_POOL_FRAG_SIZE);
 			if (!page) {
 				consume_skb(nskb);
 				goto drop;
 			}
 
-			size = min_t(u32, len, PAGE_SIZE);
-			skb_add_rx_frag(nskb, i, page, 0, size, PAGE_SIZE);
-			if (skb_copy_bits(skb, off, page_address(page),
+			size = min_t(u32, len, VETH_PAGE_POOL_FRAG_SIZE);
+			skb_add_rx_frag(nskb, i, page, pp_off, size,
+					VETH_PAGE_POOL_FRAG_SIZE);
+			if (skb_copy_bits(skb, off,
+					  page_address(page) + pp_off,
 					  size)) {
 				consume_skb(nskb);
 				goto drop;
@@ -1035,6 +1044,7 @@ static int veth_create_page_pool(struct veth_rq *rq)
 	struct page_pool_params pp_params = {
 		.order = 0,
 		.pool_size = VETH_RING_SIZE,
+		.flags = PP_FLAG_PAGE_FRAG,
 		.nid = NUMA_NO_NODE,
 		.dev = &rq->dev->dev,
 	};
@@ -1634,13 +1644,14 @@ static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
 			goto err;
 		}
 
-		max_mtu = SKB_WITH_OVERHEAD(PAGE_SIZE - VETH_XDP_HEADROOM) -
+		max_mtu = SKB_WITH_OVERHEAD(VETH_PAGE_POOL_FRAG_SIZE -
+					    VETH_XDP_HEADROOM) -
 			  peer->hard_header_len;
 		/* Allow increasing the max_mtu if the program supports
 		 * XDP fragments.
 		 */
 		if (prog->aux->xdp_has_frags)
-			max_mtu += PAGE_SIZE * MAX_SKB_FRAGS;
+			max_mtu += VETH_PAGE_POOL_FRAG_SIZE * MAX_SKB_FRAGS;
 
 		if (peer->mtu > max_mtu) {
 			NL_SET_ERR_MSG_MOD(extack, "Peer MTU is too large to set XDP");
-- 
2.40.1


Thread overview: 14+ messages
2023-05-12 13:08 [RFC net-next] net: veth: reduce page_pool memory footprint using half page per-buffer Lorenzo Bianconi
2023-05-12 13:43 ` Alexander Lobakin
2023-05-12 14:14   ` Lorenzo Bianconi
2023-05-15 16:36     ` Alexander Lobakin
2023-05-15 11:10 ` Yunsheng Lin
2023-05-15 11:24   ` Lorenzo Bianconi
2023-05-15 13:11     ` Maciej Fijalkowski
2023-05-16 22:52       ` Lorenzo Bianconi
2023-05-17  9:41         ` Yunsheng Lin
2023-05-17 14:17           ` Lorenzo Bianconi
2023-05-18  1:16             ` Yunsheng Lin
2023-05-17 14:58         ` Jakub Kicinski
2023-05-16 12:55     ` Yunsheng Lin
2023-05-16 16:11 ` Jesper Dangaard Brouer
