[PATCH net-next] net: mana: Add page pool for RX buffers
From: Haiyang Zhang @ 2023-07-13 14:48 UTC
  To: linux-hyperv@vger.kernel.org, netdev@vger.kernel.org
  Cc: Haiyang Zhang, Dexuan Cui, KY Srinivasan, Paul Rosswurm,
	olaf@aepfle.de, vkuznets@redhat.com, davem@davemloft.net,
	wei.liu@kernel.org, edumazet@google.com, kuba@kernel.org,
	pabeni@redhat.com, leon@kernel.org, Long Li,
	ssengar@linux.microsoft.com, linux-rdma@vger.kernel.org,
	daniel@iogearbox.net, john.fastabend@gmail.com,
	bpf@vger.kernel.org, ast@kernel.org, Ajay Sharma, hawk@kernel.org,
	tglx@linutronix.de, shradhagupta@linux.microsoft.com,
	linux-kernel@vger.kernel.org

Add a page pool for RX buffers, for faster buffer cycling and reduced
CPU usage.

Take an extra reference on each page after allocation, so that after
the upper layers put the page, it is still referenced by the pool and
can be reused as an RX buffer without allocating a new page.

Signed-off-by: Haiyang Zhang <haiyangz@microsoft.com>
---
 drivers/net/ethernet/microsoft/mana/mana_en.c | 73 ++++++++++++++++++-
 include/net/mana/mana.h                       |  5 ++
 2 files changed, 77 insertions(+), 1 deletion(-)
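
For reviewers skimming the diff: the reuse decision rests entirely on
the page refcount. The pool holds exactly one reference per page, so a
refcount of 1 means the upper layers have dropped theirs. A condensed
sketch of the check at the heart of mana_get_page_from_pool() below
(illustration only, the full function also advances the round-robin
index and handles replacement):

	page = rxq->pool[i];
	if (page_ref_count(page) == 1) {
		/* Only the pool's reference is left: the stack is done
		 * with the page, so hand it out again. get_page() takes
		 * the RX buffer's reference (refcount 1 -> 2).
		 */
		get_page(page);
		return page;
	}
	/* Page still in flight; allocate a fresh one instead. */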

diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index a499e460594b..6444a8e47852 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -1507,6 +1507,34 @@ static void mana_rx_skb(void *buf_va, struct mana_rxcomp_oob *cqe,
 	return;
 }
 
+static struct page *mana_get_page_from_pool(struct mana_rxq *rxq)
+{
+	struct page *page;
+	int i;
+
+	i = rxq->pl_last + 1;
+	if (i >= MANA_POOL_SIZE)
+		i = 0;
+
+	rxq->pl_last = i;
+
+	page = rxq->pool[i];
+	if (page_ref_count(page) == 1) {
+		get_page(page);
+		return page;
+	}
+
+	page = dev_alloc_page();
+	if (page) {
+		put_page(rxq->pool[i]);
+
+		get_page(page);
+		rxq->pool[i] = page;
+	}
+
+	return page;
+}
+
 static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev,
 			     dma_addr_t *da, bool is_napi)
 {
@@ -1533,7 +1561,7 @@ static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev,
 			return NULL;
 		}
 	} else {
-		page = dev_alloc_page();
+		page = mana_get_page_from_pool(rxq);
 		if (!page)
 			return NULL;
 
@@ -1873,6 +1901,21 @@ static int mana_create_txq(struct mana_port_context *apc,
 	return err;
 }
 
+static void mana_release_rxq_pool(struct mana_rxq *rxq)
+{
+	struct page *page;
+	int i;
+
+	for (i = 0; i < MANA_POOL_SIZE; i++) {
+		page = rxq->pool[i];
+
+		if (page)
+			put_page(page);
+
+		rxq->pool[i] = NULL;
+	}
+}
+
 static void mana_destroy_rxq(struct mana_port_context *apc,
 			     struct mana_rxq *rxq, bool validate_state)
 
@@ -1917,6 +1960,8 @@ static void mana_destroy_rxq(struct mana_port_context *apc,
 		rx_oob->buf_va = NULL;
 	}
 
+	mana_release_rxq_pool(rxq);
+
 	if (rxq->gdma_rq)
 		mana_gd_destroy_queue(gc, rxq->gdma_rq);
 
@@ -2008,6 +2053,27 @@ static int mana_push_wqe(struct mana_rxq *rxq)
 	return 0;
 }
 
+static int mana_alloc_rxq_pool(struct mana_rxq *rxq)
+{
+	struct page *page;
+	int i;
+
+	for (i = 0; i < MANA_POOL_SIZE; i++) {
+		page = dev_alloc_page();
+		if (!page)
+			goto err;
+
+		rxq->pool[i] = page;
+	}
+
+	return 0;
+
+err:
+	mana_release_rxq_pool(rxq);
+
+	return -ENOMEM;
+}
+
 static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
 					u32 rxq_idx, struct mana_eq *eq,
 					struct net_device *ndev)
@@ -2029,6 +2095,11 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
 	if (!rxq)
 		return NULL;
 
+	if (mana_alloc_rxq_pool(rxq)) {
+		kfree(rxq);
+		return NULL;
+	}
+
 	rxq->ndev = ndev;
 	rxq->num_rx_buf = RX_BUFFERS_PER_QUEUE;
 	rxq->rxq_idx = rxq_idx;
diff --git a/include/net/mana/mana.h b/include/net/mana/mana.h
index 024ad8ddb27e..8f1f09f9e4ab 100644
--- a/include/net/mana/mana.h
+++ b/include/net/mana/mana.h
@@ -297,6 +297,8 @@ struct mana_recv_buf_oob {
 
 #define MANA_XDP_MTU_MAX (PAGE_SIZE - MANA_RXBUF_PAD - XDP_PACKET_HEADROOM)
 
+#define MANA_POOL_SIZE (RX_BUFFERS_PER_QUEUE * 2)
+
 struct mana_rxq {
 	struct gdma_queue *gdma_rq;
 	/* Cache the gdma receive queue id */
@@ -330,6 +332,9 @@ struct mana_rxq {
 	bool xdp_flush;
 	int xdp_rc; /* XDP redirect return code */
 
+	struct page *pool[MANA_POOL_SIZE];
+	int pl_last;
+
 	/* MUST BE THE LAST MEMBER:
 	 * Each receive buffer has an associated mana_recv_buf_oob.
 	 */
-- 
2.25.1
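
Putting the pieces together, the pool's lifetime as wired up by this
patch (call-order sketch only, not compilable driver code):

	/* rxq create: fill the pool; each page carries the pool's
	 * reference taken at dev_alloc_page() time
	 */
	if (mana_alloc_rxq_pool(rxq))
		goto fail;

	/* datapath: mana_get_rxfrag() now draws from the pool instead
	 * of calling dev_alloc_page() directly; a page whose refcount
	 * has dropped back to 1 is recycled, otherwise it is replaced
	 * in place by a freshly allocated page
	 */
	page = mana_get_page_from_pool(rxq);

	/* rxq destroy: drop the pool's reference on every page */
	mana_release_rxq_pool(rxq);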

