From: Yunsheng Lin <linyunsheng@huawei.com>
To: <davem@davemloft.net>, <kuba@kernel.org>
Cc: <alexander.duyck@gmail.com>, <linux@armlinux.org.uk>,
<mw@semihalf.com>, <linuxarm@openeuler.org>,
<yisen.zhuang@huawei.com>, <salil.mehta@huawei.com>,
<thomas.petazzoni@bootlin.com>, <hawk@kernel.org>,
<ilias.apalodimas@linaro.org>, <ast@kernel.org>,
<daniel@iogearbox.net>, <john.fastabend@gmail.com>,
<akpm@linux-foundation.org>, <peterz@infradead.org>,
<will@kernel.org>, <willy@infradead.org>, <vbabka@suse.cz>,
<fenghua.yu@intel.com>, <guro@fb.com>, <peterx@redhat.com>,
<feng.tang@intel.com>, <jgg@ziepe.ca>, <mcroce@microsoft.com>,
<hughd@google.com>, <jonathan.lemon@gmail.com>, <alobakin@pm.me>,
<willemb@google.com>, <wenxu@ucloud.cn>,
<cong.wang@bytedance.com>, <haokexin@gmail.com>,
<nogikh@google.com>, <elver@google.com>, <yhs@fb.com>,
<kpsingh@kernel.org>, <andrii@kernel.org>, <kafai@fb.com>,
<songliubraving@fb.com>, <netdev@vger.kernel.org>,
<linux-kernel@vger.kernel.org>, <bpf@vger.kernel.org>,
<chenhao288@hisilicon.com>, <edumazet@google.com>,
<yoshfuji@linux-ipv6.org>, <dsahern@kernel.org>,
<memxor@gmail.com>, <linux@rempel-privat.de>,
<atenart@kernel.org>, <weiwan@google.com>, <ap420073@gmail.com>,
<arnd@arndb.de>, <mathew.j.martineau@linux.intel.com>,
<aahringo@redhat.com>, <ceggers@arri.de>, <yangbo.lu@nxp.com>,
<fw@strlen.de>, <xiangxia.m.yue@gmail.com>,
<linmiaohe@huawei.com>
Subject: [PATCH RFC 3/7] net: add NAPI api to register and retrieve the page pool ptr
Date: Wed, 18 Aug 2021 11:32:19 +0800 [thread overview]
Message-ID: <1629257542-36145-4-git-send-email-linyunsheng@huawei.com> (raw)
In-Reply-To: <1629257542-36145-1-git-send-email-linyunsheng@huawei.com>
As tx recycling is built upon the busy polling infrastructure,
and busy polling is based on napi_id, add an API for drivers
to register a page pool with a NAPI instance and an API for the
socket layer to retrieve the page pool corresponding to a NAPI.
Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
---
include/linux/netdevice.h | 9 +++++++++
net/core/dev.c | 34 +++++++++++++++++++++++++++++++---
2 files changed, 40 insertions(+), 3 deletions(-)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 2f03cd9..51a1169 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -40,6 +40,7 @@
#endif
#include <net/netprio_cgroup.h>
#include <net/xdp.h>
+#include <net/page_pool.h>
#include <linux/netdev_features.h>
#include <linux/neighbour.h>
@@ -336,6 +337,7 @@ struct napi_struct {
struct hlist_node napi_hash_node;
unsigned int napi_id;
struct task_struct *thread;
+ struct page_pool *pp;
};
enum {
@@ -349,6 +351,7 @@ enum {
NAPI_STATE_PREFER_BUSY_POLL, /* prefer busy-polling over softirq processing*/
NAPI_STATE_THREADED, /* The poll is performed inside its own thread*/
NAPI_STATE_SCHED_THREADED, /* Napi is currently scheduled in threaded mode */
+ NAPI_STATE_RECYCLABLE, /* Support tx page recycling */
};
enum {
@@ -362,6 +365,7 @@ enum {
NAPIF_STATE_PREFER_BUSY_POLL = BIT(NAPI_STATE_PREFER_BUSY_POLL),
NAPIF_STATE_THREADED = BIT(NAPI_STATE_THREADED),
NAPIF_STATE_SCHED_THREADED = BIT(NAPI_STATE_SCHED_THREADED),
+ NAPIF_STATE_RECYCLABLE = BIT(NAPI_STATE_RECYCLABLE),
};
enum gro_result {
@@ -2473,6 +2477,10 @@ static inline void *netdev_priv(const struct net_device *dev)
void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
int (*poll)(struct napi_struct *, int), int weight);
+void netif_recyclable_napi_add(struct net_device *dev, struct napi_struct *napi,
+ int (*poll)(struct napi_struct *, int),
+ int weight, struct page_pool *pool);
+
/**
* netif_tx_napi_add - initialize a NAPI context
* @dev: network device
@@ -2997,6 +3005,7 @@ struct net_device *dev_get_by_index(struct net *net, int ifindex);
struct net_device *__dev_get_by_index(struct net *net, int ifindex);
struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
struct net_device *dev_get_by_napi_id(unsigned int napi_id);
+struct page_pool *page_pool_get_by_napi_id(unsigned int napi_id);
int netdev_get_name(struct net *net, char *name, int ifindex);
int dev_restart(struct net_device *dev);
int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb);
diff --git a/net/core/dev.c b/net/core/dev.c
index 74fd402..d6b905b 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -935,6 +935,19 @@ struct net_device *dev_get_by_napi_id(unsigned int napi_id)
}
EXPORT_SYMBOL(dev_get_by_napi_id);
+struct page_pool *page_pool_get_by_napi_id(unsigned int napi_id)
+{
+ struct napi_struct *napi;
+ struct page_pool *pp = NULL;
+
+ napi = napi_by_id(napi_id);
+ if (napi)
+ pp = napi->pp;
+
+ return pp;
+}
+EXPORT_SYMBOL(page_pool_get_by_napi_id);
+
/**
* netdev_get_name - get a netdevice name, knowing its ifindex.
* @net: network namespace
@@ -6757,7 +6770,8 @@ EXPORT_SYMBOL(napi_busy_loop);
static void napi_hash_add(struct napi_struct *napi)
{
- if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state))
+ if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state) ||
+ !test_bit(NAPI_STATE_RECYCLABLE, &napi->state))
return;
spin_lock(&napi_hash_lock);
@@ -6860,8 +6874,10 @@ int dev_set_threaded(struct net_device *dev, bool threaded)
}
EXPORT_SYMBOL(dev_set_threaded);
-void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
- int (*poll)(struct napi_struct *, int), int weight)
+void netif_recyclable_napi_add(struct net_device *dev,
+ struct napi_struct *napi,
+ int (*poll)(struct napi_struct *, int),
+ int weight, struct page_pool *pool)
{
if (WARN_ON(test_and_set_bit(NAPI_STATE_LISTED, &napi->state)))
return;
@@ -6886,6 +6902,11 @@ void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
set_bit(NAPI_STATE_SCHED, &napi->state);
set_bit(NAPI_STATE_NPSVC, &napi->state);
list_add_rcu(&napi->dev_list, &dev->napi_list);
+ if (pool) {
+ napi->pp = pool;
+ set_bit(NAPI_STATE_RECYCLABLE, &napi->state);
+ }
+
napi_hash_add(napi);
/* Create kthread for this napi if dev->threaded is set.
* Clear dev->threaded if kthread creation failed so that
@@ -6894,6 +6915,13 @@ void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
if (dev->threaded && napi_kthread_create(napi))
dev->threaded = 0;
}
+EXPORT_SYMBOL(netif_recyclable_napi_add);
+
+void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
+ int (*poll)(struct napi_struct *, int), int weight)
+{
+ netif_recyclable_napi_add(dev, napi, poll, weight, NULL);
+}
EXPORT_SYMBOL(netif_napi_add);
void napi_disable(struct napi_struct *n)
--
2.7.4
next prev parent reply other threads:[~2021-08-18 3:33 UTC|newest]
Thread overview: 23+ messages / expand[flat|nested] mbox.gz Atom feed top
2021-08-18 3:32 [PATCH RFC 0/7] add socket to netdev page frag recycling support Yunsheng Lin
2021-08-18 3:32 ` [PATCH RFC 1/7] page_pool: refactor the page pool to support multi alloc context Yunsheng Lin
2021-08-18 3:32 ` [PATCH RFC 2/7] skbuff: add interface to manipulate frag count for tx recycling Yunsheng Lin
2021-08-18 3:32 ` Yunsheng Lin [this message]
2021-08-18 3:32 ` [PATCH RFC 4/7] net: pfrag_pool: add pfrag pool support based on page pool Yunsheng Lin
2021-08-18 3:32 ` [PATCH RFC 5/7] sock: support refilling pfrag from pfrag_pool Yunsheng Lin
2021-08-18 3:32 ` [PATCH RFC 6/7] net: hns3: support tx recycling in the hns3 driver Yunsheng Lin
2021-08-18 8:57 ` [PATCH RFC 0/7] add socket to netdev page frag recycling support Eric Dumazet
2021-08-18 9:36 ` Yunsheng Lin
2021-08-23 9:25 ` [Linuxarm] " Yunsheng Lin
2021-08-23 15:04 ` Eric Dumazet
2021-08-24 8:03 ` Yunsheng Lin
2021-08-25 16:29 ` David Ahern
2021-08-25 16:32 ` Eric Dumazet
2021-08-25 16:38 ` David Ahern
2021-08-25 17:24 ` Eric Dumazet
2021-08-26 4:05 ` David Ahern
2021-08-18 22:05 ` David Ahern
2021-08-19 8:18 ` Yunsheng Lin
2021-08-20 14:35 ` David Ahern
2021-08-23 3:32 ` Yunsheng Lin
2021-08-24 3:34 ` David Ahern
2021-08-24 8:41 ` Yunsheng Lin
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1629257542-36145-4-git-send-email-linyunsheng@huawei.com \
--to=linyunsheng@huawei.com \
--cc=aahringo@redhat.com \
--cc=akpm@linux-foundation.org \
--cc=alexander.duyck@gmail.com \
--cc=alobakin@pm.me \
--cc=andrii@kernel.org \
--cc=ap420073@gmail.com \
--cc=arnd@arndb.de \
--cc=ast@kernel.org \
--cc=atenart@kernel.org \
--cc=bpf@vger.kernel.org \
--cc=ceggers@arri.de \
--cc=chenhao288@hisilicon.com \
--cc=cong.wang@bytedance.com \
--cc=daniel@iogearbox.net \
--cc=davem@davemloft.net \
--cc=dsahern@kernel.org \
--cc=edumazet@google.com \
--cc=elver@google.com \
--cc=feng.tang@intel.com \
--cc=fenghua.yu@intel.com \
--cc=fw@strlen.de \
--cc=guro@fb.com \
--cc=haokexin@gmail.com \
--cc=hawk@kernel.org \
--cc=hughd@google.com \
--cc=ilias.apalodimas@linaro.org \
--cc=jgg@ziepe.ca \
--cc=john.fastabend@gmail.com \
--cc=jonathan.lemon@gmail.com \
--cc=kafai@fb.com \
--cc=kpsingh@kernel.org \
--cc=kuba@kernel.org \
--cc=linmiaohe@huawei.com \
--cc=linux-kernel@vger.kernel.org \
--cc=linux@armlinux.org.uk \
--cc=linux@rempel-privat.de \
--cc=linuxarm@openeuler.org \
--cc=mathew.j.martineau@linux.intel.com \
--cc=mcroce@microsoft.com \
--cc=memxor@gmail.com \
--cc=mw@semihalf.com \
--cc=netdev@vger.kernel.org \
--cc=nogikh@google.com \
--cc=peterx@redhat.com \
--cc=peterz@infradead.org \
--cc=salil.mehta@huawei.com \
--cc=songliubraving@fb.com \
--cc=thomas.petazzoni@bootlin.com \
--cc=vbabka@suse.cz \
--cc=weiwan@google.com \
--cc=wenxu@ucloud.cn \
--cc=will@kernel.org \
--cc=willemb@google.com \
--cc=willy@infradead.org \
--cc=xiangxia.m.yue@gmail.com \
--cc=yangbo.lu@nxp.com \
--cc=yhs@fb.com \
--cc=yisen.zhuang@huawei.com \
--cc=yoshfuji@linux-ipv6.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox