From: Joe Damato <jdamato@fastly.com>
To: netdev@vger.kernel.org
Cc: mkarsten@uwaterloo.ca, kuba@kernel.org, skhawaja@google.com,
sdf@fomichev.me, bjorn@rivosinc.com, amritha.nambiar@intel.com,
sridhar.samudrala@intel.com, Joe Damato <jdamato@fastly.com>,
"David S. Miller" <davem@davemloft.net>,
Eric Dumazet <edumazet@google.com>,
Paolo Abeni <pabeni@redhat.com>,
Donald Hunter <donald.hunter@gmail.com>,
Jesper Dangaard Brouer <hawk@kernel.org>,
Xuan Zhuo <xuanzhuo@linux.alibaba.com>,
Daniel Jurgens <danielj@nvidia.com>,
Larysa Zaremba <larysa.zaremba@intel.com>,
linux-kernel@vger.kernel.org (open list)
Subject: [RFC net-next v3 6/9] netdev-genl: Support setting per-NAPI config values
Date: Thu, 12 Sep 2024 10:07:14 +0000
Message-ID: <20240912100738.16567-7-jdamato@fastly.com>
In-Reply-To: <20240912100738.16567-1-jdamato@fastly.com>
Add support for setting the per-NAPI defer_hard_irqs and gro_flush_timeout
values via a new NETDEV_CMD_NAPI_SET netlink operation.
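
As an example, the new operation can be driven from the in-tree YNL CLI
once this spec change is applied. The invocation below is illustrative
only: the NAPI ID (345) and the values are placeholders, and the script
path, flags, and JSON keys are taken from tools/net/ynl/cli.py and the
netdev spec rather than verified against this RFC:

  $ ./tools/net/ynl/cli.py \
        --spec Documentation/netlink/specs/netdev.yaml \
        --do napi-set \
        --json='{"id": 345, "defer-hard-irqs": 111, "gro-flush-timeout": 11111}'

The values can then be read back with the existing napi-get operation:

  $ ./tools/net/ynl/cli.py \
        --spec Documentation/netlink/specs/netdev.yaml \
        --do napi-get --json='{"id": 345}'
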
Signed-off-by: Joe Damato <jdamato@fastly.com>
---
 Documentation/netlink/specs/netdev.yaml | 11 ++++++
 include/uapi/linux/netdev.h             |  1 +
 net/core/netdev-genl-gen.c              | 14 ++++++++
 net/core/netdev-genl-gen.h              |  1 +
 net/core/netdev-genl.c                  | 45 +++++++++++++++++++++++++
 tools/include/uapi/linux/netdev.h       |  1 +
 6 files changed, 73 insertions(+)
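
For reviewers, here is a minimal userspace sketch of the same request sent
as raw generic netlink via libmnl; it is illustrative only and not part of
the patch. It assumes the "netdev" genetlink family id has already been
resolved (e.g. via a CTRL_CMD_GETFAMILY lookup, omitted here) and uses the
command and attribute constants added by this series:

  /* Sketch: set defer_hard_irqs and gro_flush_timeout for one NAPI ID.
   * Sends NETDEV_CMD_NAPI_SET and drains the kernel's ack/error reply
   * (error parsing omitted for brevity).
   */
  #include <libmnl/libmnl.h>
  #include <linux/genetlink.h>
  #include <linux/netdev.h>
  #include <stdint.h>
  #include <time.h>

  static int napi_set(struct mnl_socket *nl, uint16_t family_id,
                      uint32_t napi_id, uint32_t defer_hard_irqs,
                      uint64_t gro_flush_timeout)
  {
          char buf[MNL_SOCKET_BUFFER_SIZE];
          struct genlmsghdr *genl;
          struct nlmsghdr *nlh;

          nlh = mnl_nlmsg_put_header(buf);
          nlh->nlmsg_type = family_id;
          nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
          nlh->nlmsg_seq = time(NULL);

          genl = mnl_nlmsg_put_extra_header(nlh, sizeof(*genl));
          genl->cmd = NETDEV_CMD_NAPI_SET;
          genl->version = 1;

          /* Attribute layout matches the policy added in this patch. */
          mnl_attr_put_u32(nlh, NETDEV_A_NAPI_ID, napi_id);
          mnl_attr_put_u32(nlh, NETDEV_A_NAPI_DEFER_HARD_IRQS, defer_hard_irqs);
          mnl_attr_put_u64(nlh, NETDEV_A_NAPI_GRO_FLUSH_TIMEOUT, gro_flush_timeout);

          if (mnl_socket_sendto(nl, nlh, nlh->nlmsg_len) < 0)
                  return -1;

          if (mnl_socket_recvfrom(nl, buf, sizeof(buf)) < 0)
                  return -1;

          return 0;
  }
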
diff --git a/Documentation/netlink/specs/netdev.yaml b/Documentation/netlink/specs/netdev.yaml
index 906091c3059a..319c1e179b08 100644
--- a/Documentation/netlink/specs/netdev.yaml
+++ b/Documentation/netlink/specs/netdev.yaml
@@ -633,6 +633,17 @@ operations:
             - rx-bytes
             - tx-packets
             - tx-bytes
+    -
+      name: napi-set
+      doc: Set configurable NAPI instance settings.
+      attribute-set: napi
+      flags: [ admin-perm ]
+      do:
+        request:
+          attributes:
+            - id
+            - defer-hard-irqs
+            - gro-flush-timeout
 
 mcast-groups:
   list:
diff --git a/include/uapi/linux/netdev.h b/include/uapi/linux/netdev.h
index b088a34e9254..4c5bfbc85504 100644
--- a/include/uapi/linux/netdev.h
+++ b/include/uapi/linux/netdev.h
@@ -188,6 +188,7 @@ enum {
 	NETDEV_CMD_QUEUE_GET,
 	NETDEV_CMD_NAPI_GET,
 	NETDEV_CMD_QSTATS_GET,
+	NETDEV_CMD_NAPI_SET,
 
 	__NETDEV_CMD_MAX,
 	NETDEV_CMD_MAX = (__NETDEV_CMD_MAX - 1)
diff --git a/net/core/netdev-genl-gen.c b/net/core/netdev-genl-gen.c
index 8350a0afa9ec..ead570c6ff7d 100644
--- a/net/core/netdev-genl-gen.c
+++ b/net/core/netdev-genl-gen.c
@@ -74,6 +74,13 @@ static const struct nla_policy netdev_qstats_get_nl_policy[NETDEV_A_QSTATS_SCOPE
 	[NETDEV_A_QSTATS_SCOPE] = NLA_POLICY_MASK(NLA_UINT, 0x1),
 };
 
+/* NETDEV_CMD_NAPI_SET - set */
+static const struct nla_policy netdev_napi_set_nl_policy[NETDEV_A_NAPI_GRO_FLUSH_TIMEOUT + 1] = {
+	[NETDEV_A_NAPI_ID] = { .type = NLA_U32, },
+	[NETDEV_A_NAPI_DEFER_HARD_IRQS] = { .type = NLA_S32 },
+	[NETDEV_A_NAPI_GRO_FLUSH_TIMEOUT] = { .type = NLA_UINT },
+};
+
 /* Ops table for netdev */
 static const struct genl_split_ops netdev_nl_ops[] = {
 	{
@@ -151,6 +158,13 @@ static const struct genl_split_ops netdev_nl_ops[] = {
 		.maxattr	= NETDEV_A_QSTATS_SCOPE,
 		.flags		= GENL_CMD_CAP_DUMP,
 	},
+	{
+		.cmd		= NETDEV_CMD_NAPI_SET,
+		.doit		= netdev_nl_napi_set_doit,
+		.policy		= netdev_napi_set_nl_policy,
+		.maxattr	= NETDEV_A_NAPI_GRO_FLUSH_TIMEOUT,
+		.flags		= GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+	},
 };
 
 static const struct genl_multicast_group netdev_nl_mcgrps[] = {
diff --git a/net/core/netdev-genl-gen.h b/net/core/netdev-genl-gen.h
index 4db40fd5b4a9..b70cb0f20acb 100644
--- a/net/core/netdev-genl-gen.h
+++ b/net/core/netdev-genl-gen.h
@@ -28,6 +28,7 @@ int netdev_nl_queue_get_dumpit(struct sk_buff *skb,
 			       struct netlink_callback *cb);
 int netdev_nl_napi_get_doit(struct sk_buff *skb, struct genl_info *info);
 int netdev_nl_napi_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb);
+int netdev_nl_napi_set_doit(struct sk_buff *skb, struct genl_info *info);
 int netdev_nl_qstats_get_dumpit(struct sk_buff *skb,
 				struct netlink_callback *cb);
 
diff --git a/net/core/netdev-genl.c b/net/core/netdev-genl.c
index 4698034b5a49..3c90a2fd005a 100644
--- a/net/core/netdev-genl.c
+++ b/net/core/netdev-genl.c
@@ -300,6 +300,51 @@ int netdev_nl_napi_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
 	return err;
 }
 
+static int
+netdev_nl_napi_set_config(struct napi_struct *napi, struct genl_info *info)
+{
+	u64 gro_flush_timeout = 0;
+	u32 defer = 0;
+
+	if (info->attrs[NETDEV_A_NAPI_DEFER_HARD_IRQS]) {
+		defer = nla_get_u32(info->attrs[NETDEV_A_NAPI_DEFER_HARD_IRQS]);
+		napi_set_defer_hard_irqs(napi, defer);
+	}
+
+	if (info->attrs[NETDEV_A_NAPI_GRO_FLUSH_TIMEOUT]) {
+		gro_flush_timeout = nla_get_uint(info->attrs[NETDEV_A_NAPI_GRO_FLUSH_TIMEOUT]);
+		napi_set_gro_flush_timeout(napi, gro_flush_timeout);
+	}
+
+	return 0;
+}
+
+int netdev_nl_napi_set_doit(struct sk_buff *skb, struct genl_info *info)
+{
+	struct napi_struct *napi;
+	unsigned int napi_id;
+	int err;
+
+	if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_NAPI_ID))
+		return -EINVAL;
+
+	napi_id = nla_get_u32(info->attrs[NETDEV_A_NAPI_ID]);
+
+	rtnl_lock();
+
+	napi = napi_by_id(napi_id);
+	if (napi) {
+		err = netdev_nl_napi_set_config(napi, info);
+	} else {
+		NL_SET_BAD_ATTR(info->extack, info->attrs[NETDEV_A_NAPI_ID]);
+		err = -ENOENT;
+	}
+
+	rtnl_unlock();
+
+	return err;
+}
+
 static int
 netdev_nl_queue_fill_one(struct sk_buff *rsp, struct net_device *netdev,
 			 u32 q_idx, u32 q_type, const struct genl_info *info)
diff --git a/tools/include/uapi/linux/netdev.h b/tools/include/uapi/linux/netdev.h
index b088a34e9254..4c5bfbc85504 100644
--- a/tools/include/uapi/linux/netdev.h
+++ b/tools/include/uapi/linux/netdev.h
@@ -188,6 +188,7 @@ enum {
 	NETDEV_CMD_QUEUE_GET,
 	NETDEV_CMD_NAPI_GET,
 	NETDEV_CMD_QSTATS_GET,
+	NETDEV_CMD_NAPI_SET,
 
 	__NETDEV_CMD_MAX,
 	NETDEV_CMD_MAX = (__NETDEV_CMD_MAX - 1)
--
2.25.1