* [PATCH net-next 0/2] nfp: add flow-steering support
From: Louis Peens @ 2023-11-17 7:11 UTC
To: David Miller, Jakub Kicinski, Paolo Abeni
Cc: Yinjun Zhang, netdev, oss-drivers
This short series adds flow steering support for the nfp driver.
The first patch adds the ethtool callbacks but stubs out the HW
offload parts. The second patch implements the HW communication and
offloads flow steering.
After this series users can use 'ethtool -N/-n' to configure and
display rx classification rules.
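For example, rules can be added and listed like this (interface name,
address and queue number below are purely illustrative):

  # steer TCP traffic for 192.168.0.10:80 to rx queue 2
  ethtool -N eth0 flow-type tcp4 dst-ip 192.168.0.10 dst-port 80 action 2

  # list the configured rx classification rules
  ethtool -n eth0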
Yinjun Zhang (2):
nfp: add ethtool flow steering callbacks
nfp: offload flow steering to the nfp
drivers/net/ethernet/netronome/nfp/nfp_net.h | 36 ++
.../ethernet/netronome/nfp/nfp_net_common.c | 183 +++++++++
.../net/ethernet/netronome/nfp/nfp_net_ctrl.h | 15 +
.../ethernet/netronome/nfp/nfp_net_ethtool.c | 369 ++++++++++++++++++
4 files changed, 603 insertions(+)
--
2.34.1
* [PATCH net-next 1/2] nfp: add ethtool flow steering callbacks
From: Louis Peens @ 2023-11-17 7:11 UTC
To: David Miller, Jakub Kicinski, Paolo Abeni
Cc: Yinjun Zhang, netdev, oss-drivers
From: Yinjun Zhang <yinjun.zhang@corigine.com>
This is the first part of implementing flow steering. It handles the
communication between ethtool and the driver. Users can use the
following commands to display and set flows:
ethtool -n <netdev>
ethtool -N <netdev> flow-type ...
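A concrete example (interface name and values are illustrative only):

  # drop UDP packets with destination port 9000, placing the rule at location 3
  ethtool -N eth0 flow-type udp4 dst-port 9000 action -1 loc 3

  # show the rule at location 3, then remove it again
  ethtool -n eth0 rule 3
  ethtool -N eth0 delete 3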
Signed-off-by: Yinjun Zhang <yinjun.zhang@corigine.com>
Signed-off-by: Louis Peens <louis.peens@corigine.com>
---
drivers/net/ethernet/netronome/nfp/nfp_net.h | 36 ++
.../ethernet/netronome/nfp/nfp_net_common.c | 24 ++
.../net/ethernet/netronome/nfp/nfp_net_ctrl.h | 1 +
.../ethernet/netronome/nfp/nfp_net_ethtool.c | 369 ++++++++++++++++++
4 files changed, 430 insertions(+)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index 939cfce15830..bd0e26524417 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -621,6 +621,9 @@ struct nfp_net_dp {
* @mbox_amsg.lock: Protect message list
* @mbox_amsg.list: List of message to process
* @mbox_amsg.work: Work to process message asynchronously
+ * @fs: Flow steering
+ * @fs.count: Flow count
+ * @fs.list: List of flows
* @app_priv: APP private data for this vNIC
*/
struct nfp_net {
@@ -728,9 +731,39 @@ struct nfp_net {
struct work_struct work;
} mbox_amsg;
+ struct {
+ u16 count;
+ struct list_head list;
+ } fs;
+
void *app_priv;
};
+struct nfp_fs_entry {
+ struct list_head node;
+ u32 flow_type;
+ u32 loc;
+ struct {
+ union {
+ struct {
+ __be32 sip4;
+ __be32 dip4;
+ };
+ struct {
+ __be32 sip6[4];
+ __be32 dip6[4];
+ };
+ };
+ union {
+ __be16 l3_proto;
+ u8 l4_proto;
+ };
+ __be16 sport;
+ __be16 dport;
+ } key, msk;
+ u64 action;
+};
+
struct nfp_mbox_amsg_entry {
struct list_head list;
int (*cfg)(struct nfp_net *nn, struct nfp_mbox_amsg_entry *entry);
@@ -987,6 +1020,9 @@ struct nfp_net_dp *nfp_net_clone_dp(struct nfp_net *nn);
int nfp_net_ring_reconfig(struct nfp_net *nn, struct nfp_net_dp *new,
struct netlink_ext_ack *extack);
+int nfp_net_fs_add_hw(struct nfp_net *nn, struct nfp_fs_entry *entry);
+int nfp_net_fs_del_hw(struct nfp_net *nn, struct nfp_fs_entry *entry);
+
#ifdef CONFIG_NFP_DEBUG
void nfp_net_debugfs_create(void);
void nfp_net_debugfs_destroy(void);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index de0a5d5ded30..12eda2c2ac23 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -1763,6 +1763,27 @@ nfp_net_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
return nfp_net_mbox_reconfig_and_unlock(nn, cmd);
}
+int nfp_net_fs_add_hw(struct nfp_net *nn, struct nfp_fs_entry *entry)
+{
+ return -EOPNOTSUPP;
+}
+
+int nfp_net_fs_del_hw(struct nfp_net *nn, struct nfp_fs_entry *entry)
+{
+ return -EOPNOTSUPP;
+}
+
+static void nfp_net_fs_clean(struct nfp_net *nn)
+{
+ struct nfp_fs_entry *entry, *tmp;
+
+ list_for_each_entry_safe(entry, tmp, &nn->fs.list, node) {
+ nfp_net_fs_del_hw(nn, entry);
+ list_del(&entry->node);
+ kfree(entry);
+ }
+}
+
static void nfp_net_stat64(struct net_device *netdev,
struct rtnl_link_stats64 *stats)
{
@@ -2740,6 +2761,8 @@ int nfp_net_init(struct nfp_net *nn)
INIT_LIST_HEAD(&nn->mbox_amsg.list);
INIT_WORK(&nn->mbox_amsg.work, nfp_net_mbox_amsg_work);
+ INIT_LIST_HEAD(&nn->fs.list);
+
return register_netdev(nn->dp.netdev);
err_clean_mbox:
@@ -2759,6 +2782,7 @@ void nfp_net_clean(struct nfp_net *nn)
unregister_netdev(nn->dp.netdev);
nfp_net_ipsec_clean(nn);
nfp_ccm_mbox_clean(nn);
+ nfp_net_fs_clean(nn);
flush_work(&nn->mbox_amsg.work);
nfp_net_reconfig_wait_posted(nn);
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
index 3e63f6d6a563..515472924a5d 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
@@ -269,6 +269,7 @@
#define NFP_NET_CFG_CTRL_IPSEC (0x1 << 1) /* IPsec offload */
#define NFP_NET_CFG_CTRL_MCAST_FILTER (0x1 << 2) /* Multicast Filter */
#define NFP_NET_CFG_CTRL_FREELIST_EN (0x1 << 6) /* Freelist enable flag bit */
+#define NFP_NET_CFG_CTRL_FLOW_STEER (0x1 << 8) /* Flow steering */
#define NFP_NET_CFG_CAP_WORD1 0x00a4
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index e75cbb287625..d7896391b8ba 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -1317,6 +1317,116 @@ static int nfp_net_get_rss_hash_opts(struct nfp_net *nn,
return 0;
}
+#define NFP_FS_MAX_ENTRY 1024
+
+static int nfp_net_fs_to_ethtool(struct nfp_fs_entry *entry, struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fs = &cmd->fs;
+ unsigned int i;
+
+ switch (entry->flow_type & ~FLOW_RSS) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ fs->h_u.tcp_ip4_spec.ip4src = entry->key.sip4;
+ fs->h_u.tcp_ip4_spec.ip4dst = entry->key.dip4;
+ fs->h_u.tcp_ip4_spec.psrc = entry->key.sport;
+ fs->h_u.tcp_ip4_spec.pdst = entry->key.dport;
+ fs->m_u.tcp_ip4_spec.ip4src = entry->msk.sip4;
+ fs->m_u.tcp_ip4_spec.ip4dst = entry->msk.dip4;
+ fs->m_u.tcp_ip4_spec.psrc = entry->msk.sport;
+ fs->m_u.tcp_ip4_spec.pdst = entry->msk.dport;
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ for (i = 0; i < 4; i++) {
+ fs->h_u.tcp_ip6_spec.ip6src[i] = entry->key.sip6[i];
+ fs->h_u.tcp_ip6_spec.ip6dst[i] = entry->key.dip6[i];
+ fs->m_u.tcp_ip6_spec.ip6src[i] = entry->msk.sip6[i];
+ fs->m_u.tcp_ip6_spec.ip6dst[i] = entry->msk.dip6[i];
+ }
+ fs->h_u.tcp_ip6_spec.psrc = entry->key.sport;
+ fs->h_u.tcp_ip6_spec.pdst = entry->key.dport;
+ fs->m_u.tcp_ip6_spec.psrc = entry->msk.sport;
+ fs->m_u.tcp_ip6_spec.pdst = entry->msk.dport;
+ break;
+ case IPV4_USER_FLOW:
+ fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
+ fs->h_u.usr_ip4_spec.ip4src = entry->key.sip4;
+ fs->h_u.usr_ip4_spec.ip4dst = entry->key.dip4;
+ fs->h_u.usr_ip4_spec.proto = entry->key.l4_proto;
+ fs->m_u.usr_ip4_spec.ip4src = entry->msk.sip4;
+ fs->m_u.usr_ip4_spec.ip4dst = entry->msk.dip4;
+ fs->m_u.usr_ip4_spec.proto = entry->msk.l4_proto;
+ break;
+ case IPV6_USER_FLOW:
+ for (i = 0; i < 4; i++) {
+ fs->h_u.usr_ip6_spec.ip6src[i] = entry->key.sip6[i];
+ fs->h_u.usr_ip6_spec.ip6dst[i] = entry->key.dip6[i];
+ fs->m_u.usr_ip6_spec.ip6src[i] = entry->msk.sip6[i];
+ fs->m_u.usr_ip6_spec.ip6dst[i] = entry->msk.dip6[i];
+ }
+ fs->h_u.usr_ip6_spec.l4_proto = entry->key.l4_proto;
+ fs->m_u.usr_ip6_spec.l4_proto = entry->msk.l4_proto;
+ break;
+ case ETHER_FLOW:
+ fs->h_u.ether_spec.h_proto = entry->key.l3_proto;
+ fs->m_u.ether_spec.h_proto = entry->msk.l3_proto;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ fs->flow_type = entry->flow_type;
+ fs->ring_cookie = entry->action;
+
+ if (fs->flow_type & FLOW_RSS) {
+ /* Only rss_context of 0 is supported. */
+ cmd->rss_context = 0;
+ /* RSS is used, mask the ring. */
+ fs->ring_cookie |= ETHTOOL_RX_FLOW_SPEC_RING;
+ }
+
+ return 0;
+}
+
+static int nfp_net_get_fs_rule(struct nfp_net *nn, struct ethtool_rxnfc *cmd)
+{
+ struct nfp_fs_entry *entry;
+
+ if (!(nn->cap_w1 & NFP_NET_CFG_CTRL_FLOW_STEER))
+ return -EOPNOTSUPP;
+
+ if (cmd->fs.location >= NFP_FS_MAX_ENTRY)
+ return -EINVAL;
+
+ list_for_each_entry(entry, &nn->fs.list, node) {
+ if (entry->loc == cmd->fs.location)
+ return nfp_net_fs_to_ethtool(entry, cmd);
+
+ if (entry->loc > cmd->fs.location)
+ /* no need to continue */
+ return -ENOENT;
+ }
+
+ return -ENOENT;
+}
+
+static int nfp_net_get_fs_loc(struct nfp_net *nn, u32 *rule_locs)
+{
+ struct nfp_fs_entry *entry;
+ u32 count = 0;
+
+ if (!(nn->cap_w1 & NFP_NET_CFG_CTRL_FLOW_STEER))
+ return -EOPNOTSUPP;
+
+ list_for_each_entry(entry, &nn->fs.list, node)
+ rule_locs[count++] = entry->loc;
+
+ return 0;
+}
+
static int nfp_net_get_rxnfc(struct net_device *netdev,
struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
@@ -1326,6 +1436,14 @@ static int nfp_net_get_rxnfc(struct net_device *netdev,
case ETHTOOL_GRXRINGS:
cmd->data = nn->dp.num_rx_rings;
return 0;
+ case ETHTOOL_GRXCLSRLCNT:
+ cmd->rule_cnt = nn->fs.count;
+ return 0;
+ case ETHTOOL_GRXCLSRULE:
+ return nfp_net_get_fs_rule(nn, cmd);
+ case ETHTOOL_GRXCLSRLALL:
+ cmd->data = NFP_FS_MAX_ENTRY;
+ return nfp_net_get_fs_loc(nn, rule_locs);
case ETHTOOL_GRXFH:
return nfp_net_get_rss_hash_opts(nn, cmd);
default:
@@ -1385,6 +1503,253 @@ static int nfp_net_set_rss_hash_opt(struct nfp_net *nn,
return 0;
}
+static int nfp_net_fs_from_ethtool(struct nfp_fs_entry *entry, struct ethtool_rx_flow_spec *fs)
+{
+ unsigned int i;
+
+ /* FLOW_EXT/FLOW_MAC_EXT is not supported. */
+ switch (fs->flow_type & ~FLOW_RSS) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ entry->msk.sip4 = fs->m_u.tcp_ip4_spec.ip4src;
+ entry->msk.dip4 = fs->m_u.tcp_ip4_spec.ip4dst;
+ entry->msk.sport = fs->m_u.tcp_ip4_spec.psrc;
+ entry->msk.dport = fs->m_u.tcp_ip4_spec.pdst;
+ entry->key.sip4 = fs->h_u.tcp_ip4_spec.ip4src & entry->msk.sip4;
+ entry->key.dip4 = fs->h_u.tcp_ip4_spec.ip4dst & entry->msk.dip4;
+ entry->key.sport = fs->h_u.tcp_ip4_spec.psrc & entry->msk.sport;
+ entry->key.dport = fs->h_u.tcp_ip4_spec.pdst & entry->msk.dport;
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ for (i = 0; i < 4; i++) {
+ entry->msk.sip6[i] = fs->m_u.tcp_ip6_spec.ip6src[i];
+ entry->msk.dip6[i] = fs->m_u.tcp_ip6_spec.ip6dst[i];
+ entry->key.sip6[i] = fs->h_u.tcp_ip6_spec.ip6src[i] & entry->msk.sip6[i];
+ entry->key.dip6[i] = fs->h_u.tcp_ip6_spec.ip6dst[i] & entry->msk.dip6[i];
+ }
+ entry->msk.sport = fs->m_u.tcp_ip6_spec.psrc;
+ entry->msk.dport = fs->m_u.tcp_ip6_spec.pdst;
+ entry->key.sport = fs->h_u.tcp_ip6_spec.psrc & entry->msk.sport;
+ entry->key.dport = fs->h_u.tcp_ip6_spec.pdst & entry->msk.dport;
+ break;
+ case IPV4_USER_FLOW:
+ entry->msk.sip4 = fs->m_u.usr_ip4_spec.ip4src;
+ entry->msk.dip4 = fs->m_u.usr_ip4_spec.ip4dst;
+ entry->msk.l4_proto = fs->m_u.usr_ip4_spec.proto;
+ entry->key.sip4 = fs->h_u.usr_ip4_spec.ip4src & entry->msk.sip4;
+ entry->key.dip4 = fs->h_u.usr_ip4_spec.ip4dst & entry->msk.dip4;
+ entry->key.l4_proto = fs->h_u.usr_ip4_spec.proto & entry->msk.l4_proto;
+ break;
+ case IPV6_USER_FLOW:
+ for (i = 0; i < 4; i++) {
+ entry->msk.sip6[i] = fs->m_u.usr_ip6_spec.ip6src[i];
+ entry->msk.dip6[i] = fs->m_u.usr_ip6_spec.ip6dst[i];
+ entry->key.sip6[i] = fs->h_u.usr_ip6_spec.ip6src[i] & entry->msk.sip6[i];
+ entry->key.dip6[i] = fs->h_u.usr_ip6_spec.ip6dst[i] & entry->msk.dip6[i];
+ }
+ entry->msk.l4_proto = fs->m_u.usr_ip6_spec.l4_proto;
+ entry->key.l4_proto = fs->h_u.usr_ip6_spec.l4_proto & entry->msk.l4_proto;
+ break;
+ case ETHER_FLOW:
+ entry->msk.l3_proto = fs->m_u.ether_spec.h_proto;
+ entry->key.l3_proto = fs->h_u.ether_spec.h_proto & entry->msk.l3_proto;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (fs->flow_type & ~FLOW_RSS) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ entry->key.l4_proto = IPPROTO_TCP;
+ entry->msk.l4_proto = 0xff;
+ break;
+ case UDP_V4_FLOW:
+ case UDP_V6_FLOW:
+ entry->key.l4_proto = IPPROTO_UDP;
+ entry->msk.l4_proto = 0xff;
+ break;
+ case SCTP_V4_FLOW:
+ case SCTP_V6_FLOW:
+ entry->key.l4_proto = IPPROTO_SCTP;
+ entry->msk.l4_proto = 0xff;
+ break;
+ }
+
+ entry->flow_type = fs->flow_type;
+ entry->action = fs->ring_cookie;
+ entry->loc = fs->location;
+
+ return 0;
+}
+
+static int nfp_net_fs_check_existing(struct nfp_net *nn, struct nfp_fs_entry *new)
+{
+ struct nfp_fs_entry *entry;
+
+ list_for_each_entry(entry, &nn->fs.list, node) {
+ if (new->loc != entry->loc &&
+ !((new->flow_type ^ entry->flow_type) & ~FLOW_RSS) &&
+ !memcmp(&new->key, &entry->key, sizeof(new->key)) &&
+ !memcmp(&new->msk, &entry->msk, sizeof(new->msk)))
+ return entry->loc;
+ }
+
+ /* -1 means no duplicates */
+ return -1;
+}
+
+static int nfp_net_fs_add(struct nfp_net *nn, struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fs = &cmd->fs;
+ struct nfp_fs_entry *new, *entry;
+ bool unsupp_mask;
+ int err, id;
+
+ if (!(nn->cap_w1 & NFP_NET_CFG_CTRL_FLOW_STEER))
+ return -EOPNOTSUPP;
+
+ /* Only default RSS context(0) is supported. */
+ if ((fs->flow_type & FLOW_RSS) && cmd->rss_context)
+ return -EOPNOTSUPP;
+
+ if (fs->location >= NFP_FS_MAX_ENTRY)
+ return -EINVAL;
+
+ if (fs->ring_cookie != RX_CLS_FLOW_DISC &&
+ fs->ring_cookie >= nn->dp.num_rx_rings)
+ return -EINVAL;
+
+ /* FLOW_EXT/FLOW_MAC_EXT is not supported. */
+ switch (fs->flow_type & ~FLOW_RSS) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ unsupp_mask = !!fs->m_u.tcp_ip4_spec.tos;
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ unsupp_mask = !!fs->m_u.tcp_ip6_spec.tclass;
+ break;
+ case IPV4_USER_FLOW:
+ unsupp_mask = !!fs->m_u.usr_ip4_spec.l4_4_bytes ||
+ !!fs->m_u.usr_ip4_spec.tos ||
+ !!fs->m_u.usr_ip4_spec.ip_ver;
+ /* ip_ver must be ETH_RX_NFC_IP4. */
+ unsupp_mask |= fs->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4;
+ break;
+ case IPV6_USER_FLOW:
+ unsupp_mask = !!fs->m_u.usr_ip6_spec.l4_4_bytes ||
+ !!fs->m_u.usr_ip6_spec.tclass;
+ break;
+ case ETHER_FLOW:
+ if (fs->h_u.ether_spec.h_proto == htons(ETH_P_IP) ||
+ fs->h_u.ether_spec.h_proto == htons(ETH_P_IPV6)) {
+ nn_err(nn, "Please use ip4/ip6 flow type instead.\n");
+ return -EOPNOTSUPP;
+ }
+ /* Only unmasked ethtype is supported. */
+ unsupp_mask = !is_zero_ether_addr(fs->m_u.ether_spec.h_dest) ||
+ !is_zero_ether_addr(fs->m_u.ether_spec.h_source) ||
+ (fs->m_u.ether_spec.h_proto != htons(0xffff));
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (unsupp_mask)
+ return -EOPNOTSUPP;
+
+ new = kzalloc(sizeof(*new), GFP_KERNEL);
+ if (!new)
+ return -ENOMEM;
+
+ nfp_net_fs_from_ethtool(new, fs);
+
+ id = nfp_net_fs_check_existing(nn, new);
+ if (id >= 0) {
+ nn_err(nn, "Identical rule is existing in %d.\n", id);
+ err = -EINVAL;
+ goto err;
+ }
+
+ /* Insert to list in ascending order of location. */
+ list_for_each_entry(entry, &nn->fs.list, node) {
+ if (entry->loc == fs->location) {
+ err = nfp_net_fs_del_hw(nn, entry);
+ if (err)
+ goto err;
+
+ nn->fs.count--;
+ err = nfp_net_fs_add_hw(nn, new);
+ if (err)
+ goto err;
+
+ nn->fs.count++;
+ list_replace(&entry->node, &new->node);
+ kfree(entry);
+
+ return 0;
+ }
+
+ if (entry->loc > fs->location)
+ break;
+ }
+
+ if (nn->fs.count == NFP_FS_MAX_ENTRY) {
+ err = -ENOSPC;
+ goto err;
+ }
+
+ err = nfp_net_fs_add_hw(nn, new);
+ if (err)
+ goto err;
+
+ list_add_tail(&new->node, &entry->node);
+ nn->fs.count++;
+
+ return 0;
+
+err:
+ kfree(new);
+ return err;
+}
+
+static int nfp_net_fs_del(struct nfp_net *nn, struct ethtool_rxnfc *cmd)
+{
+ struct nfp_fs_entry *entry;
+ int err;
+
+ if (!(nn->cap_w1 & NFP_NET_CFG_CTRL_FLOW_STEER))
+ return -EOPNOTSUPP;
+
+ if (!nn->fs.count || cmd->fs.location >= NFP_FS_MAX_ENTRY)
+ return -EINVAL;
+
+ list_for_each_entry(entry, &nn->fs.list, node) {
+ if (entry->loc == cmd->fs.location) {
+ err = nfp_net_fs_del_hw(nn, entry);
+ if (err)
+ return err;
+
+ list_del(&entry->node);
+ kfree(entry);
+ nn->fs.count--;
+
+ return 0;
+ } else if (entry->loc > cmd->fs.location) {
+ /* no need to continue */
+ break;
+ }
+ }
+
+ return -ENOENT;
+}
+
static int nfp_net_set_rxnfc(struct net_device *netdev,
struct ethtool_rxnfc *cmd)
{
@@ -1393,6 +1758,10 @@ static int nfp_net_set_rxnfc(struct net_device *netdev,
switch (cmd->cmd) {
case ETHTOOL_SRXFH:
return nfp_net_set_rss_hash_opt(nn, cmd);
+ case ETHTOOL_SRXCLSRLINS:
+ return nfp_net_fs_add(nn, cmd);
+ case ETHTOOL_SRXCLSRLDEL:
+ return nfp_net_fs_del(nn, cmd);
default:
return -EOPNOTSUPP;
}
--
2.34.1
* [PATCH net-next 2/2] nfp: offload flow steering to the nfp
From: Louis Peens @ 2023-11-17 7:11 UTC
To: David Miller, Jakub Kicinski, Paolo Abeni
Cc: Yinjun Zhang, netdev, oss-drivers
From: Yinjun Zhang <yinjun.zhang@corigine.com>
This is the second part of implementing flow steering. The mailbox is
used for communication between the driver and the HW.
Signed-off-by: Yinjun Zhang <yinjun.zhang@corigine.com>
Signed-off-by: Louis Peens <louis.peens@corigine.com>
---
.../ethernet/netronome/nfp/nfp_net_common.c | 163 +++++++++++++++++-
.../net/ethernet/netronome/nfp/nfp_net_ctrl.h | 14 ++
2 files changed, 175 insertions(+), 2 deletions(-)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 12eda2c2ac23..ac1f4514b1d0 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -1763,14 +1763,173 @@ nfp_net_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
return nfp_net_mbox_reconfig_and_unlock(nn, cmd);
}
+static void
+nfp_net_fs_fill_v4(struct nfp_net *nn, struct nfp_fs_entry *entry, u32 op, u32 *addr)
+{
+ unsigned int i;
+
+ union {
+ struct {
+ __be16 loc;
+ u8 k_proto, m_proto;
+ __be32 k_sip, m_sip, k_dip, m_dip;
+ __be16 k_sport, m_sport, k_dport, m_dport;
+ };
+ __be32 val[7];
+ } v4_rule;
+
+ nn_writel(nn, *addr, op);
+ *addr += sizeof(u32);
+
+ v4_rule.loc = cpu_to_be16(entry->loc);
+ v4_rule.k_proto = entry->key.l4_proto;
+ v4_rule.m_proto = entry->msk.l4_proto;
+ v4_rule.k_sip = entry->key.sip4;
+ v4_rule.m_sip = entry->msk.sip4;
+ v4_rule.k_dip = entry->key.dip4;
+ v4_rule.m_dip = entry->msk.dip4;
+ v4_rule.k_sport = entry->key.sport;
+ v4_rule.m_sport = entry->msk.sport;
+ v4_rule.k_dport = entry->key.dport;
+ v4_rule.m_dport = entry->msk.dport;
+
+ for (i = 0; i < ARRAY_SIZE(v4_rule.val); i++, *addr += sizeof(__be32))
+ nn_writel(nn, *addr, be32_to_cpu(v4_rule.val[i]));
+}
+
+static void
+nfp_net_fs_fill_v6(struct nfp_net *nn, struct nfp_fs_entry *entry, u32 op, u32 *addr)
+{
+ unsigned int i;
+
+ union {
+ struct {
+ __be16 loc;
+ u8 k_proto, m_proto;
+ __be32 k_sip[4], m_sip[4], k_dip[4], m_dip[4];
+ __be16 k_sport, m_sport, k_dport, m_dport;
+ };
+ __be32 val[19];
+ } v6_rule;
+
+ nn_writel(nn, *addr, op);
+ *addr += sizeof(u32);
+
+ v6_rule.loc = cpu_to_be16(entry->loc);
+ v6_rule.k_proto = entry->key.l4_proto;
+ v6_rule.m_proto = entry->msk.l4_proto;
+ for (i = 0; i < 4; i++) {
+ v6_rule.k_sip[i] = entry->key.sip6[i];
+ v6_rule.m_sip[i] = entry->msk.sip6[i];
+ v6_rule.k_dip[i] = entry->key.dip6[i];
+ v6_rule.m_dip[i] = entry->msk.dip6[i];
+ }
+ v6_rule.k_sport = entry->key.sport;
+ v6_rule.m_sport = entry->msk.sport;
+ v6_rule.k_dport = entry->key.dport;
+ v6_rule.m_dport = entry->msk.dport;
+
+ for (i = 0; i < ARRAY_SIZE(v6_rule.val); i++, *addr += sizeof(__be32))
+ nn_writel(nn, *addr, be32_to_cpu(v6_rule.val[i]));
+}
+
+#define NFP_FS_QUEUE_ID GENMASK(22, 16)
+#define NFP_FS_ACT GENMASK(15, 0)
+#define NFP_FS_ACT_DROP BIT(0)
+#define NFP_FS_ACT_Q BIT(1)
+static void
+nfp_net_fs_fill_act(struct nfp_net *nn, struct nfp_fs_entry *entry, u32 addr)
+{
+ u32 action = 0; /* 0 means default passthrough */
+
+ if (entry->action == RX_CLS_FLOW_DISC)
+ action = NFP_FS_ACT_DROP;
+ else if (!(entry->flow_type & FLOW_RSS))
+ action = FIELD_PREP(NFP_FS_QUEUE_ID, entry->action) | NFP_FS_ACT_Q;
+
+ nn_writel(nn, addr, action);
+}
+
int nfp_net_fs_add_hw(struct nfp_net *nn, struct nfp_fs_entry *entry)
{
- return -EOPNOTSUPP;
+ u32 addr = nn->tlv_caps.mbox_off + NFP_NET_CFG_MBOX_SIMPLE_VAL;
+ int err;
+
+ err = nfp_net_mbox_lock(nn, NFP_NET_CFG_FS_SZ);
+ if (err)
+ return err;
+
+ switch (entry->flow_type & ~FLOW_RSS) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ case IPV4_USER_FLOW:
+ nfp_net_fs_fill_v4(nn, entry, NFP_NET_CFG_MBOX_CMD_FS_ADD_V4, &addr);
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ case IPV6_USER_FLOW:
+ nfp_net_fs_fill_v6(nn, entry, NFP_NET_CFG_MBOX_CMD_FS_ADD_V6, &addr);
+ break;
+ case ETHER_FLOW:
+ nn_writel(nn, addr, NFP_NET_CFG_MBOX_CMD_FS_ADD_ETHTYPE);
+ addr += sizeof(u32);
+ nn_writew(nn, addr, be16_to_cpu(entry->key.l3_proto));
+ addr += sizeof(u32);
+ break;
+ }
+
+ nfp_net_fs_fill_act(nn, entry, addr);
+
+ err = nfp_net_mbox_reconfig_and_unlock(nn, NFP_NET_CFG_MBOX_CMD_FLOW_STEER);
+ if (err) {
+ nn_err(nn, "Add new fs rule failed with %d\n", err);
+ return -EIO;
+ }
+
+ return 0;
}
int nfp_net_fs_del_hw(struct nfp_net *nn, struct nfp_fs_entry *entry)
{
- return -EOPNOTSUPP;
+ u32 addr = nn->tlv_caps.mbox_off + NFP_NET_CFG_MBOX_SIMPLE_VAL;
+ int err;
+
+ err = nfp_net_mbox_lock(nn, NFP_NET_CFG_FS_SZ);
+ if (err)
+ return err;
+
+ switch (entry->flow_type & ~FLOW_RSS) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ case IPV4_USER_FLOW:
+ nfp_net_fs_fill_v4(nn, entry, NFP_NET_CFG_MBOX_CMD_FS_DEL_V4, &addr);
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ case IPV6_USER_FLOW:
+ nfp_net_fs_fill_v6(nn, entry, NFP_NET_CFG_MBOX_CMD_FS_DEL_V6, &addr);
+ break;
+ case ETHER_FLOW:
+ nn_writel(nn, addr, NFP_NET_CFG_MBOX_CMD_FS_DEL_ETHTYPE);
+ addr += sizeof(u32);
+ nn_writew(nn, addr, be16_to_cpu(entry->key.l3_proto));
+ addr += sizeof(u32);
+ break;
+ }
+
+ nfp_net_fs_fill_act(nn, entry, addr);
+
+ err = nfp_net_mbox_reconfig_and_unlock(nn, NFP_NET_CFG_MBOX_CMD_FLOW_STEER);
+ if (err) {
+ nn_err(nn, "Delete fs rule failed with %d\n", err);
+ return -EIO;
+ }
+
+ return 0;
}
static void nfp_net_fs_clean(struct nfp_net *nn)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
index 515472924a5d..eaf4d3c499d1 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
@@ -419,6 +419,8 @@
#define NFP_NET_CFG_MBOX_CMD_MULTICAST_ADD 8
#define NFP_NET_CFG_MBOX_CMD_MULTICAST_DEL 9
+#define NFP_NET_CFG_MBOX_CMD_FLOW_STEER 10
+
/* VLAN filtering using general use mailbox
* %NFP_NET_CFG_VLAN_FILTER: Base address of VLAN filter mailbox
* %NFP_NET_CFG_VLAN_FILTER_VID: VLAN ID to filter
@@ -441,6 +443,18 @@
#define NFP_NET_CFG_MULTICAST_MAC_LO (NFP_NET_CFG_MULTICAST + 6)
#define NFP_NET_CFG_MULTICAST_SZ 0x0006
+/* Max size of FS rules in bytes */
+#define NFP_NET_CFG_FS_SZ 0x0054
+/* Sub commands for FS */
+enum {
+ NFP_NET_CFG_MBOX_CMD_FS_ADD_V4,
+ NFP_NET_CFG_MBOX_CMD_FS_DEL_V4,
+ NFP_NET_CFG_MBOX_CMD_FS_ADD_V6,
+ NFP_NET_CFG_MBOX_CMD_FS_DEL_V6,
+ NFP_NET_CFG_MBOX_CMD_FS_ADD_ETHTYPE,
+ NFP_NET_CFG_MBOX_CMD_FS_DEL_ETHTYPE,
+};
+
/* TLV capabilities
* %NFP_NET_CFG_TLV_TYPE: Offset of type within the TLV
* %NFP_NET_CFG_TLV_TYPE_REQUIRED: Driver must be able to parse the TLV
--
2.34.1
* Re: [PATCH net-next 1/2] nfp: add ethtool flow steering callbacks
From: Simon Horman @ 2023-11-20 9:43 UTC
To: Louis Peens
Cc: David Miller, Jakub Kicinski, Paolo Abeni, Yinjun Zhang, netdev,
oss-drivers
On Fri, Nov 17, 2023 at 09:11:13AM +0200, Louis Peens wrote:
> From: Yinjun Zhang <yinjun.zhang@corigine.com>
>
> This is the first part of implementing flow steering. It handles the
> communication between ethtool and the driver. Users can use the
> following commands to display and set flows:
>
> ethtool -n <netdev>
> ethtool -N <netdev> flow-type ...
>
> Signed-off-by: Yinjun Zhang <yinjun.zhang@corigine.com>
> Signed-off-by: Louis Peens <louis.peens@corigine.com>
Thanks Yinjun and Louis,
The minor suggestion provided inline notwithstanding, this looks good to me.
Reviewed-by: Simon Horman <horms@kernel.org>
...
> diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
> index e75cbb287625..d7896391b8ba 100644
> --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
> +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
> @@ -1317,6 +1317,116 @@ static int nfp_net_get_rss_hash_opts(struct nfp_net *nn,
> return 0;
> }
>
> +#define NFP_FS_MAX_ENTRY 1024
> +
> +static int nfp_net_fs_to_ethtool(struct nfp_fs_entry *entry, struct ethtool_rxnfc *cmd)
> +{
> + struct ethtool_rx_flow_spec *fs = &cmd->fs;
> + unsigned int i;
> +
> + switch (entry->flow_type & ~FLOW_RSS) {
> + case TCP_V4_FLOW:
> + case UDP_V4_FLOW:
> + case SCTP_V4_FLOW:
> + fs->h_u.tcp_ip4_spec.ip4src = entry->key.sip4;
> + fs->h_u.tcp_ip4_spec.ip4dst = entry->key.dip4;
> + fs->h_u.tcp_ip4_spec.psrc = entry->key.sport;
> + fs->h_u.tcp_ip4_spec.pdst = entry->key.dport;
> + fs->m_u.tcp_ip4_spec.ip4src = entry->msk.sip4;
> + fs->m_u.tcp_ip4_spec.ip4dst = entry->msk.dip4;
> + fs->m_u.tcp_ip4_spec.psrc = entry->msk.sport;
> + fs->m_u.tcp_ip4_spec.pdst = entry->msk.dport;
> + break;
> + case TCP_V6_FLOW:
> + case UDP_V6_FLOW:
> + case SCTP_V6_FLOW:
> + for (i = 0; i < 4; i++) {
> + fs->h_u.tcp_ip6_spec.ip6src[i] = entry->key.sip6[i];
> + fs->h_u.tcp_ip6_spec.ip6dst[i] = entry->key.dip6[i];
> + fs->m_u.tcp_ip6_spec.ip6src[i] = entry->msk.sip6[i];
> + fs->m_u.tcp_ip6_spec.ip6dst[i] = entry->msk.dip6[i];
> + }
I think the above loop can be expressed more succinctly using a single
memcpy(), for which I do see precedent in Intel drivers. Likewise
elsewhere in this patch-set.
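E.g. something along these lines (untested sketch, one memcpy() per
address array, field names taken from the patch above):

	/* Illustrative only, not compile-tested. */
	memcpy(fs->h_u.tcp_ip6_spec.ip6src, entry->key.sip6,
	       sizeof(entry->key.sip6));
	memcpy(fs->h_u.tcp_ip6_spec.ip6dst, entry->key.dip6,
	       sizeof(entry->key.dip6));
	memcpy(fs->m_u.tcp_ip6_spec.ip6src, entry->msk.sip6,
	       sizeof(entry->msk.sip6));
	memcpy(fs->m_u.tcp_ip6_spec.ip6dst, entry->msk.dip6,
	       sizeof(entry->msk.dip6));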
I don't feel strongly about this, so feel free to take this suggestion,
defer it to later, or dismiss it entirely.
> + fs->h_u.tcp_ip6_spec.psrc = entry->key.sport;
> + fs->h_u.tcp_ip6_spec.pdst = entry->key.dport;
> + fs->m_u.tcp_ip6_spec.psrc = entry->msk.sport;
> + fs->m_u.tcp_ip6_spec.pdst = entry->msk.dport;
> + break;
...
* Re: [PATCH net-next 2/2] nfp: offload flow steering to the nfp
From: Simon Horman @ 2023-11-20 9:44 UTC
To: Louis Peens
Cc: David Miller, Jakub Kicinski, Paolo Abeni, Yinjun Zhang, netdev,
oss-drivers
On Fri, Nov 17, 2023 at 09:11:14AM +0200, Louis Peens wrote:
> From: Yinjun Zhang <yinjun.zhang@corigine.com>
>
> This is the second part of implementing flow steering. The mailbox is
> used for communication between the driver and the HW.
>
> Signed-off-by: Yinjun Zhang <yinjun.zhang@corigine.com>
> Signed-off-by: Louis Peens <louis.peens@corigine.com>
Thanks Yinjun and Louis,
The minor suggestion provided in my response to patch 1/2 notwithstanding,
this looks good to me.
Reviewed-by: Simon Horman <horms@kernel.org>
...
* RE: [PATCH net-next 1/2] nfp: add ethtool flow steering callbacks
From: Yinjun Zhang @ 2023-11-20 10:18 UTC
To: Simon Horman, Louis Peens
Cc: David Miller, Jakub Kicinski, Paolo Abeni, netdev@vger.kernel.org,
oss-drivers
On Monday, November 20, 2023 5:43 PM, Simon Horman wrote:
<...>
> > + case TCP_V6_FLOW:
> > + case UDP_V6_FLOW:
> > + case SCTP_V6_FLOW:
> > + for (i = 0; i < 4; i++) {
> > + fs->h_u.tcp_ip6_spec.ip6src[i] = entry->key.sip6[i];
> > + fs->h_u.tcp_ip6_spec.ip6dst[i] = entry->key.dip6[i];
> > + fs->m_u.tcp_ip6_spec.ip6src[i] = entry->msk.sip6[i];
> > + fs->m_u.tcp_ip6_spec.ip6dst[i] = entry->msk.dip6[i];
> > + }
>
> I think the above loop can be expressed more succinctly using a single
> memcpy(), for which I do see precedent in Intel drivers. Likewise
> elsewhere in this patch-set.
>
> I don't feel strongly about this, so feel free to take this suggestion,
> defer it to later, or dismiss it entirely.
Thanks Simon. Louis had the same suggestion about this part. But
since we have similar code below:
```
for (i = 0; i < 4; i++) {
entry->msk.sip6[i] = fs->m_u.tcp_ip6_spec.ip6src[i];
entry->msk.dip6[i] = fs->m_u.tcp_ip6_spec.ip6dst[i];
entry->key.sip6[i] = fs->h_u.tcp_ip6_spec.ip6src[i] & entry->msk.sip6[i];
entry->key.dip6[i] = fs->h_u.tcp_ip6_spec.ip6dst[i] & entry->msk.dip6[i];
}
```
which cannot be replaced by `memcpy`, I decided to leave them as
they are to keep consistency.
So if you don't feel strongly and nobody else objects, I'll leave it.
>
> > + fs->h_u.tcp_ip6_spec.psrc = entry->key.sport;
> > + fs->h_u.tcp_ip6_spec.pdst = entry->key.dport;
> > + fs->m_u.tcp_ip6_spec.psrc = entry->msk.sport;
> > + fs->m_u.tcp_ip6_spec.pdst = entry->msk.dport;
> > + break;
>
> ...
* Re: [PATCH net-next 1/2] nfp: add ethtool flow steering callbacks
From: Simon Horman @ 2023-11-20 10:52 UTC
To: Yinjun Zhang
Cc: Louis Peens, David Miller, Jakub Kicinski, Paolo Abeni,
netdev@vger.kernel.org, oss-drivers
On Mon, Nov 20, 2023 at 10:18:13AM +0000, Yinjun Zhang wrote:
> On Monday, November 20, 2023 5:43 PM, Simon Horman wrote:
> <...>
> > > + case TCP_V6_FLOW:
> > > + case UDP_V6_FLOW:
> > > + case SCTP_V6_FLOW:
> > > + for (i = 0; i < 4; i++) {
> > > + fs->h_u.tcp_ip6_spec.ip6src[i] = entry->key.sip6[i];
> > > + fs->h_u.tcp_ip6_spec.ip6dst[i] = entry->key.dip6[i];
> > > + fs->m_u.tcp_ip6_spec.ip6src[i] = entry->msk.sip6[i];
> > > + fs->m_u.tcp_ip6_spec.ip6dst[i] = entry->msk.dip6[i];
> > > + }
> >
> > I think the above loop can be expressed more succinctly using a single
> > memcpy(), for which I do see precedent in Intel drivers. Likewise
> > elsewhere in this patch-set.
> >
> > I don't feel strongly about this, so feel free to take this suggestion,
> > defer it to later, or dismiss it entirely.
>
> Thanks Simon. Louis had the same suggestion about this part. But
> since we have similar code below:
> ```
> for (i = 0; i < 4; i++) {
> entry->msk.sip6[i] = fs->m_u.tcp_ip6_spec.ip6src[i];
> entry->msk.dip6[i] = fs->m_u.tcp_ip6_spec.ip6dst[i];
> entry->key.sip6[i] = fs->h_u.tcp_ip6_spec.ip6src[i] & entry->msk.sip6[i];
> entry->key.dip6[i] = fs->h_u.tcp_ip6_spec.ip6dst[i] & entry->msk.dip6[i];
> }
> ```
> which cannot be replaced by `memcpy`, I decided to leave them as
> they are to keep consistency.
> So if you don't feel strongly and nobody else objects, I'll leave it.
Hi Yinjun,
thanks for the clarification. I agree that we can leave this
if nobody else objects.
>
> >
> > > + fs->h_u.tcp_ip6_spec.psrc = entry->key.sport;
> > > + fs->h_u.tcp_ip6_spec.pdst = entry->key.dport;
> > > + fs->m_u.tcp_ip6_spec.psrc = entry->msk.sport;
> > > + fs->m_u.tcp_ip6_spec.pdst = entry->msk.dport;
> > > + break;
> >
> > ...
* Re: [PATCH net-next 0/2] nfp: add flow-steering support
From: patchwork-bot+netdevbpf @ 2023-11-21 2:10 UTC
To: Louis Peens; +Cc: davem, kuba, pabeni, yinjun.zhang, netdev, oss-drivers
Hello:
This series was applied to netdev/net-next.git (main)
by Jakub Kicinski <kuba@kernel.org>:
On Fri, 17 Nov 2023 09:11:12 +0200 you wrote:
> This short series adds flow steering support for the nfp driver.
> The first patch adds the ethtool callbacks but stubs out the HW
> offload parts. The second patch implements the HW communication and
> offloads flow steering.
>
> After this series users can use 'ethtool -N/-n' to configure and
> display rx classification rules.
>
> [...]
Here is the summary with links:
- [net-next,1/2] nfp: add ethtool flow steering callbacks
https://git.kernel.org/netdev/net-next/c/9eb03bb1c035
- [net-next,2/2] nfp: offload flow steering to the nfp
https://git.kernel.org/netdev/net-next/c/c38fb3dcd53d
You are awesome, thank you!
--
Deet-doot-dot, I am a bot.
https://korg.docs.kernel.org/patchwork/pwbot.html