From: Pablo Neira Ayuso <pablo@netfilter.org>
To: netdev@vger.kernel.org
Cc: davem@davemloft.net, thomas.lendacky@amd.com,
	f.fainelli@gmail.com, ariel.elior@cavium.com,
	michael.chan@broadcom.com, santosh@chelsio.com,
	madalin.bucur@nxp.com, yisen.zhuang@huawei.com,
	salil.mehta@huawei.com, jeffrey.t.kirsher@intel.com,
	tariqt@mellanox.com, saeedm@mellanox.com, jiri@mellanox.com,
	idosch@mellanox.com, jakub.kicinski@netronome.com,
	peppe.cavallaro@st.com, grygorii.strashko@ti.com, andrew@lunn.ch,
	vivien.didelot@savoirfairelinux.com, alexandre.torgue@st.com,
	joabreu@synopsys.com, linux-net-drivers@solarflare.com,
	ganeshgr@chelsio.com, ogerlitz@mellanox.com,
	Manish.Chopra@cavium.com
Subject: [PATCH net-next,v3 10/12] dsa: bcm_sf2: use flow_rule infrastructure
Date: Wed, 21 Nov 2018 03:51:30 +0100
Message-ID: <20181121025132.14305-11-pablo@netfilter.org>
In-Reply-To: <20181121025132.14305-1-pablo@netfilter.org>

Update this driver to use the flow_rule infrastructure, so that the same
code can be used to populate the hardware intermediate representation from
both the ethtool_rx_flow_spec and cls_flower interfaces.
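
The conversion boils down to translating the ethtool_rx_flow_spec into a
flow_rule and then reading the selectors back through the flow_match_*
helpers. As a condensed sketch of the IPv4 path (error handling elided,
see the diff below for the complete version):

    struct ethtool_rx_flow_rule *flow;
    struct flow_match_ipv4_addrs ipv4;
    struct flow_match_ports ports;

    flow = ethtool_rx_flow_rule_alloc(fs);
    if (!flow)
            return -ENOMEM;

    flow_rule_match_ipv4_addrs(flow->rule, &ipv4);
    flow_rule_match_ports(flow->rule, &ports);

    /* ipv4.key/mask and ports.key/mask now carry the same selectors
     * that cls_flower hands to drivers, so the CFP slicing code can
     * consume either source.
     */
    bcm_sf2_cfp_slice_ipv4(priv, ipv4.key, ports.key, slice_num, false);
    bcm_sf2_cfp_slice_ipv4(priv, ipv4.mask, ports.mask, SLICE_NUM_MASK, true);

The temporary rule is released with ethtool_rx_flow_rule_free() on the
error paths.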

Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
---
v3: adapt it to use new ethtool_rx_flow_rule_alloc()

 drivers/net/dsa/bcm_sf2_cfp.c | 109 +++++++++++++++++++++++++++---------------
 1 file changed, 71 insertions(+), 38 deletions(-)

diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c
index e14663ab6dbc..3bdc65fe8408 100644
--- a/drivers/net/dsa/bcm_sf2_cfp.c
+++ b/drivers/net/dsa/bcm_sf2_cfp.c
@@ -16,6 +16,7 @@
 #include <linux/netdevice.h>
 #include <net/dsa.h>
 #include <linux/bitmap.h>
+#include <net/flow_offload.h>
 
 #include "bcm_sf2.h"
 #include "bcm_sf2_regs.h"
@@ -257,7 +258,8 @@ static int bcm_sf2_cfp_act_pol_set(struct bcm_sf2_priv *priv,
 }
 
 static void bcm_sf2_cfp_slice_ipv4(struct bcm_sf2_priv *priv,
-				   struct ethtool_tcpip4_spec *v4_spec,
+				   struct flow_dissector_key_ipv4_addrs *addrs,
+				   struct flow_dissector_key_ports *ports,
 				   unsigned int slice_num,
 				   bool mask)
 {
@@ -278,7 +280,7 @@ static void bcm_sf2_cfp_slice_ipv4(struct bcm_sf2_priv *priv,
 	 * UDF_n_A6		[23:8]
 	 * UDF_n_A5		[7:0]
 	 */
-	reg = be16_to_cpu(v4_spec->pdst) >> 8;
+	reg = be16_to_cpu(ports->dst) >> 8;
 	if (mask)
 		offset = CORE_CFP_MASK_PORT(3);
 	else
@@ -289,9 +291,9 @@ static void bcm_sf2_cfp_slice_ipv4(struct bcm_sf2_priv *priv,
 	 * UDF_n_A4		[23:8]
 	 * UDF_n_A3		[7:0]
 	 */
-	reg = (be16_to_cpu(v4_spec->pdst) & 0xff) << 24 |
-	      (u32)be16_to_cpu(v4_spec->psrc) << 8 |
-	      (be32_to_cpu(v4_spec->ip4dst) & 0x0000ff00) >> 8;
+	reg = (be16_to_cpu(ports->dst) & 0xff) << 24 |
+	      (u32)be16_to_cpu(ports->src) << 8 |
+	      (be32_to_cpu(addrs->dst) & 0x0000ff00) >> 8;
 	if (mask)
 		offset = CORE_CFP_MASK_PORT(2);
 	else
@@ -302,9 +304,9 @@ static void bcm_sf2_cfp_slice_ipv4(struct bcm_sf2_priv *priv,
 	 * UDF_n_A2		[23:8]
 	 * UDF_n_A1		[7:0]
 	 */
-	reg = (u32)(be32_to_cpu(v4_spec->ip4dst) & 0xff) << 24 |
-	      (u32)(be32_to_cpu(v4_spec->ip4dst) >> 16) << 8 |
-	      (be32_to_cpu(v4_spec->ip4src) & 0x0000ff00) >> 8;
+	reg = (u32)(be32_to_cpu(addrs->dst) & 0xff) << 24 |
+	      (u32)(be32_to_cpu(addrs->dst) >> 16) << 8 |
+	      (be32_to_cpu(addrs->src) & 0x0000ff00) >> 8;
 	if (mask)
 		offset = CORE_CFP_MASK_PORT(1);
 	else
@@ -317,8 +319,8 @@ static void bcm_sf2_cfp_slice_ipv4(struct bcm_sf2_priv *priv,
 	 * Slice ID		[3:2]
 	 * Slice valid		[1:0]
 	 */
-	reg = (u32)(be32_to_cpu(v4_spec->ip4src) & 0xff) << 24 |
-	      (u32)(be32_to_cpu(v4_spec->ip4src) >> 16) << 8 |
+	reg = (u32)(be32_to_cpu(addrs->src) & 0xff) << 24 |
+	      (u32)(be32_to_cpu(addrs->src) >> 16) << 8 |
 	      SLICE_NUM(slice_num) | SLICE_VALID;
 	if (mask)
 		offset = CORE_CFP_MASK_PORT(0);
@@ -332,9 +334,13 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
 				     unsigned int queue_num,
 				     struct ethtool_rx_flow_spec *fs)
 {
-	struct ethtool_tcpip4_spec *v4_spec, *v4_m_spec;
 	const struct cfp_udf_layout *layout;
 	unsigned int slice_num, rule_index;
+	struct ethtool_rx_flow_rule *flow;
+	struct flow_match_ipv4_addrs ipv4;
+	struct flow_match_ports ports;
+	struct flow_match_basic basic;
+	struct flow_match_ip ip;
 	u8 ip_proto, ip_frag;
 	u8 num_udf;
 	u32 reg;
@@ -343,13 +349,9 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
 	switch (fs->flow_type & ~FLOW_EXT) {
 	case TCP_V4_FLOW:
 		ip_proto = IPPROTO_TCP;
-		v4_spec = &fs->h_u.tcp_ip4_spec;
-		v4_m_spec = &fs->m_u.tcp_ip4_spec;
 		break;
 	case UDP_V4_FLOW:
 		ip_proto = IPPROTO_UDP;
-		v4_spec = &fs->h_u.udp_ip4_spec;
-		v4_m_spec = &fs->m_u.udp_ip4_spec;
 		break;
 	default:
 		return -EINVAL;
@@ -367,11 +369,22 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
 	if (rule_index > bcm_sf2_cfp_rule_size(priv))
 		return -ENOSPC;
 
+	flow = ethtool_rx_flow_rule_alloc(fs);
+	if (!flow)
+		return -ENOMEM;
+
+	flow_rule_match_ipv4_addrs(flow->rule, &ipv4);
+	flow_rule_match_ports(flow->rule, &ports);
+	flow_rule_match_basic(flow->rule, &basic);
+	flow_rule_match_ip(flow->rule, &ip);
+
 	layout = &udf_tcpip4_layout;
 	/* We only use one UDF slice for now */
 	slice_num = bcm_sf2_get_slice_number(layout, 0);
-	if (slice_num == UDF_NUM_SLICES)
-		return -EINVAL;
+	if (slice_num == UDF_NUM_SLICES) {
+		ret = -EINVAL;
+		goto out_err_flow_rule;
+	}
 
 	num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices);
 
@@ -398,9 +411,10 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
 	 * Reserved		[1]
 	 * UDF_Valid[8]		[0]
 	 */
-	core_writel(priv, v4_spec->tos << IPTOS_SHIFT |
-		    ip_proto << IPPROTO_SHIFT | ip_frag << IP_FRAG_SHIFT |
-		    udf_upper_bits(num_udf),
+	core_writel(priv, ip.key->tos << IPTOS_SHIFT |
+			  basic.key->ip_proto << IPPROTO_SHIFT |
+			  ip_frag << IP_FRAG_SHIFT |
+			  udf_upper_bits(num_udf),
 		    CORE_CFP_DATA_PORT(6));
 
 	/* Mask with the specific layout for IPv4 packets */
@@ -417,8 +431,8 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
 	core_writel(priv, udf_lower_bits(num_udf) << 24, CORE_CFP_MASK_PORT(5));
 
 	/* Program the match and the mask */
-	bcm_sf2_cfp_slice_ipv4(priv, v4_spec, slice_num, false);
-	bcm_sf2_cfp_slice_ipv4(priv, v4_m_spec, SLICE_NUM_MASK, true);
+	bcm_sf2_cfp_slice_ipv4(priv, ipv4.key, ports.key, slice_num, false);
+	bcm_sf2_cfp_slice_ipv4(priv, ipv4.mask, ports.mask, SLICE_NUM_MASK, true);
 
 	/* Insert into TCAM now */
 	bcm_sf2_cfp_rule_addr_set(priv, rule_index);
@@ -426,14 +440,14 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
 	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
 	if (ret) {
 		pr_err("TCAM entry at addr %d failed\n", rule_index);
-		return ret;
+		goto out_err_flow_rule;
 	}
 
 	/* Insert into Action and policer RAMs now */
 	ret = bcm_sf2_cfp_act_pol_set(priv, rule_index, port_num,
 				      queue_num, true);
 	if (ret)
-		return ret;
+		goto out_err_flow_rule;
 
 	/* Turn on CFP for this rule now */
 	reg = core_readl(priv, CORE_CFP_CTL_REG);
@@ -446,6 +460,10 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
 	fs->location = rule_index;
 
 	return 0;
+
+out_err_flow_rule:
+	ethtool_rx_flow_rule_free(flow);
+	return ret;
 }
 
 static void bcm_sf2_cfp_slice_ipv6(struct bcm_sf2_priv *priv,
@@ -584,6 +602,10 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
 	struct ethtool_tcpip6_spec *v6_spec, *v6_m_spec;
 	unsigned int slice_num, rule_index[2];
 	const struct cfp_udf_layout *layout;
+	struct ethtool_rx_flow_rule *flow;
+	struct flow_match_ipv6_addrs ipv6;
+	struct flow_match_ports ports;
+	struct flow_match_basic basic;
 	u8 ip_proto, ip_frag;
 	int ret = 0;
 	u8 num_udf;
@@ -645,6 +667,15 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
 		goto out_err;
 	}
 
+	flow = ethtool_rx_flow_rule_alloc(fs);
+	if (!flow) {
+		ret = -ENOMEM;
+		goto out_err;
+	}
+	flow_rule_match_ipv6_addrs(flow->rule, &ipv6);
+	flow_rule_match_ports(flow->rule, &ports);
+	flow_rule_match_basic(flow->rule, &basic);
+
 	/* Apply the UDF layout for this filter */
 	bcm_sf2_cfp_udf_set(priv, layout, slice_num);
 
@@ -668,7 +699,7 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
 	 * Reserved		[1]
 	 * UDF_Valid[8]		[0]
 	 */
-	reg = 1 << L3_FRAMING_SHIFT | ip_proto << IPPROTO_SHIFT |
+	reg = 1 << L3_FRAMING_SHIFT | basic.key->ip_proto << IPPROTO_SHIFT |
 		ip_frag << IP_FRAG_SHIFT | udf_upper_bits(num_udf);
 	core_writel(priv, reg, CORE_CFP_DATA_PORT(6));
 
@@ -688,10 +719,10 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
 	core_writel(priv, udf_lower_bits(num_udf) << 24, CORE_CFP_MASK_PORT(5));
 
 	/* Slice the IPv6 source address and port */
-	bcm_sf2_cfp_slice_ipv6(priv, v6_spec->ip6src, v6_spec->psrc,
-				slice_num, false);
-	bcm_sf2_cfp_slice_ipv6(priv, v6_m_spec->ip6src, v6_m_spec->psrc,
-				SLICE_NUM_MASK, true);
+	bcm_sf2_cfp_slice_ipv6(priv, ipv6.key->src.in6_u.u6_addr32,
+			       ports.key->src, slice_num, false);
+	bcm_sf2_cfp_slice_ipv6(priv, ipv6.mask->src.in6_u.u6_addr32,
+			       ports.mask->src, SLICE_NUM_MASK, true);
 
 	/* Insert into TCAM now because we need to insert a second rule */
 	bcm_sf2_cfp_rule_addr_set(priv, rule_index[0]);
@@ -699,20 +730,20 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
 	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
 	if (ret) {
 		pr_err("TCAM entry at addr %d failed\n", rule_index[0]);
-		goto out_err;
+		goto out_err_flow_rule;
 	}
 
 	/* Insert into Action and policer RAMs now */
 	ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[0], port_num,
 				      queue_num, false);
 	if (ret)
-		goto out_err;
+		goto out_err_flow_rule;
 
 	/* Now deal with the second slice to chain this rule */
 	slice_num = bcm_sf2_get_slice_number(layout, slice_num + 1);
 	if (slice_num == UDF_NUM_SLICES) {
 		ret = -EINVAL;
-		goto out_err;
+		goto out_err_flow_rule;
 	}
 
 	num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices);
@@ -748,10 +779,10 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
 	/* Mask all */
 	core_writel(priv, 0, CORE_CFP_MASK_PORT(5));
 
-	bcm_sf2_cfp_slice_ipv6(priv, v6_spec->ip6dst, v6_spec->pdst, slice_num,
-			       false);
-	bcm_sf2_cfp_slice_ipv6(priv, v6_m_spec->ip6dst, v6_m_spec->pdst,
-			       SLICE_NUM_MASK, true);
+	bcm_sf2_cfp_slice_ipv6(priv, ipv6.key->dst.in6_u.u6_addr32,
+			       ports.key->dst, slice_num, false);
+	bcm_sf2_cfp_slice_ipv6(priv, ipv6.mask->dst.in6_u.u6_addr32,
+			       ports.mask->dst, SLICE_NUM_MASK, true);
 
 	/* Insert into TCAM now */
 	bcm_sf2_cfp_rule_addr_set(priv, rule_index[1]);
@@ -759,7 +790,7 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
 	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
 	if (ret) {
 		pr_err("TCAM entry at addr %d failed\n", rule_index[1]);
-		goto out_err;
+		goto out_err_flow_rule;
 	}
 
 	/* Insert into Action and policer RAMs now, set chain ID to
@@ -768,7 +799,7 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
 	ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[1], port_num,
 				      queue_num, true);
 	if (ret)
-		goto out_err;
+		goto out_err_flow_rule;
 
 	/* Turn on CFP for this rule now */
 	reg = core_readl(priv, CORE_CFP_CTL_REG);
@@ -784,6 +815,8 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
 
 	return ret;
 
+out_err_flow_rule:
+	ethtool_rx_flow_rule_free(flow);
 out_err:
 	clear_bit(rule_index[1], priv->cfp.used);
 	return ret;
-- 
2.11.0

