netdev.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
From: Pablo Neira Ayuso <pablo@netfilter.org>
To: netdev@vger.kernel.org
Cc: davem@davemloft.net, thomas.lendacky@amd.com,
	f.fainelli@gmail.com, ariel.elior@cavium.com,
	michael.chan@broadcom.com, santosh@chelsio.com,
	madalin.bucur@nxp.com, yisen.zhuang@huawei.com,
	salil.mehta@huawei.com, jeffrey.t.kirsher@intel.com,
	tariqt@mellanox.com, saeedm@mellanox.com, jiri@mellanox.com,
	idosch@mellanox.com, ganeshgr@chelsio.com,
	jakub.kicinski@netronome.com, linux-net-drivers@solarflare.com,
	peppe.cavallaro@st.com, alexandre.torgue@st.com,
	joabreu@synopsys.com, grygorii.strashko@ti.com, andrew@lunn.ch,
	vivien.didelot@savoirfairelinux.com
Subject: [PATCH RFC,net-next 10/10] dsa: bcm_sf2: use flow_rule infrastructure
Date: Tue, 25 Sep 2018 21:20:01 +0200	[thread overview]
Message-ID: <20180925192001.2482-11-pablo@netfilter.org> (raw)
In-Reply-To: <20180925192001.2482-1-pablo@netfilter.org>

Update this driver to use the flow_rule infrastructure, so that the same
code to populate the hardware IR can be used from both the ethtool_rx_flow
and the cls_flower interfaces.

Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
---
 drivers/net/dsa/bcm_sf2_cfp.c | 311 ++++++++++++++++++++++--------------------
 1 file changed, 166 insertions(+), 145 deletions(-)

diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c
index 47c5f272a084..9dace0e25a3a 100644
--- a/drivers/net/dsa/bcm_sf2_cfp.c
+++ b/drivers/net/dsa/bcm_sf2_cfp.c
@@ -251,10 +251,12 @@ static int bcm_sf2_cfp_act_pol_set(struct bcm_sf2_priv *priv,
 }
 
 static void bcm_sf2_cfp_slice_ipv4(struct bcm_sf2_priv *priv,
-				   struct ethtool_tcpip4_spec *v4_spec,
+				   struct flow_rule *flow_rule,
 				   unsigned int slice_num,
 				   bool mask)
 {
+	struct flow_match_ipv4_addrs ipv4;
+	struct flow_match_ports ports;
 	u32 reg, offset;
 
 	/* C-Tag		[31:24]
@@ -268,41 +270,54 @@ static void bcm_sf2_cfp_slice_ipv4(struct bcm_sf2_priv *priv,
 		offset = CORE_CFP_DATA_PORT(4);
 	core_writel(priv, reg, offset);
 
+	flow_rule_match_ipv4_addrs(flow_rule, &ipv4);
+	flow_rule_match_ports(flow_rule, &ports);
+
 	/* UDF_n_A7		[31:24]
 	 * UDF_n_A6		[23:8]
 	 * UDF_n_A5		[7:0]
 	 */
-	reg = be16_to_cpu(v4_spec->pdst) >> 8;
-	if (mask)
+	if (mask) {
+		reg = be16_to_cpu(ports.mask->dst) >> 8;
 		offset = CORE_CFP_MASK_PORT(3);
-	else
+	} else {
+		reg = be16_to_cpu(ports.key->dst) >> 8;
 		offset = CORE_CFP_DATA_PORT(3);
+	}
 	core_writel(priv, reg, offset);
 
 	/* UDF_n_A5		[31:24]
 	 * UDF_n_A4		[23:8]
 	 * UDF_n_A3		[7:0]
 	 */
-	reg = (be16_to_cpu(v4_spec->pdst) & 0xff) << 24 |
-	      (u32)be16_to_cpu(v4_spec->psrc) << 8 |
-	      (be32_to_cpu(v4_spec->ip4dst) & 0x0000ff00) >> 8;
-	if (mask)
+	if (mask) {
+		reg = (be16_to_cpu(ports.mask->dst) & 0xff) << 24 |
+		      (u32)be16_to_cpu(ports.mask->src) << 8 |
+		      (be32_to_cpu(ipv4.mask->dst) & 0x0000ff00) >> 8;
 		offset = CORE_CFP_MASK_PORT(2);
-	else
+	} else {
+		reg = (be16_to_cpu(ports.key->dst) & 0xff) << 24 |
+		      (u32)be16_to_cpu(ports.key->src) << 8 |
+		      (be32_to_cpu(ipv4.key->dst) & 0x0000ff00) >> 8;
 		offset = CORE_CFP_DATA_PORT(2);
+	}
 	core_writel(priv, reg, offset);
 
 	/* UDF_n_A3		[31:24]
 	 * UDF_n_A2		[23:8]
 	 * UDF_n_A1		[7:0]
 	 */
-	reg = (u32)(be32_to_cpu(v4_spec->ip4dst) & 0xff) << 24 |
-	      (u32)(be32_to_cpu(v4_spec->ip4dst) >> 16) << 8 |
-	      (be32_to_cpu(v4_spec->ip4src) & 0x0000ff00) >> 8;
-	if (mask)
+	if (mask) {
+		reg = (u32)(be32_to_cpu(ipv4.mask->dst) & 0xff) << 24 |
+		      (u32)(be32_to_cpu(ipv4.mask->dst) >> 16) << 8 |
+		      (be32_to_cpu(ipv4.mask->src) & 0x0000ff00) >> 8;
 		offset = CORE_CFP_MASK_PORT(1);
-	else
+	} else {
+		reg = (u32)(be32_to_cpu(ipv4.key->dst) & 0xff) << 24 |
+		      (u32)(be32_to_cpu(ipv4.key->dst) >> 16) << 8 |
+		      (be32_to_cpu(ipv4.key->src) & 0x0000ff00) >> 8;
 		offset = CORE_CFP_DATA_PORT(1);
+	}
 	core_writel(priv, reg, offset);
 
 	/* UDF_n_A1		[31:24]
@@ -311,56 +326,34 @@ static void bcm_sf2_cfp_slice_ipv4(struct bcm_sf2_priv *priv,
 	 * Slice ID		[3:2]
 	 * Slice valid		[1:0]
 	 */
-	reg = (u32)(be32_to_cpu(v4_spec->ip4src) & 0xff) << 24 |
-	      (u32)(be32_to_cpu(v4_spec->ip4src) >> 16) << 8 |
-	      SLICE_NUM(slice_num) | SLICE_VALID;
-	if (mask)
+	if (mask) {
+		reg = (u32)(be32_to_cpu(ipv4.mask->src) & 0xff) << 24 |
+		      (u32)(be32_to_cpu(ipv4.mask->src) >> 16) << 8 |
+		      SLICE_NUM(slice_num) | SLICE_VALID;
 		offset = CORE_CFP_MASK_PORT(0);
-	else
+	} else {
+		reg = (u32)(be32_to_cpu(ipv4.key->src) & 0xff) << 24 |
+		      (u32)(be32_to_cpu(ipv4.key->src) >> 16) << 8 |
+		      SLICE_NUM(slice_num) | SLICE_VALID;
 		offset = CORE_CFP_DATA_PORT(0);
+	}
 	core_writel(priv, reg, offset);
 }
 
 static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
 				     unsigned int port_num,
 				     unsigned int queue_num,
-				     struct ethtool_rx_flow_spec *fs)
+				     struct flow_rule *flow_rule,
+				     unsigned int rule_index, u8 ip_frag)
 {
-	struct ethtool_tcpip4_spec *v4_spec, *v4_m_spec;
 	const struct cfp_udf_layout *layout;
-	unsigned int slice_num, rule_index;
-	u8 ip_proto, ip_frag;
+	struct flow_match_basic basic;
+	struct flow_match_ip ip;
+	unsigned int slice_num;
 	u8 num_udf;
 	u32 reg;
 	int ret;
 
-	switch (fs->flow_type & ~FLOW_EXT) {
-	case TCP_V4_FLOW:
-		ip_proto = IPPROTO_TCP;
-		v4_spec = &fs->h_u.tcp_ip4_spec;
-		v4_m_spec = &fs->m_u.tcp_ip4_spec;
-		break;
-	case UDP_V4_FLOW:
-		ip_proto = IPPROTO_UDP;
-		v4_spec = &fs->h_u.udp_ip4_spec;
-		v4_m_spec = &fs->m_u.udp_ip4_spec;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	ip_frag = be32_to_cpu(fs->m_ext.data[0]);
-
-	/* Locate the first rule available */
-	if (fs->location == RX_CLS_LOC_ANY)
-		rule_index = find_first_zero_bit(priv->cfp.used,
-						 priv->num_cfp_rules);
-	else
-		rule_index = fs->location;
-
-	if (rule_index > bcm_sf2_cfp_rule_size(priv))
-		return -ENOSPC;
-
 	layout = &udf_tcpip4_layout;
 	/* We only use one UDF slice for now */
 	slice_num = bcm_sf2_get_slice_number(layout, 0);
@@ -378,6 +371,9 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
 	/* Source port map match */
 	core_writel(priv, 0xff, CORE_CFP_MASK_PORT(7));
 
+	flow_rule_match_basic(flow_rule, &basic);
+	flow_rule_match_ip(flow_rule, &ip);
+
 	/* S-Tag status		[31:30]
 	 * C-Tag status		[29:28]
 	 * L2 framing		[27:26]
@@ -392,8 +388,9 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
 	 * Reserved		[1]
 	 * UDF_Valid[8]		[0]
 	 */
-	core_writel(priv, v4_spec->tos << IPTOS_SHIFT |
-		    ip_proto << IPPROTO_SHIFT | ip_frag << IP_FRAG_SHIFT |
+	core_writel(priv, ip.key->tos << IPTOS_SHIFT |
+		    basic.key->ip_proto << IPPROTO_SHIFT |
+		    ip_frag << IP_FRAG_SHIFT |
 		    udf_upper_bits(num_udf),
 		    CORE_CFP_DATA_PORT(6));
 
@@ -411,8 +408,8 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
 	core_writel(priv, udf_lower_bits(num_udf) << 24, CORE_CFP_MASK_PORT(5));
 
 	/* Program the match and the mask */
-	bcm_sf2_cfp_slice_ipv4(priv, v4_spec, slice_num, false);
-	bcm_sf2_cfp_slice_ipv4(priv, v4_m_spec, SLICE_NUM_MASK, true);
+	bcm_sf2_cfp_slice_ipv4(priv, flow_rule, slice_num, false);
+	bcm_sf2_cfp_slice_ipv4(priv, flow_rule, SLICE_NUM_MASK, true);
 
 	/* Insert into TCAM now */
 	bcm_sf2_cfp_rule_addr_set(priv, rule_index);
@@ -437,9 +434,8 @@ static int bcm_sf2_cfp_ipv4_rule_set(struct bcm_sf2_priv *priv, int port,
 	/* Flag the rule as being used and return it */
 	set_bit(rule_index, priv->cfp.used);
 	set_bit(rule_index, priv->cfp.unique);
-	fs->location = rule_index;
 
-	return 0;
+	return rule_index;
 }
 
 static void bcm_sf2_cfp_slice_ipv6(struct bcm_sf2_priv *priv,
@@ -518,33 +514,18 @@ static void bcm_sf2_cfp_slice_ipv6(struct bcm_sf2_priv *priv,
 static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
 				     unsigned int port_num,
 				     unsigned int queue_num,
-				     struct ethtool_rx_flow_spec *fs)
+				     struct flow_rule *flow_rule,
+				     unsigned int *rule_index, u8 ip_frag)
 {
-	struct ethtool_tcpip6_spec *v6_spec, *v6_m_spec;
-	unsigned int slice_num, rule_index[2];
 	const struct cfp_udf_layout *layout;
-	u8 ip_proto, ip_frag;
+	struct flow_match_ipv6_addrs ipv6;
+	struct flow_match_ports ports;
+	struct flow_match_basic basic;
+	unsigned int slice_num;
 	int ret = 0;
 	u8 num_udf;
 	u32 reg;
 
-	switch (fs->flow_type & ~FLOW_EXT) {
-	case TCP_V6_FLOW:
-		ip_proto = IPPROTO_TCP;
-		v6_spec = &fs->h_u.tcp_ip6_spec;
-		v6_m_spec = &fs->m_u.tcp_ip6_spec;
-		break;
-	case UDP_V6_FLOW:
-		ip_proto = IPPROTO_UDP;
-		v6_spec = &fs->h_u.udp_ip6_spec;
-		v6_m_spec = &fs->m_u.udp_ip6_spec;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	ip_frag = be32_to_cpu(fs->m_ext.data[0]);
-
 	layout = &udf_tcpip6_layout;
 	slice_num = bcm_sf2_get_slice_number(layout, 0);
 	if (slice_num == UDF_NUM_SLICES)
@@ -552,38 +533,6 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
 
 	num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices);
 
-	/* Negotiate two indexes, one for the second half which we are chained
-	 * from, which is what we will return to user-space, and a second one
-	 * which is used to store its first half. That first half does not
-	 * allow any choice of placement, so it just needs to find the next
-	 * available bit. We return the second half as fs->location because
-	 * that helps with the rule lookup later on since the second half is
-	 * chained from its first half, we can easily identify IPv6 CFP rules
-	 * by looking whether they carry a CHAIN_ID.
-	 *
-	 * We also want the second half to have a lower rule_index than its
-	 * first half because the HW search is by incrementing addresses.
-	 */
-	if (fs->location == RX_CLS_LOC_ANY)
-		rule_index[1] = find_first_zero_bit(priv->cfp.used,
-						    priv->num_cfp_rules);
-	else
-		rule_index[1] = fs->location;
-	if (rule_index[1] > bcm_sf2_cfp_rule_size(priv))
-		return -ENOSPC;
-
-	/* Flag it as used (cleared on error path) such that we can immediately
-	 * obtain a second one to chain from.
-	 */
-	set_bit(rule_index[1], priv->cfp.used);
-
-	rule_index[0] = find_first_zero_bit(priv->cfp.used,
-					    priv->num_cfp_rules);
-	if (rule_index[0] > bcm_sf2_cfp_rule_size(priv)) {
-		ret = -ENOSPC;
-		goto out_err;
-	}
-
 	/* Apply the UDF layout for this filter */
 	bcm_sf2_cfp_udf_set(priv, layout, slice_num);
 
@@ -593,6 +542,8 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
 	/* Source port map match */
 	core_writel(priv, 0xff, CORE_CFP_MASK_PORT(7));
 
+	flow_rule_match_basic(flow_rule, &basic);
+
 	/* S-Tag status		[31:30]
 	 * C-Tag status		[29:28]
 	 * L2 framing		[27:26]
@@ -607,7 +558,7 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
 	 * Reserved		[1]
 	 * UDF_Valid[8]		[0]
 	 */
-	reg = 1 << L3_FRAMING_SHIFT | ip_proto << IPPROTO_SHIFT |
+	reg = 1 << L3_FRAMING_SHIFT | basic.key->ip_proto << IPPROTO_SHIFT |
 		ip_frag << IP_FRAG_SHIFT | udf_upper_bits(num_udf);
 	core_writel(priv, reg, CORE_CFP_DATA_PORT(6));
 
@@ -626,11 +577,14 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
 	/* Mask all but valid UDFs */
 	core_writel(priv, udf_lower_bits(num_udf) << 24, CORE_CFP_MASK_PORT(5));
 
+	flow_rule_match_ipv6_addrs(flow_rule, &ipv6);
+	flow_rule_match_ports(flow_rule, &ports);
+
 	/* Slice the IPv6 source address and port */
-	bcm_sf2_cfp_slice_ipv6(priv, v6_spec->ip6src, v6_spec->psrc,
-				slice_num, false);
-	bcm_sf2_cfp_slice_ipv6(priv, v6_m_spec->ip6src, v6_m_spec->psrc,
-				SLICE_NUM_MASK, true);
+	bcm_sf2_cfp_slice_ipv6(priv, ipv6.key->src.in6_u.u6_addr32,
+			       ports.key->src, slice_num, false);
+	bcm_sf2_cfp_slice_ipv6(priv, ipv6.mask->src.in6_u.u6_addr32,
+			       ports.mask->src, SLICE_NUM_MASK, true);
 
 	/* Insert into TCAM now because we need to insert a second rule */
 	bcm_sf2_cfp_rule_addr_set(priv, rule_index[0]);
@@ -638,21 +592,19 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
 	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
 	if (ret) {
 		pr_err("TCAM entry at addr %d failed\n", rule_index[0]);
-		goto out_err;
+		return ret;
 	}
 
 	/* Insert into Action and policer RAMs now */
 	ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[0], port_num,
 				      queue_num, false);
 	if (ret)
-		goto out_err;
+		return ret;
 
 	/* Now deal with the second slice to chain this rule */
 	slice_num = bcm_sf2_get_slice_number(layout, slice_num + 1);
-	if (slice_num == UDF_NUM_SLICES) {
-		ret = -EINVAL;
-		goto out_err;
-	}
+	if (slice_num == UDF_NUM_SLICES)
+		return -EINVAL;
 
 	num_udf = bcm_sf2_get_num_udf_slices(layout->udfs[slice_num].slices);
 
@@ -687,10 +639,10 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
 	/* Mask all */
 	core_writel(priv, 0, CORE_CFP_MASK_PORT(5));
 
-	bcm_sf2_cfp_slice_ipv6(priv, v6_spec->ip6dst, v6_spec->pdst, slice_num,
-			       false);
-	bcm_sf2_cfp_slice_ipv6(priv, v6_m_spec->ip6dst, v6_m_spec->pdst,
-			       SLICE_NUM_MASK, true);
+	bcm_sf2_cfp_slice_ipv6(priv, ipv6.key->dst.in6_u.u6_addr32,
+			       ports.key->dst, slice_num, false);
+	bcm_sf2_cfp_slice_ipv6(priv, ipv6.mask->dst.in6_u.u6_addr32,
+			       ports.mask->dst, SLICE_NUM_MASK, true);
 
 	/* Insert into TCAM now */
 	bcm_sf2_cfp_rule_addr_set(priv, rule_index[1]);
@@ -698,7 +650,7 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
 	ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
 	if (ret) {
 		pr_err("TCAM entry at addr %d failed\n", rule_index[1]);
-		goto out_err;
+		return ret;
 	}
 
 	/* Insert into Action and policer RAMs now, set chain ID to
@@ -707,7 +659,7 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
 	ret = bcm_sf2_cfp_act_pol_set(priv, rule_index[1], port_num,
 				      queue_num, true);
 	if (ret)
-		goto out_err;
+		return ret;
 
 	/* Turn on CFP for this rule now */
 	reg = core_readl(priv, CORE_CFP_CTL_REG);
@@ -719,13 +671,8 @@ static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
 	 */
 	set_bit(rule_index[0], priv->cfp.used);
 	set_bit(rule_index[1], priv->cfp.unique);
-	fs->location = rule_index[1];
-
-	return ret;
 
-out_err:
-	clear_bit(rule_index[1], priv->cfp.used);
-	return ret;
+	return rule_index[1];
 }
 
 static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
@@ -735,7 +682,10 @@ static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
 	s8 cpu_port = ds->ports[port].cpu_dp->index;
 	__u64 ring_cookie = fs->ring_cookie;
 	unsigned int queue_num, port_num;
+	struct flow_rule *flow_rule;
+	unsigned int rule_index[2];
 	int ret = -EINVAL;
+	u8 ip_frag;
 
 	/* Check for unsupported extensions */
 	if ((fs->flow_type & FLOW_EXT) && (fs->m_ext.vlan_etype ||
@@ -750,23 +700,38 @@ static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
 	    fs->location > bcm_sf2_cfp_rule_size(priv))
 		return -EINVAL;
 
-	/* This rule is a Wake-on-LAN filter and we must specifically
-	 * target the CPU port in order for it to be working.
-	 */
-	if (ring_cookie == RX_CLS_FLOW_WAKE)
-		ring_cookie = cpu_port * SF2_NUM_EGRESS_QUEUES;
-
-	/* We do not support discarding packets, check that the
-	 * destination port is enabled and that we are within the
+	/* Check that the destination port is enabled and that we are within the
 	 * number of ports supported by the switch
 	 */
 	port_num = ring_cookie / SF2_NUM_EGRESS_QUEUES;
-
-	if (ring_cookie == RX_CLS_FLOW_DISC ||
-	    !(dsa_is_user_port(ds, port_num) ||
+	if (!(dsa_is_user_port(ds, port_num) ||
 	      dsa_is_cpu_port(ds, port_num)) ||
 	    port_num >= priv->hw_params.num_ports)
 		return -EINVAL;
+
+	/* User-defined data semantics depends on each driver. */
+	ip_frag = be32_to_cpu(fs->m_ext.data[0]);
+
+	flow_rule = ethtool_rx_flow_rule(fs);
+	if (!flow_rule)
+		return -ENOMEM;
+
+	switch (flow_rule->action.keys[0].id) {
+	case FLOW_ACTION_KEY_WAKE:
+		/* This rule is a Wake-on-LAN filter and we must specifically
+		 * target the CPU port in order for it to be working.
+		 */
+		ring_cookie = cpu_port * SF2_NUM_EGRESS_QUEUES;
+		break;
+	case FLOW_ACTION_KEY_QUEUE:
+		ring_cookie = flow_rule->action.keys[0].queue_index;
+		break;
+	default:
+		/* We do not support discarding packets. */
+		ret = -EINVAL;
+		goto err_out;
+	}
+
 	/*
 	 * We have a small oddity where Port 6 just does not have a
 	 * valid bit here (so we substract by one).
@@ -778,17 +743,73 @@ static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
 	switch (fs->flow_type & ~FLOW_EXT) {
 	case TCP_V4_FLOW:
 	case UDP_V4_FLOW:
+		/* Locate the first rule available */
+		if (fs->location == RX_CLS_LOC_ANY)
+			rule_index[0] = find_first_zero_bit(priv->cfp.used,
+							    priv->num_cfp_rules);
+		else
+			rule_index[0] = fs->location;
+
+		if (rule_index[0] > bcm_sf2_cfp_rule_size(priv)) {
+			ret = -ENOSPC;
+			goto err_out;
+		}
+
 		ret = bcm_sf2_cfp_ipv4_rule_set(priv, port, port_num,
-						queue_num, fs);
+						queue_num, flow_rule,
+						rule_index[0], ip_frag);
 		break;
 	case TCP_V6_FLOW:
 	case UDP_V6_FLOW:
+		/* Negotiate two indexes, one for the second half which we are
+		 * chained from, which is what we will return to user-space, and
+		 * a second one which is used to store its first half. That
+		 * first half does not allow any choice of placement, so it just
+		 * needs to find the next available bit. We return the second
+		 * half as fs->location because that helps with the rule lookup
+		 * later on since the second half is chained from its first
+		 * half, we can easily identify IPv6 CFP rules by looking
+		 * whether they carry a CHAIN_ID.
+		 *
+		 * We also want the second half to have a lower rule_index than
+		 * its first half because the HW search is by incrementing
+		 * addresses.
+		 */
+		if (fs->location == RX_CLS_LOC_ANY)
+			rule_index[1] = find_first_zero_bit(priv->cfp.used,
+							    priv->num_cfp_rules);
+		else
+			rule_index[1] = fs->location;
+
+		if (rule_index[1] > bcm_sf2_cfp_rule_size(priv)) {
+			ret = -ENOSPC;
+			goto err_out;
+		}
+
+		/* Flag it as used (cleared on error path) such that we can
+		 * immediately obtain a second one to chain from.
+		 */
+		set_bit(rule_index[1], priv->cfp.used);
+
+		rule_index[0] = find_first_zero_bit(priv->cfp.used,
+						    priv->num_cfp_rules);
+		if (rule_index[0] > bcm_sf2_cfp_rule_size(priv)) {
+			clear_bit(rule_index[1], priv->cfp.used);
+			ret = -ENOSPC;
+			goto err_out;
+		}
+
 		ret = bcm_sf2_cfp_ipv6_rule_set(priv, port, port_num,
-						queue_num, fs);
+						queue_num, flow_rule,
+						rule_index, ip_frag);
 		break;
 	default:
 		break;
 	}
+	if (ret >= 0)
+		fs->location = ret;
+err_out:
+	ethtool_rx_flow_rule_free(flow_rule);
 
 	return ret;
 }
-- 
2.11.0

  parent reply	other threads:[~2018-09-26  1:29 UTC|newest]

Thread overview: 16+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2018-09-25 19:19 [PATCH RFC,net-next 00/10] add flow_rule infrastructure Pablo Neira Ayuso
2018-09-25 19:19 ` [PATCH RFC,net-next 01/10] flow_dissector: add flow_rule and flow_match structures and use them Pablo Neira Ayuso
2018-09-25 19:19 ` [PATCH RFC,net-next 02/10] net/mlx5e: allow two independent packet edit actions Pablo Neira Ayuso
2018-09-25 19:19 ` [PATCH RFC,net-next 03/10] flow_dissector: add flow action infrastructure Pablo Neira Ayuso
2018-09-25 19:19 ` [PATCH RFC,net-next 04/10] cls_flower: add translator to flow_action representation Pablo Neira Ayuso
2018-09-26 15:47   ` Jakub Kicinski
2018-09-25 19:19 ` [PATCH RFC,net-next 05/10] cls_flower: add statistics retrieval infrastructure and use it Pablo Neira Ayuso
2018-09-25 19:19 ` [PATCH RFC,net-next 06/10] drivers: net: use flow action infrastructure Pablo Neira Ayuso
2018-09-25 19:19 ` [PATCH RFC,net-next 07/10] cls_flower: don't expose TC actions to drivers anymore Pablo Neira Ayuso
2018-09-25 19:19 ` [PATCH RFC,net-next 08/10] flow_dissector: add wake-up-on-lan and queue to flow_action Pablo Neira Ayuso
2018-09-26 18:51   ` Florian Fainelli
2018-09-25 19:20 ` [PATCH RFC,net-next 09/10] flow_dissector: add basic ethtool_rx_flow_spec to flow_rule structure translator Pablo Neira Ayuso
2018-09-25 19:20 ` Pablo Neira Ayuso [this message]
2018-09-26 18:41   ` [PATCH RFC,net-next 10/10] dsa: bcm_sf2: use flow_rule infrastructure Florian Fainelli
2018-09-26 15:51 ` [PATCH RFC,net-next 00/10] add " Jakub Kicinski
2018-09-26 18:41   ` Florian Fainelli

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20180925192001.2482-11-pablo@netfilter.org \
    --to=pablo@netfilter.org \
    --cc=alexandre.torgue@st.com \
    --cc=andrew@lunn.ch \
    --cc=ariel.elior@cavium.com \
    --cc=davem@davemloft.net \
    --cc=f.fainelli@gmail.com \
    --cc=ganeshgr@chelsio.com \
    --cc=grygorii.strashko@ti.com \
    --cc=idosch@mellanox.com \
    --cc=jakub.kicinski@netronome.com \
    --cc=jeffrey.t.kirsher@intel.com \
    --cc=jiri@mellanox.com \
    --cc=joabreu@synopsys.com \
    --cc=linux-net-drivers@solarflare.com \
    --cc=madalin.bucur@nxp.com \
    --cc=michael.chan@broadcom.com \
    --cc=netdev@vger.kernel.org \
    --cc=peppe.cavallaro@st.com \
    --cc=saeedm@mellanox.com \
    --cc=salil.mehta@huawei.com \
    --cc=santosh@chelsio.com \
    --cc=tariqt@mellanox.com \
    --cc=thomas.lendacky@amd.com \
    --cc=vivien.didelot@savoirfairelinux.com \
    --cc=yisen.zhuang@huawei.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).