From mboxrd@z Thu Jan 1 00:00:00 1970 From: Yongseok Koh Subject: [PATCH v2 4/5] net/mlx5: fix wildcard item for Direct Verbs Date: Tue, 23 Oct 2018 16:52:13 +0000 Message-ID: <20181023165200.2454-5-yskoh@mellanox.com> References: <20181017020739.11203-1-yskoh@mellanox.com> <20181023165200.2454-1-yskoh@mellanox.com> Mime-Version: 1.0 Content-Type: text/plain; charset="iso-8859-1" Content-Transfer-Encoding: quoted-printable Cc: "dev@dpdk.org" , Yongseok Koh , Ori Kam To: Shahaf Shuler Return-path: Received: from EUR02-HE1-obe.outbound.protection.outlook.com (mail-eopbgr10054.outbound.protection.outlook.com [40.107.1.54]) by dpdk.org (Postfix) with ESMTP id C82221B44B for ; Tue, 23 Oct 2018 18:52:15 +0200 (CEST) In-Reply-To: <20181023165200.2454-1-yskoh@mellanox.com> Content-Language: en-US List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" If a network layer is specified with no spec, it means wildcard match. flow_dv_translate_item_*() returns without writing anything if spec is null and it causes creation of wrong flow. E.g., the following flow has to match any ipv4 packet. flow create 0 ingress pattern eth / ipv4 / end actions ... But, with the current code, it matches any packet because PMD doesn't write anything about IPv4. The matcher value and mask become completely zero. It should have written the IP version at least. It is the same for the rest of items. Even if the spec is null, PMD has to write constant fields before return, e.g. IP version and IP protocol number. 
Fixes: fc2c498ccb94 ("net/mlx5: add Direct Verbs translate items") Cc: orika@mellanox.com Signed-off-by: Yongseok Koh Acked-by: Ori Kam --- drivers/net/mlx5/mlx5_flow_dv.c | 55 ++++++++++++++++++++-----------------= ---- 1 file changed, 27 insertions(+), 28 deletions(-) diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_d= v.c index 15603401cf..57b01a78ee 100644 --- a/drivers/net/mlx5/mlx5_flow_dv.c +++ b/drivers/net/mlx5/mlx5_flow_dv.c @@ -2,7 +2,6 @@ * Copyright 2018 Mellanox Technologies, Ltd */ =20 - #include #include #include @@ -542,10 +541,6 @@ flow_dv_translate_item_ipv4(void *matcher, void *key, char *l24_v; uint8_t tos; =20 - if (!ipv4_v) - return; - if (!ipv4_m) - ipv4_m =3D &nic_mask; if (inner) { headers_m =3D MLX5_ADDR_OF(fte_match_param, matcher, inner_headers); @@ -557,6 +552,10 @@ flow_dv_translate_item_ipv4(void *matcher, void *key, } MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf); MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 4); + if (!ipv4_v) + return; + if (!ipv4_m) + ipv4_m =3D &nic_mask; l24_m =3D MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, dst_ipv4_dst_ipv6.ipv4_layout.ipv4); l24_v =3D MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, @@ -625,10 +624,6 @@ flow_dv_translate_item_ipv6(void *matcher, void *key, int i; int size; =20 - if (!ipv6_v) - return; - if (!ipv6_m) - ipv6_m =3D &nic_mask; if (inner) { headers_m =3D MLX5_ADDR_OF(fte_match_param, matcher, inner_headers); @@ -638,6 +633,12 @@ flow_dv_translate_item_ipv6(void *matcher, void *key, outer_headers); headers_v =3D MLX5_ADDR_OF(fte_match_param, key, outer_headers); } + MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 6); + if (!ipv6_v) + return; + if (!ipv6_m) + ipv6_m =3D &nic_mask; size =3D sizeof(ipv6_m->hdr.dst_addr); l24_m =3D MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, dst_ipv4_dst_ipv6.ipv6_layout.ipv6); @@ -653,8 +654,6 @@ flow_dv_translate_item_ipv6(void 
*matcher, void *key, memcpy(l24_m, ipv6_m->hdr.src_addr, size); for (i =3D 0; i < size; ++i) l24_v[i] =3D l24_m[i] & ipv6_v->hdr.src_addr[i]; - MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf); - MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 6); /* TOS. */ vtc_m =3D rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow); vtc_v =3D rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow); @@ -703,10 +702,6 @@ flow_dv_translate_item_tcp(void *matcher, void *key, void *headers_m; void *headers_v; =20 - if (!tcp_v) - return; - if (!tcp_m) - tcp_m =3D &rte_flow_item_tcp_mask; if (inner) { headers_m =3D MLX5_ADDR_OF(fte_match_param, matcher, inner_headers); @@ -718,6 +713,10 @@ flow_dv_translate_item_tcp(void *matcher, void *key, } MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff); MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP); + if (!tcp_v) + return; + if (!tcp_m) + tcp_m =3D &rte_flow_item_tcp_mask; MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport, rte_be_to_cpu_16(tcp_m->hdr.src_port)); MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport, @@ -750,10 +749,6 @@ flow_dv_translate_item_udp(void *matcher, void *key, void *headers_m; void *headers_v; =20 - if (!udp_v) - return; - if (!udp_m) - udp_m =3D &rte_flow_item_udp_mask; if (inner) { headers_m =3D MLX5_ADDR_OF(fte_match_param, matcher, inner_headers); @@ -765,6 +760,10 @@ flow_dv_translate_item_udp(void *matcher, void *key, } MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff); MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP); + if (!udp_v) + return; + if (!udp_m) + udp_m =3D &rte_flow_item_udp_mask; MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport, rte_be_to_cpu_16(udp_m->hdr.src_port)); MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport, @@ -799,10 +798,6 @@ flow_dv_translate_item_gre(void *matcher, void *key, void *misc_m =3D MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters); void *misc_v =3D MLX5_ADDR_OF(fte_match_param, 
key, misc_parameters); =20 - if (!gre_v) - return; - if (!gre_m) - gre_m =3D &rte_flow_item_gre_mask; if (inner) { headers_m =3D MLX5_ADDR_OF(fte_match_param, matcher, inner_headers); @@ -814,6 +809,10 @@ flow_dv_translate_item_gre(void *matcher, void *key, } MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff); MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE); + if (!gre_v) + return; + if (!gre_m) + gre_m =3D &rte_flow_item_gre_mask; MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, rte_be_to_cpu_16(gre_m->protocol)); MLX5_SET(fte_match_set_misc, misc_v, gre_protocol, @@ -848,6 +847,7 @@ flow_dv_translate_item_nvgre(void *matcher, void *key, int size; int i; =20 + flow_dv_translate_item_gre(matcher, key, item, inner); if (!nvgre_v) return; if (!nvgre_m) @@ -858,7 +858,6 @@ flow_dv_translate_item_nvgre(void *matcher, void *key, memcpy(gre_key_m, tni_flow_id_m, size); for (i =3D 0; i < size; ++i) gre_key_v[i] =3D gre_key_m[i] & tni_flow_id_v[i]; - flow_dv_translate_item_gre(matcher, key, item, inner); } =20 /** @@ -890,10 +889,6 @@ flow_dv_translate_item_vxlan(void *matcher, void *key, int size; int i; =20 - if (!vxlan_v) - return; - if (!vxlan_m) - vxlan_m =3D &rte_flow_item_vxlan_mask; if (inner) { headers_m =3D MLX5_ADDR_OF(fte_match_param, matcher, inner_headers); @@ -909,6 +904,10 @@ flow_dv_translate_item_vxlan(void *matcher, void *key, MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF); MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport); } + if (!vxlan_v) + return; + if (!vxlan_m) + vxlan_m =3D &rte_flow_item_vxlan_mask; size =3D sizeof(vxlan_m->vni); vni_m =3D MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni); vni_v =3D MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni); --=20 2.11.0