From: Yongseok Koh
Subject: Re: [PATCH v2 2/7] net/mlx5: e-switch VXLAN flow validation routine
Date: Tue, 23 Oct 2018 10:04:32 +0000
Message-ID: <20181023100424.GB14792@mtidpdk.mti.labs.mlnx>
In-Reply-To: <1539612815-47199-3-git-send-email-viacheslavo@mellanox.com>
References: <1538461807-37507-1-git-send-email-viacheslavo@mellanox.com>
 <1539612815-47199-1-git-send-email-viacheslavo@mellanox.com>
 <1539612815-47199-3-git-send-email-viacheslavo@mellanox.com>
To: Slava Ovsiienko
Cc: Shahaf Shuler, dev@dpdk.org
List-Id: DPDK patches and discussions

On Mon, Oct 15, 2018 at 02:13:30PM +0000, Viacheslav Ovsiienko wrote:
> This part of patchset adds support for flow item/action lists
> validation. The following entities are now supported:
>
> - RTE_FLOW_ITEM_TYPE_VXLAN, contains the tunnel VNI
>
> - RTE_FLOW_ACTION_TYPE_VXLAN_DECAP, if this action is specified
>   the items in the flow items list treated as outer network
>   parameters for tunnel outer header match. The ethernet layer
>   addresses always are treated as inner ones.
>
> - RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP, contains the item list to
>   build the encapsulation header. In current implementation the
>   values is the subject for some constraints:
>     - outer source MAC address will be always unconditionally
>       set to the one of MAC addresses of outer egress interface
>     - no way to specify source UDP port
>     - all abovementioned parameters are ignored if specified
>       in the rule, warning messages are sent to the log
>
> Suggested-by: Adrien Mazarguil
> Signed-off-by: Viacheslav Ovsiienko
> ---
>  drivers/net/mlx5/mlx5_flow_tcf.c | 711 ++++++++++++++++++++++++++++++++++++++
>  1 file changed, 705 insertions(+), 6 deletions(-)
>
> diff --git a/drivers/net/mlx5/mlx5_flow_tcf.c b/drivers/net/mlx5/mlx5_flow_tcf.c
> index 8f9c78a..0055417 100644
> --- a/drivers/net/mlx5/mlx5_flow_tcf.c
> +++ b/drivers/net/mlx5/mlx5_flow_tcf.c
> @@ -430,6 +430,7 @@ struct mlx5_flow_tcf_context {
>  	struct rte_flow_item_ipv6 ipv6;
>  	struct rte_flow_item_tcp tcp;
>  	struct rte_flow_item_udp udp;
> +	struct rte_flow_item_vxlan vxlan;
>  } flow_tcf_mask_empty;
>
>  /** Supported masks for known item types. */
> @@ -441,6 +442,7 @@ struct mlx5_flow_tcf_context {
>  	struct rte_flow_item_ipv6 ipv6;
>  	struct rte_flow_item_tcp tcp;
>  	struct rte_flow_item_udp udp;
> +	struct rte_flow_item_vxlan vxlan;
>  } flow_tcf_mask_supported = {
>  	.port_id = {
>  		.id = 0xffffffff,
> @@ -478,6 +480,9 @@ struct mlx5_flow_tcf_context {
>  		.src_port = RTE_BE16(0xffff),
>  		.dst_port = RTE_BE16(0xffff),
>  	},
> +	.vxlan = {
> +		.vni = "\xff\xff\xff",
> +	},
>  };
>
>  #define SZ_NLATTR_HDR MNL_ALIGN(sizeof(struct nlattr))
> @@ -943,6 +948,615 @@ struct pedit_parser {
>  }
>
>  /**
> + * Validate VXLAN_ENCAP action RTE_FLOW_ITEM_TYPE_ETH item for E-Switch.

How about mentioning that it is to validate an item of the "encap header"?
Same for the rest of these comments.
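For example, something along these lines (suggested wording only):

	/**
	 * Validate the RTE_FLOW_ITEM_TYPE_ETH item describing
	 * the encap header of the VXLAN_ENCAP action for E-Switch.
	 */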
> + *
> + * @param[in] item
> + *   Pointer to the itemn structure.

Typo. Same for the rest.

> + * @param[out] error
> + *   Pointer to the error structure.
> + *
> + * @return
> + *   0 on success, a negative errno value otherwise and rte_errno is set.
> + **/
> +static int
> +flow_tcf_validate_vxlan_encap_eth(const struct rte_flow_item *item,
> +				  struct rte_flow_error *error)
> +{
> +	const struct rte_flow_item_eth *spec = item->spec;
> +	const struct rte_flow_item_eth *mask = item->mask;
> +
> +	if (!spec)
> +		/*
> +		 * Specification for L2 addresses can be empty
> +		 * because these ones are optional and not
> +		 * required directly by tc rule.
> +		 */
> +		return 0;
> +	if (!mask)
> +		/* If mask is not specified use the default one. */
> +		mask = &rte_flow_item_eth_mask;
> +	if (memcmp(&mask->dst,
> +		   &flow_tcf_mask_empty.eth.dst,
> +		   sizeof(flow_tcf_mask_empty.eth.dst))) {
> +		if (memcmp(&mask->dst,
> +			   &rte_flow_item_eth_mask.dst,
> +			   sizeof(rte_flow_item_eth_mask.dst)))
> +			return rte_flow_error_set(error, ENOTSUP,
> +				RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
> +				"no support for partial mask on"
> +				" \"eth.dst\" field");
> +	}
> +	if (memcmp(&mask->src,
> +		   &flow_tcf_mask_empty.eth.src,
> +		   sizeof(flow_tcf_mask_empty.eth.src))) {
> +		if (memcmp(&mask->src,
> +			   &rte_flow_item_eth_mask.src,
> +			   sizeof(rte_flow_item_eth_mask.src)))
> +			return rte_flow_error_set(error, ENOTSUP,
> +				RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
> +				"no support for partial mask on"
> +				" \"eth.src\" field");
> +	}
> +	if (mask->type != RTE_BE16(0x0000)) {
> +		if (mask->type != RTE_BE16(0xffff))
> +			return rte_flow_error_set(error, ENOTSUP,
> +				RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
> +				"no support for partial mask on"
> +				" \"eth.type\" field");
> +		DRV_LOG(WARNING,
> +			"outer ethernet type field "
> +			"cannot be forced for VXLAN "
> +			"encapsulation, parameter ignored");
> +	}
> +	return 0;
> +}
> +
> +/**
> + * Validate VXLAN_ENCAP action RTE_FLOW_ITEM_TYPE_IPV4 item for E-Switch.
> + *
> + * @param[in] item
> + *   Pointer to the itemn structure.
> + * @param[out] error
> + *   Pointer to the error structure.
> + *
> + * @return
> + *   0 on success, a negative errno value otherwise and rte_errno is set.
> + **/
> +static int
> +flow_tcf_validate_vxlan_encap_ipv4(const struct rte_flow_item *item,
> +				   struct rte_flow_error *error)
> +{
> +	const struct rte_flow_item_ipv4 *spec = item->spec;
> +	const struct rte_flow_item_ipv4 *mask = item->mask;
> +
> +	if (!spec)
> +		/*
> +		 * Specification for L3 addresses cannot be empty
> +		 * because it is required by tunnel_key parameter.
> +		 */
> +		return rte_flow_error_set(error, EINVAL,
> +			RTE_FLOW_ERROR_TYPE_ITEM, item,
> +			"NULL outer L3 address specification "
> +			" for VXLAN encapsulation");
> +	if (!mask)
> +		mask = &rte_flow_item_ipv4_mask;
> +	if (mask->hdr.dst_addr != RTE_BE32(0x00000000)) {
> +		if (mask->hdr.dst_addr != RTE_BE32(0xffffffff))
> +			return rte_flow_error_set(error, ENOTSUP,
> +				RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
> +				"no support for partial mask on"
> +				" \"ipv4.hdr.dst_addr\" field");
> +		/* More L3 address validations can be put here. */
> +	} else {
> +		/*
> +		 * Kernel uses the destination L3 address to determine
> +		 * the routing path and obtain the L2 destination
> +		 * address, so L3 destination address must be
> +		 * specified in the tc rule.
> +		 */
> +		return rte_flow_error_set(error, EINVAL,
> +			RTE_FLOW_ERROR_TYPE_ITEM, item,
> +			"outer L3 destination address must be "
> +			"specified for VXLAN encapsulation");
> +	}
> +	if (mask->hdr.src_addr != RTE_BE32(0x00000000)) {
> +		if (mask->hdr.src_addr != RTE_BE32(0xffffffff))
> +			return rte_flow_error_set(error, ENOTSUP,
> +				RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
> +				"no support for partial mask on"
> +				" \"ipv4.hdr.src_addr\" field");
> +		/* More L3 address validations can be put here. */
> +	} else {
> +		/*
> +		 * Kernel uses the source L3 address to select the
> +		 * interface for egress encapsulated traffic, so
> +		 * it must be specified in the tc rule.
> +		 */
> +		return rte_flow_error_set(error, EINVAL,
> +			RTE_FLOW_ERROR_TYPE_ITEM, item,
> +			"outer L3 source address must be "
> +			"specified for VXLAN encapsulation");
> +	}
> +	return 0;
> +}
> +
> +/**
> + * Validate VXLAN_ENCAP action RTE_FLOW_ITEM_TYPE_IPV6 item for E-Switch.
> + *
> + * @param[in] item
> + *   Pointer to the itemn structure.
> + * @param[out] error
> + *   Pointer to the error structure.
> + *
> + * @return
> + *   0 on success, a negative errno value otherwise and rte_ernno is set.
> + **/
> +static int
> +flow_tcf_validate_vxlan_encap_ipv6(const struct rte_flow_item *item,
> +				   struct rte_flow_error *error)
> +{
> +	const struct rte_flow_item_ipv6 *spec = item->spec;
> +	const struct rte_flow_item_ipv6 *mask = item->mask;
> +
> +	if (!spec)
> +		/*
> +		 * Specification for L3 addresses cannot be empty
> +		 * because it is required by tunnel_key parameter.
> +		 */
> +		return rte_flow_error_set(error, EINVAL,
> +			RTE_FLOW_ERROR_TYPE_ITEM, item,
> +			"NULL outer L3 address specification "
> +			" for VXLAN encapsulation");
> +	if (!mask)
> +		mask = &rte_flow_item_ipv6_mask;
> +	if (memcmp(&mask->hdr.dst_addr,
> +		   &flow_tcf_mask_empty.ipv6.hdr.dst_addr,
> +		   sizeof(flow_tcf_mask_empty.ipv6.hdr.dst_addr))) {
> +		if (memcmp(&mask->hdr.dst_addr,
> +			   &rte_flow_item_ipv6_mask.hdr.dst_addr,
> +			   sizeof(rte_flow_item_ipv6_mask.hdr.dst_addr)))
> +			return rte_flow_error_set(error, ENOTSUP,
> +				RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
> +				"no support for partial mask on"
> +				" \"ipv6.hdr.dst_addr\" field");
> +		/* More L3 address validations can be put here. */
> +	} else {
> +		/*
> +		 * Kernel uses the destination L3 address to determine
> +		 * the routing path and obtain the L2 destination
> +		 * address (heigh or gate), so L3 destination address
> +		 * must be specified within the tc rule.
> +		 */
> +		return rte_flow_error_set(error, EINVAL,
> +			RTE_FLOW_ERROR_TYPE_ITEM, item,
> +			"outer L3 destination address must be "
> +			"specified for VXLAN encapsulation");
> +	}
> +	if (memcmp(&mask->hdr.src_addr,
> +		   &flow_tcf_mask_empty.ipv6.hdr.src_addr,
> +		   sizeof(flow_tcf_mask_empty.ipv6.hdr.src_addr))) {
> +		if (memcmp(&mask->hdr.src_addr,
> +			   &rte_flow_item_ipv6_mask.hdr.src_addr,
> +			   sizeof(rte_flow_item_ipv6_mask.hdr.src_addr)))
> +			return rte_flow_error_set(error, ENOTSUP,
> +				RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
> +				"no support for partial mask on"
> +				" \"ipv6.hdr.src_addr\" field");
> +		/* More L3 address validation can be put here. */
> +	} else {
> +		/*
> +		 * Kernel uses the source L3 address to select the
> +		 * interface for egress encapsulated traffic, so
> +		 * it must be specified in the tc rule.
> +		 */
> +		return rte_flow_error_set(error, EINVAL,
> +			RTE_FLOW_ERROR_TYPE_ITEM, item,
> +			"outer L3 source address must be "
> +			"specified for VXLAN encapsulation");
> +	}
> +	return 0;
> +}
> +
> +/**
> + * Validate VXLAN_ENCAP action RTE_FLOW_ITEM_TYPE_UDP item for E-Switch.
> + *
> + * @param[in] item
> + *   Pointer to the itemn structure.
> + * @param[out] error
> + *   Pointer to the error structure.
> + *
> + * @return
> + *   0 on success, a negative errno value otherwise and rte_ernno is set.
> + **/
> +static int
> +flow_tcf_validate_vxlan_encap_udp(const struct rte_flow_item *item,
> +				  struct rte_flow_error *error)
> +{
> +	const struct rte_flow_item_udp *spec = item->spec;
> +	const struct rte_flow_item_udp *mask = item->mask;
> +
> +	if (!spec)
> +		/*
> +		 * Specification for UDP ports cannot be empty
> +		 * because it is required by tunnel_key parameter.
> +		 */
> +		return rte_flow_error_set(error, EINVAL,
> +			RTE_FLOW_ERROR_TYPE_ITEM, item,
> +			"NULL UDP port specification "
> +			" for VXLAN encapsulation");
> +	if (!mask)
> +		mask = &rte_flow_item_udp_mask;
> +	if (mask->hdr.dst_port != RTE_BE16(0x0000)) {
> +		if (mask->hdr.dst_port != RTE_BE16(0xffff))
> +			return rte_flow_error_set(error, ENOTSUP,
> +				RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
> +				"no support for partial mask on"
> +				" \"udp.hdr.dst_port\" field");
> +		if (!spec->hdr.dst_port)
> +			return rte_flow_error_set(error, EINVAL,
> +				RTE_FLOW_ERROR_TYPE_ITEM, item,
> +				"zero encap remote UDP port");
> +	} else {
> +		return rte_flow_error_set(error, EINVAL,
> +			RTE_FLOW_ERROR_TYPE_ITEM, item,
> +			"outer UDP remote port must be "
> +			"specified for VXLAN encapsulation");
> +	}
> +	if (mask->hdr.src_port != RTE_BE16(0x0000)) {
> +		if (mask->hdr.src_port != RTE_BE16(0xffff))
> +			return rte_flow_error_set(error, ENOTSUP,
> +				RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
> +				"no support for partial mask on"
> +				" \"udp.hdr.src_port\" field");
> +		DRV_LOG(WARNING,
> +			"outer UDP source port cannot be "
> +			"forced for VXLAN encapsulation, "
> +			"parameter ignored");
> +	}
> +	return 0;
> +}
> +
> +/**
> + * Validate VXLAN_ENCAP action RTE_FLOW_ITEM_TYPE_VXLAN item for E-Switch.
> + *
> + * @param[in] item
> + *   Pointer to the itemn structure.
> + * @param[out] error
> + *   Pointer to the error structure.
> + *
> + * @return
> + *   0 on success, a negative errno value otherwise and rte_ernno is set.
> + **/
> +static int
> +flow_tcf_validate_vxlan_encap_vni(const struct rte_flow_item *item,
> +				  struct rte_flow_error *error)
> +{
> +	const struct rte_flow_item_vxlan *spec = item->spec;
> +	const struct rte_flow_item_vxlan *mask = item->mask;
> +
> +	if (!spec)
> +		/* Outer VNI is required by tunnel_key parameter. */
> +		return rte_flow_error_set(error, EINVAL,
> +			RTE_FLOW_ERROR_TYPE_ITEM, item,
> +			"NULL VNI specification "
> +			" for VXLAN encapsulation");
> +	if (!mask)
> +		mask = &rte_flow_item_vxlan_mask;
> +	if (mask->vni[0] != 0 ||
> +	    mask->vni[1] != 0 ||
> +	    mask->vni[2] != 0) {

Can be one line (see the sketch below).

> +		if (mask->vni[0] != 0xff ||
> +		    mask->vni[1] != 0xff ||
> +		    mask->vni[2] != 0xff)

Same here.

> +			return rte_flow_error_set(error, ENOTSUP,
> +				RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
> +				"no support for partial mask on"
> +				" \"vxlan.vni\" field");
> +		if (spec->vni[0] == 0 &&
> +		    spec->vni[1] == 0 &&
> +		    spec->vni[2] == 0)
> +			return rte_flow_error_set(error, EINVAL,
> +				RTE_FLOW_ERROR_TYPE_ITEM, item,
> +				"VXLAN vni cannot be 0");

It is already checked by mlx5_flow_validate_item_vxlan().
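For reference, the collapsed conditions could look like this (untested
sketch of the same checks):

	if (mask->vni[0] != 0 || mask->vni[1] != 0 || mask->vni[2] != 0) {
		if (mask->vni[0] != 0xff || mask->vni[1] != 0xff ||
		    mask->vni[2] != 0xff)
			return rte_flow_error_set(error, ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
				"no support for partial mask on"
				" \"vxlan.vni\" field");
		...
	}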
> +	} else {
> +		return rte_flow_error_set(error, EINVAL,
> +					  RTE_FLOW_ERROR_TYPE_ITEM,
> +					  item,
> +					  "outer VNI must be specified "
> +					  "for VXLAN encapsulation");
> +	}

Already checked in mlx5_flow_validate_item_vxlan().

> +	return 0;
> +}
> +
> +/**
> + * Validate VXLAN_ENCAP action item list for E-Switch.
> + *
> + * @param[in] action
> + *   Pointer to the VXLAN_ENCAP action structure.
> + * @param[out] error
> + *   Pointer to the error structure.
> + *
> + * @return
> + *   0 on success, a negative errno value otherwise and rte_ernno is set.
> + **/
> +static int
> +flow_tcf_validate_vxlan_encap(const struct rte_flow_action *action,
> +			      struct rte_flow_error *error)
> +{
> +	const struct rte_flow_item *items;
> +	int ret;
> +	uint32_t item_flags = 0;
> +
> +	assert(action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP);
> +	if (!action->conf)
> +		return rte_flow_error_set
> +			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
> +			 action, "Missing VXLAN tunnel "
> +				 "action configuration");
> +	items = ((const struct rte_flow_action_vxlan_encap *)
> +		 action->conf)->definition;
> +	if (!items)
> +		return rte_flow_error_set
> +			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
> +			 action, "Missing VXLAN tunnel "
> +				 "encapsulation parameters");
> +	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
> +		switch (items->type) {
> +		case RTE_FLOW_ITEM_TYPE_VOID:
> +			break;
> +		case RTE_FLOW_ITEM_TYPE_ETH:
> +			ret = mlx5_flow_validate_item_eth(items, item_flags,
> +							  error);
> +			if (ret < 0)
> +				return ret;
> +			ret = flow_tcf_validate_vxlan_encap_eth(items, error);
> +			if (ret < 0)
> +				return ret;
> +			item_flags |= MLX5_FLOW_LAYER_OUTER_L2;
> +			break;
> +			break;
> +		case RTE_FLOW_ITEM_TYPE_IPV4:
> +			ret = mlx5_flow_validate_item_ipv4(items, item_flags,
> +							   error);
> +			if (ret < 0)
> +				return ret;
> +			ret = flow_tcf_validate_vxlan_encap_ipv4(items, error);
> +			if (ret < 0)
> +				return ret;
> +			item_flags |= MLX5_FLOW_LAYER_OUTER_L3_IPV4;
> +			break;
> +		case RTE_FLOW_ITEM_TYPE_IPV6:
> +			ret = mlx5_flow_validate_item_ipv6(items, item_flags,
> +							   error);
> +			if (ret < 0)
> +				return ret;
> +			ret = flow_tcf_validate_vxlan_encap_ipv6(items, error);
> +			if (ret < 0)
> +				return ret;
> +			item_flags |= MLX5_FLOW_LAYER_OUTER_L3_IPV6;
> +			break;
> +		case RTE_FLOW_ITEM_TYPE_UDP:
> +			ret = mlx5_flow_validate_item_udp(items, item_flags,
> +							  0xFF, error);
> +			if (ret < 0)
> +				return ret;
> +			ret = flow_tcf_validate_vxlan_encap_udp(items, error);
> +			if (ret < 0)
> +				return ret;
> +			item_flags |= MLX5_FLOW_LAYER_OUTER_L4_UDP;
> +			break;
> +		case RTE_FLOW_ITEM_TYPE_VXLAN:
> +			ret = mlx5_flow_validate_item_vxlan(items,
> +							    item_flags, error);
> +			if (ret < 0)
> +				return ret;
> +			ret = flow_tcf_validate_vxlan_encap_vni(items, error);
> +			if (ret < 0)
> +				return ret;
> +			item_flags |= MLX5_FLOW_LAYER_VXLAN;
> +			break;
> +		default:
> +			return rte_flow_error_set(error, ENOTSUP,
> +				RTE_FLOW_ERROR_TYPE_ITEM, items,
> +				"VXLAN encap item not supported");
> +		}
> +	}
> +	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3))
> +		return rte_flow_error_set(error, EINVAL,
> +			RTE_FLOW_ERROR_TYPE_ACTION, action,
> +			"no outer L3 layer found"
> +			" for VXLAN encapsulation");
> +	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
> +		return rte_flow_error_set(error, EINVAL,
> +			RTE_FLOW_ERROR_TYPE_ACTION, action,
> +			"no outer L4 layer found"

L4 -> UDP? The flag checked here is specifically
MLX5_FLOW_LAYER_OUTER_L4_UDP, so the message could say "no outer UDP layer
found".
> +			" for VXLAN encapsulation");
> +	if (!(item_flags & MLX5_FLOW_LAYER_VXLAN))
> +		return rte_flow_error_set(error, EINVAL,
> +			RTE_FLOW_ERROR_TYPE_ACTION, action,
> +			"no VXLAN VNI found"
> +			" for VXLAN encapsulation");
> +	return 0;
> +}
> +
> +/**
> + * Validate VXLAN_DECAP action outer tunnel items for E-Switch.
> + *
> + * @param[in] item_flags
> + *   Mask of provided outer tunnel parameters
> + * @param[in] ipv4
> + *   Outer IPv4 address item (if any, NULL otherwise).
> + * @param[in] ipv6
> + *   Outer IPv6 address item (if any, NULL otherwise).
> + * @param[in] udp
> + *   Outer UDP layer item (if any, NULL otherwise).
> + * @param[out] error
> + *   Pointer to the error structure.
> + *
> + * @return
> + *   0 on success, a negative errno value otherwise and rte_ernno is set.
> + **/
> +static int
> +flow_tcf_validate_vxlan_decap(uint32_t item_flags,
> +			      const struct rte_flow_action *action,
> +			      const struct rte_flow_item *ipv4,
> +			      const struct rte_flow_item *ipv6,
> +			      const struct rte_flow_item *udp,
> +			      struct rte_flow_error *error)
> +{
> +	if (!ipv4 && !ipv6)
> +		return rte_flow_error_set(error, EINVAL,
> +			RTE_FLOW_ERROR_TYPE_ACTION, action,
> +			"no outer L3 layer found"
> +			" for VXLAN decapsulation");
> +	if (ipv4) {
> +		const struct rte_flow_item_ipv4 *spec = ipv4->spec;
> +		const struct rte_flow_item_ipv4 *mask = ipv4->mask;
> +
> +		if (!spec)
> +			/*
> +			 * Specification for L3 addresses cannot be empty
> +			 * because it is required as decap parameter.
> +			 */
> +			return rte_flow_error_set(error, EINVAL,
> +				RTE_FLOW_ERROR_TYPE_ITEM, ipv4,
> +				"NULL outer L3 address specification "
> +				" for VXLAN decapsulation");
> +		if (!mask)
> +			mask = &rte_flow_item_ipv4_mask;
> +		if (mask->hdr.dst_addr != RTE_BE32(0x00000000)) {
> +			if (mask->hdr.dst_addr != RTE_BE32(0xffffffff))
> +				return rte_flow_error_set(error, ENOTSUP,
> +					RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
> +					"no support for partial mask on"
> +					" \"ipv4.hdr.dst_addr\" field");
> +			/* More L3 address validations can be put here. */
> +		} else {
> +			/*
> +			 * Kernel uses the destination L3 address
> +			 * to determine the ingress network interface
> +			 * for traffic being decapculated.
> +			 */
> +			return rte_flow_error_set(error, EINVAL,
> +				RTE_FLOW_ERROR_TYPE_ITEM, ipv4,
> +				"outer L3 destination address must be "
> +				"specified for VXLAN decapsulation");
> +		}
> +		/* Source L3 address is optional for decap. */
> +		if (mask->hdr.src_addr != RTE_BE32(0x00000000))
> +			if (mask->hdr.src_addr != RTE_BE32(0xffffffff))
> +				return rte_flow_error_set(error, ENOTSUP,
> +					RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
> +					"no support for partial mask on"
> +					" \"ipv4.hdr.src_addr\" field");
> +	} else {
> +		const struct rte_flow_item_ipv6 *spec = ipv6->spec;
> +		const struct rte_flow_item_ipv6 *mask = ipv6->mask;
> +
> +		if (!spec)
> +			/*
> +			 * Specification for L3 addresses cannot be empty
> +			 * because it is required as decap parameter.
> +			 */
> +			return rte_flow_error_set(error, EINVAL,
> +				RTE_FLOW_ERROR_TYPE_ITEM, ipv6,
> +				"NULL outer L3 address specification "
> +				" for VXLAN decapsulation");
> +		if (!mask)
> +			mask = &rte_flow_item_ipv6_mask;
> +		if (memcmp(&mask->hdr.dst_addr,
> +			   &flow_tcf_mask_empty.ipv6.hdr.dst_addr,
> +			   sizeof(flow_tcf_mask_empty.ipv6.hdr.dst_addr))) {
> +			if (memcmp(&mask->hdr.dst_addr,
> +				   &rte_flow_item_ipv6_mask.hdr.dst_addr,
> +				   sizeof(rte_flow_item_ipv6_mask.hdr.dst_addr)))
> +				return rte_flow_error_set(error, ENOTSUP,
> +					RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
> +					"no support for partial mask on"
> +					" \"ipv6.hdr.dst_addr\" field");
> +			/* More L3 address validations can be put here. */
> +		} else {
> +			/*
> +			 * Kernel uses the destination L3 address
> +			 * to determine the ingress network interface
> +			 * for traffic being decapculated.
> +			 */
> +			return rte_flow_error_set(error, EINVAL,
> +				RTE_FLOW_ERROR_TYPE_ITEM, ipv6,
> +				"outer L3 destination address must be "
> +				"specified for VXLAN decapsulation");
> +		}
> +		/* Source L3 address is optional for decap. */
> +		if (memcmp(&mask->hdr.src_addr,
> +			   &flow_tcf_mask_empty.ipv6.hdr.src_addr,
> +			   sizeof(flow_tcf_mask_empty.ipv6.hdr.src_addr))) {
> +			if (memcmp(&mask->hdr.src_addr,
> +				   &rte_flow_item_ipv6_mask.hdr.src_addr,
> +				   sizeof(mask->hdr.src_addr)))
> +				return rte_flow_error_set(error, ENOTSUP,
> +					RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
> +					"no support for partial mask on"
> +					" \"ipv6.hdr.src_addr\" field");
> +		}
> +	}
> +	if (!udp) {
> +		return rte_flow_error_set(error, EINVAL,
> +			RTE_FLOW_ERROR_TYPE_ACTION, action,
> +			"no outer L4 layer found"
> +			" for VXLAN decapsulation");
> +	} else {
> +		const struct rte_flow_item_udp *spec = udp->spec;
> +		const struct rte_flow_item_udp *mask = udp->mask;
> +
> +		if (!spec)
> +			/*
> +			 * Specification for UDP ports cannot be empty
> +			 * because it is required as decap parameter.
> +			 */
> +			return rte_flow_error_set(error, EINVAL,
> +				RTE_FLOW_ERROR_TYPE_ITEM, udp,
> +				"NULL UDP port specification "
> +				" for VXLAN decapsulation");
> +		if (!mask)
> +			mask = &rte_flow_item_udp_mask;
> +		if (mask->hdr.dst_port != RTE_BE16(0x0000)) {
> +			if (mask->hdr.dst_port != RTE_BE16(0xffff))
> +				return rte_flow_error_set(error, ENOTSUP,
> +					RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
> +					"no support for partial mask on"
> +					" \"udp.hdr.dst_port\" field");
> +			if (!spec->hdr.dst_port)
> +				return rte_flow_error_set(error, EINVAL,
> +					RTE_FLOW_ERROR_TYPE_ITEM, udp,
> +					"zero decap local UDP port");
> +		} else {
> +			return rte_flow_error_set(error, EINVAL,
> +				RTE_FLOW_ERROR_TYPE_ITEM, udp,
> +				"outer UDP destination port must be "
> +				"specified for VXLAN decapsulation");
> +		}
> +		if (mask->hdr.src_port != RTE_BE16(0x0000)) {
> +			if (mask->hdr.src_port != RTE_BE16(0xffff))
> +				return rte_flow_error_set(error, ENOTSUP,
> +					RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
> +					"no support for partial mask on"
> +					" \"udp.hdr.src_port\" field");
> +			DRV_LOG(WARNING,
> +				"outer UDP local port cannot be "
> +				"forced for VXLAN encapsulation, "
> +				"parameter ignored");
> +		}
> +	}
> +	if (!(item_flags & MLX5_FLOW_LAYER_VXLAN))
> +		return rte_flow_error_set(error, EINVAL,
> +			RTE_FLOW_ERROR_TYPE_ACTION, action,
> +			"no VXLAN VNI found"
> +			" for VXLAN decapsulation");
> +	/* VNI is already validated, extra check can be put here. */
> +	return 0;
> +}
> +
> +/**
>   * Validate flow for E-Switch.
>   *
>   * @param[in] priv
> @@ -974,7 +1588,8 @@ struct pedit_parser {
>  		const struct rte_flow_item_ipv6 *ipv6;
>  		const struct rte_flow_item_tcp *tcp;
>  		const struct rte_flow_item_udp *udp;
> -	} spec, mask;
> +		const struct rte_flow_item_vxlan *vxlan;
> +	} spec, mask;
>  	union {
>  		const struct rte_flow_action_port_id *port_id;
>  		const struct rte_flow_action_jump *jump;
> @@ -983,9 +1598,13 @@ struct pedit_parser {
>  			of_set_vlan_vid;
>  		const struct rte_flow_action_of_set_vlan_pcp *
>  			of_set_vlan_pcp;
> +		const struct rte_flow_action_vxlan_encap *vxlan_encap;
>  		const struct rte_flow_action_set_ipv4 *set_ipv4;
>  		const struct rte_flow_action_set_ipv6 *set_ipv6;
>  	} conf;
> +	const struct rte_flow_item *ipv4 = NULL; /* storage to check */
> +	const struct rte_flow_item *ipv6 = NULL; /* outer tunnel. */
> +	const struct rte_flow_item *udp = NULL; /* parameters. */
>  	uint32_t item_flags = 0;
>  	uint32_t action_flags = 0;
>  	uint8_t next_protocol = -1;
> @@ -1114,7 +1733,6 @@ struct pedit_parser {
>  							   error);
>  			if (ret < 0)
>  				return ret;
> -			item_flags |= MLX5_FLOW_LAYER_OUTER_L3_IPV4;
>  			mask.ipv4 = flow_tcf_item_mask
>  				(items, &rte_flow_item_ipv4_mask,
>  				 &flow_tcf_mask_supported.ipv4,
> @@ -1135,13 +1753,22 @@ struct pedit_parser {
>  				next_protocol =
>  					((const struct rte_flow_item_ipv4 *)
>  					 (items->spec))->hdr.next_proto_id;
> +			if (item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV4) {
> +				/*
> +				 * Multiple outer items are not allowed as
> +				 * tunnel parameters, will raise an error later.
> +				 */
> +				ipv4 = NULL;

Can't it be inner then?

	flow create 1 ingress transfer
		pattern eth src is 66:77:88:99:aa:bb
			dst is 00:11:22:33:44:55 / ipv4 src is 2.2.2.2
			dst is 1.1.1.1 / udp src is 4789 dst is 4242 /
			vxlan vni is 0x112233 / eth / ipv6 /
			tcp dst is 42 / end
		actions vxlan_decap / port_id id 2 / end

Is this flow supported by linux tcf? I took this example from Adrien's
patch - "[8/8] net/mlx5: add VXLAN decap support to switch flow rules". If
so, isn't it possible to have an inner L3 layer (MLX5_FLOW_LAYER_INNER_*)?
If not, you should return an error in this case. I don't see any code to
check for redundant outer items. Did I miss something?

BTW, for the tunneled items, why don't you follow the code of Verbs
(mlx5_flow_verbs.c) and DV (mlx5_flow_dv.c)? For tcf, it is the first time
to add a tunneled item, but Verbs/DV already have validation code for
tunnels, so you can reuse the existing code. In
flow_tcf_validate_vxlan_decap(), not every validation is VXLAN-specific;
some of it can be common code.

And if you need to know whether there's a VXLAN decap action prior to
outer header item validation, you can relocate the code - action
validation first and item validation next, as there's no dependency yet in
the current code. Defining ipv4, ipv6 and udp seems to make the code path
more complex. For example, you can just call the vxlan decap item
validation (by splitting flow_tcf_validate_vxlan_decap()) at this point,
like:

	if (action_flags & MLX5_FLOW_ACTION_VXLAN_DECAP)
		ret = flow_tcf_validate_vxlan_decap_ipv4(...);
	...

Same for other items.
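To illustrate, a rough sketch of what I mean (helper names are
hypothetical, assuming flow_tcf_validate_vxlan_decap() is split into
per-item helpers):

	case RTE_FLOW_ITEM_TYPE_IPV4:
		ret = mlx5_flow_validate_item_ipv4(items, item_flags,
						   error);
		if (ret < 0)
			return ret;
		if (action_flags & MLX5_FLOW_ACTION_VXLAN_DECAP) {
			ret = flow_tcf_validate_vxlan_decap_ipv4(items,
								 error);
			if (ret < 0)
				return ret;
		}
		item_flags |= MLX5_FLOW_LAYER_OUTER_L3_IPV4;
		break;

That would remove the need for the extra ipv4/ipv6/udp pointers and keep
all the per-item checks in one place.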
> +			} else {
> +				ipv4 = items;
> +				item_flags |= MLX5_FLOW_LAYER_OUTER_L3_IPV4;
> +			}
>  			break;
>  		case RTE_FLOW_ITEM_TYPE_IPV6:
>  			ret = mlx5_flow_validate_item_ipv6(items, item_flags,
>  							   error);
>  			if (ret < 0)
>  				return ret;
> -			item_flags |= MLX5_FLOW_LAYER_OUTER_L3_IPV6;
>  			mask.ipv6 = flow_tcf_item_mask
>  				(items, &rte_flow_item_ipv6_mask,
>  				 &flow_tcf_mask_supported.ipv6,
> @@ -1162,13 +1789,22 @@ struct pedit_parser {
>  				next_protocol =
>  					((const struct rte_flow_item_ipv6 *)
>  					 (items->spec))->hdr.proto;
> +			if (item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV6) {
> +				/*
> +				 *Multiple outer items are not allowed as
> +				 * tunnel parameters
> +				 */
> +				ipv6 = NULL;
> +			} else {
> +				ipv6 = items;
> +				item_flags |= MLX5_FLOW_LAYER_OUTER_L3_IPV6;
> +			}
>  			break;
>  		case RTE_FLOW_ITEM_TYPE_UDP:
>  			ret = mlx5_flow_validate_item_udp(items, item_flags,
>  							  next_protocol, error);
>  			if (ret < 0)
>  				return ret;
> -			item_flags |= MLX5_FLOW_LAYER_OUTER_L4_UDP;
>  			mask.udp = flow_tcf_item_mask
>  				(items, &rte_flow_item_udp_mask,
>  				 &flow_tcf_mask_supported.udp,
> @@ -1177,6 +1813,12 @@ struct pedit_parser {
>  				 error);
>  			if (!mask.udp)
>  				return -rte_errno;
> +			if (item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP) {
> +				udp = NULL;
> +			} else {
> +				udp = items;
> +				item_flags |= MLX5_FLOW_LAYER_OUTER_L4_UDP;
> +			}
>  			break;
>  		case RTE_FLOW_ITEM_TYPE_TCP:
>  			ret = mlx5_flow_validate_item_tcp
> @@ -1186,7 +1828,6 @@ struct pedit_parser {
>  							   error);
>  			if (ret < 0)
>  				return ret;
> -			item_flags |= MLX5_FLOW_LAYER_OUTER_L4_TCP;
>  			mask.tcp = flow_tcf_item_mask
>  				(items, &rte_flow_item_tcp_mask,
>  				 &flow_tcf_mask_supported.tcp,
> @@ -1195,11 +1836,36 @@ struct pedit_parser {
>  				 error);
>  			if (!mask.tcp)
>  				return -rte_errno;
> +			item_flags |= MLX5_FLOW_LAYER_OUTER_L4_TCP;
> +			break;
> +		case RTE_FLOW_ITEM_TYPE_VXLAN:
> +			ret = mlx5_flow_validate_item_vxlan(items,
> +							    item_flags, error);
> +			if (ret < 0)
> +				return ret;
> +			mask.vxlan = flow_tcf_item_mask
> +				(items, &rte_flow_item_vxlan_mask,
> +				 &flow_tcf_mask_supported.vxlan,
> +				 &flow_tcf_mask_empty.vxlan,
> +				 sizeof(flow_tcf_mask_supported.vxlan),
> +				 error);
> +			if (!mask.vxlan)
> +				return -rte_errno;
> +			if (mask.vxlan->vni[0] != 0xff ||
> +			    mask.vxlan->vni[1] != 0xff ||
> +			    mask.vxlan->vni[2] != 0xff)
> +				return rte_flow_error_set
> +					(error, ENOTSUP,
> +					 RTE_FLOW_ERROR_TYPE_ITEM_MASK,
> +					 mask.vxlan,
> +					 "no support for partial or "
> +					 "empty mask on \"vxlan.vni\" field");
> +			item_flags |= MLX5_FLOW_LAYER_VXLAN;
>  			break;
>  		default:
>  			return rte_flow_error_set(error, ENOTSUP,
>  						  RTE_FLOW_ERROR_TYPE_ITEM,
> -						  NULL, "item not supported");
> +						  items, "item not supported");
>  		}
>  	}
>  	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
> @@ -1271,6 +1937,33 @@ struct pedit_parser {
>  			   " set action must follow push action");
>  			current_action_flag = MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
>  			break;
> +		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
> +			if (action_flags & (MLX5_ACTION_VXLAN_ENCAP
> +					    | MLX5_ACTION_VXLAN_DECAP))
> +				return rte_flow_error_set
> +					(error, ENOTSUP,
> +					 RTE_FLOW_ERROR_TYPE_ACTION, actions,
> +					 "can't have multiple vxlan actions");
> +			ret = flow_tcf_validate_vxlan_encap(actions, error);
> +			if (ret < 0)
> +				return ret;
> +			action_flags |= MLX5_ACTION_VXLAN_ENCAP;

Recently, current_action_flag has been added for the PEDIT actions. Please
refer to the code above and make this compliant with it.
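I.e. something like this (sketch only; assumes defining a new
MLX5_FLOW_ACTION_VXLAN_ENCAP flag alongside the existing
MLX5_FLOW_ACTION_* values):

		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
			/* Collision check is left to the common
			 * current_action_flag handling. */
			current_action_flag = MLX5_FLOW_ACTION_VXLAN_ENCAP;
			ret = flow_tcf_validate_vxlan_encap(actions, error);
			if (ret < 0)
				return ret;
			break;

Then the "multiple vxlan actions" check can rely on the shared
current_action_flag/action_flags logic instead of the explicit test here.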
> +			break;
> +		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
> +			if (action_flags & (MLX5_ACTION_VXLAN_ENCAP
> +					    | MLX5_ACTION_VXLAN_DECAP))
> +				return rte_flow_error_set
> +					(error, ENOTSUP,
> +					 RTE_FLOW_ERROR_TYPE_ACTION, actions,
> +					 "can't have multiple vxlan actions");
> +			ret = flow_tcf_validate_vxlan_decap(item_flags,
> +							    actions,
> +							    ipv4, ipv6, udp,
> +							    error);
> +			if (ret < 0)
> +				return ret;
> +			action_flags |= MLX5_ACTION_VXLAN_DECAP;
> +			break;
>  		case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
>  			current_action_flag = MLX5_FLOW_ACTION_SET_IPV4_SRC;
>  			break;
> @@ -1391,6 +2084,12 @@ struct pedit_parser {
>  		return rte_flow_error_set(error, EINVAL,
>  					  RTE_FLOW_ERROR_TYPE_ACTION, actions,
>  					  "no fate action is found");
> +	if ((item_flags & MLX5_FLOW_LAYER_VXLAN) &&
> +	    !(action_flags & MLX5_ACTION_VXLAN_DECAP))
> +		return rte_flow_error_set(error, ENOTSUP,
> +					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
> +					  "VNI pattern should be followed "
> +					  " by VXLAN_DECAP action");
>  	return 0;
>  }
>