* [PATCH net v2 0/2] xdp_rxq_info_reg fixes for mlx5e
@ 2023-01-30 20:13 Maxim Mikityanskiy
  2023-01-30 20:13 ` [PATCH net v2 1/2] net/mlx5e: XDP, Allow growing tail for XDP multi buffer Maxim Mikityanskiy
  2023-01-30 20:13 ` [PATCH net v2 2/2] net/mlx5e: xsk: Set napi_id to support busy polling on XSK RQ Maxim Mikityanskiy
  0 siblings, 2 replies; 4+ messages in thread
From: Maxim Mikityanskiy @ 2023-01-30 20:13 UTC (permalink / raw)
  To: netdev, Saeed Mahameed
  Cc: David S. Miller, Eric Dumazet, Jakub Kicinski, Paolo Abeni,
	Gal Pressman, Tariq Toukan, Maxim Mikityanskiy

Two small fixes that add parameters to the xdp_rxq_info_reg calls that were
missed in older commits.

v2 changes:

Let en/params.c decide the right size for xdp_frag_size, rather than making
en_main.c aware of the implementation details.
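
For context, the two registration helpers in include/net/xdp.h are declared
roughly as follows at the time of writing (paraphrased here for reference;
see the header for the authoritative declarations). xdp_rxq_info_reg() is a
thin wrapper that registers the rxq with frag_size = 0, which is why both
fixes end up touching these calls:

int __xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq, struct net_device *dev,
		       u32 queue_index, unsigned int napi_id, u32 frag_size);

static inline int
xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq, struct net_device *dev,
		 u32 queue_index, unsigned int napi_id)
{
	return __xdp_rxq_info_reg(xdp_rxq, dev, queue_index, napi_id, 0);
}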

Maxim Mikityanskiy (2):
  net/mlx5e: XDP, Allow growing tail for XDP multi buffer
  net/mlx5e: xsk: Set napi_id to support busy polling on XSK RQ

 drivers/net/ethernet/mellanox/mlx5/core/en/params.c    | 9 +++++++--
 drivers/net/ethernet/mellanox/mlx5/core/en/params.h    | 1 +
 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c | 2 +-
 drivers/net/ethernet/mellanox/mlx5/core/en_main.c      | 7 ++++---
 4 files changed, 13 insertions(+), 6 deletions(-)

-- 
2.39.1



* [PATCH net v2 1/2] net/mlx5e: XDP, Allow growing tail for XDP multi buffer
  2023-01-30 20:13 [PATCH net v2 0/2] xdp_rxq_info_reg fixes for mlx5e Maxim Mikityanskiy
@ 2023-01-30 20:13 ` Maxim Mikityanskiy
  2023-01-31  5:46   ` Tariq Toukan
  2023-01-30 20:13 ` [PATCH net v2 2/2] net/mlx5e: xsk: Set napi_id to support busy polling on XSK RQ Maxim Mikityanskiy
  1 sibling, 1 reply; 4+ messages in thread
From: Maxim Mikityanskiy @ 2023-01-30 20:13 UTC (permalink / raw)
  To: netdev, Saeed Mahameed
  Cc: David S. Miller, Eric Dumazet, Jakub Kicinski, Paolo Abeni,
	Gal Pressman, Tariq Toukan, Maxim Mikityanskiy

The cited commits missed passing frag_size to __xdp_rxq_info_reg, which
is required by bpf_xdp_adjust_tail to support growing the tail pointer
in fragmented packets. Pass the missing parameter when the current RQ
mode allows XDP multi buffer.
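
For illustration, a minimal multi-buffer XDP program (not part of this patch;
the program name and the 128-byte delta are arbitrary) that relies on this
behavior: growing the tail with bpf_xdp_adjust_tail() only succeeds on a
fragmented frame when the driver registered the rxq with a non-zero
frag_size.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp.frags")
int xdp_grow_tail(struct xdp_md *ctx)
{
	/* Try to add 128 bytes of tail room to the last fragment. This
	 * fails on fragmented frames when the driver registered the rxq
	 * with frag_size == 0, which is the case the patch fixes.
	 */
	if (bpf_xdp_adjust_tail(ctx, 128) < 0)
		return XDP_DROP;	/* could not grow; drop for visibility in this demo */

	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";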

Fixes: ea5d49bdae8b ("net/mlx5e: Add XDP multi buffer support to the non-linear legacy RQ")
Fixes: 9cb9482ef10e ("net/mlx5e: Use fragments of the same size in non-linear legacy RQ with XDP")
Signed-off-by: Maxim Mikityanskiy <maxtram95@gmail.com>
Cc: Tariq Toukan <tariqt@nvidia.com>
---
 drivers/net/ethernet/mellanox/mlx5/core/en/params.c | 9 +++++++--
 drivers/net/ethernet/mellanox/mlx5/core/en/params.h | 1 +
 drivers/net/ethernet/mellanox/mlx5/core/en_main.c   | 7 ++++---
 3 files changed, 12 insertions(+), 5 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index 4ad19c981294..151585302cd1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -662,7 +662,8 @@ static int mlx5e_max_nonlinear_mtu(int first_frag_size, int frag_size, bool xdp)
 static int mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
 				     struct mlx5e_params *params,
 				     struct mlx5e_xsk_param *xsk,
-				     struct mlx5e_rq_frags_info *info)
+				     struct mlx5e_rq_frags_info *info,
+				     u32 *xdp_frag_size)
 {
 	u32 byte_count = MLX5E_SW2HW_MTU(params, params->sw_mtu);
 	int frag_size_max = DEFAULT_FRAG_SIZE;
@@ -737,6 +738,9 @@ static int mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
 	}
 	info->num_frags = i;
 
+	if (info->num_frags > 1 && params->xdp_prog)
+		*xdp_frag_size = PAGE_SIZE;
+
 	/* The last fragment of WQE with index 2*N may share the page with the
 	 * first fragment of WQE with index 2*N+1 in certain cases. If WQE 2*N+1
 	 * is not completed yet, WQE 2*N must not be allocated, as it's
@@ -917,7 +921,8 @@ int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
 	}
 	default: /* MLX5_WQ_TYPE_CYCLIC */
 		MLX5_SET(wq, wq, log_wq_sz, params->log_rq_mtu_frames);
-		err = mlx5e_build_rq_frags_info(mdev, params, xsk, &param->frags_info);
+		err = mlx5e_build_rq_frags_info(mdev, params, xsk, &param->frags_info,
+						&param->xdp_frag_size);
 		if (err)
 			return err;
 		ndsegs = param->frags_info.num_frags;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
index c9be6eb88012..e5930fe752e5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
@@ -24,6 +24,7 @@ struct mlx5e_rq_param {
 	u32                        rqc[MLX5_ST_SZ_DW(rqc)];
 	struct mlx5_wq_param       wq;
 	struct mlx5e_rq_frags_info frags_info;
+	u32                        xdp_frag_size;
 };
 
 struct mlx5e_sq_param {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index abcc614b6191..d02af93035b2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -576,7 +576,7 @@ static void mlx5e_free_mpwqe_rq_drop_page(struct mlx5e_rq *rq)
 }
 
 static int mlx5e_init_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
-			     struct mlx5e_rq *rq)
+			     u32 xdp_frag_size, struct mlx5e_rq *rq)
 {
 	struct mlx5_core_dev *mdev = c->mdev;
 	int err;
@@ -599,7 +599,8 @@ static int mlx5e_init_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *param
 	if (err)
 		return err;
 
-	return xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq->ix, c->napi.napi_id);
+	return __xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq->ix, c->napi.napi_id,
+				  xdp_frag_size);
 }
 
 static int mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev,
@@ -2214,7 +2215,7 @@ static int mlx5e_open_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *param
 {
 	int err;
 
-	err = mlx5e_init_rxq_rq(c, params, &c->rq);
+	err = mlx5e_init_rxq_rq(c, params, rq_params->xdp_frag_size, &c->rq);
 	if (err)
 		return err;
 
-- 
2.39.1



* [PATCH net v2 2/2] net/mlx5e: xsk: Set napi_id to support busy polling on XSK RQ
  2023-01-30 20:13 [PATCH net v2 0/2] xdp_rxq_info_reg fixes for mlx5e Maxim Mikityanskiy
  2023-01-30 20:13 ` [PATCH net v2 1/2] net/mlx5e: XDP, Allow growing tail for XDP multi buffer Maxim Mikityanskiy
@ 2023-01-30 20:13 ` Maxim Mikityanskiy
  1 sibling, 0 replies; 4+ messages in thread
From: Maxim Mikityanskiy @ 2023-01-30 20:13 UTC (permalink / raw)
  To: netdev, Saeed Mahameed
  Cc: David S. Miller, Eric Dumazet, Jakub Kicinski, Paolo Abeni,
	Gal Pressman, Tariq Toukan, Maxim Mikityanskiy

The cited commit missed setting napi_id on XSK RQs; it only set it on
regular RQs. Add the missing part to support socket busy polling on XSK
RQs.
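
For illustration, a userspace sketch (not part of this patch; the timeout
and budget values are arbitrary, and the fallback socket option constants
are the include/uapi/asm-generic/socket.h values) of how an application
opts an AF_XDP socket into busy polling. This only takes effect when the
driver reports the channel's napi_id for the XSK RQ, which is what this
change adds:

#include <sys/socket.h>

#ifndef SO_PREFER_BUSY_POLL
#define SO_PREFER_BUSY_POLL	69	/* asm-generic value */
#define SO_BUSY_POLL_BUDGET	70	/* asm-generic value */
#endif

static int enable_busy_poll(int xsk_fd)
{
	int prefer = 1;		/* prefer busy polling over interrupts */
	int usecs = 20;		/* busy-poll for up to 20 us per call */
	int budget = 64;	/* packets processed per busy-poll cycle */

	if (setsockopt(xsk_fd, SOL_SOCKET, SO_PREFER_BUSY_POLL,
		       &prefer, sizeof(prefer)) < 0)
		return -1;
	if (setsockopt(xsk_fd, SOL_SOCKET, SO_BUSY_POLL,
		       &usecs, sizeof(usecs)) < 0)
		return -1;
	return setsockopt(xsk_fd, SOL_SOCKET, SO_BUSY_POLL_BUDGET,
			  &budget, sizeof(budget));
}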

Fixes: a2740f529da2 ("net/mlx5e: xsk: Set napi_id to support busy polling")
Signed-off-by: Maxim Mikityanskiy <maxtram95@gmail.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
---
 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
index ff03c43833bb..53c93d1daa7e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
@@ -71,7 +71,7 @@ static int mlx5e_init_xsk_rq(struct mlx5e_channel *c,
 	if (err)
 		return err;
 
-	return  xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq_xdp_ix, 0);
+	return xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq_xdp_ix, c->napi.napi_id);
 }
 
 static int mlx5e_open_xsk_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
-- 
2.39.1



* Re: [PATCH net v2 1/2] net/mlx5e: XDP, Allow growing tail for XDP multi buffer
  2023-01-30 20:13 ` [PATCH net v2 1/2] net/mlx5e: XDP, Allow growing tail for XDP multi buffer Maxim Mikityanskiy
@ 2023-01-31  5:46   ` Tariq Toukan
  0 siblings, 0 replies; 4+ messages in thread
From: Tariq Toukan @ 2023-01-31  5:46 UTC (permalink / raw)
  To: Maxim Mikityanskiy, netdev, Saeed Mahameed
  Cc: David S. Miller, Eric Dumazet, Jakub Kicinski, Paolo Abeni,
	Gal Pressman, Tariq Toukan



On 30/01/2023 22:13, Maxim Mikityanskiy wrote:
> The cited commits missed passing frag_size to __xdp_rxq_info_reg, which
> is required by bpf_xdp_adjust_tail to support growing the tail pointer
> in fragmented packets. Pass the missing parameter when the current RQ
> mode allows XDP multi buffer.
> 
> Fixes: ea5d49bdae8b ("net/mlx5e: Add XDP multi buffer support to the non-linear legacy RQ")
> Fixes: 9cb9482ef10e ("net/mlx5e: Use fragments of the same size in non-linear legacy RQ with XDP")
> Signed-off-by: Maxim Mikityanskiy <maxtram95@gmail.com>
> Cc: Tariq Toukan <tariqt@nvidia.com>
> ---
>   drivers/net/ethernet/mellanox/mlx5/core/en/params.c | 9 +++++++--
>   drivers/net/ethernet/mellanox/mlx5/core/en/params.h | 1 +
>   drivers/net/ethernet/mellanox/mlx5/core/en_main.c   | 7 ++++---
>   3 files changed, 12 insertions(+), 5 deletions(-)
> 

Patch is much cleaner now.

> diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
> index 4ad19c981294..151585302cd1 100644
> --- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
> +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
> @@ -662,7 +662,8 @@ static int mlx5e_max_nonlinear_mtu(int first_frag_size, int frag_size, bool xdp)
>   static int mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
>   				     struct mlx5e_params *params,
>   				     struct mlx5e_xsk_param *xsk,
> -				     struct mlx5e_rq_frags_info *info)
> +				     struct mlx5e_rq_frags_info *info,
> +				     u32 *xdp_frag_size)

Even when returning success, this function does not always provide a value
for xdp_frag_size.

It means that the responsibility of initializing this param is on the
caller side, but the caller then gets no indication of whether the function
overwrote it or not. It works, but I prefer a different caller/callee
contract.

I suggest that the function should provide a value for xdp_frag_size on
every successful flow. Suggestion:

--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -782,6 +782,9 @@ static int mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,

         info->log_num_frags = order_base_2(info->num_frags);

+       *xdp_frag_size =
+               info->num_frags > 1 && params->xdp_prog ? PAGE_SIZE : 0;
+
         return 0;
  }


>   {
>   	u32 byte_count = MLX5E_SW2HW_MTU(params, params->sw_mtu);
>   	int frag_size_max = DEFAULT_FRAG_SIZE;
> @@ -737,6 +738,9 @@ static int mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
>   	}
>   	info->num_frags = i;
>   
> +	if (info->num_frags > 1 && params->xdp_prog)
> +		*xdp_frag_size = PAGE_SIZE;
> +
>   	/* The last fragment of WQE with index 2*N may share the page with the
>   	 * first fragment of WQE with index 2*N+1 in certain cases. If WQE 2*N+1
>   	 * is not completed yet, WQE 2*N must not be allocated, as it's
> @@ -917,7 +921,8 @@ int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
>   	}
>   	default: /* MLX5_WQ_TYPE_CYCLIC */
>   		MLX5_SET(wq, wq, log_wq_sz, params->log_rq_mtu_frames);
> -		err = mlx5e_build_rq_frags_info(mdev, params, xsk, &param->frags_info);
> +		err = mlx5e_build_rq_frags_info(mdev, params, xsk, &param->frags_info,
> +						&param->xdp_frag_size);
>   		if (err)
>   			return err;
>   		ndsegs = param->frags_info.num_frags;
> diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
> index c9be6eb88012..e5930fe752e5 100644
> --- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
> +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
> @@ -24,6 +24,7 @@ struct mlx5e_rq_param {
>   	u32                        rqc[MLX5_ST_SZ_DW(rqc)];
>   	struct mlx5_wq_param       wq;
>   	struct mlx5e_rq_frags_info frags_info;
> +	u32                        xdp_frag_size;
>   };
>   
>   struct mlx5e_sq_param {
> diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
> index abcc614b6191..d02af93035b2 100644
> --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
> +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
> @@ -576,7 +576,7 @@ static void mlx5e_free_mpwqe_rq_drop_page(struct mlx5e_rq *rq)
>   }
>   
>   static int mlx5e_init_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
> -			     struct mlx5e_rq *rq)
> +			     u32 xdp_frag_size, struct mlx5e_rq *rq)
>   {
>   	struct mlx5_core_dev *mdev = c->mdev;
>   	int err;
> @@ -599,7 +599,8 @@ static int mlx5e_init_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *param
>   	if (err)
>   		return err;
>   
> -	return xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq->ix, c->napi.napi_id);
> +	return __xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq->ix, c->napi.napi_id,
> +				  xdp_frag_size);
>   }
>   
>   static int mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev,
> @@ -2214,7 +2215,7 @@ static int mlx5e_open_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *param
>   {
>   	int err;
>   
> -	err = mlx5e_init_rxq_rq(c, params, &c->rq);
> +	err = mlx5e_init_rxq_rq(c, params, rq_params->xdp_frag_size, &c->rq);
>   	if (err)
>   		return err;
>   

