From: Przemek Kitszel <przemyslaw.kitszel@intel.com>
To: Tariq Toukan <tariqt@nvidia.com>, Moshe Shemesh <moshe@nvidia.com>
Cc: <netdev@vger.kernel.org>, Saeed Mahameed <saeedm@nvidia.com>,
	Gal Pressman <gal@nvidia.com>,
	Leon Romanovsky <leonro@nvidia.com>,
	Mark Bloch <mbloch@nvidia.com>,
	Yevgeny Kliteynik <kliteyn@nvidia.com>,
	"David S. Miller" <davem@davemloft.net>,
	Jakub Kicinski <kuba@kernel.org>, Paolo Abeni <pabeni@redhat.com>,
	Eric Dumazet <edumazet@google.com>,
	Andrew Lunn <andrew+netdev@lunn.ch>
Subject: Re: [PATCH net-next 06/13] net/mlx5: fs, add HWS modify header API function
Date: Tue, 7 Jan 2025 13:09:51 +0100
Message-ID: <0a115ea8-7be5-47db-9fa5-b248bccbcd38@intel.com>
In-Reply-To: <20250107060708.1610882-7-tariqt@nvidia.com>

On 1/7/25 07:07, Tariq Toukan wrote:
> From: Moshe Shemesh <moshe@nvidia.com>
> 
> Add modify header alloc and dealloc API functions to provide modify
> header actions for steering rules. Use fs hws pools to get actions from
> shared bulks of modify header actions.
> 
> Signed-off-by: Moshe Shemesh <moshe@nvidia.com>
> Reviewed-by: Yevgeny Kliteynik <kliteyn@nvidia.com>
> Reviewed-by: Mark Bloch <mbloch@nvidia.com>
> Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
> ---
>   .../net/ethernet/mellanox/mlx5/core/fs_core.h |   1 +
>   .../mellanox/mlx5/core/steering/hws/fs_hws.c  | 117 +++++++++++++
>   .../mellanox/mlx5/core/steering/hws/fs_hws.h  |   2 +
>   .../mlx5/core/steering/hws/fs_hws_pools.c     | 164 ++++++++++++++++++
>   .../mlx5/core/steering/hws/fs_hws_pools.h     |  22 +++
>   5 files changed, 306 insertions(+)
> 
> diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
> index 9b0575a61362..06ec48f51b6d 100644
> --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
> +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
> @@ -65,6 +65,7 @@ struct mlx5_modify_hdr {
>   	enum mlx5_flow_resource_owner owner;
>   	union {
>   		struct mlx5_fs_dr_action fs_dr_action;
> +		struct mlx5_fs_hws_action fs_hws_action;
>   		u32 id;
>   	};
>   };
> diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c
> index 723865140b2e..a75e5ce168c7 100644
> --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c
> +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c
> @@ -14,6 +14,8 @@ static struct mlx5hws_action *
>   create_action_remove_header_vlan(struct mlx5hws_context *ctx);
>   static void destroy_pr_pool(struct mlx5_fs_pool *pool, struct xarray *pr_pools,
>   			    unsigned long index);
> +static void destroy_mh_pool(struct mlx5_fs_pool *pool, struct xarray *mh_pools,
> +			    unsigned long index);

usual "please add your suffix" complain

sorry for mostly nitpicks, I will take a deeper look later

>   
>   static int init_hws_actions_pool(struct mlx5_core_dev *dev,
>   				 struct mlx5_fs_hws_context *fs_ctx)
> @@ -56,6 +58,7 @@ static int init_hws_actions_pool(struct mlx5_core_dev *dev,
>   		goto cleanup_insert_hdr;
>   	xa_init(&hws_pool->el2tol3tnl_pools);
>   	xa_init(&hws_pool->el2tol2tnl_pools);
> +	xa_init(&hws_pool->mh_pools);
>   	return 0;
>   
>   cleanup_insert_hdr:
> @@ -81,6 +84,9 @@ static void cleanup_hws_actions_pool(struct mlx5_fs_hws_context *fs_ctx)
>   	struct mlx5_fs_pool *pool;
>   	unsigned long i;
>   
> +	xa_for_each(&hws_pool->mh_pools, i, pool)
> +		destroy_mh_pool(pool, &hws_pool->mh_pools, i);
> +	xa_destroy(&hws_pool->mh_pools);
>   	xa_for_each(&hws_pool->el2tol2tnl_pools, i, pool)
>   		destroy_pr_pool(pool, &hws_pool->el2tol2tnl_pools, i);
>   	xa_destroy(&hws_pool->el2tol2tnl_pools);
> @@ -528,6 +534,115 @@ static void mlx5_cmd_hws_packet_reformat_dealloc(struct mlx5_flow_root_namespace
>   	pkt_reformat->fs_hws_action.pr_data = NULL;
>   }
>   
> +static struct mlx5_fs_pool *
> +create_mh_pool(struct mlx5_core_dev *dev,

ditto prefix

[...]

> +static int mlx5_cmd_hws_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
> +					    u8 namespace, u8 num_actions,
> +					    void *modify_actions,
> +					    struct mlx5_modify_hdr *modify_hdr)
> +{
> +	struct mlx5_fs_hws_actions_pool *hws_pool = &ns->fs_hws_context.hws_pool;
> +	struct mlx5hws_action_mh_pattern pattern = {};
> +	struct mlx5_fs_hws_mh *mh_data = NULL;
> +	struct mlx5hws_action *hws_action;
> +	struct mlx5_fs_pool *pool;
> +	unsigned long i, cnt = 0;
> +	bool known_pattern;
> +	int err;
> +
> +	pattern.sz = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto) * num_actions;
> +	pattern.data = modify_actions;
> +
> +	known_pattern = false;
> +	xa_for_each(&hws_pool->mh_pools, i, pool) {
> +		if (mlx5_fs_hws_mh_pool_match(pool, &pattern)) {
> +			known_pattern = true;
> +			break;
> +		}
> +		cnt++;
> +	}
> +
> +	if (!known_pattern) {
> +		pool = create_mh_pool(ns->dev, &pattern, &hws_pool->mh_pools, cnt);
> +		if (IS_ERR(pool))
> +			return PTR_ERR(pool);
> +	}

if, by any chance, .mh_pools is empty, the next line would use @pool
uninitialized
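if that is a real path, a defensive init would be the simplest fix, just a sketch:

	struct mlx5_fs_pool *pool = NULL;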

> +	mh_data = mlx5_fs_hws_mh_pool_acquire_mh(pool);
> +	if (IS_ERR(mh_data)) {
> +		err = PTR_ERR(mh_data);
> +		goto destroy_pool;
> +	}
> +	hws_action = mh_data->bulk->hws_action;
> +	mh_data->data = kmemdup(pattern.data, pattern.sz, GFP_KERNEL);
> +	if (!mh_data->data) {
> +		err = -ENOMEM;
> +		goto release_mh;
> +	}
> +	modify_hdr->fs_hws_action.mh_data = mh_data;
> +	modify_hdr->fs_hws_action.fs_pool = pool;
> +	modify_hdr->owner = MLX5_FLOW_RESOURCE_OWNER_SW;
> +	modify_hdr->fs_hws_action.hws_action = hws_action;
> +
> +	return 0;
> +
> +release_mh:
> +	mlx5_fs_hws_mh_pool_release_mh(pool, mh_data);
> +destroy_pool:
> +	if (!known_pattern)
> +		destroy_mh_pool(pool, &hws_pool->mh_pools, cnt);
> +	return err;
> +}

[...]

> +static struct mlx5_fs_bulk *
> +mlx5_fs_hws_mh_bulk_create(struct mlx5_core_dev *dev, void *pool_ctx)
> +{
> +	struct mlx5hws_action_mh_pattern *pattern;
> +	struct mlx5_flow_root_namespace *root_ns;
> +	struct mlx5_fs_hws_mh_bulk *mh_bulk;
> +	struct mlx5hws_context *ctx;
> +	int bulk_len;
> +	int i;

meld @i into the previous line, or better, declare it within the for loop
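now that the kernel builds with gnu11, a loop-scoped declaration is fine, i.e. (body elided, just the form):

	for (int i = 0; i < bulk_len; i++) {
		/* per-entry init as in the patch */
	}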

> +
> +	root_ns = mlx5_get_root_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
> +	if (!root_ns || root_ns->mode != MLX5_FLOW_STEERING_MODE_HMFS)
> +		return NULL;
> +
> +	ctx = root_ns->fs_hws_context.hws_ctx;
> +	if (!ctx)
> +		return NULL;
> +
> +	if (!pool_ctx)
> +		return NULL;

you could combine the two checks above
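i.e.:

	ctx = root_ns->fs_hws_context.hws_ctx;
	if (!ctx || !pool_ctx)
		return NULL;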

[...]

> +bool mlx5_fs_hws_mh_pool_match(struct mlx5_fs_pool *mh_pool,
> +			       struct mlx5hws_action_mh_pattern *pattern)
> +{
> +	struct mlx5hws_action_mh_pattern *pool_pattern;
> +	int num_actions, i;
> +
> +	pool_pattern = mh_pool->pool_ctx;
> +	if (WARN_ON_ONCE(!pool_pattern))
> +		return false;
> +
> +	if (pattern->sz != pool_pattern->sz)
> +		return false;
> +	num_actions = pattern->sz / MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto);
> +	for (i = 0; i < num_actions; i++)

missing braces (the loop body spans multiple lines); see the sketch below the quoted code

> +		if ((__force __be32)pattern->data[i] !=
> +		    (__force __be32)pool_pattern->data[i])
> +			return false;
> +	return true;
> +}
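re the braces remark above, with them the loop would read:

	for (i = 0; i < num_actions; i++) {
		if ((__force __be32)pattern->data[i] !=
		    (__force __be32)pool_pattern->data[i])
			return false;
	}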

