From: Jakub Kicinski <kuba@kernel.org>
To: idosch@nvidia.com, petrm@nvidia.com, simon.horman@corigine.com
Cc: netdev@vger.kernel.org, leonro@nvidia.com, jiri@resnulli.us,
Jakub Kicinski <kuba@kernel.org>
Subject: [RFT net-next 5/6] devlink: hold the instance lock in port_split / port_unsplit callbacks
Date: Wed, 9 Mar 2022 16:16:31 -0800
Message-ID: <20220310001632.470337-6-kuba@kernel.org>
In-Reply-To: <20220310001632.470337-1-kuba@kernel.org>
Let the core take the devlink instance lock around the port split
and unsplit callbacks and remove the now-redundant locking in the
drivers.
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
---
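Note for reviewers: the core-side change is only dropping
DEVLINK_NL_FLAG_NO_LOCK from the two split ops, because
devlink_nl_pre_doit() already takes the instance lock for every
command that does not carry that flag, and devlink_nl_post_doit()
releases it. A simplified sketch of that logic, assuming the
devl_lock() / devl_assert_locked() helpers added earlier in this
series -- not the exact upstream code:

	/* net/core/devlink.c -- simplified illustration only */
	static int devlink_nl_pre_doit(const struct genl_ops *ops,
				       struct sk_buff *skb, struct genl_info *info)
	{
		struct devlink *devlink;

		/* look up the devlink instance from the netlink attrs ... */

		if (!(ops->internal_flags & DEVLINK_NL_FLAG_NO_LOCK))
			devl_lock(devlink);
		/* ... resolve DEVLINK_NL_FLAG_NEED_PORT etc. ... */
		return 0;
	}

With that in place the ->port_split() / ->port_unsplit() callbacks run
with the instance lock held, so the drivers can rely on
devl_assert_locked() instead of taking devl_lock() themselves.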
.../net/ethernet/mellanox/mlxsw/spectrum.c | 7 ----
.../net/ethernet/netronome/nfp/nfp_devlink.c | 32 +++++--------------
net/core/devlink.c | 2 --
3 files changed, 8 insertions(+), 33 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 1e823b669d1c..8eb05090ffec 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -2025,7 +2025,6 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u16 local_port,
struct netlink_ext_ack *extack)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
- struct devlink *devlink = priv_to_devlink(mlxsw_core);
struct mlxsw_sp_port_mapping port_mapping;
struct mlxsw_sp_port *mlxsw_sp_port;
enum mlxsw_reg_pmtdb_status status;
@@ -2063,7 +2062,6 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u16 local_port,
port_mapping = mlxsw_sp_port->mapping;
- devl_lock(devlink);
for (i = 0; i < count; i++) {
u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
@@ -2077,13 +2075,11 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u16 local_port,
dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
goto err_port_split_create;
}
- devl_unlock(devlink);
return 0;
err_port_split_create:
mlxsw_sp_port_unsplit_create(mlxsw_sp, count, pmtdb_pl);
- devl_unlock(devlink);
return err;
}
@@ -2091,7 +2087,6 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u16 local_port,
struct netlink_ext_ack *extack)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
- struct devlink *devlink = priv_to_devlink(mlxsw_core);
struct mlxsw_sp_port *mlxsw_sp_port;
char pmtdb_pl[MLXSW_REG_PMTDB_LEN];
unsigned int count;
@@ -2123,7 +2118,6 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u16 local_port,
return err;
}
- devl_lock(devlink);
for (i = 0; i < count; i++) {
u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
@@ -2132,7 +2126,6 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u16 local_port,
}
mlxsw_sp_port_unsplit_create(mlxsw_sp, count, pmtdb_pl);
- devl_unlock(devlink);
return 0;
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
index 865f62958a72..6bd6f4a67c30 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
@@ -70,29 +70,21 @@ nfp_devlink_port_split(struct devlink *devlink, unsigned int port_index,
unsigned int lanes;
int ret;
- devl_lock(devlink);
-
rtnl_lock();
ret = nfp_devlink_fill_eth_port_from_id(pf, port_index, &eth_port);
rtnl_unlock();
if (ret)
- goto out;
+ return ret;
- if (eth_port.port_lanes % count) {
- ret = -EINVAL;
- goto out;
- }
+ if (eth_port.port_lanes % count)
+ return -EINVAL;
/* Special case the 100G CXP -> 2x40G split */
lanes = eth_port.port_lanes / count;
if (eth_port.lanes == 10 && count == 2)
lanes = 8 / count;
- ret = nfp_devlink_set_lanes(pf, eth_port.index, lanes);
-out:
- devl_unlock(devlink);
-
- return ret;
+ return nfp_devlink_set_lanes(pf, eth_port.index, lanes);
}
static int
@@ -104,29 +96,21 @@ nfp_devlink_port_unsplit(struct devlink *devlink, unsigned int port_index,
unsigned int lanes;
int ret;
- devl_lock(devlink);
-
rtnl_lock();
ret = nfp_devlink_fill_eth_port_from_id(pf, port_index, &eth_port);
rtnl_unlock();
if (ret)
- goto out;
+ return ret;
- if (!eth_port.is_split) {
- ret = -EINVAL;
- goto out;
- }
+ if (!eth_port.is_split)
+ return -EINVAL;
/* Special case the 100G CXP -> 2x40G unsplit */
lanes = eth_port.port_lanes;
if (eth_port.port_lanes == 8)
lanes = 10;
- ret = nfp_devlink_set_lanes(pf, eth_port.index, lanes);
-out:
- devl_unlock(devlink);
-
- return ret;
+ return nfp_devlink_set_lanes(pf, eth_port.index, lanes);
}
static int
diff --git a/net/core/devlink.c b/net/core/devlink.c
index c30da1fc023d..3069a3833576 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -8676,14 +8676,12 @@ static const struct genl_small_ops devlink_nl_ops[] = {
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = devlink_nl_cmd_port_split_doit,
.flags = GENL_ADMIN_PERM,
- .internal_flags = DEVLINK_NL_FLAG_NO_LOCK,
},
{
.cmd = DEVLINK_CMD_PORT_UNSPLIT,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = devlink_nl_cmd_port_unsplit_doit,
.flags = GENL_ADMIN_PERM,
- .internal_flags = DEVLINK_NL_FLAG_NO_LOCK,
},
{
.cmd = DEVLINK_CMD_PORT_NEW,
--
2.34.1