* [PATCH net] gve: Add adminq lock for creating and destroying multiple queues
@ 2025-04-17 20:43 Harshitha Ramamurthy
2025-04-22 13:08 ` Przemek Kitszel
2025-04-24 0:14 ` Jakub Kicinski
0 siblings, 2 replies; 4+ messages in thread
From: Harshitha Ramamurthy @ 2025-04-17 20:43 UTC (permalink / raw)
To: netdev
Cc: jeroendb, hramamurthy, andrew+netdev, davem, edumazet, kuba,
pabeni, pkaligineedi, willemb, ziweixiao, shailend, linux-kernel,
stable
From: Ziwei Xiao <ziweixiao@google.com>
The original adminq lock is only protecting the gve_adminq_execute_cmd
which is aimed for sending out single adminq command. However, there are
other callers of gve_adminq_kick_and_wait and gve_adminq_issue_cmd that
need to take the mutex lock for mutual exclusion between them, which are
creating and destroying rx/tx queues. Add the adminq lock for those
unprotected callers.
Also this patch cleans up the error handling code of
gve_adminq_destroy_tx_queue.
Cc: stable@vger.kernel.org
Fixes: 1108566ca509 ("gve: Add adminq mutex lock")
Reviewed-by: Willem de Bruijn <willemb@google.com>
Signed-off-by: Ziwei Xiao <ziweixiao@google.com>
Signed-off-by: Harshitha Ramamurthy <hramamurthy@google.com>
---
drivers/net/ethernet/google/gve/gve_adminq.c | 54 ++++++++++++++------
1 file changed, 37 insertions(+), 17 deletions(-)
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c
index 3e8fc33cc11f..659460812276 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.c
+++ b/drivers/net/ethernet/google/gve/gve_adminq.c
@@ -442,6 +442,8 @@ static int gve_adminq_kick_and_wait(struct gve_priv *priv)
int tail, head;
int i;
+ lockdep_assert_held(&priv->adminq_lock);
+
tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
head = priv->adminq_prod_cnt;
@@ -467,9 +469,6 @@ static int gve_adminq_kick_and_wait(struct gve_priv *priv)
return 0;
}
-/* This function is not threadsafe - the caller is responsible for any
- * necessary locks.
- */
static int gve_adminq_issue_cmd(struct gve_priv *priv,
union gve_adminq_command *cmd_orig)
{
@@ -477,6 +476,8 @@ static int gve_adminq_issue_cmd(struct gve_priv *priv,
u32 opcode;
u32 tail;
+ lockdep_assert_held(&priv->adminq_lock);
+
tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
// Check if next command will overflow the buffer.
@@ -709,13 +710,19 @@ int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_que
int err;
int i;
+ mutex_lock(&priv->adminq_lock);
+
for (i = start_id; i < start_id + num_queues; i++) {
err = gve_adminq_create_tx_queue(priv, i);
if (err)
- return err;
+ goto out;
}
- return gve_adminq_kick_and_wait(priv);
+ err = gve_adminq_kick_and_wait(priv);
+
+out:
+ mutex_unlock(&priv->adminq_lock);
+ return err;
}
static void gve_adminq_get_create_rx_queue_cmd(struct gve_priv *priv,
@@ -788,19 +795,24 @@ int gve_adminq_create_rx_queues(struct gve_priv *priv, u32 num_queues)
int err;
int i;
+ mutex_lock(&priv->adminq_lock);
+
for (i = 0; i < num_queues; i++) {
err = gve_adminq_create_rx_queue(priv, i);
if (err)
- return err;
+ goto out;
}
- return gve_adminq_kick_and_wait(priv);
+ err = gve_adminq_kick_and_wait(priv);
+
+out:
+ mutex_unlock(&priv->adminq_lock);
+ return err;
}
static int gve_adminq_destroy_tx_queue(struct gve_priv *priv, u32 queue_index)
{
union gve_adminq_command cmd;
- int err;
memset(&cmd, 0, sizeof(cmd));
cmd.opcode = cpu_to_be32(GVE_ADMINQ_DESTROY_TX_QUEUE);
@@ -808,11 +820,7 @@ static int gve_adminq_destroy_tx_queue(struct gve_priv *priv, u32 queue_index)
.queue_id = cpu_to_be32(queue_index),
};
- err = gve_adminq_issue_cmd(priv, &cmd);
- if (err)
- return err;
-
- return 0;
+ return gve_adminq_issue_cmd(priv, &cmd);
}
int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues)
@@ -820,13 +828,19 @@ int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_qu
int err;
int i;
+ mutex_lock(&priv->adminq_lock);
+
for (i = start_id; i < start_id + num_queues; i++) {
err = gve_adminq_destroy_tx_queue(priv, i);
if (err)
- return err;
+ goto out;
}
- return gve_adminq_kick_and_wait(priv);
+ err = gve_adminq_kick_and_wait(priv);
+
+out:
+ mutex_unlock(&priv->adminq_lock);
+ return err;
}
static void gve_adminq_make_destroy_rx_queue_cmd(union gve_adminq_command *cmd,
@@ -861,13 +875,19 @@ int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 num_queues)
int err;
int i;
+ mutex_lock(&priv->adminq_lock);
+
for (i = 0; i < num_queues; i++) {
err = gve_adminq_destroy_rx_queue(priv, i);
if (err)
- return err;
+ goto out;
}
- return gve_adminq_kick_and_wait(priv);
+ err = gve_adminq_kick_and_wait(priv);
+
+out:
+ mutex_unlock(&priv->adminq_lock);
+ return err;
}
static void gve_set_default_desc_cnt(struct gve_priv *priv,
--
2.49.0.777.g153de2bbd5-goog
^ permalink raw reply related [flat|nested] 4+ messages in thread
* Re: [PATCH net] gve: Add adminq lock for creating and destroying multiple queues
2025-04-17 20:43 [PATCH net] gve: Add adminq lock for creating and destroying multiple queues Harshitha Ramamurthy
@ 2025-04-22 13:08 ` Przemek Kitszel
2025-04-24 0:14 ` Jakub Kicinski
1 sibling, 0 replies; 4+ messages in thread
From: Przemek Kitszel @ 2025-04-22 13:08 UTC (permalink / raw)
To: Harshitha Ramamurthy
Cc: jeroendb, andrew+netdev, davem, edumazet, kuba, pabeni,
pkaligineedi, willemb, ziweixiao, shailend, linux-kernel, stable,
netdev
On 4/17/25 22:43, Harshitha Ramamurthy wrote:
> From: Ziwei Xiao <ziweixiao@google.com>
>
> The original adminq lock is only protecting the gve_adminq_execute_cmd
> which is aimed for sending out single adminq command. However, there are
> other callers of gve_adminq_kick_and_wait and gve_adminq_issue_cmd that
> need to take the mutex lock for mutual exclusion between them, which are
> creating and destroying rx/tx queues. Add the adminq lock for those
> unprotected callers.
>
> Also this patch cleans up the error handling code of
> gve_adminq_destroy_tx_queue.
>
> Cc: stable@vger.kernel.org
> Fixes: 1108566ca509 ("gve: Add adminq mutex lock")
This looks like a correct fix, it is also nice that you have added
lockdep annotations.
Reviewed-by: Przemek Kitszel <przemyslaw.kitszel@intel.com>
> Reviewed-by: Willem de Bruijn <willemb@google.com>
> Signed-off-by: Ziwei Xiao <ziweixiao@google.com>
> Signed-off-by: Harshitha Ramamurthy <hramamurthy@google.com>
> ---
> drivers/net/ethernet/google/gve/gve_adminq.c | 54 ++++++++++++++------
> 1 file changed, 37 insertions(+), 17 deletions(-)
>
> diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c
> index 3e8fc33cc11f..659460812276 100644
> --- a/drivers/net/ethernet/google/gve/gve_adminq.c
> +++ b/drivers/net/ethernet/google/gve/gve_adminq.c
> @@ -442,6 +442,8 @@ static int gve_adminq_kick_and_wait(struct gve_priv *priv)
> int tail, head;
> int i;
>
> + lockdep_assert_held(&priv->adminq_lock);
> +
> tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
> head = priv->adminq_prod_cnt;
>
> @@ -467,9 +469,6 @@ static int gve_adminq_kick_and_wait(struct gve_priv *priv)
> return 0;
> }
>
> -/* This function is not threadsafe - the caller is responsible for any
> - * necessary locks.
> - */
> static int gve_adminq_issue_cmd(struct gve_priv *priv,
> union gve_adminq_command *cmd_orig)
> {
> @@ -477,6 +476,8 @@ static int gve_adminq_issue_cmd(struct gve_priv *priv,
> u32 opcode;
> u32 tail;
>
> + lockdep_assert_held(&priv->adminq_lock);
> +
> tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
>
> // Check if next command will overflow the buffer.
> @@ -709,13 +710,19 @@ int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_que
> int err;
> int i;
>
> + mutex_lock(&priv->adminq_lock);
> +
> for (i = start_id; i < start_id + num_queues; i++) {
> err = gve_adminq_create_tx_queue(priv, i);
> if (err)
> - return err;
> + goto out;
> }
>
> - return gve_adminq_kick_and_wait(priv);
> + err = gve_adminq_kick_and_wait(priv);
> +
> +out:
> + mutex_unlock(&priv->adminq_lock);
> + return err;
> }
>
> static void gve_adminq_get_create_rx_queue_cmd(struct gve_priv *priv,
> @@ -788,19 +795,24 @@ int gve_adminq_create_rx_queues(struct gve_priv *priv, u32 num_queues)
> int err;
> int i;
>
> + mutex_lock(&priv->adminq_lock);
> +
> for (i = 0; i < num_queues; i++) {
> err = gve_adminq_create_rx_queue(priv, i);
> if (err)
> - return err;
> + goto out;
> }
>
> - return gve_adminq_kick_and_wait(priv);
> + err = gve_adminq_kick_and_wait(priv);
> +
> +out:
> + mutex_unlock(&priv->adminq_lock);
> + return err;
> }
>
> static int gve_adminq_destroy_tx_queue(struct gve_priv *priv, u32 queue_index)
> {
> union gve_adminq_command cmd;
> - int err;
>
> memset(&cmd, 0, sizeof(cmd));
> cmd.opcode = cpu_to_be32(GVE_ADMINQ_DESTROY_TX_QUEUE);
> @@ -808,11 +820,7 @@ static int gve_adminq_destroy_tx_queue(struct gve_priv *priv, u32 queue_index)
> .queue_id = cpu_to_be32(queue_index),
> };
>
> - err = gve_adminq_issue_cmd(priv, &cmd);
> - if (err)
> - return err;
> -
> - return 0;
> + return gve_adminq_issue_cmd(priv, &cmd);
> }
>
> int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues)
> @@ -820,13 +828,19 @@ int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_qu
> int err;
> int i;
>
> + mutex_lock(&priv->adminq_lock);
> +
> for (i = start_id; i < start_id + num_queues; i++) {
> err = gve_adminq_destroy_tx_queue(priv, i);
> if (err)
> - return err;
> + goto out;
> }
>
> - return gve_adminq_kick_and_wait(priv);
> + err = gve_adminq_kick_and_wait(priv);
> +
> +out:
> + mutex_unlock(&priv->adminq_lock);
> + return err;
> }
>
> static void gve_adminq_make_destroy_rx_queue_cmd(union gve_adminq_command *cmd,
> @@ -861,13 +875,19 @@ int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 num_queues)
> int err;
> int i;
>
> + mutex_lock(&priv->adminq_lock);
> +
> for (i = 0; i < num_queues; i++) {
> err = gve_adminq_destroy_rx_queue(priv, i);
> if (err)
> - return err;
> + goto out;
> }
>
> - return gve_adminq_kick_and_wait(priv);
> + err = gve_adminq_kick_and_wait(priv);
> +
> +out:
> + mutex_unlock(&priv->adminq_lock);
> + return err;
> }
>
> static void gve_set_default_desc_cnt(struct gve_priv *priv,
^ permalink raw reply [flat|nested] 4+ messages in thread
* Re: [PATCH net] gve: Add adminq lock for creating and destroying multiple queues
2025-04-17 20:43 [PATCH net] gve: Add adminq lock for creating and destroying multiple queues Harshitha Ramamurthy
2025-04-22 13:08 ` Przemek Kitszel
@ 2025-04-24 0:14 ` Jakub Kicinski
2025-04-24 18:37 ` Ziwei Xiao
1 sibling, 1 reply; 4+ messages in thread
From: Jakub Kicinski @ 2025-04-24 0:14 UTC (permalink / raw)
To: Harshitha Ramamurthy
Cc: netdev, jeroendb, andrew+netdev, davem, edumazet, pabeni,
pkaligineedi, willemb, ziweixiao, shailend, linux-kernel, stable
On Thu, 17 Apr 2025 20:43:23 +0000 Harshitha Ramamurthy wrote:
> Also this patch cleans up the error handling code of
> gve_adminq_destroy_tx_queue.
> static int gve_adminq_destroy_tx_queue(struct gve_priv *priv, u32 queue_index)
> {
> union gve_adminq_command cmd;
> - int err;
>
> memset(&cmd, 0, sizeof(cmd));
> cmd.opcode = cpu_to_be32(GVE_ADMINQ_DESTROY_TX_QUEUE);
> @@ -808,11 +820,7 @@ static int gve_adminq_destroy_tx_queue(struct gve_priv *priv, u32 queue_index)
> .queue_id = cpu_to_be32(queue_index),
> };
>
> - err = gve_adminq_issue_cmd(priv, &cmd);
> - if (err)
> - return err;
> -
> - return 0;
> + return gve_adminq_issue_cmd(priv, &cmd);
> }
You mean this cleanup? That's not appropriate for a stable fix...
Could you also explain which callers of this code are not already
under rtnl_lock and/or the netdev instance lock?
--
pw-bot: cr
^ permalink raw reply [flat|nested] 4+ messages in thread
* Re: [PATCH net] gve: Add adminq lock for creating and destroying multiple queues
2025-04-24 0:14 ` Jakub Kicinski
@ 2025-04-24 18:37 ` Ziwei Xiao
0 siblings, 0 replies; 4+ messages in thread
From: Ziwei Xiao @ 2025-04-24 18:37 UTC (permalink / raw)
To: Jakub Kicinski
Cc: Harshitha Ramamurthy, netdev, jeroendb, andrew+netdev, davem,
edumazet, pabeni, pkaligineedi, willemb, shailend, linux-kernel,
stable
On Wed, Apr 23, 2025 at 5:14 PM Jakub Kicinski <kuba@kernel.org> wrote:
>
> On Thu, 17 Apr 2025 20:43:23 +0000 Harshitha Ramamurthy wrote:
> > Also this patch cleans up the error handling code of
> > gve_adminq_destroy_tx_queue.
>
> > static int gve_adminq_destroy_tx_queue(struct gve_priv *priv, u32 queue_index)
> > {
> > union gve_adminq_command cmd;
> > - int err;
> >
> > memset(&cmd, 0, sizeof(cmd));
> > cmd.opcode = cpu_to_be32(GVE_ADMINQ_DESTROY_TX_QUEUE);
> > @@ -808,11 +820,7 @@ static int gve_adminq_destroy_tx_queue(struct gve_priv *priv, u32 queue_index)
> > .queue_id = cpu_to_be32(queue_index),
> > };
> >
> > - err = gve_adminq_issue_cmd(priv, &cmd);
> > - if (err)
> > - return err;
> > -
> > - return 0;
> > + return gve_adminq_issue_cmd(priv, &cmd);
> > }
>
> You mean this cleanup? That's not appropriate for a stable fix...
>
> Could you also explain which callers of this code are not already
> under rtnl_lock and/or the netdev instance lock?
I discovered this and thought that this applied more widely, but upon
rereading it turns out it only applies to upcoming timestamping
patches and a previous flow steering code attempt that was scuttled.
Current callers are under rtnl_lock or netdev_lock. Should not have
been sent to the net. So will send as part of the timestamping series.
Thanks.
> --
> pw-bot: cr
^ permalink raw reply [flat|nested] 4+ messages in thread
end of thread, other threads:[~2025-04-24 18:37 UTC | newest]
Thread overview: 4+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2025-04-17 20:43 [PATCH net] gve: Add adminq lock for creating and destroying multiple queues Harshitha Ramamurthy
2025-04-22 13:08 ` Przemek Kitszel
2025-04-24 0:14 ` Jakub Kicinski
2025-04-24 18:37 ` Ziwei Xiao
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).