From: Shailend Chand <shailend@google.com>
To: netdev@vger.kernel.org
Cc: almasrymina@google.com, davem@davemloft.net, edumazet@google.com,
kuba@kernel.org, pabeni@redhat.com, willemb@google.com,
Shailend Chand <shailend@google.com>
Subject: [RFC PATCH net-next 9/9] gve: Implement queue api
Date: Thu, 18 Apr 2024 19:51:59 +0000
Message-ID: <20240418195159.3461151-10-shailend@google.com>
In-Reply-To: <20240418195159.3461151-1-shailend@google.com>
An API enabling the net stack to reset individual driver queues is implemented
for gve: the netdev queue management ops (ndo_queue_mem_alloc/free and
ndo_queue_start/stop) are wired up for the driver's Rx queues.
Signed-off-by: Shailend Chand <shailend@google.com>
---
drivers/net/ethernet/google/gve/gve.h | 6 +
drivers/net/ethernet/google/gve/gve_dqo.h | 6 +
drivers/net/ethernet/google/gve/gve_main.c | 143 +++++++++++++++++++
drivers/net/ethernet/google/gve/gve_rx.c | 12 +-
drivers/net/ethernet/google/gve/gve_rx_dqo.c | 12 +-
5 files changed, 167 insertions(+), 12 deletions(-)
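
Note (illustrative only): the sketch below shows how a core-side caller might
sequence the queue_mgmt ops registered in this patch to restart a single Rx
queue. The helper, its name and its error handling are assumptions for
illustration; the real stack-side caller is introduced in patch 1/9 of this
series. The op signatures follow the gve callbacks added below.

static int example_restart_rx_queue(struct net_device *dev, int idx)
{
	const struct netdev_queue_mgmt_ops *ops = dev->queue_mgmt_ops;
	void *new_mem, *old_mem;
	int err;

	/* Allocate replacement queue memory before touching live traffic. */
	new_mem = ops->ndo_queue_mem_alloc(dev, idx);
	if (!new_mem)
		return -ENOMEM;

	/* Stop the live queue; the driver hands back its current memory. */
	err = ops->ndo_queue_stop(dev, idx, &old_mem);
	if (err) {
		ops->ndo_queue_mem_free(dev, new_mem);
		return err;
	}

	/*
	 * Bring the queue back up on the freshly allocated memory. A
	 * failure here is left to the driver's reset path, as noted in
	 * the gve callbacks below.
	 */
	err = ops->ndo_queue_start(dev, idx, new_mem);
	if (err)
		return err;

	/* The old memory is no longer referenced by the device. */
	ops->ndo_queue_mem_free(dev, old_mem);
	return 0;
}

Keeping memory allocation separate from queue start means the replacement
memory can be allocated up front, so the queue is only quiesced for the
stop/start span itself.
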
diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index 9f6a897c87cb..d752e525bde7 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -1147,6 +1147,12 @@ bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx);
void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx);
int gve_rx_poll(struct gve_notify_block *block, int budget);
bool gve_rx_work_pending(struct gve_rx_ring *rx);
+int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
+ struct gve_rx_alloc_rings_cfg *cfg,
+ struct gve_rx_ring *rx,
+ int idx);
+void gve_rx_free_ring_gqi(struct gve_priv *priv, struct gve_rx_ring *rx,
+ struct gve_rx_alloc_rings_cfg *cfg);
int gve_rx_alloc_rings(struct gve_priv *priv);
int gve_rx_alloc_rings_gqi(struct gve_priv *priv,
struct gve_rx_alloc_rings_cfg *cfg);
diff --git a/drivers/net/ethernet/google/gve/gve_dqo.h b/drivers/net/ethernet/google/gve/gve_dqo.h
index b81584829c40..e83773fb891f 100644
--- a/drivers/net/ethernet/google/gve/gve_dqo.h
+++ b/drivers/net/ethernet/google/gve/gve_dqo.h
@@ -44,6 +44,12 @@ void gve_tx_free_rings_dqo(struct gve_priv *priv,
struct gve_tx_alloc_rings_cfg *cfg);
void gve_tx_start_ring_dqo(struct gve_priv *priv, int idx);
void gve_tx_stop_ring_dqo(struct gve_priv *priv, int idx);
+int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
+ struct gve_rx_alloc_rings_cfg *cfg,
+ struct gve_rx_ring *rx,
+ int idx);
+void gve_rx_free_ring_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
+ struct gve_rx_alloc_rings_cfg *cfg);
int gve_rx_alloc_rings_dqo(struct gve_priv *priv,
struct gve_rx_alloc_rings_cfg *cfg);
void gve_rx_free_rings_dqo(struct gve_priv *priv,
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index c348dff7cca6..5e652958f10f 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -17,6 +17,7 @@
#include <linux/workqueue.h>
#include <linux/utsname.h>
#include <linux/version.h>
+#include <net/netdev_queues.h>
#include <net/sch_generic.h>
#include <net/xdp_sock_drv.h>
#include "gve.h"
@@ -2070,6 +2071,15 @@ static void gve_turnup(struct gve_priv *priv)
gve_set_napi_enabled(priv);
}
+static void gve_turnup_and_check_status(struct gve_priv *priv)
+{
+ u32 status;
+
+ gve_turnup(priv);
+ status = ioread32be(&priv->reg_bar0->device_status);
+ gve_handle_link_status(priv, GVE_DEVICE_STATUS_LINK_STATUS_MASK & status);
+}
+
static void gve_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct gve_notify_block *block;
@@ -2530,6 +2540,138 @@ static void gve_write_version(u8 __iomem *driver_version_register)
writeb('\n', driver_version_register);
}
+static int gve_rx_queue_stop(struct net_device *dev, int idx,
+ void **out_per_q_mem)
+{
+ struct gve_priv *priv = netdev_priv(dev);
+ struct gve_rx_ring *rx;
+ int err;
+
+ if (!priv->rx)
+ return -EAGAIN;
+ if (idx < 0 || idx >= priv->rx_cfg.max_queues)
+ return -ERANGE;
+
+ /* Destroying queue 0 while other queues exist is not supported in DQO */
+ if (!gve_is_gqi(priv) && idx == 0)
+ return -ERANGE;
+
+ rx = kvzalloc(sizeof(*rx), GFP_KERNEL);
+ if (!rx)
+ return -ENOMEM;
+ *rx = priv->rx[idx];
+
+ /* Single-queue destruction requires quiescence on all queues */
+ gve_turndown(priv);
+
+ /* This failure will trigger a reset - no need to clean up */
+ err = gve_adminq_destroy_single_rx_queue(priv, idx);
+ if (err) {
+ kvfree(rx);
+ return err;
+ }
+
+ if (gve_is_gqi(priv))
+ gve_rx_stop_ring_gqi(priv, idx);
+ else
+ gve_rx_stop_ring_dqo(priv, idx);
+
+ /* Turn the unstopped queues back up */
+ gve_turnup_and_check_status(priv);
+
+ *out_per_q_mem = rx;
+ return 0;
+}
+
+static void gve_rx_queue_mem_free(struct net_device *dev, void *per_q_mem)
+{
+ struct gve_priv *priv = netdev_priv(dev);
+ struct gve_rx_alloc_rings_cfg cfg = {0};
+ struct gve_rx_ring *rx;
+
+ gve_rx_get_curr_alloc_cfg(priv, &cfg);
+ rx = (struct gve_rx_ring *)per_q_mem;
+ if (!rx)
+ return;
+
+ if (gve_is_gqi(priv))
+ gve_rx_free_ring_gqi(priv, rx, &cfg);
+ else
+ gve_rx_free_ring_dqo(priv, rx, &cfg);
+
+ kvfree(per_q_mem);
+}
+
+static void *gve_rx_queue_mem_alloc(struct net_device *dev, int idx)
+{
+ struct gve_priv *priv = netdev_priv(dev);
+ struct gve_rx_alloc_rings_cfg cfg = {0};
+ struct gve_rx_ring *rx;
+ int err;
+
+ gve_rx_get_curr_alloc_cfg(priv, &cfg);
+ if (idx < 0 || idx >= cfg.qcfg->max_queues)
+ return NULL;
+
+ rx = kvzalloc(sizeof(*rx), GFP_KERNEL);
+ if (!rx)
+ return NULL;
+
+ if (gve_is_gqi(priv))
+ err = gve_rx_alloc_ring_gqi(priv, &cfg, rx, idx);
+ else
+ err = gve_rx_alloc_ring_dqo(priv, &cfg, rx, idx);
+
+ if (err) {
+ kvfree(rx);
+ return NULL;
+ }
+ return rx;
+}
+
+static int gve_rx_queue_start(struct net_device *dev, int idx, void *per_q_mem)
+{
+ struct gve_priv *priv = netdev_priv(dev);
+ struct gve_rx_ring *rx;
+ int err;
+
+ if (!priv->rx)
+ return -EAGAIN;
+ if (idx < 0 || idx >= priv->rx_cfg.max_queues)
+ return -ERANGE;
+ rx = (struct gve_rx_ring *)per_q_mem;
+ priv->rx[idx] = *rx;
+
+ /* Single-queue creation requires quiescence on all queues */
+ gve_turndown(priv);
+
+ if (gve_is_gqi(priv))
+ gve_rx_start_ring_gqi(priv, idx);
+ else
+ gve_rx_start_ring_dqo(priv, idx);
+
+ /* This failure will trigger a reset - no need to clean up */
+ err = gve_adminq_create_single_rx_queue(priv, idx);
+ if (err)
+ return err;
+
+ if (gve_is_gqi(priv))
+ gve_rx_write_doorbell(priv, &priv->rx[idx]);
+ else
+ gve_rx_post_buffers_dqo(&priv->rx[idx]);
+
+ /* Turn the unstopped queues back up */
+ gve_turnup_and_check_status(priv);
+ return 0;
+}
+
+static const struct netdev_queue_mgmt_ops gve_queue_mgmt_ops = {
+ .ndo_queue_mem_alloc = gve_rx_queue_mem_alloc,
+ .ndo_queue_mem_free = gve_rx_queue_mem_free,
+ .ndo_queue_start = gve_rx_queue_start,
+ .ndo_queue_stop = gve_rx_queue_stop,
+};
+
static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int max_tx_queues, max_rx_queues;
@@ -2584,6 +2726,7 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, dev);
dev->ethtool_ops = &gve_ethtool_ops;
dev->netdev_ops = &gve_netdev_ops;
+ dev->queue_mgmt_ops = &gve_queue_mgmt_ops;
/* Set default and supported features.
*
diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c
index 1d235caab4c5..307bf97d4778 100644
--- a/drivers/net/ethernet/google/gve/gve_rx.c
+++ b/drivers/net/ethernet/google/gve/gve_rx.c
@@ -101,8 +101,8 @@ void gve_rx_stop_ring_gqi(struct gve_priv *priv, int idx)
gve_rx_reset_ring_gqi(priv, idx);
}
-static void gve_rx_free_ring_gqi(struct gve_priv *priv, struct gve_rx_ring *rx,
- struct gve_rx_alloc_rings_cfg *cfg)
+void gve_rx_free_ring_gqi(struct gve_priv *priv, struct gve_rx_ring *rx,
+ struct gve_rx_alloc_rings_cfg *cfg)
{
struct device *dev = &priv->pdev->dev;
u32 slots = rx->mask + 1;
@@ -270,10 +270,10 @@ void gve_rx_start_ring_gqi(struct gve_priv *priv, int idx)
gve_add_napi(priv, ntfy_idx, gve_napi_poll);
}
-static int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
- struct gve_rx_alloc_rings_cfg *cfg,
- struct gve_rx_ring *rx,
- int idx)
+int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
+ struct gve_rx_alloc_rings_cfg *cfg,
+ struct gve_rx_ring *rx,
+ int idx)
{
struct device *hdev = &priv->pdev->dev;
u32 slots = cfg->ring_size;
diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
index dc2c6bd92e82..dcbc37118870 100644
--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
@@ -299,8 +299,8 @@ void gve_rx_stop_ring_dqo(struct gve_priv *priv, int idx)
gve_rx_reset_ring_dqo(priv, idx);
}
-static void gve_rx_free_ring_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
- struct gve_rx_alloc_rings_cfg *cfg)
+void gve_rx_free_ring_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
+ struct gve_rx_alloc_rings_cfg *cfg)
{
struct device *hdev = &priv->pdev->dev;
size_t completion_queue_slots;
@@ -373,10 +373,10 @@ void gve_rx_start_ring_dqo(struct gve_priv *priv, int idx)
gve_add_napi(priv, ntfy_idx, gve_napi_poll_dqo);
}
-static int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
- struct gve_rx_alloc_rings_cfg *cfg,
- struct gve_rx_ring *rx,
- int idx)
+int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
+ struct gve_rx_alloc_rings_cfg *cfg,
+ struct gve_rx_ring *rx,
+ int idx)
{
struct device *hdev = &priv->pdev->dev;
size_t size;
--
2.44.0.769.g3c40516874-goog