* [PATCH 1/4] net/gve: fix comma operator compiler warning
2026-03-27 0:18 [PATCH 0/4] net/gve: clean up minor issues Jasper Tran O'Leary
@ 2026-03-27 0:18 ` Jasper Tran O'Leary
2026-03-27 0:18 ` [PATCH 2/4] net/gve: remove variable length array in rx refill Jasper Tran O'Leary
` (3 subsequent siblings)
4 siblings, 0 replies; 6+ messages in thread
From: Jasper Tran O'Leary @ 2026-03-27 0:18 UTC (permalink / raw)
To: stephen; +Cc: dev, Jasper Tran O'Leary, Joshua Washington
The gve_adminq_create_rx_queue function initialized the admin queue
command using commas rather than semicolons after the initialization
of two fields in the GQI queue format branch. Change these commas to
semicolons to remove the compiler warning.
Signed-off-by: Jasper Tran O'Leary <jtranoleary@google.com>
Reviewed-by: Joshua Washington <joshwash@google.com>
---
drivers/net/gve/base/gve_adminq.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/net/gve/base/gve_adminq.c b/drivers/net/gve/base/gve_adminq.c
index 9a94591..bcfc789 100644
--- a/drivers/net/gve/base/gve_adminq.c
+++ b/drivers/net/gve/base/gve_adminq.c
@@ -721,9 +721,9 @@ static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index)
GVE_RAW_ADDRESSING_QPL_ID : rxq->qpl->id;
cmd.create_rx_queue.rx_desc_ring_addr =
- cpu_to_be64(rxq->mz->iova),
+ cpu_to_be64(rxq->mz->iova);
cmd.create_rx_queue.rx_data_ring_addr =
- cpu_to_be64(rxq->data_mz->iova),
+ cpu_to_be64(rxq->data_mz->iova);
cmd.create_rx_queue.index = cpu_to_be32(queue_index);
cmd.create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
cmd.create_rx_queue.packet_buffer_size = cpu_to_be16(rxq->rx_buf_len);
--
2.53.0.1118.gaef5881109-goog
^ permalink raw reply related [flat|nested] 6+ messages in thread* [PATCH 2/4] net/gve: remove variable length array in rx refill
2026-03-27 0:18 [PATCH 0/4] net/gve: clean up minor issues Jasper Tran O'Leary
2026-03-27 0:18 ` [PATCH 1/4] net/gve: fix comma operator compiler warning Jasper Tran O'Leary
@ 2026-03-27 0:18 ` Jasper Tran O'Leary
2026-03-27 0:18 ` [PATCH 3/4] net/gve: add checks for flow rule struct padding Jasper Tran O'Leary
` (2 subsequent siblings)
4 siblings, 0 replies; 6+ messages in thread
From: Jasper Tran O'Leary @ 2026-03-27 0:18 UTC (permalink / raw)
To: stephen; +Cc: dev, Jasper Tran O'Leary, Joshua Washington
The new_bufs array in gve_rx_refill_dqo was being allocated on the
stack as a variable length array. With a large descriptor count, this
could lead to excessive stack usage or potential stack overflows, and
it triggers -Wvla warnings during compilation.
To fix this, replace the stack array with a buffer allocated on the
heap during queue setup. This buffer, refill_bufs, is stored in the
rxq structure and is sized to the queue's descriptor count. This moves
the allocation from the datapath stack to the heap at initialization,
resolving the stack overflow risk while maintaining the efficiency of a
single bulk mbuf allocation.
Signed-off-by: Jasper Tran O'Leary <jtranoleary@google.com>
Reviewed-by: Joshua Washington <joshwash@google.com>
---
drivers/net/gve/gve_ethdev.h | 1 +
drivers/net/gve/gve_rx_dqo.c | 27 +++++++++++++++++++++------
2 files changed, 22 insertions(+), 6 deletions(-)
diff --git a/drivers/net/gve/gve_ethdev.h b/drivers/net/gve/gve_ethdev.h
index 2d570d0..8fd098c 100644
--- a/drivers/net/gve/gve_ethdev.h
+++ b/drivers/net/gve/gve_ethdev.h
@@ -250,6 +250,7 @@ struct gve_rx_queue {
/* Only valid for DQO_RDA queue format */
struct gve_rx_queue *bufq;
+ struct rte_mbuf **refill_bufs;
uint8_t is_gqi_qpl;
};
diff --git a/drivers/net/gve/gve_rx_dqo.c b/drivers/net/gve/gve_rx_dqo.c
index 3aa82b3..8035aee 100644
--- a/drivers/net/gve/gve_rx_dqo.c
+++ b/drivers/net/gve/gve_rx_dqo.c
@@ -40,12 +40,12 @@ static inline void
gve_rx_refill_dqo(struct gve_rx_queue *rxq)
{
volatile struct gve_rx_desc_dqo *rx_buf_desc;
- struct rte_mbuf *new_bufs[rxq->nb_rx_desc];
uint16_t rx_mask = rxq->nb_rx_desc - 1;
uint16_t next_avail = rxq->bufq_tail;
struct rte_eth_dev *dev;
uint16_t nb_refill;
uint64_t dma_addr;
+ rte_iova_t iova;
int16_t buf_id;
int diag;
int i;
@@ -54,7 +54,7 @@ gve_rx_refill_dqo(struct gve_rx_queue *rxq)
if (nb_refill < rxq->free_thresh)
return;
- diag = rte_pktmbuf_alloc_bulk(rxq->mpool, new_bufs, nb_refill);
+ diag = rte_pktmbuf_alloc_bulk(rxq->mpool, rxq->refill_bufs, nb_refill);
if (unlikely(diag < 0)) {
rxq->stats.no_mbufs_bulk++;
rxq->stats.no_mbufs += nb_refill;
@@ -76,12 +76,14 @@ gve_rx_refill_dqo(struct gve_rx_queue *rxq)
PMD_DRV_DP_LOG(ERR,
"No free entries in sw_ring for port %d, queue %d.",
rxq->port_id, rxq->queue_id);
- rte_pktmbuf_free_bulk(new_bufs + i, nb_refill - i);
+ rte_pktmbuf_free_bulk(rxq->refill_bufs + i,
+ nb_refill - i);
nb_refill = i;
break;
}
- rxq->sw_ring[buf_id] = new_bufs[i];
- dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(new_bufs[i]));
+ rxq->sw_ring[buf_id] = rxq->refill_bufs[i];
+ iova = rte_mbuf_data_iova_default(rxq->refill_bufs[i]);
+ dma_addr = rte_cpu_to_le_64(iova);
rx_buf_desc->buf_id = buf_id;
rx_buf_desc->header_buf_addr = 0;
rx_buf_desc->buf_addr = dma_addr;
@@ -247,6 +249,7 @@ gve_rx_queue_release_dqo(struct rte_eth_dev *dev, uint16_t qid)
gve_release_rxq_mbufs_dqo(q);
rte_free(q->sw_ring);
rte_free(q->completed_buf_list);
+ rte_free(q->refill_bufs);
rte_memzone_free(q->compl_ring_mz);
rte_memzone_free(q->mz);
rte_memzone_free(q->qres_mz);
@@ -363,6 +366,16 @@ gve_rx_queue_setup_dqo(struct rte_eth_dev *dev, uint16_t queue_id,
goto free_rxq_sw_ring;
}
+ /* Allocate buffer for reallocating mbufs */
+ rxq->refill_bufs = rte_zmalloc_socket("gve rx refill bufs",
+ nb_desc * sizeof(*rxq->refill_bufs), RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (rxq->refill_bufs == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to allocate rx refill bufs.");
+ err = -ENOMEM;
+ goto free_rxq_completed_buf_list;
+ }
+
/* Allocate RX buffer queue */
mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_id,
nb_desc * sizeof(struct gve_rx_desc_dqo),
@@ -370,7 +383,7 @@ gve_rx_queue_setup_dqo(struct rte_eth_dev *dev, uint16_t queue_id,
if (mz == NULL) {
PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX buffer queue");
err = -ENOMEM;
- goto free_rxq_completed_buf_list;
+ goto free_rxq_refill_bufs;
}
rxq->rx_ring = (struct gve_rx_desc_dqo *)mz->addr;
rxq->rx_ring_phys_addr = mz->iova;
@@ -412,6 +425,8 @@ free_rxq_cq_mz:
rte_memzone_free(rxq->compl_ring_mz);
free_rxq_mz:
rte_memzone_free(rxq->mz);
+free_rxq_refill_bufs:
+ rte_free(rxq->refill_bufs);
free_rxq_completed_buf_list:
rte_free(rxq->completed_buf_list);
free_rxq_sw_ring:
--
2.53.0.1118.gaef5881109-goog
^ permalink raw reply related [flat|nested] 6+ messages in thread* [PATCH 4/4] net/gve: add minor fixes to flow rule code
2026-03-27 0:18 [PATCH 0/4] net/gve: clean up minor issues Jasper Tran O'Leary
` (2 preceding siblings ...)
2026-03-27 0:18 ` [PATCH 3/4] net/gve: add checks for flow rule struct padding Jasper Tran O'Leary
@ 2026-03-27 0:18 ` Jasper Tran O'Leary
2026-04-05 16:19 ` [PATCH 0/4] net/gve: clean up minor issues Stephen Hemminger
4 siblings, 0 replies; 6+ messages in thread
From: Jasper Tran O'Leary @ 2026-03-27 0:18 UTC (permalink / raw)
To: stephen; +Cc: dev, Jasper Tran O'Leary, Joshua Washington
Replace an unneeded goto statement with an early return. Add a comment
about the lifetime of the flow rule lock.
Signed-off-by: Jasper Tran O'Leary <jtranoleary@google.com>
Reviewed-by: Joshua Washington <joshwash@google.com>
---
drivers/net/gve/gve_ethdev.c | 8 ++++++--
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/drivers/net/gve/gve_ethdev.c b/drivers/net/gve/gve_ethdev.c
index 6ce3ef3..fce90ad 100644
--- a/drivers/net/gve/gve_ethdev.c
+++ b/drivers/net/gve/gve_ethdev.c
@@ -521,8 +521,7 @@ gve_setup_flow_subsystem(struct gve_priv *priv)
priv->flow_rule_bmp_size, 0);
if (!priv->avail_flow_rule_bmp_mem) {
PMD_DRV_LOG(ERR, "Failed to alloc bitmap for flow rules.");
- err = -ENOMEM;
- goto free_flow_rule_bmp;
+ return -ENOMEM;
}
err = gve_flow_init_bmp(priv);
@@ -619,6 +618,11 @@ gve_dev_reset(struct rte_eth_dev *dev)
if (gve_get_flow_subsystem_ok(priv))
gve_teardown_flow_subsystem(priv);
+ /*
+ * Note that gve_teardown_flow_subsystem does not destroy the
+ * flow_rule_lock. The lock is preserved across device resets and only
+ * destroyed on dev_close.
+ */
gve_free_queues(dev);
gve_teardown_device_resources(priv);
gve_adminq_free(priv);
--
2.53.0.1118.gaef5881109-goog
^ permalink raw reply related [flat|nested] 6+ messages in thread