* [PATCH net v2 1/3] bnxt_en: refactor tpa_info alloc/free into helpers
2024-11-27 22:38 [PATCH net v2 0/3] bnxt_en: support header page pool in queue API David Wei
@ 2024-11-27 22:38 ` David Wei
2024-11-27 22:38 ` [PATCH net v2 2/3] bnxt_en: refactor bnxt_alloc_rx_rings() to call bnxt_alloc_rx_agg_bmap() David Wei
2024-11-27 22:38 ` [PATCH net v2 3/3] bnxt_en: handle tpa_info in queue API implementation David Wei
2 siblings, 0 replies; 10+ messages in thread
From: David Wei @ 2024-11-27 22:38 UTC (permalink / raw)
To: netdev, Michael Chan, Andy Gospodarek, Somnath Kotur
Cc: Andrew Lunn, David S. Miller, Eric Dumazet, Jakub Kicinski,
Paolo Abeni, David Wei
Refactor bnxt_rx_ring_info->tpa_info operations into helpers that work
on a single tpa_info in prep for queue API using them.
There are 2 pairs of operations:
* bnxt_alloc_one_tpa_info()
* bnxt_free_one_tpa_info()
These alloc/free the tpa_info array itself.
* bnxt_alloc_one_tpa_info_data()
* bnxt_free_one_tpa_info_data()
These alloc/free the frags stored in tpa_info array.
Reviewed-by: Somnath Kotur <somnath.kotur@broadcom.com>
Signed-off-by: David Wei <dw@davidwei.uk>
---
drivers/net/ethernet/broadcom/bnxt/bnxt.c | 142 ++++++++++++++--------
1 file changed, 90 insertions(+), 52 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 57e69c0552ab..38783d125d55 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -3421,15 +3421,11 @@ static void bnxt_free_one_rx_agg_ring(struct bnxt *bp, struct bnxt_rx_ring_info
}
}
-static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
+static void bnxt_free_one_tpa_info_data(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr)
{
- struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
- struct bnxt_tpa_idx_map *map;
int i;
- if (!rxr->rx_tpa)
- goto skip_rx_tpa_free;
-
for (i = 0; i < bp->max_tpa; i++) {
struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
u8 *data = tpa_info->data;
@@ -3440,6 +3436,17 @@ static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
tpa_info->data = NULL;
page_pool_free_va(rxr->head_pool, data, false);
}
+}
+
+static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr)
+{
+ struct bnxt_tpa_idx_map *map;
+
+ if (!rxr->rx_tpa)
+ goto skip_rx_tpa_free;
+
+ bnxt_free_one_tpa_info_data(bp, rxr);
skip_rx_tpa_free:
if (!rxr->rx_buf_ring)
@@ -3467,7 +3474,7 @@ static void bnxt_free_rx_skbs(struct bnxt *bp)
return;
for (i = 0; i < bp->rx_nr_rings; i++)
- bnxt_free_one_rx_ring_skbs(bp, i);
+ bnxt_free_one_rx_ring_skbs(bp, &bp->rx_ring[i]);
}
static void bnxt_free_skbs(struct bnxt *bp)
@@ -3608,29 +3615,64 @@ static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
return 0;
}
+static void bnxt_free_one_tpa_info(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr)
+{
+ int i;
+
+ kfree(rxr->rx_tpa_idx_map);
+ rxr->rx_tpa_idx_map = NULL;
+ if (rxr->rx_tpa) {
+ for (i = 0; i < bp->max_tpa; i++) {
+ kfree(rxr->rx_tpa[i].agg_arr);
+ rxr->rx_tpa[i].agg_arr = NULL;
+ }
+ }
+ kfree(rxr->rx_tpa);
+ rxr->rx_tpa = NULL;
+}
+
static void bnxt_free_tpa_info(struct bnxt *bp)
{
- int i, j;
+ int i;
for (i = 0; i < bp->rx_nr_rings; i++) {
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
- kfree(rxr->rx_tpa_idx_map);
- rxr->rx_tpa_idx_map = NULL;
- if (rxr->rx_tpa) {
- for (j = 0; j < bp->max_tpa; j++) {
- kfree(rxr->rx_tpa[j].agg_arr);
- rxr->rx_tpa[j].agg_arr = NULL;
- }
- }
- kfree(rxr->rx_tpa);
- rxr->rx_tpa = NULL;
+ bnxt_free_one_tpa_info(bp, rxr);
}
}
+static int bnxt_alloc_one_tpa_info(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr)
+{
+ struct rx_agg_cmp *agg;
+ int i;
+
+ rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
+ GFP_KERNEL);
+ if (!rxr->rx_tpa)
+ return -ENOMEM;
+
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
+ return 0;
+ for (i = 0; i < bp->max_tpa; i++) {
+ agg = kcalloc(MAX_SKB_FRAGS, sizeof(*agg), GFP_KERNEL);
+ if (!agg)
+ return -ENOMEM;
+ rxr->rx_tpa[i].agg_arr = agg;
+ }
+ rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
+ GFP_KERNEL);
+ if (!rxr->rx_tpa_idx_map)
+ return -ENOMEM;
+
+ return 0;
+}
+
static int bnxt_alloc_tpa_info(struct bnxt *bp)
{
- int i, j;
+ int i, rc;
bp->max_tpa = MAX_TPA;
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
@@ -3641,25 +3683,10 @@ static int bnxt_alloc_tpa_info(struct bnxt *bp)
for (i = 0; i < bp->rx_nr_rings; i++) {
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
- struct rx_agg_cmp *agg;
-
- rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
- GFP_KERNEL);
- if (!rxr->rx_tpa)
- return -ENOMEM;
- if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
- continue;
- for (j = 0; j < bp->max_tpa; j++) {
- agg = kcalloc(MAX_SKB_FRAGS, sizeof(*agg), GFP_KERNEL);
- if (!agg)
- return -ENOMEM;
- rxr->rx_tpa[j].agg_arr = agg;
- }
- rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
- GFP_KERNEL);
- if (!rxr->rx_tpa_idx_map)
- return -ENOMEM;
+ rc = bnxt_alloc_one_tpa_info(bp, rxr);
+ if (rc)
+ return rc;
}
return 0;
}
@@ -4268,10 +4295,31 @@ static void bnxt_alloc_one_rx_ring_page(struct bnxt *bp,
rxr->rx_agg_prod = prod;
}
+static int bnxt_alloc_one_tpa_info_data(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr)
+{
+ dma_addr_t mapping;
+ u8 *data;
+ int i;
+
+ for (i = 0; i < bp->max_tpa; i++) {
+ data = __bnxt_alloc_rx_frag(bp, &mapping, rxr,
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ rxr->rx_tpa[i].data = data;
+ rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
+ rxr->rx_tpa[i].mapping = mapping;
+ }
+
+ return 0;
+}
+
static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
{
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
- int i;
+ int rc;
bnxt_alloc_one_rx_ring_skb(bp, rxr, ring_nr);
@@ -4281,19 +4329,9 @@ static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
bnxt_alloc_one_rx_ring_page(bp, rxr, ring_nr);
if (rxr->rx_tpa) {
- dma_addr_t mapping;
- u8 *data;
-
- for (i = 0; i < bp->max_tpa; i++) {
- data = __bnxt_alloc_rx_frag(bp, &mapping, rxr,
- GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- rxr->rx_tpa[i].data = data;
- rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
- rxr->rx_tpa[i].mapping = mapping;
- }
+ rc = bnxt_alloc_one_tpa_info_data(bp, rxr);
+ if (rc)
+ return rc;
}
return 0;
}
@@ -13657,7 +13695,7 @@ static void bnxt_rx_ring_reset(struct bnxt *bp)
bnxt_reset_task(bp, true);
break;
}
- bnxt_free_one_rx_ring_skbs(bp, i);
+ bnxt_free_one_rx_ring_skbs(bp, rxr);
rxr->rx_prod = 0;
rxr->rx_agg_prod = 0;
rxr->rx_sw_agg_prod = 0;
--
2.43.5
^ permalink raw reply related [flat|nested] 10+ messages in thread* [PATCH net v2 2/3] bnxt_en: refactor bnxt_alloc_rx_rings() to call bnxt_alloc_rx_agg_bmap()
2024-11-27 22:38 [PATCH net v2 0/3] bnxt_en: support header page pool in queue API David Wei
2024-11-27 22:38 ` [PATCH net v2 1/3] bnxt_en: refactor tpa_info alloc/free into helpers David Wei
@ 2024-11-27 22:38 ` David Wei
2024-11-27 22:38 ` [PATCH net v2 3/3] bnxt_en: handle tpa_info in queue API implementation David Wei
2 siblings, 0 replies; 10+ messages in thread
From: David Wei @ 2024-11-27 22:38 UTC (permalink / raw)
To: netdev, Michael Chan, Andy Gospodarek, Somnath Kotur
Cc: Andrew Lunn, David S. Miller, Eric Dumazet, Jakub Kicinski,
Paolo Abeni, David Wei
Refactor bnxt_alloc_rx_rings() to call bnxt_alloc_rx_agg_bmap() for
allocating rx_agg_bmap.
Reviewed-by: Somnath Kotur <somnath.kotur@broadcom.com>
Signed-off-by: David Wei <dw@davidwei.uk>
---
drivers/net/ethernet/broadcom/bnxt/bnxt.c | 36 ++++++++++-------------
1 file changed, 16 insertions(+), 20 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 38783d125d55..9b079bce1423 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -3764,6 +3764,19 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
return PTR_ERR(pool);
}
+static int bnxt_alloc_rx_agg_bmap(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
+{
+ u16 mem_size;
+
+ rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
+ mem_size = rxr->rx_agg_bmap_size / 8;
+ rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
+ if (!rxr->rx_agg_bmap)
+ return -ENOMEM;
+
+ return 0;
+}
+
static int bnxt_alloc_rx_rings(struct bnxt *bp)
{
int numa_node = dev_to_node(&bp->pdev->dev);
@@ -3808,19 +3821,15 @@ static int bnxt_alloc_rx_rings(struct bnxt *bp)
ring->grp_idx = i;
if (agg_rings) {
- u16 mem_size;
-
ring = &rxr->rx_agg_ring_struct;
rc = bnxt_alloc_ring(bp, &ring->ring_mem);
if (rc)
return rc;
ring->grp_idx = i;
- rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
- mem_size = rxr->rx_agg_bmap_size / 8;
- rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
- if (!rxr->rx_agg_bmap)
- return -ENOMEM;
+ rc = bnxt_alloc_rx_agg_bmap(bp, rxr);
+ if (rc)
+ return rc;
}
}
if (bp->flags & BNXT_FLAG_TPA)
@@ -15325,19 +15334,6 @@ static const struct netdev_stat_ops bnxt_stat_ops = {
.get_base_stats = bnxt_get_base_stats,
};
-static int bnxt_alloc_rx_agg_bmap(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
-{
- u16 mem_size;
-
- rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
- mem_size = rxr->rx_agg_bmap_size / 8;
- rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
- if (!rxr->rx_agg_bmap)
- return -ENOMEM;
-
- return 0;
-}
-
static int bnxt_queue_mem_alloc(struct net_device *dev, void *qmem, int idx)
{
struct bnxt_rx_ring_info *rxr, *clone;
--
2.43.5
^ permalink raw reply related [flat|nested] 10+ messages in thread* [PATCH net v2 3/3] bnxt_en: handle tpa_info in queue API implementation
2024-11-27 22:38 [PATCH net v2 0/3] bnxt_en: support header page pool in queue API David Wei
2024-11-27 22:38 ` [PATCH net v2 1/3] bnxt_en: refactor tpa_info alloc/free into helpers David Wei
2024-11-27 22:38 ` [PATCH net v2 2/3] bnxt_en: refactor bnxt_alloc_rx_rings() to call bnxt_alloc_rx_agg_bmap() David Wei
@ 2024-11-27 22:38 ` David Wei
2024-11-28 3:46 ` Somnath Kotur
2024-11-30 22:15 ` Jakub Kicinski
2 siblings, 2 replies; 10+ messages in thread
From: David Wei @ 2024-11-27 22:38 UTC (permalink / raw)
To: netdev, Michael Chan, Andy Gospodarek, Somnath Kotur
Cc: Andrew Lunn, David S. Miller, Eric Dumazet, Jakub Kicinski,
Paolo Abeni, David Wei
Commit 7ed816be35ab ("eth: bnxt: use page pool for head frags") added a
page pool for header frags, which may be distinct from the existing pool
for the aggregation ring. Add support for this head_pool in the queue
API.
Signed-off-by: David Wei <dw@davidwei.uk>
---
drivers/net/ethernet/broadcom/bnxt/bnxt.c | 25 ++++++++++++++++++++---
1 file changed, 22 insertions(+), 3 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 9b079bce1423..08c7d3049562 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -15382,15 +15382,25 @@ static int bnxt_queue_mem_alloc(struct net_device *dev, void *qmem, int idx)
goto err_free_rx_agg_ring;
}
+ if (bp->flags & BNXT_FLAG_TPA) {
+ rc = bnxt_alloc_one_tpa_info(bp, clone);
+ if (rc)
+ goto err_free_tpa_info;
+ }
+
bnxt_init_one_rx_ring_rxbd(bp, clone);
bnxt_init_one_rx_agg_ring_rxbd(bp, clone);
bnxt_alloc_one_rx_ring_skb(bp, clone, idx);
if (bp->flags & BNXT_FLAG_AGG_RINGS)
bnxt_alloc_one_rx_ring_page(bp, clone, idx);
+ if (bp->flags & BNXT_FLAG_TPA)
+ bnxt_alloc_one_tpa_info_data(bp, clone);
return 0;
+err_free_tpa_info:
+ bnxt_free_one_tpa_info(bp, clone);
err_free_rx_agg_ring:
bnxt_free_ring(bp, &clone->rx_agg_ring_struct.ring_mem);
err_free_rx_ring:
@@ -15398,9 +15408,11 @@ static int bnxt_queue_mem_alloc(struct net_device *dev, void *qmem, int idx)
err_rxq_info_unreg:
xdp_rxq_info_unreg(&clone->xdp_rxq);
err_page_pool_destroy:
- clone->page_pool->p.napi = NULL;
page_pool_destroy(clone->page_pool);
+ if (clone->page_pool != clone->head_pool)
+ page_pool_destroy(clone->head_pool);
clone->page_pool = NULL;
+ clone->head_pool = NULL;
return rc;
}
@@ -15410,13 +15422,15 @@ static void bnxt_queue_mem_free(struct net_device *dev, void *qmem)
struct bnxt *bp = netdev_priv(dev);
struct bnxt_ring_struct *ring;
- bnxt_free_one_rx_ring(bp, rxr);
- bnxt_free_one_rx_agg_ring(bp, rxr);
+ bnxt_free_one_rx_ring_skbs(bp, rxr);
xdp_rxq_info_unreg(&rxr->xdp_rxq);
page_pool_destroy(rxr->page_pool);
+ if (rxr->page_pool != rxr->head_pool)
+ page_pool_destroy(rxr->head_pool);
rxr->page_pool = NULL;
+ rxr->head_pool = NULL;
ring = &rxr->rx_ring_struct;
bnxt_free_ring(bp, &ring->ring_mem);
@@ -15498,7 +15512,10 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
rxr->rx_agg_prod = clone->rx_agg_prod;
rxr->rx_sw_agg_prod = clone->rx_sw_agg_prod;
rxr->rx_next_cons = clone->rx_next_cons;
+ rxr->rx_tpa = clone->rx_tpa;
+ rxr->rx_tpa_idx_map = clone->rx_tpa_idx_map;
rxr->page_pool = clone->page_pool;
+ rxr->head_pool = clone->head_pool;
rxr->xdp_rxq = clone->xdp_rxq;
bnxt_copy_rx_ring(bp, rxr, clone);
@@ -15557,6 +15574,8 @@ static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx)
bnxt_hwrm_rx_agg_ring_free(bp, rxr, false);
rxr->rx_next_cons = 0;
page_pool_disable_direct_recycling(rxr->page_pool);
+ if (rxr->page_pool != rxr->head_pool)
+ page_pool_disable_direct_recycling(rxr->head_pool);
memcpy(qmem, rxr, sizeof(*rxr));
bnxt_init_rx_ring_struct(bp, qmem);
--
2.43.5
^ permalink raw reply related [flat|nested] 10+ messages in thread* Re: [PATCH net v2 3/3] bnxt_en: handle tpa_info in queue API implementation
2024-11-27 22:38 ` [PATCH net v2 3/3] bnxt_en: handle tpa_info in queue API implementation David Wei
@ 2024-11-28 3:46 ` Somnath Kotur
2024-11-29 0:43 ` David Wei
2024-11-30 22:15 ` Jakub Kicinski
1 sibling, 1 reply; 10+ messages in thread
From: Somnath Kotur @ 2024-11-28 3:46 UTC (permalink / raw)
To: David Wei
Cc: netdev, Michael Chan, Andy Gospodarek, Andrew Lunn,
David S. Miller, Eric Dumazet, Jakub Kicinski, Paolo Abeni
[-- Attachment #1: Type: text/plain, Size: 4105 bytes --]
On Thu, Nov 28, 2024 at 4:09 AM David Wei <dw@davidwei.uk> wrote:
>
> Commit 7ed816be35ab ("eth: bnxt: use page pool for head frags") added a
> page pool for header frags, which may be distinct from the existing pool
> for the aggregation ring. Add support for this head_pool in the queue
> API.
>
> Signed-off-by: David Wei <dw@davidwei.uk>
> ---
> drivers/net/ethernet/broadcom/bnxt/bnxt.c | 25 ++++++++++++++++++++---
> 1 file changed, 22 insertions(+), 3 deletions(-)
>
> diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
> index 9b079bce1423..08c7d3049562 100644
> --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
> +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
> @@ -15382,15 +15382,25 @@ static int bnxt_queue_mem_alloc(struct net_device *dev, void *qmem, int idx)
> goto err_free_rx_agg_ring;
> }
>
> + if (bp->flags & BNXT_FLAG_TPA) {
> + rc = bnxt_alloc_one_tpa_info(bp, clone);
> + if (rc)
> + goto err_free_tpa_info;
> + }
> +
> bnxt_init_one_rx_ring_rxbd(bp, clone);
> bnxt_init_one_rx_agg_ring_rxbd(bp, clone);
>
> bnxt_alloc_one_rx_ring_skb(bp, clone, idx);
> if (bp->flags & BNXT_FLAG_AGG_RINGS)
> bnxt_alloc_one_rx_ring_page(bp, clone, idx);
> + if (bp->flags & BNXT_FLAG_TPA)
> + bnxt_alloc_one_tpa_info_data(bp, clone);
>
> return 0;
>
> +err_free_tpa_info:
> + bnxt_free_one_tpa_info(bp, clone);
> err_free_rx_agg_ring:
> bnxt_free_ring(bp, &clone->rx_agg_ring_struct.ring_mem);
> err_free_rx_ring:
> @@ -15398,9 +15408,11 @@ static int bnxt_queue_mem_alloc(struct net_device *dev, void *qmem, int idx)
> err_rxq_info_unreg:
> xdp_rxq_info_unreg(&clone->xdp_rxq);
> err_page_pool_destroy:
> - clone->page_pool->p.napi = NULL;
> page_pool_destroy(clone->page_pool);
> + if (clone->page_pool != clone->head_pool)
Just curious, why is this check needed everywhere? Is there a case
where the 2 page pools can be the same? I thought either there is a
page_pool for the header frags or none at all?
> + page_pool_destroy(clone->head_pool);
> clone->page_pool = NULL;
> + clone->head_pool = NULL;
> return rc;
> }
>
> @@ -15410,13 +15422,15 @@ static void bnxt_queue_mem_free(struct net_device *dev, void *qmem)
> struct bnxt *bp = netdev_priv(dev);
> struct bnxt_ring_struct *ring;
>
> - bnxt_free_one_rx_ring(bp, rxr);
> - bnxt_free_one_rx_agg_ring(bp, rxr);
> + bnxt_free_one_rx_ring_skbs(bp, rxr);
>
> xdp_rxq_info_unreg(&rxr->xdp_rxq);
>
> page_pool_destroy(rxr->page_pool);
> + if (rxr->page_pool != rxr->head_pool)
> + page_pool_destroy(rxr->head_pool);
> rxr->page_pool = NULL;
> + rxr->head_pool = NULL;
>
> ring = &rxr->rx_ring_struct;
> bnxt_free_ring(bp, &ring->ring_mem);
> @@ -15498,7 +15512,10 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
> rxr->rx_agg_prod = clone->rx_agg_prod;
> rxr->rx_sw_agg_prod = clone->rx_sw_agg_prod;
> rxr->rx_next_cons = clone->rx_next_cons;
> + rxr->rx_tpa = clone->rx_tpa;
> + rxr->rx_tpa_idx_map = clone->rx_tpa_idx_map;
> rxr->page_pool = clone->page_pool;
> + rxr->head_pool = clone->head_pool;
> rxr->xdp_rxq = clone->xdp_rxq;
>
> bnxt_copy_rx_ring(bp, rxr, clone);
> @@ -15557,6 +15574,8 @@ static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx)
> bnxt_hwrm_rx_agg_ring_free(bp, rxr, false);
> rxr->rx_next_cons = 0;
> page_pool_disable_direct_recycling(rxr->page_pool);
> + if (rxr->page_pool != rxr->head_pool)
> + page_pool_disable_direct_recycling(rxr->head_pool);
>
> memcpy(qmem, rxr, sizeof(*rxr));
> bnxt_init_rx_ring_struct(bp, qmem);
> --
> 2.43.5
>
[-- Attachment #2: S/MIME Cryptographic Signature --]
[-- Type: application/pkcs7-signature, Size: 4212 bytes --]
^ permalink raw reply [flat|nested] 10+ messages in thread* Re: [PATCH net v2 3/3] bnxt_en: handle tpa_info in queue API implementation
2024-11-28 3:46 ` Somnath Kotur
@ 2024-11-29 0:43 ` David Wei
2024-11-30 22:14 ` Jakub Kicinski
0 siblings, 1 reply; 10+ messages in thread
From: David Wei @ 2024-11-29 0:43 UTC (permalink / raw)
To: Somnath Kotur
Cc: netdev, Michael Chan, Andy Gospodarek, Andrew Lunn,
David S. Miller, Eric Dumazet, Jakub Kicinski, Paolo Abeni
On 2024-11-27 19:46, Somnath Kotur wrote:
> On Thu, Nov 28, 2024 at 4:09 AM David Wei <dw@davidwei.uk> wrote:
>>
>> Commit 7ed816be35ab ("eth: bnxt: use page pool for head frags") added a
>> page pool for header frags, which may be distinct from the existing pool
>> for the aggregation ring. Add support for this head_pool in the queue
>> API.
>>
>> Signed-off-by: David Wei <dw@davidwei.uk>
>> ---
>> drivers/net/ethernet/broadcom/bnxt/bnxt.c | 25 ++++++++++++++++++++---
>> 1 file changed, 22 insertions(+), 3 deletions(-)
>>
>> diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
>> index 9b079bce1423..08c7d3049562 100644
>> --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
>> +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
>> @@ -15382,15 +15382,25 @@ static int bnxt_queue_mem_alloc(struct net_device *dev, void *qmem, int idx)
>> goto err_free_rx_agg_ring;
>> }
>>
>> + if (bp->flags & BNXT_FLAG_TPA) {
>> + rc = bnxt_alloc_one_tpa_info(bp, clone);
>> + if (rc)
>> + goto err_free_tpa_info;
>> + }
>> +
>> bnxt_init_one_rx_ring_rxbd(bp, clone);
>> bnxt_init_one_rx_agg_ring_rxbd(bp, clone);
>>
>> bnxt_alloc_one_rx_ring_skb(bp, clone, idx);
>> if (bp->flags & BNXT_FLAG_AGG_RINGS)
>> bnxt_alloc_one_rx_ring_page(bp, clone, idx);
>> + if (bp->flags & BNXT_FLAG_TPA)
>> + bnxt_alloc_one_tpa_info_data(bp, clone);
>>
>> return 0;
>>
>> +err_free_tpa_info:
>> + bnxt_free_one_tpa_info(bp, clone);
>> err_free_rx_agg_ring:
>> bnxt_free_ring(bp, &clone->rx_agg_ring_struct.ring_mem);
>> err_free_rx_ring:
>> @@ -15398,9 +15408,11 @@ static int bnxt_queue_mem_alloc(struct net_device *dev, void *qmem, int idx)
>> err_rxq_info_unreg:
>> xdp_rxq_info_unreg(&clone->xdp_rxq);
>> err_page_pool_destroy:
>> - clone->page_pool->p.napi = NULL;
>> page_pool_destroy(clone->page_pool);
>> + if (clone->page_pool != clone->head_pool)
> Just curious, why is this check needed everywhere? Is there a case
> where the 2 page pools can be the same ? I thought either there is a
> page_pool for the header frags or none at all ?
Yes, frags are always allocated now from head_pool, which is by default
the same as page_pool.
If bnxt_separate_head_pool() then they are different.
^ permalink raw reply [flat|nested] 10+ messages in thread* Re: [PATCH net v2 3/3] bnxt_en: handle tpa_info in queue API implementation
2024-11-29 0:43 ` David Wei
@ 2024-11-30 22:14 ` Jakub Kicinski
2024-11-30 22:52 ` David Wei
0 siblings, 1 reply; 10+ messages in thread
From: Jakub Kicinski @ 2024-11-30 22:14 UTC (permalink / raw)
To: David Wei
Cc: Somnath Kotur, netdev, Michael Chan, Andy Gospodarek, Andrew Lunn,
David S. Miller, Eric Dumazet, Paolo Abeni
On Thu, 28 Nov 2024 16:43:40 -0800 David Wei wrote:
> > Just curious, why is this check needed everywhere? Is there a case
> > where the 2 page pools can be the same ? I thought either there is a
> > page_pool for the header frags or none at all ?
>
> Yes, frags are always allocated now from head_pool, which is by default
> the same as page_pool.
>
> If bnxt_separate_head_pool() then they are different.
Let's factor it out? We have 3 copies.
^ permalink raw reply [flat|nested] 10+ messages in thread
* Re: [PATCH net v2 3/3] bnxt_en: handle tpa_info in queue API implementation
2024-11-30 22:14 ` Jakub Kicinski
@ 2024-11-30 22:52 ` David Wei
0 siblings, 0 replies; 10+ messages in thread
From: David Wei @ 2024-11-30 22:52 UTC (permalink / raw)
To: Jakub Kicinski
Cc: Somnath Kotur, netdev, Michael Chan, Andy Gospodarek, Andrew Lunn,
David S. Miller, Eric Dumazet, Paolo Abeni
On 2024-11-30 14:14, Jakub Kicinski wrote:
> On Thu, 28 Nov 2024 16:43:40 -0800 David Wei wrote:
>>> Just curious, why is this check needed everywhere? Is there a case
>>> where the 2 page pools can be the same ? I thought either there is a
>>> page_pool for the header frags or none at all ?
>>
>> Yes, frags are always allocated now from head_pool, which is by default
>> the same as page_pool.
>>
>> If bnxt_separate_head_pool() then they are different.
>
> Let's factor it out? We have 3 copies.
Sounds good. Will refactor into a helper.
^ permalink raw reply [flat|nested] 10+ messages in thread
* Re: [PATCH net v2 3/3] bnxt_en: handle tpa_info in queue API implementation
2024-11-27 22:38 ` [PATCH net v2 3/3] bnxt_en: handle tpa_info in queue API implementation David Wei
2024-11-28 3:46 ` Somnath Kotur
@ 2024-11-30 22:15 ` Jakub Kicinski
2024-11-30 22:59 ` David Wei
1 sibling, 1 reply; 10+ messages in thread
From: Jakub Kicinski @ 2024-11-30 22:15 UTC (permalink / raw)
To: David Wei
Cc: netdev, Michael Chan, Andy Gospodarek, Somnath Kotur, Andrew Lunn,
David S. Miller, Eric Dumazet, Paolo Abeni
On Wed, 27 Nov 2024 14:38:55 -0800 David Wei wrote:
> + if (bp->flags & BNXT_FLAG_TPA) {
> + rc = bnxt_alloc_one_tpa_info(bp, clone);
> + if (rc)
> + goto err_free_tpa_info;
> + }
> +
> bnxt_init_one_rx_ring_rxbd(bp, clone);
> bnxt_init_one_rx_agg_ring_rxbd(bp, clone);
>
> bnxt_alloc_one_rx_ring_skb(bp, clone, idx);
> if (bp->flags & BNXT_FLAG_AGG_RINGS)
> bnxt_alloc_one_rx_ring_page(bp, clone, idx);
> + if (bp->flags & BNXT_FLAG_TPA)
> + bnxt_alloc_one_tpa_info_data(bp, clone);
Could you explain the TPA related changes in the commit message?
Do we need to realloc the frags now since they don't come from
system memory?
^ permalink raw reply [flat|nested] 10+ messages in thread* Re: [PATCH net v2 3/3] bnxt_en: handle tpa_info in queue API implementation
2024-11-30 22:15 ` Jakub Kicinski
@ 2024-11-30 22:59 ` David Wei
0 siblings, 0 replies; 10+ messages in thread
From: David Wei @ 2024-11-30 22:59 UTC (permalink / raw)
To: Jakub Kicinski
Cc: netdev, Michael Chan, Andy Gospodarek, Somnath Kotur, Andrew Lunn,
David S. Miller, Eric Dumazet, Paolo Abeni
On 2024-11-30 14:15, Jakub Kicinski wrote:
> On Wed, 27 Nov 2024 14:38:55 -0800 David Wei wrote:
>> + if (bp->flags & BNXT_FLAG_TPA) {
>> + rc = bnxt_alloc_one_tpa_info(bp, clone);
>> + if (rc)
>> + goto err_free_tpa_info;
>> + }
>> +
>> bnxt_init_one_rx_ring_rxbd(bp, clone);
>> bnxt_init_one_rx_agg_ring_rxbd(bp, clone);
>>
>> bnxt_alloc_one_rx_ring_skb(bp, clone, idx);
>> if (bp->flags & BNXT_FLAG_AGG_RINGS)
>> bnxt_alloc_one_rx_ring_page(bp, clone, idx);
>> + if (bp->flags & BNXT_FLAG_TPA)
>> + bnxt_alloc_one_tpa_info_data(bp, clone);
>
> Could you explain the TPA related changes in the commit message?
Got it, I'll expand on why the TPA changes are made in the commit.
> Do we need to realloc the frags now since they don't come from
> system memory?
Yes, frags now come from head_pool instead of system memory. The old
head_pool is freed and a new head_pool is allocated during a queue
reset. Therefore the old tpa_info with frags allocated from the old
head_pool must be freed as well, otherwise the driver will attempt to
return frags back to a different page pool than the one it was allocated
from. When the frags were allocated from system memory using the generic
allocators, it didn't matter since they did not have their lifetimes
tied to page pools.
^ permalink raw reply [flat|nested] 10+ messages in thread