* [PATCH net-next] eth: bnxt: add netmem TX support
@ 2025-06-17 9:45 Taehee Yoo
2025-06-17 20:14 ` Mina Almasry
2025-06-17 20:27 ` Stanislav Fomichev
0 siblings, 2 replies; 5+ messages in thread
From: Taehee Yoo @ 2025-06-17 9:45 UTC (permalink / raw)
To: davem, kuba, pabeni, edumazet, andrew+netdev, horms, michael.chan,
pavan.chebbi, almasrymina, sdf, netdev
Cc: ap420073
Use netmem_dma_*() helpers and declare netmem_tx to support netmem TX.
By this change, all bnxt devices will support the netmem TX.
bnxt_start_xmit() uses memcpy() if a packet is too small. However,
netmem packets are unreadable, so memcpy() is not allowed.
It should check whether an skb is readable, and if an SKB is unreadable,
it is processed by the normal transmission logic.
netmem TX can be tested with ncdevmem.c
Signed-off-by: Taehee Yoo <ap420073@gmail.com>
---
drivers/net/ethernet/broadcom/bnxt/bnxt.c | 28 ++++++++++++++---------
1 file changed, 17 insertions(+), 11 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 869580b6f70d..4de9dc123a18 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -477,6 +477,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct bnxt_tx_ring_info *txr;
struct bnxt_sw_tx_bd *tx_buf;
__le32 lflags = 0;
+ skb_frag_t *frag;
i = skb_get_queue_mapping(skb);
if (unlikely(i >= bp->tx_nr_rings)) {
@@ -563,7 +564,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC);
if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh &&
- !lflags) {
+ skb_frags_readable(skb) && !lflags) {
struct tx_push_buffer *tx_push_buf = txr->tx_push;
struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
@@ -598,9 +599,9 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb_copy_from_linear_data(skb, pdata, len);
pdata += len;
for (j = 0; j < last_frag; j++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
void *fptr;
+ frag = &skb_shinfo(skb)->frags[j];
fptr = skb_frag_address_safe(frag);
if (!fptr)
goto normal_tx;
@@ -708,8 +709,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
txbd0 = txbd;
for (i = 0; i < last_frag; i++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-
+ frag = &skb_shinfo(skb)->frags[i];
prod = NEXT_TX(prod);
txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
@@ -721,7 +721,8 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
goto tx_dma_error;
tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
- dma_unmap_addr_set(tx_buf, mapping, mapping);
+ netmem_dma_unmap_addr_set(skb_frag_netmem(frag), tx_buf,
+ mapping, mapping);
txbd->tx_bd_haddr = cpu_to_le64(mapping);
@@ -778,9 +779,11 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
for (i = 0; i < last_frag; i++) {
prod = NEXT_TX(prod);
tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
- dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
- skb_frag_size(&skb_shinfo(skb)->frags[i]),
- DMA_TO_DEVICE);
+ frag = &skb_shinfo(skb)->frags[i];
+ netmem_dma_unmap_page_attrs(&pdev->dev,
+ dma_unmap_addr(tx_buf, mapping),
+ skb_frag_size(frag),
+ DMA_TO_DEVICE, 0);
}
tx_free:
@@ -3422,9 +3425,11 @@ static void bnxt_free_one_tx_ring_skbs(struct bnxt *bp,
skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
tx_buf = &txr->tx_buf_ring[ring_idx];
- dma_unmap_page(&pdev->dev,
- dma_unmap_addr(tx_buf, mapping),
- skb_frag_size(frag), DMA_TO_DEVICE);
+ netmem_dma_unmap_page_attrs(&pdev->dev,
+ dma_unmap_addr(tx_buf,
+ mapping),
+ skb_frag_size(frag),
+ DMA_TO_DEVICE, 0);
}
dev_kfree_skb(skb);
}
@@ -16713,6 +16718,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (BNXT_SUPPORTS_QUEUE_API(bp))
dev->queue_mgmt_ops = &bnxt_queue_mgmt_ops;
dev->request_ops_lock = true;
+ dev->netmem_tx = true;
rc = register_netdev(dev);
if (rc)
--
2.34.1
^ permalink raw reply related [flat|nested] 5+ messages in thread* Re: [PATCH net-next] eth: bnxt: add netmem TX support
2025-06-17 9:45 [PATCH net-next] eth: bnxt: add netmem TX support Taehee Yoo
@ 2025-06-17 20:14 ` Mina Almasry
2025-06-18 13:20 ` Taehee Yoo
2025-06-17 20:27 ` Stanislav Fomichev
1 sibling, 1 reply; 5+ messages in thread
From: Mina Almasry @ 2025-06-17 20:14 UTC (permalink / raw)
To: Taehee Yoo, Pranjal Shrivastava, Shivaji Kant, Stanislav Fomichev,
Pavel Begunkov
Cc: davem, kuba, pabeni, edumazet, andrew+netdev, horms, michael.chan,
pavan.chebbi, netdev
On Tue, Jun 17, 2025 at 2:45 AM Taehee Yoo <ap420073@gmail.com> wrote:
>
> Use netmem_dma_*() helpers and declare netmem_tx to support netmem TX.
> By this change, all bnxt devices will support the netmem TX.
>
> bnxt_start_xmit() uses memcpy() if a packet is too small. However,
nit: this is slightly inaccurate. memcpy itself (via
skb_copy_from_linear_data) is not an issue, because I think that's
copying the linear part of the skb. What is really a problem is
skb_frag_address_safe(). Unreadable skbs have no valid address.
This made me realize that skb_frag_address_safe() is broken :( it
needs this check, similar to skb_frag_address():
```
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index c05057869e08..da03ff71b05e 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -3681,7 +3681,12 @@ static inline void *skb_frag_address(const
skb_frag_t *frag)
*/
static inline void *skb_frag_address_safe(const skb_frag_t *frag)
{
- void *ptr = page_address(skb_frag_page(frag));
+ void *ptr;
+
+ if (!skb_frag_page(frag))
+ return NULL;
+
+ ptr = page_address(skb_frag_page(frag));
if (unlikely(!ptr))
return NULL;
```
I guess I'll send this fix to net.
> netmem packets are unreadable, so memcpy() is not allowed.
> It should check whether an skb is readable, and if an SKB is unreadable,
> it is processed by the normal transmission logic.
>
> netmem TX can be tested with ncdevmem.c
>
> Signed-off-by: Taehee Yoo <ap420073@gmail.com>
Seems like a straightforward conversion to using the netmem dma
mapping API. I don't see anything concerning/unusual.
Acked-by: Mina Almasry <almasrymina@google.com>
--
Thanks,
Mina
^ permalink raw reply related [flat|nested] 5+ messages in thread* Re: [PATCH net-next] eth: bnxt: add netmem TX support
2025-06-17 20:14 ` Mina Almasry
@ 2025-06-18 13:20 ` Taehee Yoo
0 siblings, 0 replies; 5+ messages in thread
From: Taehee Yoo @ 2025-06-18 13:20 UTC (permalink / raw)
To: Mina Almasry
Cc: Pranjal Shrivastava, Shivaji Kant, Stanislav Fomichev,
Pavel Begunkov, davem, kuba, pabeni, edumazet, andrew+netdev,
horms, michael.chan, pavan.chebbi, netdev
On Wed, Jun 18, 2025 at 5:14 AM Mina Almasry <almasrymina@google.com> wrote:
>
Hi Mina,
Thanks a lot for your review!
> On Tue, Jun 17, 2025 at 2:45 AM Taehee Yoo <ap420073@gmail.com> wrote:
> >
> > Use netmem_dma_*() helpers and declare netmem_tx to support netmem TX.
> > By this change, all bnxt devices will support the netmem TX.
> >
> > bnxt_start_xmit() uses memcpy() if a packet is too small. However,
>
> nit: this is slightly inaccurate. memcpy itself (via
> skb_copy_from_linear_data) is not an issue, because I think that's
> copying the linear part of the skb. What is really a problem is
> skb_frag_address_safe(). Unreadable skbs have no valid address.
>
> This made me realize that skb_frag_address_safe() is broken :( it
> needs this check, similar to skb_frag_address():
You're right!
The real problem is the skb_frag_address_safe(), as you mentioned.
I will fix the git commit message in v2.
Thanks a lot!
Taehee Yoo
>
> ```
> diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
> index c05057869e08..da03ff71b05e 100644
> --- a/include/linux/skbuff.h
> +++ b/include/linux/skbuff.h
> @@ -3681,7 +3681,12 @@ static inline void *skb_frag_address(const
> skb_frag_t *frag)
> */
> static inline void *skb_frag_address_safe(const skb_frag_t *frag)
> {
> - void *ptr = page_address(skb_frag_page(frag));
> + void *ptr;
> +
> + if (!skb_frag_page(frag))
> + return NULL;
> +
> + ptr = page_address(skb_frag_page(frag));
> if (unlikely(!ptr))
> return NULL;
> ```
>
> I guess I'll send this fix to net.
>
> > netmem packets are unreadable, so memcpy() is not allowed.
> > It should check whether an skb is readable, and if an SKB is unreadable,
> > it is processed by the normal transmission logic.
> >
> > netmem TX can be tested with ncdevmem.c
> >
> > Signed-off-by: Taehee Yoo <ap420073@gmail.com>
>
> Seems like a straightforward conversion to using the netmem dma
> mapping API. I don't see anything concerning/unusualy.
>
> Acked-by: Mina Almasry <almasrymina@google.com>
>
> --
> Thanks,
> Mina
^ permalink raw reply [flat|nested] 5+ messages in thread
* Re: [PATCH net-next] eth: bnxt: add netmem TX support
2025-06-17 9:45 [PATCH net-next] eth: bnxt: add netmem TX support Taehee Yoo
2025-06-17 20:14 ` Mina Almasry
@ 2025-06-17 20:27 ` Stanislav Fomichev
2025-06-18 13:42 ` Taehee Yoo
1 sibling, 1 reply; 5+ messages in thread
From: Stanislav Fomichev @ 2025-06-17 20:27 UTC (permalink / raw)
To: Taehee Yoo
Cc: davem, kuba, pabeni, edumazet, andrew+netdev, horms, michael.chan,
pavan.chebbi, almasrymina, sdf, netdev
On 06/17, Taehee Yoo wrote:
> Use netmem_dma_*() helpers and declare netmem_tx to support netmem TX.
> By this change, all bnxt devices will support the netmem TX.
>
> bnxt_start_xmit() uses memcpy() if a packet is too small. However,
> netmem packets are unreadable, so memcpy() is not allowed.
> It should check whether an skb is readable, and if an SKB is unreadable,
> it is processed by the normal transmission logic.
>
> netmem TX can be tested with ncdevmem.c
>
> Signed-off-by: Taehee Yoo <ap420073@gmail.com>
> ---
> drivers/net/ethernet/broadcom/bnxt/bnxt.c | 28 ++++++++++++++---------
> 1 file changed, 17 insertions(+), 11 deletions(-)
>
> diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
> index 869580b6f70d..4de9dc123a18 100644
> --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
> +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
> @@ -477,6 +477,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
> struct bnxt_tx_ring_info *txr;
> struct bnxt_sw_tx_bd *tx_buf;
> __le32 lflags = 0;
> + skb_frag_t *frag;
>
> i = skb_get_queue_mapping(skb);
> if (unlikely(i >= bp->tx_nr_rings)) {
> @@ -563,7 +564,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
> lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC);
>
> if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh &&
> - !lflags) {
> + skb_frags_readable(skb) && !lflags) {
> struct tx_push_buffer *tx_push_buf = txr->tx_push;
> struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
> struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
> @@ -598,9 +599,9 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
> skb_copy_from_linear_data(skb, pdata, len);
> pdata += len;
> for (j = 0; j < last_frag; j++) {
> - skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
> void *fptr;
>
> + frag = &skb_shinfo(skb)->frags[j];
> fptr = skb_frag_address_safe(frag);
> if (!fptr)
> goto normal_tx;
> @@ -708,8 +709,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
> cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
> txbd0 = txbd;
> for (i = 0; i < last_frag; i++) {
> - skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
> -
> + frag = &skb_shinfo(skb)->frags[i];
> prod = NEXT_TX(prod);
> txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
>
> @@ -721,7 +721,8 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
> goto tx_dma_error;
>
> tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
> - dma_unmap_addr_set(tx_buf, mapping, mapping);
> + netmem_dma_unmap_addr_set(skb_frag_netmem(frag), tx_buf,
> + mapping, mapping);
>
> txbd->tx_bd_haddr = cpu_to_le64(mapping);
>
> @@ -778,9 +779,11 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
> for (i = 0; i < last_frag; i++) {
> prod = NEXT_TX(prod);
> tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
> - dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
> - skb_frag_size(&skb_shinfo(skb)->frags[i]),
> - DMA_TO_DEVICE);
> + frag = &skb_shinfo(skb)->frags[i];
> + netmem_dma_unmap_page_attrs(&pdev->dev,
> + dma_unmap_addr(tx_buf, mapping),
> + skb_frag_size(frag),
> + DMA_TO_DEVICE, 0);
> }
>
> tx_free:
> @@ -3422,9 +3425,11 @@ static void bnxt_free_one_tx_ring_skbs(struct bnxt *bp,
> skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
>
> tx_buf = &txr->tx_buf_ring[ring_idx];
> - dma_unmap_page(&pdev->dev,
> - dma_unmap_addr(tx_buf, mapping),
> - skb_frag_size(frag), DMA_TO_DEVICE);
> + netmem_dma_unmap_page_attrs(&pdev->dev,
> + dma_unmap_addr(tx_buf,
> + mapping),
> + skb_frag_size(frag),
> + DMA_TO_DEVICE, 0);
> }
> dev_kfree_skb(skb);
> }
> @@ -16713,6 +16718,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
> if (BNXT_SUPPORTS_QUEUE_API(bp))
> dev->queue_mgmt_ops = &bnxt_queue_mgmt_ops;
> dev->request_ops_lock = true;
> + dev->netmem_tx = true;
>
> rc = register_netdev(dev);
> if (rc)
Acked-by: Stanislav Fomichev <sdf@fomichev.me>
Similar to what I had internally for testing. One thing to think about
here might be to put that netmem_tx=true under BNXT_SUPPORTS_QUEUE_API
conditional. This way both rx/tx will either be supported or not. But
since there is probably no real FW requirement for TX, should be good
as is.
^ permalink raw reply [flat|nested] 5+ messages in thread* Re: [PATCH net-next] eth: bnxt: add netmem TX support
2025-06-17 20:27 ` Stanislav Fomichev
@ 2025-06-18 13:42 ` Taehee Yoo
0 siblings, 0 replies; 5+ messages in thread
From: Taehee Yoo @ 2025-06-18 13:42 UTC (permalink / raw)
To: Stanislav Fomichev
Cc: davem, kuba, pabeni, edumazet, andrew+netdev, horms, michael.chan,
pavan.chebbi, almasrymina, sdf, netdev
On Wed, Jun 18, 2025 at 5:27 AM Stanislav Fomichev <stfomichev@gmail.com> wrote:
>
Hi Stanislav,
Thanks a lot for your review!
> On 06/17, Taehee Yoo wrote:
> > Use netmem_dma_*() helpers and declare netmem_tx to support netmem TX.
> > By this change, all bnxt devices will support the netmem TX.
> >
> > bnxt_start_xmit() uses memcpy() if a packet is too small. However,
> > netmem packets are unreadable, so memcpy() is not allowed.
> > It should check whether an skb is readable, and if an SKB is unreadable,
> > it is processed by the normal transmission logic.
> >
> > netmem TX can be tested with ncdevmem.c
> >
> > Signed-off-by: Taehee Yoo <ap420073@gmail.com>
> > ---
> > drivers/net/ethernet/broadcom/bnxt/bnxt.c | 28 ++++++++++++++---------
> > 1 file changed, 17 insertions(+), 11 deletions(-)
> >
> > diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
> > index 869580b6f70d..4de9dc123a18 100644
> > --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
> > +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
> > @@ -477,6 +477,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
> > struct bnxt_tx_ring_info *txr;
> > struct bnxt_sw_tx_bd *tx_buf;
> > __le32 lflags = 0;
> > + skb_frag_t *frag;
> >
> > i = skb_get_queue_mapping(skb);
> > if (unlikely(i >= bp->tx_nr_rings)) {
> > @@ -563,7 +564,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
> > lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC);
> >
> > if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh &&
> > - !lflags) {
> > + skb_frags_readable(skb) && !lflags) {
> > struct tx_push_buffer *tx_push_buf = txr->tx_push;
> > struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
> > struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
> > @@ -598,9 +599,9 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
> > skb_copy_from_linear_data(skb, pdata, len);
> > pdata += len;
> > for (j = 0; j < last_frag; j++) {
> > - skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
> > void *fptr;
> >
> > + frag = &skb_shinfo(skb)->frags[j];
> > fptr = skb_frag_address_safe(frag);
> > if (!fptr)
> > goto normal_tx;
> > @@ -708,8 +709,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
> > cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
> > txbd0 = txbd;
> > for (i = 0; i < last_frag; i++) {
> > - skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
> > -
> > + frag = &skb_shinfo(skb)->frags[i];
> > prod = NEXT_TX(prod);
> > txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
> >
> > @@ -721,7 +721,8 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
> > goto tx_dma_error;
> >
> > tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
> > - dma_unmap_addr_set(tx_buf, mapping, mapping);
> > + netmem_dma_unmap_addr_set(skb_frag_netmem(frag), tx_buf,
> > + mapping, mapping);
> >
> > txbd->tx_bd_haddr = cpu_to_le64(mapping);
> >
> > @@ -778,9 +779,11 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
> > for (i = 0; i < last_frag; i++) {
> > prod = NEXT_TX(prod);
> > tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
> > - dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
> > - skb_frag_size(&skb_shinfo(skb)->frags[i]),
> > - DMA_TO_DEVICE);
> > + frag = &skb_shinfo(skb)->frags[i];
> > + netmem_dma_unmap_page_attrs(&pdev->dev,
> > + dma_unmap_addr(tx_buf, mapping),
> > + skb_frag_size(frag),
> > + DMA_TO_DEVICE, 0);
> > }
> >
> > tx_free:
> > @@ -3422,9 +3425,11 @@ static void bnxt_free_one_tx_ring_skbs(struct bnxt *bp,
> > skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
> >
> > tx_buf = &txr->tx_buf_ring[ring_idx];
> > - dma_unmap_page(&pdev->dev,
> > - dma_unmap_addr(tx_buf, mapping),
> > - skb_frag_size(frag), DMA_TO_DEVICE);
> > + netmem_dma_unmap_page_attrs(&pdev->dev,
> > + dma_unmap_addr(tx_buf,
> > + mapping),
> > + skb_frag_size(frag),
> > + DMA_TO_DEVICE, 0);
> > }
> > dev_kfree_skb(skb);
> > }
> > @@ -16713,6 +16718,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
> > if (BNXT_SUPPORTS_QUEUE_API(bp))
> > dev->queue_mgmt_ops = &bnxt_queue_mgmt_ops;
> > dev->request_ops_lock = true;
> > + dev->netmem_tx = true;
> >
> > rc = register_netdev(dev);
> > if (rc)
>
> Acked-by: Stanislav Fomichev <sdf@fomichev.me>
>
> Similar to what I had internally for testing. One thing to think about
> here might be to put that netmem_tx=true under BNXT_SUPPORTS_QUEUE_API
> conditional. This way both rx/tx will either be supported or not. But
> since there is probably no real FW requirement for TX, should be good
> as is.
I agree with you.
Since netmem TX doesn't require any specific hardware or firmware
features, it should be safe to enable this for all bnxt devices.
Thanks a lot!
Taehee Yoo
^ permalink raw reply [flat|nested] 5+ messages in thread
end of thread, other threads:[~2025-06-18 13:43 UTC | newest]
Thread overview: 5+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2025-06-17 9:45 [PATCH net-next] eth: bnxt: add netmem TX support Taehee Yoo
2025-06-17 20:14 ` Mina Almasry
2025-06-18 13:20 ` Taehee Yoo
2025-06-17 20:27 ` Stanislav Fomichev
2025-06-18 13:42 ` Taehee Yoo
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox