From: Horatiu Vultur <horatiu.vultur@microchip.com>
To: William Tu <u9012063@gmail.com>
Cc: <netdev@vger.kernel.org>, <jsankararama@vmware.com>,
<gyang@vmware.com>, <doshir@vmware.com>,
<alexander.duyck@gmail.com>, <alexandr.lobakin@intel.com>,
<bang@vmware.com>, <maciej.fijalkowski@intel.com>,
<witu@nvidia.com>, Alexander Duyck <alexanderduyck@fb.com>
Subject: Re: [PATCH RFC v18 net-next] vmxnet3: Add XDP support.
Date: Sun, 19 Mar 2023 18:20:52 +0100 [thread overview]
Message-ID: <20230319172052.gkzu7h227ulkog6o@soft-dev3-1> (raw)
In-Reply-To: <20230318214953.36834-1-u9012063@gmail.com>
The 03/18/2023 14:49, William Tu wrote:
Hi William,
...
> +
> +static int
> +vmxnet3_xdp_xmit_frame(struct vmxnet3_adapter *adapter,
> + struct xdp_frame *xdpf,
> + struct vmxnet3_tx_queue *tq, bool dma_map)
> +{
> + struct vmxnet3_tx_buf_info *tbi = NULL;
> + union Vmxnet3_GenericDesc *gdesc;
> + struct vmxnet3_tx_ctx ctx;
> + int tx_num_deferred;
> + struct page *page;
> + u32 buf_size;
> + int ret = 0;
`ret` is never assigned anything other than 0, so it can be removed and
the function can simply `return 0;` at the end.
> + u32 dw2;
> +
> + dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;
> + dw2 |= xdpf->len;
> + ctx.sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill;
> + gdesc = ctx.sop_txd;
> +
> + buf_size = xdpf->len;
> + tbi = tq->buf_info + tq->tx_ring.next2fill;
> +
> + if (vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) == 0) {
> + tq->stats.tx_ring_full++;
> + return -ENOSPC;
> + }
> +
> + tbi->map_type = VMXNET3_MAP_XDP;
> + if (dma_map) { /* ndo_xdp_xmit */
> + tbi->dma_addr = dma_map_single(&adapter->pdev->dev,
> + xdpf->data, buf_size,
> + DMA_TO_DEVICE);
> + if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
> + return -EFAULT;
> + tbi->map_type |= VMXNET3_MAP_SINGLE;
> + } else { /* XDP buffer from page pool */
> + page = virt_to_page(xdpf->data);
> + tbi->dma_addr = page_pool_get_dma_addr(page) +
> + XDP_PACKET_HEADROOM;
Shouldn't this be VMXNET3_XDP_HEADROOM?
> + dma_sync_single_for_device(&adapter->pdev->dev,
> + tbi->dma_addr, buf_size,
> + DMA_BIDIRECTIONAL);
Shouldn't this be DMA_TO_DEVICE instead of DMA_BIDIRECTIONAL?
> + }
> + tbi->xdpf = xdpf;
> + tbi->len = buf_size;
> +
> + gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
> + WARN_ON_ONCE(gdesc->txd.gen == tq->tx_ring.gen);
> +
> + gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
> + gdesc->dword[2] = cpu_to_le32(dw2);
> +
> + /* Setup the EOP desc */
> + gdesc->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP);
> +
> + gdesc->txd.om = 0;
> + gdesc->txd.msscof = 0;
> + gdesc->txd.hlen = 0;
> + gdesc->txd.ti = 0;
> +
> + tx_num_deferred = le32_to_cpu(tq->shared->txNumDeferred);
> + le32_add_cpu(&tq->shared->txNumDeferred, 1);
> + tx_num_deferred++;
> +
> + vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
> +
> + /* set the last buf_info for the pkt */
> + tbi->sop_idx = ctx.sop_txd - tq->tx_ring.base;
> +
> + dma_wmb();
> + gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
> + VMXNET3_TXD_GEN);
> +
> + /* No need to handle the case when tx_num_deferred doesn't reach
> + * threshold. Backend driver at hypervisor side will poll and reset
> + * tq->shared->txNumDeferred to 0.
> + */
> + if (tx_num_deferred >= le32_to_cpu(tq->shared->txThreshold)) {
> + tq->shared->txNumDeferred = 0;
> + VMXNET3_WRITE_BAR0_REG(adapter,
> + VMXNET3_REG_TXPROD + tq->qid * 8,
> + tq->tx_ring.next2fill);
> + }
> +
> + return ret;
> +}
...
> +static int
> +vmxnet3_run_xdp(struct vmxnet3_rx_queue *rq, struct xdp_buff *xdp,
> + struct bpf_prog *prog)
> +{
> + struct xdp_frame *xdpf;
> + struct page *page;
> + int err;
> + u32 act;
> +
> + act = bpf_prog_run_xdp(prog, xdp);
> + rq->stats.xdp_packets++;
> + page = virt_to_page(xdp->data_hard_start);
> +
> + switch (act) {
> + case XDP_PASS:
> + return act;
> + case XDP_REDIRECT:
> + err = xdp_do_redirect(rq->adapter->netdev, xdp, prog);
> + if (!err)
> + rq->stats.xdp_redirects++;
> + else
> + rq->stats.xdp_drops++;
> + return act;
> + case XDP_TX:
> + xdpf = xdp_convert_buff_to_frame(xdp);
If you want, I think you can drop xdp_convert_buff_to_frame() and pass
directly the page. And then inside vmxnet3_unmap_pkt() you can use
page_pool_recycle_direct().
Of course this requires a few other changes (I think you need a new
map_type), but in the end you might save some CPU usage.
> + if (unlikely(!xdpf ||
> + vmxnet3_xdp_xmit_back(rq->adapter, xdpf))) {
> + rq->stats.xdp_drops++;
> + page_pool_recycle_direct(rq->page_pool, page);
> + } else {
> + rq->stats.xdp_tx++;
> + }
> + return act;
> + default:
> + bpf_warn_invalid_xdp_action(rq->adapter->netdev, prog, act);
> + fallthrough;
> + case XDP_ABORTED:
> + trace_xdp_exception(rq->adapter->netdev, prog, act);
> + rq->stats.xdp_aborted++;
> + break;
> + case XDP_DROP:
> + rq->stats.xdp_drops++;
> + break;
> + }
> +
> + page_pool_recycle_direct(rq->page_pool, page);
> +
> + return act;
> +}
> +
--
/Horatiu
next prev parent reply other threads:[~2023-03-19 17:21 UTC|newest]
Thread overview: 3+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-03-18 21:49 [PATCH RFC v18 net-next] vmxnet3: Add XDP support William Tu
2023-03-19 17:20 ` Horatiu Vultur [this message]
2023-03-25 17:02 ` William Tu
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20230319172052.gkzu7h227ulkog6o@soft-dev3-1 \
--to=horatiu.vultur@microchip.com \
--cc=alexander.duyck@gmail.com \
--cc=alexanderduyck@fb.com \
--cc=alexandr.lobakin@intel.com \
--cc=bang@vmware.com \
--cc=doshir@vmware.com \
--cc=gyang@vmware.com \
--cc=jsankararama@vmware.com \
--cc=maciej.fijalkowski@intel.com \
--cc=netdev@vger.kernel.org \
--cc=u9012063@gmail.com \
--cc=witu@nvidia.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox