From: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
To: netdev@vger.kernel.org
Cc: "David S. Miller" <davem@davemloft.net>,
Eric Dumazet <edumazet@google.com>,
Jakub Kicinski <kuba@kernel.org>, Paolo Abeni <pabeni@redhat.com>,
"Michael S. Tsirkin" <mst@redhat.com>,
Jason Wang <jasowang@redhat.com>,
Xuan Zhuo <xuanzhuo@linux.alibaba.com>,
Alexei Starovoitov <ast@kernel.org>,
Daniel Borkmann <daniel@iogearbox.net>,
Jesper Dangaard Brouer <hawk@kernel.org>,
John Fastabend <john.fastabend@gmail.com>,
virtualization@lists.linux-foundation.org, bpf@vger.kernel.org
Subject: [PATCH net-next v3 17/27] virtio_net: xsk: tx: support wakeup
Date: Fri, 29 Dec 2023 15:30:58 +0800 [thread overview]
Message-ID: <20231229073108.57778-18-xuanzhuo@linux.alibaba.com> (raw)
In-Reply-To: <20231229073108.57778-1-xuanzhuo@linux.alibaba.com>
xsk wakeup is used by the xsk framework or by userspace to trigger the
xsk xmit logic.

Virtio-net cannot actively raise an interrupt for itself, so instead it
schedules the tx NAPI on the local cpu.
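
For reference, a minimal userspace sketch (not part of this patch; the
fd variable and helper name are purely illustrative) of how this path
is normally reached: once the TX ring advertises need_wakeup, the
application kicks the kernel with a zero-length sendto(), which lands
in the driver's ndo_xsk_wakeup(), i.e. virtnet_xsk_wakeup() below.

  #include <sys/socket.h>

  /* Illustrative only: kick the kernel so that ndo_xsk_wakeup()
   * schedules tx NAPI on the local cpu and drains the xsk TX ring.
   */
  static void kick_tx(int xsk_fd)
  {
          /* EAGAIN/EBUSY here are transient; the next completion pass
           * simply retries the kick.
           */
          sendto(xsk_fd, NULL, 0, MSG_DONTWAIT, NULL, 0);
  }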
Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
---
drivers/net/virtio/main.c | 20 ++++++--------------
drivers/net/virtio/virtio_net.h | 9 +++++++++
drivers/net/virtio/xsk.c | 23 +++++++++++++++++++++++
drivers/net/virtio/xsk.h | 1 +
4 files changed, 39 insertions(+), 14 deletions(-)
diff --git a/drivers/net/virtio/main.c b/drivers/net/virtio/main.c
index cb6c8916f605..2c82418b0344 100644
--- a/drivers/net/virtio/main.c
+++ b/drivers/net/virtio/main.c
@@ -233,15 +233,6 @@ static void disable_delayed_refill(struct virtnet_info *vi)
spin_unlock_bh(&vi->refill_lock);
}
-static void virtqueue_napi_schedule(struct napi_struct *napi,
- struct virtqueue *vq)
-{
- if (napi_schedule_prep(napi)) {
- virtqueue_disable_cb(vq);
- __napi_schedule(napi);
- }
-}
-
static void virtqueue_napi_complete(struct napi_struct *napi,
struct virtqueue *vq, int processed)
{
@@ -250,7 +241,7 @@ static void virtqueue_napi_complete(struct napi_struct *napi,
opaque = virtqueue_enable_cb_prepare(vq);
if (napi_complete_done(napi, processed)) {
if (unlikely(virtqueue_poll(vq, opaque)))
- virtqueue_napi_schedule(napi, vq);
+ virtnet_vq_napi_schedule(napi, vq);
} else {
virtqueue_disable_cb(vq);
}
@@ -265,7 +256,7 @@ static void skb_xmit_done(struct virtqueue *vq)
virtqueue_disable_cb(vq);
if (napi->weight)
- virtqueue_napi_schedule(napi, vq);
+ virtnet_vq_napi_schedule(napi, vq);
else
/* We were probably waiting for more output buffers. */
netif_wake_subqueue(vi->dev, vq2txq(vq));
@@ -635,7 +626,7 @@ void virtnet_check_sq_full_and_disable(struct virtnet_info *vi,
netif_stop_subqueue(dev, qnum);
if (use_napi) {
if (unlikely(!virtqueue_enable_cb_delayed(sq->vq)))
- virtqueue_napi_schedule(&sq->napi, sq->vq);
+ virtnet_vq_napi_schedule(&sq->napi, sq->vq);
} else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
/* More just got used, free them then recheck. */
free_old_xmit(sq, false);
@@ -1802,7 +1793,7 @@ static void skb_recv_done(struct virtqueue *rvq)
struct virtnet_info *vi = rvq->vdev->priv;
struct virtnet_rq *rq = &vi->rq[vq2rxq(rvq)];
- virtqueue_napi_schedule(&rq->napi, rvq);
+ virtnet_vq_napi_schedule(&rq->napi, rvq);
}
static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
@@ -1814,7 +1805,7 @@ static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
* Call local_bh_enable after to trigger softIRQ processing.
*/
local_bh_disable();
- virtqueue_napi_schedule(napi, vq);
+ virtnet_vq_napi_schedule(napi, vq);
local_bh_enable();
}
@@ -3785,6 +3776,7 @@ static const struct net_device_ops virtnet_netdev = {
.ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
.ndo_bpf = virtnet_xdp,
.ndo_xdp_xmit = virtnet_xdp_xmit,
+ .ndo_xsk_wakeup = virtnet_xsk_wakeup,
.ndo_features_check = passthru_features_check,
.ndo_get_phys_port_name = virtnet_get_phys_port_name,
.ndo_set_features = virtnet_set_features,
diff --git a/drivers/net/virtio/virtio_net.h b/drivers/net/virtio/virtio_net.h
index 7dcbd1d40fba..82a56d640b11 100644
--- a/drivers/net/virtio/virtio_net.h
+++ b/drivers/net/virtio/virtio_net.h
@@ -284,6 +284,15 @@ static inline bool virtnet_is_xdp_raw_buffer_queue(struct virtnet_info *vi, int
return false;
}
+static inline void virtnet_vq_napi_schedule(struct napi_struct *napi,
+ struct virtqueue *vq)
+{
+ if (napi_schedule_prep(napi)) {
+ virtqueue_disable_cb(vq);
+ __napi_schedule(napi);
+ }
+}
+
void virtnet_rx_pause(struct virtnet_info *vi, struct virtnet_rq *rq);
void virtnet_rx_resume(struct virtnet_info *vi, struct virtnet_rq *rq);
void virtnet_tx_pause(struct virtnet_info *vi, struct virtnet_sq *sq);
diff --git a/drivers/net/virtio/xsk.c b/drivers/net/virtio/xsk.c
index d2a96424ade9..9e5523ff5707 100644
--- a/drivers/net/virtio/xsk.c
+++ b/drivers/net/virtio/xsk.c
@@ -95,6 +95,29 @@ bool virtnet_xsk_xmit(struct virtnet_sq *sq, struct xsk_buff_pool *pool,
return sent == budget;
}
+int virtnet_xsk_wakeup(struct net_device *dev, u32 qid, u32 flag)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+ struct virtnet_sq *sq;
+
+ if (!netif_running(dev))
+ return -ENETDOWN;
+
+ if (qid >= vi->curr_queue_pairs)
+ return -EINVAL;
+
+ sq = &vi->sq[qid];
+
+ if (napi_if_scheduled_mark_missed(&sq->napi))
+ return 0;
+
+ local_bh_disable();
+ virtnet_vq_napi_schedule(&sq->napi, sq->vq);
+ local_bh_enable();
+
+ return 0;
+}
+
static int virtnet_rq_bind_xsk_pool(struct virtnet_info *vi, struct virtnet_rq *rq,
struct xsk_buff_pool *pool)
{
diff --git a/drivers/net/virtio/xsk.h b/drivers/net/virtio/xsk.h
index 73ca8cd5308b..1bd19dcda649 100644
--- a/drivers/net/virtio/xsk.h
+++ b/drivers/net/virtio/xsk.h
@@ -17,4 +17,5 @@ static inline void *virtnet_xsk_to_ptr(u32 len)
int virtnet_xsk_pool_setup(struct net_device *dev, struct netdev_bpf *xdp);
bool virtnet_xsk_xmit(struct virtnet_sq *sq, struct xsk_buff_pool *pool,
int budget);
+int virtnet_xsk_wakeup(struct net_device *dev, u32 qid, u32 flag);
#endif
--
2.32.0.3.g01195cf9f