From: "Michael S. Tsirkin" <mst@redhat.com>
To: Jason Wang <jasowang@redhat.com>
Cc: netdev@vger.kernel.org, linux-kernel@vger.kernel.org,
	virtualization@lists.linux-foundation.org
Subject: Re: [PATCH net-next 1/2] virtio-net: correctly update XDP_TX counters
Date: Tue, 31 Jul 2018 14:22:26 +0300
Message-ID: <20180731142219-mutt-send-email-mst@kernel.org>
In-Reply-To: <1533030219-9904-1-git-send-email-jasowang@redhat.com>

On Tue, Jul 31, 2018 at 05:43:38PM +0800, Jason Wang wrote:
> Commit 5b8f3c8d30a6 ("virtio_net: Add XDP related stats") counts
> TX XDP stats in virtnet_receive(). This causes several issues:
> 
> - virtnet_xdp_sq() was called without checking whether or not XDP is
>   set. This may cause an out-of-bounds access when there are not
>   enough txqs for XDP (see the sketch below).
> - Stats were updated even when there was no XDP/XDP_TX.
> 
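For context, the queue lookup in question roughly looks like this (a
paraphrase of virtnet_xdp_sq() from the driver at the time, trimmed
for illustration rather than copied verbatim):

	static struct send_queue *virtnet_xdp_sq(struct virtnet_info *vi)
	{
		unsigned int qp;

		/* The XDP TX queues sit behind the regular TX queues.
		 * With no XDP program attached, xdp_queue_pairs is 0,
		 * so qp can index past the end of vi->sq.
		 */
		qp = vi->curr_queue_pairs - vi->xdp_queue_pairs +
		     smp_processor_id();
		return &vi->sq[qp];
	}

Hence the fix below routes XDP_TX through a path that only reaches
this helper after checking for an attached XDP program.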
> Fix this by reusing virtnet_xdp_xmit() for XDP_TX, which counts the
> TX XDP stats itself, and remove the unnecessary tx stats embedded in
> the rx stats.
> 
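With the fix, XDP_TX goes through the same entry point as
ndo_xdp_xmit. Roughly, again paraphrasing virtnet_xdp_xmit() of this
era rather than quoting it (field names as in virtio_net.c after
commit 5b8f3c8d30a6):

	/* Only allow the transmit path if an XDP program is attached;
	 * this also guarantees the XDP TX queues were allocated, so
	 * virtnet_xdp_sq() stays in bounds.
	 */
	xdp_prog = rcu_dereference(rq->xdp_prog);
	if (!xdp_prog)
		return -ENXIO;

	sq = virtnet_xdp_sq(vi);

	/* ... transmit the frames, counting drops ... */

	u64_stats_update_begin(&sq->stats.syncp);
	sq->stats.xdp_tx += n;
	sq->stats.xdp_tx_drops += drops;
	u64_stats_update_end(&sq->stats.syncp);

So both issues are addressed in one place: the queue lookup is
guarded, and the counters are only bumped when XDP_TX actually runs.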
> Reported-by: syzbot+604f8271211546f5b3c7@syzkaller.appspotmail.com
> Fixes: 5b8f3c8d30a6 ("virtio_net: Add XDP related stats")
> Cc: Toshiaki Makita <makita.toshiaki@lab.ntt.co.jp>
> Signed-off-by: Jason Wang <jasowang@redhat.com>

Acked-by: Michael S. Tsirkin <mst@redhat.com>

> ---
>  drivers/net/virtio_net.c | 39 ++++-----------------------------------
>  1 file changed, 4 insertions(+), 35 deletions(-)
> 
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index 1880c86..72d3f68 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -105,10 +105,6 @@ struct virtnet_rq_stats {
>  
>  struct virtnet_rx_stats {
>  	struct virtnet_rq_stat_items rx;
> -	struct {
> -		unsigned int xdp_tx;
> -		unsigned int xdp_tx_drops;
> -	} tx;
>  };
>  
>  #define VIRTNET_SQ_STAT(m)	offsetof(struct virtnet_sq_stats, m)
> @@ -485,22 +481,6 @@ static struct send_queue *virtnet_xdp_sq(struct virtnet_info *vi)
>  	return &vi->sq[qp];
>  }
>  
> -static int __virtnet_xdp_tx_xmit(struct virtnet_info *vi,
> -				   struct xdp_frame *xdpf)
> -{
> -	struct xdp_frame *xdpf_sent;
> -	struct send_queue *sq;
> -	unsigned int len;
> -
> -	sq = virtnet_xdp_sq(vi);
> -
> -	/* Free up any pending old buffers before queueing new ones. */
> -	while ((xdpf_sent = virtqueue_get_buf(sq->vq, &len)) != NULL)
> -		xdp_return_frame(xdpf_sent);
> -
> -	return __virtnet_xdp_xmit_one(vi, sq, xdpf);
> -}
> -
>  static int virtnet_xdp_xmit(struct net_device *dev,
>  			    int n, struct xdp_frame **frames, u32 flags)
>  {
> @@ -707,10 +687,8 @@ static struct sk_buff *receive_small(struct net_device *dev,
>  			xdpf = convert_to_xdp_frame(&xdp);
>  			if (unlikely(!xdpf))
>  				goto err_xdp;
> -			stats->tx.xdp_tx++;
> -			err = __virtnet_xdp_tx_xmit(vi, xdpf);
> -			if (unlikely(err)) {
> -				stats->tx.xdp_tx_drops++;
> +			err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
> +			if (unlikely(err < 0)) {
>  				trace_xdp_exception(vi->dev, xdp_prog, act);
>  				goto err_xdp;
>  			}
> @@ -879,10 +857,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
>  			xdpf = convert_to_xdp_frame(&xdp);
>  			if (unlikely(!xdpf))
>  				goto err_xdp;
> -			stats->tx.xdp_tx++;
> -			err = __virtnet_xdp_tx_xmit(vi, xdpf);
> -			if (unlikely(err)) {
> -				stats->tx.xdp_tx_drops++;
> +			err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
> +			if (unlikely(err < 0)) {
>  				trace_xdp_exception(vi->dev, xdp_prog, act);
>  				if (unlikely(xdp_page != page))
>  					put_page(xdp_page);
> @@ -1315,7 +1291,6 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
>  {
>  	struct virtnet_info *vi = rq->vq->vdev->priv;
>  	struct virtnet_rx_stats stats = {};
> -	struct send_queue *sq;
>  	unsigned int len;
>  	void *buf;
>  	int i;
> @@ -1351,12 +1326,6 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
>  	}
>  	u64_stats_update_end(&rq->stats.syncp);
>  
> -	sq = virtnet_xdp_sq(vi);
> -	u64_stats_update_begin(&sq->stats.syncp);
> -	sq->stats.xdp_tx += stats.tx.xdp_tx;
> -	sq->stats.xdp_tx_drops += stats.tx.xdp_tx_drops;
> -	u64_stats_update_end(&sq->stats.syncp);
> -
>  	return stats.rx.packets;
>  }
>  
> -- 
> 2.7.4
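The removed tail of virtnet_receive() above is what pushed the
rx-side counters into the sq stats; after this patch those counters
are written only from virtnet_xdp_xmit(), under the sq's own syncp.
For completeness, the matching reader side uses the usual u64_stats
retry loop; a minimal sketch, assuming a stats struct with a
u64_stats_sync member named syncp as in the driver:

	unsigned int start;
	u64 xdp_tx, xdp_tx_drops;

	do {
		start = u64_stats_fetch_begin(&sq->stats.syncp);
		xdp_tx = sq->stats.xdp_tx;
		xdp_tx_drops = sq->stats.xdp_tx_drops;
	} while (u64_stats_fetch_retry(&sq->stats.syncp, start));

This is the same pattern the driver's ethtool stats path relies on,
so dropping the writer section from virtnet_receive() loses nothing
once virtnet_xdp_xmit() takes over the accounting.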

