From: Jason Wang <jasowang@redhat.com>
To: Cindylu <lulu@redhat.com>,
	virtualization@lists.linux-foundation.org, mst@redhat.com,
	jfreimann@redhat.com, rusty@rustcorp.com.au, jmaxwell@redhat.com
Subject: Re: [RHEL7]virtio-net: switch to use XPS to choose txq
Date: Mon, 23 Dec 2019 10:27:03 +0800
Message-ID: <bef78003-144d-ccb7-79a6-f16d6cccdad8@redhat.com>
In-Reply-To: <20191220114813.7052-1-lulu@redhat.com>


On 2019/12/20 7:48 PM, Cindylu wrote:
> Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1769479
> Upstream Status: 9bb8ca86075f37d3c169b9c46f8e7c6d3165e18f
> Brew:  https://brewweb.devel.redhat.com/taskinfo?taskID=25482050
> Tested: verified by customer
>
> commit 9bb8ca86075f37d3c169b9c46f8e7c6d3165e18f
> Author: Jason Wang <jasowang@redhat.com>
> Date:   Tue Nov 5 18:19:45 2013 +0800
>
>      virtio-net: switch to use XPS to choose txq
>
>      We used to use a percpu structure vq_index to record the CPU-to-queue
>      mapping. This is suboptimal since it duplicates the work of XPS and
>      loses all other XPS functionality, such as allowing users to
>      configure their own transmission steering strategy.
>
>      So this patch switches to using XPS and suggests a default mapping
>      when the number of CPUs is equal to the number of queues. With XPS
>      support, there is no need to keep the per-cpu vq_index or
>      .ndo_select_queue(), so both are removed as well.
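
For readers who haven't followed the XPS work: once a per-queue CPU mask
is registered (as done below via netif_set_xps_queue()), the core tx
path can resolve the txq from the submitting CPU's map, so the driver no
longer needs its own .ndo_select_queue(). A rough, self-contained sketch
of the concept only (not the actual kernel code, which lives in the net
core around get_xps_queue(); names here are made up for illustration):

    #include <stdint.h>

    /* Toy model of XPS-style txq selection: consult the per-CPU map
     * first, fall back to hashing when no mapping is installed.
     */
    static uint16_t xps_pick_txq_sketch(const int *cpu_to_txq,
                                        int cpu, uint32_t skb_hash,
                                        uint16_t num_tx_queues)
    {
            int txq = cpu_to_txq[cpu];   /* seeded via netif_set_xps_queue() */

            if (txq < 0)                 /* no XPS entry: hash instead */
                    txq = (int)(skb_hash % num_tx_queues);
            return (uint16_t)txq;
    }

    int main(void)
    {
            int map[4] = { 0, 1, 2, 3 }; /* the 1:1 default this patch seeds */

            return xps_pick_txq_sketch(map, 2, 0xdeadbeef, 4) == 2 ? 0 : 1;
    }
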
>
>      Cc: Rusty Russell <rusty@rustcorp.com.au>
>      Cc: Michael S. Tsirkin <mst@redhat.com>
>      Acked-by: Rusty Russell <rusty@rustcorp.com.au>
>      Acked-by: Michael S. Tsirkin <mst@redhat.com>
>      Signed-off-by: Jason Wang <jasowang@redhat.com>
>      Signed-off-by: David S. Miller <davem@davemloft.net>
> ---
>   drivers/net/virtio_net.c | 49 ++--------------------------------------
>   1 file changed, 2 insertions(+), 47 deletions(-)
>
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index 61c64d92211..d5335774c60 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -125,9 +125,6 @@ struct virtnet_info {
>   	/* Is the affinity hint set for virtqueues? */
>   	bool affinity_hint_set;
>   
> -	/* Per-cpu variable to show the mapping from CPU to virtqueue */
> -	int __percpu *vq_index;
> -
>   	/* CPU hot plug notifier */
>   	struct notifier_block nb;
>   
> @@ -1124,7 +1121,6 @@ static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
>   static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu)
>   {
>   	int i;
> -	int cpu;
>   
>   	if (vi->affinity_hint_set) {
>   		for (i = 0; i < vi->max_queue_pairs; i++) {
> @@ -1134,16 +1130,6 @@ static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu)
>   
>   		vi->affinity_hint_set = false;
>   	}
> -
> -	i = 0;
> -	for_each_online_cpu(cpu) {
> -		if (cpu == hcpu) {
> -			*per_cpu_ptr(vi->vq_index, cpu) = -1;
> -		} else {
> -			*per_cpu_ptr(vi->vq_index, cpu) =
> -				++i % vi->curr_queue_pairs;
> -		}
> -	}
>   }
>   
>   static void virtnet_set_affinity(struct virtnet_info *vi)
> @@ -1165,7 +1151,7 @@ static void virtnet_set_affinity(struct virtnet_info *vi)
>   	for_each_online_cpu(cpu) {
>   		virtqueue_set_affinity(vi->rq[i].vq, cpu);
>   		virtqueue_set_affinity(vi->sq[i].vq, cpu);
> -		*per_cpu_ptr(vi->vq_index, cpu) = i;
> +		netif_set_xps_queue(vi->dev, cpumask_of(cpu), i);
>   		i++;
>   	}
>   
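
One thing worth spelling out here: when the number of online CPUs equals
the number of queue pairs, this loop seeds a 1:1 CPU-to-txq default,
which the admin can later override through the usual XPS sysfs knob
(/sys/class/net/<dev>/queues/tx-<N>/xps_cpus). A toy program showing the
per-queue masks that default corresponds to, assuming a hypothetical
4-vCPU guest with 4 queue pairs:

    #include <stdio.h>

    /* Illustration only: each txq gets a single-CPU mask, i.e. the
     * equivalent of cpumask_of(cpu) with queue index == cpu.
     */
    int main(void)
    {
            int nr_cpus = 4;    /* assumed 4-vCPU, 4-queue-pair guest */

            for (int cpu = 0; cpu < nr_cpus; cpu++)
                    printf("txq %d <- xps_cpus mask 0x%x (CPU %d)\n",
                           cpu, 1u << cpu, cpu);
            return 0;
    }
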
> @@ -1268,29 +1254,6 @@ static const struct ethtool_ops virtnet_ethtool_ops = {
>   	.get_channels = virtnet_get_channels,
>   };
>   
> -/* To avoid contending a lock hold by a vcpu who would exit to host, select the
> - * txq based on the processor id.
> - */
> -static u16 virtnet_select_queue(struct net_device *dev, struct sk_buff *skb,
> -			void *accel_priv, select_queue_fallback_t fallback)
> -{
> -	int txq;
> -	struct virtnet_info *vi = netdev_priv(dev);
> -
> -	if (skb_rx_queue_recorded(skb)) {
> -		txq = skb_get_rx_queue(skb);
> -	} else {
> -		txq = *__this_cpu_ptr(vi->vq_index);
> -		if (txq == -1)
> -			txq = 0;
> -	}
> -
> -	while (unlikely(txq >= dev->real_num_tx_queues))
> -		txq -= dev->real_num_tx_queues;
> -
> -	return txq;
> -}
> -
>   static int virtnet_get_phys_port_name(struct net_device *dev, char *buf,
>   				      size_t len)
>   {
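
For what it's worth, the removed wrap-around loop above was just a
modulo in disguise, and with .ndo_select_queue() gone the core's own
pick path keeps the index in range, so nothing is lost. A quick
standalone check of that equivalence:

    #include <assert.h>
    #include <stdint.h>

    /* The deleted while-loop reduces txq into range exactly like
     * txq % real_num_tx_queues (repeated subtraction is modulo).
     */
    static uint16_t wrap_txq(uint16_t txq, uint16_t real_num_tx_queues)
    {
            while (txq >= real_num_tx_queues)
                    txq -= real_num_tx_queues;
            return txq;
    }

    int main(void)
    {
            for (uint16_t q = 0; q < 64; q++)
                    assert(wrap_txq(q, 4) == q % 4);
            return 0;
    }
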
> @@ -1317,7 +1280,6 @@ static const struct net_device_ops virtnet_netdev = {
>   	.ndo_get_stats64     = virtnet_stats,
>   	.ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
>   	.ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
> -	.ndo_select_queue     = virtnet_select_queue,
>   #ifdef CONFIG_NET_POLL_CONTROLLER
>   	.ndo_poll_controller = virtnet_netpoll,
>   #endif
> @@ -1642,10 +1604,6 @@ static int virtnet_probe(struct virtio_device *vdev)
>   	if (vi->stats == NULL)
>   		goto free;
>   
> -	vi->vq_index = alloc_percpu(int);
> -	if (vi->vq_index == NULL)
> -		goto free_stats;
> -
>   	INIT_WORK(&vi->config_work, virtnet_config_changed_work);
>   
>   	/* If we can receive ANY GSO packets, we must allocate large ones. */
> @@ -1695,7 +1653,7 @@ static int virtnet_probe(struct virtio_device *vdev)
>   	/* Allocate/initialize the rx/tx queues, and invoke find_vqs */
>   	err = init_vqs(vi);
>   	if (err)
> -		goto free_index;
> +		goto free_stats;
>   
>   	netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs);
>   	netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs);
> @@ -1764,8 +1722,6 @@ free_failover:
>   free_vqs:
>   	cancel_delayed_work_sync(&vi->refill);
>   	virtnet_del_vqs(vi);
> -free_index:
> -	free_percpu(vi->vq_index);
>   free_stats:
>   	free_percpu(vi->stats);
>   free:
> @@ -1801,7 +1757,6 @@ static void virtnet_remove(struct virtio_device *vdev)
>   	remove_vq_common(vi);
>   
>   
> -	free_percpu(vi->vq_index);
>   	free_percpu(vi->stats);
>   	free_netdev(vi->dev);
>   }


Acked-by: Jason Wang <jasowang@redhat.com>

Thanks

_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization
