* [PATCH V6 2/3] virtio-net: split out clean affinity function
2013-01-21 11:25 [PATCH V6 1/3] virtio-net: fix the set affinity bug when CPU IDs are not consecutive Wanlong Gao
@ 2013-01-21 11:25 ` Wanlong Gao
2013-01-25 3:28 ` Jason Wang
2013-01-21 11:25 ` [PATCH V6 3/3] virtio-net: reset virtqueue affinity when doing cpu hotplug Wanlong Gao
` (3 subsequent siblings)
4 siblings, 1 reply; 15+ messages in thread
From: Wanlong Gao @ 2013-01-21 11:25 UTC (permalink / raw)
To: linux-kernel; +Cc: Michael S. Tsirkin, netdev, virtualization, Eric Dumazet
Split out the clean affinity function to virtnet_clean_affinity().
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: "Michael S. Tsirkin" <mst@redhat.com>
Cc: Jason Wang <jasowang@redhat.com>
Cc: Eric Dumazet <erdnetdev@gmail.com>
Cc: virtualization@lists.linux-foundation.org
Cc: netdev@vger.kernel.org
Signed-off-by: Wanlong Gao <gaowanlong@cn.fujitsu.com>
---
V5->V6: NEW
drivers/net/virtio_net.c | 67 +++++++++++++++++++++++++++---------------------
1 file changed, 38 insertions(+), 29 deletions(-)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 70cd957..1a35a8c 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1016,48 +1016,57 @@ static int virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
return 0;
}
-static void virtnet_set_affinity(struct virtnet_info *vi, bool set)
+static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu)
{
int i;
int cpu;
- /* In multiqueue mode, when the number of cpu is equal to the number of
- * queue pairs, we let the queue pairs to be private to one cpu by
- * setting the affinity hint to eliminate the contention.
- */
- if ((vi->curr_queue_pairs == 1 ||
- vi->max_queue_pairs != num_online_cpus()) && set) {
- if (vi->affinity_hint_set)
- set = false;
- else
- return;
- }
-
- if (set) {
- i = 0;
- for_each_online_cpu(cpu) {
- virtqueue_set_affinity(vi->rq[i].vq, cpu);
- virtqueue_set_affinity(vi->sq[i].vq, cpu);
- *per_cpu_ptr(vi->vq_index, cpu) = i;
- i++;
- }
-
- vi->affinity_hint_set = true;
- } else {
- for(i = 0; i < vi->max_queue_pairs; i++) {
+ if (vi->affinity_hint_set) {
+ for (i = 0; i < vi->max_queue_pairs; i++) {
virtqueue_set_affinity(vi->rq[i].vq, -1);
virtqueue_set_affinity(vi->sq[i].vq, -1);
}
i = 0;
- for_each_online_cpu(cpu)
+ for_each_online_cpu(cpu) {
+ if (cpu == hcpu)
+ continue;
*per_cpu_ptr(vi->vq_index, cpu) =
++i % vi->curr_queue_pairs;
+ }
vi->affinity_hint_set = false;
}
}
+static void virtnet_set_affinity(struct virtnet_info *vi)
+{
+ int i;
+ int cpu;
+
+ /* In multiqueue mode, when the number of cpu is equal to the number of
+ * queue pairs, we let the queue pairs to be private to one cpu by
+ * setting the affinity hint to eliminate the contention.
+ */
+ if (vi->curr_queue_pairs == 1 ||
+ vi->max_queue_pairs != num_online_cpus()) {
+ if (vi->affinity_hint_set)
+ virtnet_clean_affinity(vi, -1);
+ else
+ return;
+ }
+
+ i = 0;
+ for_each_online_cpu(cpu) {
+ virtqueue_set_affinity(vi->rq[i].vq, cpu);
+ virtqueue_set_affinity(vi->sq[i].vq, cpu);
+ *per_cpu_ptr(vi->vq_index, cpu) = i;
+ i++;
+ }
+
+ vi->affinity_hint_set = true;
+}
+
static void virtnet_get_ringparam(struct net_device *dev,
struct ethtool_ringparam *ring)
{
@@ -1105,7 +1114,7 @@ static int virtnet_set_channels(struct net_device *dev,
netif_set_real_num_rx_queues(dev, queue_pairs);
get_online_cpus();
- virtnet_set_affinity(vi, true);
+ virtnet_set_affinity(vi);
put_online_cpus();
}
@@ -1274,7 +1283,7 @@ static void virtnet_del_vqs(struct virtnet_info *vi)
{
struct virtio_device *vdev = vi->vdev;
- virtnet_set_affinity(vi, false);
+ virtnet_clean_affinity(vi, -1);
vdev->config->del_vqs(vdev);
@@ -1398,7 +1407,7 @@ static int init_vqs(struct virtnet_info *vi)
goto err_free;
get_online_cpus();
- virtnet_set_affinity(vi, true);
+ virtnet_set_affinity(vi);
put_online_cpus();
return 0;
--
1.8.1
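To make the fallback mapping in virtnet_clean_affinity() concrete: the
++i % vi->curr_queue_pairs assignment spreads the online CPUs round-robin
over the queue pairs, which also works when CPU IDs are not consecutive.
A stand-alone sketch (illustrative only, not kernel code; the online-CPU
set and queue count below are made up):

    #include <stdio.h>

    int main(void)
    {
            int online_cpus[] = { 0, 1, 3, 4 };  /* e.g. CPU 2 unplugged */
            int curr_queue_pairs = 2;            /* hypothetical count */
            int i = 0, n;

            /* mirrors the for_each_online_cpu() loop in the patch */
            for (n = 0; n < 4; n++)
                    printf("cpu %d -> queue pair %d\n",
                           online_cpus[n], ++i % curr_queue_pairs);
            return 0;
    }

This prints cpu 0 -> 1, cpu 1 -> 0, cpu 3 -> 1, cpu 4 -> 0: every online
CPU keeps a valid preferable queue even though CPU 2 is missing.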
* Re: [PATCH V6 2/3] virtio-net: split out clean affinity function
2013-01-21 11:25 ` [PATCH V6 2/3] virtio-net: split out clean affinity function Wanlong Gao
@ 2013-01-25 3:28 ` Jason Wang
2013-01-25 4:20 ` Wanlong Gao
0 siblings, 1 reply; 15+ messages in thread
From: Jason Wang @ 2013-01-25 3:28 UTC (permalink / raw)
To: Wanlong Gao
Cc: Michael S. Tsirkin, netdev, linux-kernel, virtualization,
Eric Dumazet
On 01/21/2013 07:25 PM, Wanlong Gao wrote:
> Split out the clean affinity function to virtnet_clean_affinity().
>
> Cc: Rusty Russell <rusty@rustcorp.com.au>
> Cc: "Michael S. Tsirkin" <mst@redhat.com>
> Cc: Jason Wang <jasowang@redhat.com>
> Cc: Eric Dumazet <erdnetdev@gmail.com>
> Cc: virtualization@lists.linux-foundation.org
> Cc: netdev@vger.kernel.org
> Signed-off-by: Wanlong Gao <gaowanlong@cn.fujitsu.com>
> ---
> V5->V6: NEW
>
> drivers/net/virtio_net.c | 67 +++++++++++++++++++++++++++---------------------
> 1 file changed, 38 insertions(+), 29 deletions(-)
>
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index 70cd957..1a35a8c 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -1016,48 +1016,57 @@ static int virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
> return 0;
> }
>
> -static void virtnet_set_affinity(struct virtnet_info *vi, bool set)
> +static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu)
> {
> int i;
> int cpu;
>
> - /* In multiqueue mode, when the number of cpu is equal to the number of
> - * queue pairs, we let the queue pairs to be private to one cpu by
> - * setting the affinity hint to eliminate the contention.
> - */
> - if ((vi->curr_queue_pairs == 1 ||
> - vi->max_queue_pairs != num_online_cpus()) && set) {
> - if (vi->affinity_hint_set)
> - set = false;
> - else
> - return;
> - }
> -
> - if (set) {
> - i = 0;
> - for_each_online_cpu(cpu) {
> - virtqueue_set_affinity(vi->rq[i].vq, cpu);
> - virtqueue_set_affinity(vi->sq[i].vq, cpu);
> - *per_cpu_ptr(vi->vq_index, cpu) = i;
> - i++;
> - }
> -
> - vi->affinity_hint_set = true;
> - } else {
> - for(i = 0; i < vi->max_queue_pairs; i++) {
> + if (vi->affinity_hint_set) {
> + for (i = 0; i < vi->max_queue_pairs; i++) {
> virtqueue_set_affinity(vi->rq[i].vq, -1);
> virtqueue_set_affinity(vi->sq[i].vq, -1);
> }
>
> i = 0;
> - for_each_online_cpu(cpu)
> + for_each_online_cpu(cpu) {
> + if (cpu == hcpu)
> + continue;
> *per_cpu_ptr(vi->vq_index, cpu) =
> ++i % vi->curr_queue_pairs;
> + }
>
Some questions here:
- Do we need to reset the affinity of the queue here, like this?
virtqueue_set_affinity(vi->sq[*per_cpu_ptr(vi->vq_index, hcpu)].vq, -1);
virtqueue_set_affinity(vi->rq[*per_cpu_ptr(vi->vq_index, hcpu)].vq, -1);
- Looks like we also need to reset the percpu index when
vi->affinity_hint_set is false.
- Do we really need this reset? Consider that we're going to reset the
percpu index in CPU_DEAD?
Thanks
> vi->affinity_hint_set = false;
> }
> }
>
> +static void virtnet_set_affinity(struct virtnet_info *vi)
> +{
> + int i;
> + int cpu;
> +
> + /* In multiqueue mode, when the number of cpu is equal to the number of
> + * queue pairs, we let the queue pairs to be private to one cpu by
> + * setting the affinity hint to eliminate the contention.
> + */
> + if (vi->curr_queue_pairs == 1 ||
> + vi->max_queue_pairs != num_online_cpus()) {
> + if (vi->affinity_hint_set)
> + virtnet_clean_affinity(vi, -1);
> + else
> + return;
> + }
> +
> + i = 0;
> + for_each_online_cpu(cpu) {
> + virtqueue_set_affinity(vi->rq[i].vq, cpu);
> + virtqueue_set_affinity(vi->sq[i].vq, cpu);
> + *per_cpu_ptr(vi->vq_index, cpu) = i;
> + i++;
> + }
> +
> + vi->affinity_hint_set = true;
> +}
> +
> static void virtnet_get_ringparam(struct net_device *dev,
> struct ethtool_ringparam *ring)
> {
> @@ -1105,7 +1114,7 @@ static int virtnet_set_channels(struct net_device *dev,
> netif_set_real_num_rx_queues(dev, queue_pairs);
>
> get_online_cpus();
> - virtnet_set_affinity(vi, true);
> + virtnet_set_affinity(vi);
> put_online_cpus();
> }
>
> @@ -1274,7 +1283,7 @@ static void virtnet_del_vqs(struct virtnet_info *vi)
> {
> struct virtio_device *vdev = vi->vdev;
>
> - virtnet_set_affinity(vi, false);
> + virtnet_clean_affinity(vi, -1);
>
> vdev->config->del_vqs(vdev);
>
> @@ -1398,7 +1407,7 @@ static int init_vqs(struct virtnet_info *vi)
> goto err_free;
>
> get_online_cpus();
> - virtnet_set_affinity(vi, true);
> + virtnet_set_affinity(vi);
> put_online_cpus();
>
> return 0;
* Re: [PATCH V6 2/3] virtio-net: split out clean affinity function
2013-01-25 3:28 ` Jason Wang
@ 2013-01-25 4:20 ` Wanlong Gao
2013-01-25 5:13 ` Jason Wang
0 siblings, 1 reply; 15+ messages in thread
From: Wanlong Gao @ 2013-01-25 4:20 UTC (permalink / raw)
To: Jason Wang
Cc: Michael S. Tsirkin, netdev, linux-kernel, virtualization,
Eric Dumazet
On 01/25/2013 11:28 AM, Jason Wang wrote:
> On 01/21/2013 07:25 PM, Wanlong Gao wrote:
>> Split out the clean affinity function to virtnet_clean_affinity().
>>
>> Cc: Rusty Russell <rusty@rustcorp.com.au>
>> Cc: "Michael S. Tsirkin" <mst@redhat.com>
>> Cc: Jason Wang <jasowang@redhat.com>
>> Cc: Eric Dumazet <erdnetdev@gmail.com>
>> Cc: virtualization@lists.linux-foundation.org
>> Cc: netdev@vger.kernel.org
>> Signed-off-by: Wanlong Gao <gaowanlong@cn.fujitsu.com>
>> ---
>> V5->V6: NEW
>>
>> drivers/net/virtio_net.c | 67 +++++++++++++++++++++++++++---------------------
>> 1 file changed, 38 insertions(+), 29 deletions(-)
>>
>> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
>> index 70cd957..1a35a8c 100644
>> --- a/drivers/net/virtio_net.c
>> +++ b/drivers/net/virtio_net.c
>> @@ -1016,48 +1016,57 @@ static int virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
>> return 0;
>> }
>>
>> -static void virtnet_set_affinity(struct virtnet_info *vi, bool set)
>> +static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu)
>> {
>> int i;
>> int cpu;
>>
>> - /* In multiqueue mode, when the number of cpu is equal to the number of
>> - * queue pairs, we let the queue pairs to be private to one cpu by
>> - * setting the affinity hint to eliminate the contention.
>> - */
>> - if ((vi->curr_queue_pairs == 1 ||
>> - vi->max_queue_pairs != num_online_cpus()) && set) {
>> - if (vi->affinity_hint_set)
>> - set = false;
>> - else
>> - return;
>> - }
>> -
>> - if (set) {
>> - i = 0;
>> - for_each_online_cpu(cpu) {
>> - virtqueue_set_affinity(vi->rq[i].vq, cpu);
>> - virtqueue_set_affinity(vi->sq[i].vq, cpu);
>> - *per_cpu_ptr(vi->vq_index, cpu) = i;
>> - i++;
>> - }
>> -
>> - vi->affinity_hint_set = true;
>> - } else {
>> - for(i = 0; i < vi->max_queue_pairs; i++) {
>> + if (vi->affinity_hint_set) {
>> + for (i = 0; i < vi->max_queue_pairs; i++) {
>> virtqueue_set_affinity(vi->rq[i].vq, -1);
>> virtqueue_set_affinity(vi->sq[i].vq, -1);
>> }
>>
>> i = 0;
>> - for_each_online_cpu(cpu)
>> + for_each_online_cpu(cpu) {
>> + if (cpu == hcpu)
>> + continue;
>> *per_cpu_ptr(vi->vq_index, cpu) =
>> ++i % vi->curr_queue_pairs;
>> + }
>>
>
> Some questions here:
>
> - Do we need to reset the affinity of the queue here, like this?
>
> virtqueue_set_affinity(vi->sq[*per_cpu_ptr(vi->vq_index, hcpu)].vq, -1);
> virtqueue_set_affinity(vi->rq[*per_cpu_ptr(vi->vq_index, hcpu)].vq, -1);
I think not; we are going to unset the affinity of all the set queues,
including hcpu.
>
> - Looks like we also need to reset the percpu index when
> vi->affinity_hint_set is false.
Yes, I'll follow this and the comment on [1/3].
> - Do we really need this reset? Consider that we're going to reset the
> percpu index in CPU_DEAD?
I think resetting at CPU_DOWN_PREPARE can avoid selecting the wrong queue
on the dying CPU.
Thanks,
Wanlong Gao
>
> Thanks
>> vi->affinity_hint_set = false;
>> }
>> }
>>
>> +static void virtnet_set_affinity(struct virtnet_info *vi)
>> +{
>> + int i;
>> + int cpu;
>> +
>> + /* In multiqueue mode, when the number of cpu is equal to the number of
>> + * queue pairs, we let the queue pairs to be private to one cpu by
>> + * setting the affinity hint to eliminate the contention.
>> + */
>> + if (vi->curr_queue_pairs == 1 ||
>> + vi->max_queue_pairs != num_online_cpus()) {
>> + if (vi->affinity_hint_set)
>> + virtnet_clean_affinity(vi, -1);
>> + else
>> + return;
>> + }
>> +
>> + i = 0;
>> + for_each_online_cpu(cpu) {
>> + virtqueue_set_affinity(vi->rq[i].vq, cpu);
>> + virtqueue_set_affinity(vi->sq[i].vq, cpu);
>> + *per_cpu_ptr(vi->vq_index, cpu) = i;
>> + i++;
>> + }
>> +
>> + vi->affinity_hint_set = true;
>> +}
>> +
>> static void virtnet_get_ringparam(struct net_device *dev,
>> struct ethtool_ringparam *ring)
>> {
>> @@ -1105,7 +1114,7 @@ static int virtnet_set_channels(struct net_device *dev,
>> netif_set_real_num_rx_queues(dev, queue_pairs);
>>
>> get_online_cpus();
>> - virtnet_set_affinity(vi, true);
>> + virtnet_set_affinity(vi);
>> put_online_cpus();
>> }
>>
>> @@ -1274,7 +1283,7 @@ static void virtnet_del_vqs(struct virtnet_info *vi)
>> {
>> struct virtio_device *vdev = vi->vdev;
>>
>> - virtnet_set_affinity(vi, false);
>> + virtnet_clean_affinity(vi, -1);
>>
>> vdev->config->del_vqs(vdev);
>>
>> @@ -1398,7 +1407,7 @@ static int init_vqs(struct virtnet_info *vi)
>> goto err_free;
>>
>> get_online_cpus();
>> - virtnet_set_affinity(vi, true);
>> + virtnet_set_affinity(vi);
>> put_online_cpus();
>>
>> return 0;
>
>
* Re: [PATCH V6 2/3] virtio-net: split out clean affinity function
2013-01-25 4:20 ` Wanlong Gao
@ 2013-01-25 5:13 ` Jason Wang
2013-01-25 5:40 ` Wanlong Gao
0 siblings, 1 reply; 15+ messages in thread
From: Jason Wang @ 2013-01-25 5:13 UTC (permalink / raw)
To: gaowanlong
Cc: Michael S. Tsirkin, netdev, linux-kernel, virtualization,
Eric Dumazet
On 01/25/2013 12:20 PM, Wanlong Gao wrote:
> On 01/25/2013 11:28 AM, Jason Wang wrote:
>> On 01/21/2013 07:25 PM, Wanlong Gao wrote:
>>> Split out the clean affinity function to virtnet_clean_affinity().
>>>
>>> Cc: Rusty Russell <rusty@rustcorp.com.au>
>>> Cc: "Michael S. Tsirkin" <mst@redhat.com>
>>> Cc: Jason Wang <jasowang@redhat.com>
>>> Cc: Eric Dumazet <erdnetdev@gmail.com>
>>> Cc: virtualization@lists.linux-foundation.org
>>> Cc: netdev@vger.kernel.org
>>> Signed-off-by: Wanlong Gao <gaowanlong@cn.fujitsu.com>
>>> ---
>>> V5->V6: NEW
>>>
>>> drivers/net/virtio_net.c | 67 +++++++++++++++++++++++++++---------------------
>>> 1 file changed, 38 insertions(+), 29 deletions(-)
>>>
>>> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
>>> index 70cd957..1a35a8c 100644
>>> --- a/drivers/net/virtio_net.c
>>> +++ b/drivers/net/virtio_net.c
>>> @@ -1016,48 +1016,57 @@ static int virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
>>> return 0;
>>> }
>>>
>>> -static void virtnet_set_affinity(struct virtnet_info *vi, bool set)
>>> +static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu)
>>> {
>>> int i;
>>> int cpu;
>>>
>>> - /* In multiqueue mode, when the number of cpu is equal to the number of
>>> - * queue pairs, we let the queue pairs to be private to one cpu by
>>> - * setting the affinity hint to eliminate the contention.
>>> - */
>>> - if ((vi->curr_queue_pairs == 1 ||
>>> - vi->max_queue_pairs != num_online_cpus()) && set) {
>>> - if (vi->affinity_hint_set)
>>> - set = false;
>>> - else
>>> - return;
>>> - }
>>> -
>>> - if (set) {
>>> - i = 0;
>>> - for_each_online_cpu(cpu) {
>>> - virtqueue_set_affinity(vi->rq[i].vq, cpu);
>>> - virtqueue_set_affinity(vi->sq[i].vq, cpu);
>>> - *per_cpu_ptr(vi->vq_index, cpu) = i;
>>> - i++;
>>> - }
>>> -
>>> - vi->affinity_hint_set = true;
>>> - } else {
>>> - for(i = 0; i < vi->max_queue_pairs; i++) {
>>> + if (vi->affinity_hint_set) {
>>> + for (i = 0; i < vi->max_queue_pairs; i++) {
>>> virtqueue_set_affinity(vi->rq[i].vq, -1);
>>> virtqueue_set_affinity(vi->sq[i].vq, -1);
>>> }
>>>
>>> i = 0;
>>> - for_each_online_cpu(cpu)
>>> + for_each_online_cpu(cpu) {
>>> + if (cpu == hcpu)
>>> + continue;
>>> *per_cpu_ptr(vi->vq_index, cpu) =
>>> ++i % vi->curr_queue_pairs;
>>> + }
>>>
>> Some questions here:
>>
>> - Do we need to reset the affinity of the queue here, like this?
>>
>> virtqueue_set_affinity(vi->sq[*per_cpu_ptr(vi->vq_index, hcpu)].vq, -1);
>> virtqueue_set_affinity(vi->rq[*per_cpu_ptr(vi->vq_index, hcpu)].vq, -1);
> I think not; we are going to unset the affinity of all the set queues,
> including hcpu.
>
>> - Looks like we also need to reset the percpu index when
>> vi->affinity_hint_set is false.
> Yes, I'll follow this and the comment on [1/3].
>
>> - Do we really need this reset? Consider that we're going to reset the
>> percpu index in CPU_DEAD?
> I think resetting at CPU_DOWN_PREPARE can avoid selecting the wrong queue
> on the dying CPU.
I didn't understand this. What does 'wrong queue' mean here? It looks like
you didn't change the preferable queue of the dying CPU and just changed
all the others.
>
> Thanks,
> Wanlong Gao
>
>> Thanks
>>> vi->affinity_hint_set = false;
>>> }
>>> }
>>>
>>> +static void virtnet_set_affinity(struct virtnet_info *vi)
>>> +{
>>> + int i;
>>> + int cpu;
>>> +
>>> + /* In multiqueue mode, when the number of cpu is equal to the number of
>>> + * queue pairs, we let the queue pairs to be private to one cpu by
>>> + * setting the affinity hint to eliminate the contention.
>>> + */
>>> + if (vi->curr_queue_pairs == 1 ||
>>> + vi->max_queue_pairs != num_online_cpus()) {
>>> + if (vi->affinity_hint_set)
>>> + virtnet_clean_affinity(vi, -1);
>>> + else
>>> + return;
>>> + }
>>> +
>>> + i = 0;
>>> + for_each_online_cpu(cpu) {
>>> + virtqueue_set_affinity(vi->rq[i].vq, cpu);
>>> + virtqueue_set_affinity(vi->sq[i].vq, cpu);
>>> + *per_cpu_ptr(vi->vq_index, cpu) = i;
>>> + i++;
>>> + }
>>> +
>>> + vi->affinity_hint_set = true;
>>> +}
>>> +
>>> static void virtnet_get_ringparam(struct net_device *dev,
>>> struct ethtool_ringparam *ring)
>>> {
>>> @@ -1105,7 +1114,7 @@ static int virtnet_set_channels(struct net_device *dev,
>>> netif_set_real_num_rx_queues(dev, queue_pairs);
>>>
>>> get_online_cpus();
>>> - virtnet_set_affinity(vi, true);
>>> + virtnet_set_affinity(vi);
>>> put_online_cpus();
>>> }
>>>
>>> @@ -1274,7 +1283,7 @@ static void virtnet_del_vqs(struct virtnet_info *vi)
>>> {
>>> struct virtio_device *vdev = vi->vdev;
>>>
>>> - virtnet_set_affinity(vi, false);
>>> + virtnet_clean_affinity(vi, -1);
>>>
>>> vdev->config->del_vqs(vdev);
>>>
>>> @@ -1398,7 +1407,7 @@ static int init_vqs(struct virtnet_info *vi)
>>> goto err_free;
>>>
>>> get_online_cpus();
>>> - virtnet_set_affinity(vi, true);
>>> + virtnet_set_affinity(vi);
>>> put_online_cpus();
>>>
>>> return 0;
>>
* Re: [PATCH V6 2/3] virtio-net: split out clean affinity function
2013-01-25 5:13 ` Jason Wang
@ 2013-01-25 5:40 ` Wanlong Gao
2013-01-25 6:12 ` Jason Wang
0 siblings, 1 reply; 15+ messages in thread
From: Wanlong Gao @ 2013-01-25 5:40 UTC (permalink / raw)
To: Jason Wang
Cc: linux-kernel, Rusty Russell, Michael S. Tsirkin, Eric Dumazet,
virtualization, netdev, Wanlong Gao
On 01/25/2013 01:13 PM, Jason Wang wrote:
> On 01/25/2013 12:20 PM, Wanlong Gao wrote:
>> On 01/25/2013 11:28 AM, Jason Wang wrote:
>>> On 01/21/2013 07:25 PM, Wanlong Gao wrote:
>>>> Split out the clean affinity function to virtnet_clean_affinity().
>>>>
>>>> Cc: Rusty Russell <rusty@rustcorp.com.au>
>>>> Cc: "Michael S. Tsirkin" <mst@redhat.com>
>>>> Cc: Jason Wang <jasowang@redhat.com>
>>>> Cc: Eric Dumazet <erdnetdev@gmail.com>
>>>> Cc: virtualization@lists.linux-foundation.org
>>>> Cc: netdev@vger.kernel.org
>>>> Signed-off-by: Wanlong Gao <gaowanlong@cn.fujitsu.com>
>>>> ---
>>>> V5->V6: NEW
>>>>
>>>> drivers/net/virtio_net.c | 67 +++++++++++++++++++++++++++---------------------
>>>> 1 file changed, 38 insertions(+), 29 deletions(-)
>>>>
>>>> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
>>>> index 70cd957..1a35a8c 100644
>>>> --- a/drivers/net/virtio_net.c
>>>> +++ b/drivers/net/virtio_net.c
>>>> @@ -1016,48 +1016,57 @@ static int virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
>>>> return 0;
>>>> }
>>>>
>>>> -static void virtnet_set_affinity(struct virtnet_info *vi, bool set)
>>>> +static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu)
>>>> {
>>>> int i;
>>>> int cpu;
>>>>
>>>> - /* In multiqueue mode, when the number of cpu is equal to the number of
>>>> - * queue pairs, we let the queue pairs to be private to one cpu by
>>>> - * setting the affinity hint to eliminate the contention.
>>>> - */
>>>> - if ((vi->curr_queue_pairs == 1 ||
>>>> - vi->max_queue_pairs != num_online_cpus()) && set) {
>>>> - if (vi->affinity_hint_set)
>>>> - set = false;
>>>> - else
>>>> - return;
>>>> - }
>>>> -
>>>> - if (set) {
>>>> - i = 0;
>>>> - for_each_online_cpu(cpu) {
>>>> - virtqueue_set_affinity(vi->rq[i].vq, cpu);
>>>> - virtqueue_set_affinity(vi->sq[i].vq, cpu);
>>>> - *per_cpu_ptr(vi->vq_index, cpu) = i;
>>>> - i++;
>>>> - }
>>>> -
>>>> - vi->affinity_hint_set = true;
>>>> - } else {
>>>> - for(i = 0; i < vi->max_queue_pairs; i++) {
>>>> + if (vi->affinity_hint_set) {
>>>> + for (i = 0; i < vi->max_queue_pairs; i++) {
>>>> virtqueue_set_affinity(vi->rq[i].vq, -1);
>>>> virtqueue_set_affinity(vi->sq[i].vq, -1);
>>>> }
>>>>
>>>> i = 0;
>>>> - for_each_online_cpu(cpu)
>>>> + for_each_online_cpu(cpu) {
>>>> + if (cpu == hcpu)
>>>> + continue;
>>>> *per_cpu_ptr(vi->vq_index, cpu) =
>>>> ++i % vi->curr_queue_pairs;
>>>> + }
>>>>
>>> Some questions here:
>>>
>>> - Do we need to reset the affinity of the queue here, like this?
>>>
>>> virtqueue_set_affinity(vi->sq[*per_cpu_ptr(vi->vq_index, hcpu)].vq, -1);
>>> virtqueue_set_affinity(vi->rq[*per_cpu_ptr(vi->vq_index, hcpu)].vq, -1);
>> I think not; we are going to unset the affinity of all the set queues,
>> including hcpu.
>>
>>> - Looks like we also need to reset the percpu index when
>>> vi->affinity_hint_set is false.
>> Yes, I'll follow this and the comment on [1/3].
>>
>>> - Do we really need this reset? Consider that we're going to reset the
>>> percpu index in CPU_DEAD?
>> I think resetting at CPU_DOWN_PREPARE can avoid selecting the wrong queue
>> on the dying CPU.
>
> I didn't understand this. What does 'wrong queue' mean here? It looks like
> you didn't change the preferable queue of the dying CPU and just changed
> all the others.
How about setting the vq index to -1 on hcpu when doing DOWN_PREPARE,
so that it selects txq 0 when the CPU is dying?
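A minimal sketch of that idea (illustrative only; patch 1/3's diff and the
actual transmit-queue selection path are not shown in this thread, so the
helpers below and their names are assumptions):

    /* CPU_DOWN_PREPARE: invalidate the dying CPU's preferable queue so
     * the transmit path stops using a stale per-cpu index.
     */
    static void virtnet_invalidate_vq_index(struct virtnet_info *vi, long hcpu)
    {
            *per_cpu_ptr(vi->vq_index, hcpu) = -1;
    }

    /* Assumed shape of the per-cpu lookup on the transmit path:
     * -1 means "no hint", so fall back to txq 0.
     */
    static int virtnet_preferable_txq(struct virtnet_info *vi)
    {
            int txq = *per_cpu_ptr(vi->vq_index, smp_processor_id());

            return txq >= 0 ? txq : 0;
    }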
Thanks,
Wanlong Gao
>>
>> Thanks,
>> Wanlong Gao
>>
>>> Thanks
>>>> vi->affinity_hint_set = false;
>>>> }
>>>> }
>>>>
>>>> +static void virtnet_set_affinity(struct virtnet_info *vi)
>>>> +{
>>>> + int i;
>>>> + int cpu;
>>>> +
>>>> + /* In multiqueue mode, when the number of cpu is equal to the number of
>>>> + * queue pairs, we let the queue pairs to be private to one cpu by
>>>> + * setting the affinity hint to eliminate the contention.
>>>> + */
>>>> + if (vi->curr_queue_pairs == 1 ||
>>>> + vi->max_queue_pairs != num_online_cpus()) {
>>>> + if (vi->affinity_hint_set)
>>>> + virtnet_clean_affinity(vi, -1);
>>>> + else
>>>> + return;
>>>> + }
>>>> +
>>>> + i = 0;
>>>> + for_each_online_cpu(cpu) {
>>>> + virtqueue_set_affinity(vi->rq[i].vq, cpu);
>>>> + virtqueue_set_affinity(vi->sq[i].vq, cpu);
>>>> + *per_cpu_ptr(vi->vq_index, cpu) = i;
>>>> + i++;
>>>> + }
>>>> +
>>>> + vi->affinity_hint_set = true;
>>>> +}
>>>> +
>>>> static void virtnet_get_ringparam(struct net_device *dev,
>>>> struct ethtool_ringparam *ring)
>>>> {
>>>> @@ -1105,7 +1114,7 @@ static int virtnet_set_channels(struct net_device *dev,
>>>> netif_set_real_num_rx_queues(dev, queue_pairs);
>>>>
>>>> get_online_cpus();
>>>> - virtnet_set_affinity(vi, true);
>>>> + virtnet_set_affinity(vi);
>>>> put_online_cpus();
>>>> }
>>>>
>>>> @@ -1274,7 +1283,7 @@ static void virtnet_del_vqs(struct virtnet_info *vi)
>>>> {
>>>> struct virtio_device *vdev = vi->vdev;
>>>>
>>>> - virtnet_set_affinity(vi, false);
>>>> + virtnet_clean_affinity(vi, -1);
>>>>
>>>> vdev->config->del_vqs(vdev);
>>>>
>>>> @@ -1398,7 +1407,7 @@ static int init_vqs(struct virtnet_info *vi)
>>>> goto err_free;
>>>>
>>>> get_online_cpus();
>>>> - virtnet_set_affinity(vi, true);
>>>> + virtnet_set_affinity(vi);
>>>> put_online_cpus();
>>>>
>>>> return 0;
>>>
>
>
* Re: [PATCH V6 2/3] virtio-net: split out clean affinity function
2013-01-25 5:40 ` Wanlong Gao
@ 2013-01-25 6:12 ` Jason Wang
2013-01-25 6:42 ` Wanlong Gao
0 siblings, 1 reply; 15+ messages in thread
From: Jason Wang @ 2013-01-25 6:12 UTC (permalink / raw)
To: gaowanlong
Cc: Michael S. Tsirkin, netdev, linux-kernel, virtualization,
Eric Dumazet
On 01/25/2013 01:40 PM, Wanlong Gao wrote:
> On 01/25/2013 01:13 PM, Jason Wang wrote:
>> On 01/25/2013 12:20 PM, Wanlong Gao wrote:
>>> On 01/25/2013 11:28 AM, Jason Wang wrote:
>>>> On 01/21/2013 07:25 PM, Wanlong Gao wrote:
>>>>> Split out the clean affinity function to virtnet_clean_affinity().
>>>>>
>>>>> Cc: Rusty Russell <rusty@rustcorp.com.au>
>>>>> Cc: "Michael S. Tsirkin" <mst@redhat.com>
>>>>> Cc: Jason Wang <jasowang@redhat.com>
>>>>> Cc: Eric Dumazet <erdnetdev@gmail.com>
>>>>> Cc: virtualization@lists.linux-foundation.org
>>>>> Cc: netdev@vger.kernel.org
>>>>> Signed-off-by: Wanlong Gao <gaowanlong@cn.fujitsu.com>
>>>>> ---
>>>>> V5->V6: NEW
>>>>>
>>>>> drivers/net/virtio_net.c | 67 +++++++++++++++++++++++++++---------------------
>>>>> 1 file changed, 38 insertions(+), 29 deletions(-)
>>>>>
>>>>> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
>>>>> index 70cd957..1a35a8c 100644
>>>>> --- a/drivers/net/virtio_net.c
>>>>> +++ b/drivers/net/virtio_net.c
>>>>> @@ -1016,48 +1016,57 @@ static int virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
>>>>> return 0;
>>>>> }
>>>>>
>>>>> -static void virtnet_set_affinity(struct virtnet_info *vi, bool set)
>>>>> +static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu)
>>>>> {
>>>>> int i;
>>>>> int cpu;
>>>>>
>>>>> - /* In multiqueue mode, when the number of cpu is equal to the number of
>>>>> - * queue pairs, we let the queue pairs to be private to one cpu by
>>>>> - * setting the affinity hint to eliminate the contention.
>>>>> - */
>>>>> - if ((vi->curr_queue_pairs == 1 ||
>>>>> - vi->max_queue_pairs != num_online_cpus()) && set) {
>>>>> - if (vi->affinity_hint_set)
>>>>> - set = false;
>>>>> - else
>>>>> - return;
>>>>> - }
>>>>> -
>>>>> - if (set) {
>>>>> - i = 0;
>>>>> - for_each_online_cpu(cpu) {
>>>>> - virtqueue_set_affinity(vi->rq[i].vq, cpu);
>>>>> - virtqueue_set_affinity(vi->sq[i].vq, cpu);
>>>>> - *per_cpu_ptr(vi->vq_index, cpu) = i;
>>>>> - i++;
>>>>> - }
>>>>> -
>>>>> - vi->affinity_hint_set = true;
>>>>> - } else {
>>>>> - for(i = 0; i < vi->max_queue_pairs; i++) {
>>>>> + if (vi->affinity_hint_set) {
>>>>> + for (i = 0; i < vi->max_queue_pairs; i++) {
>>>>> virtqueue_set_affinity(vi->rq[i].vq, -1);
>>>>> virtqueue_set_affinity(vi->sq[i].vq, -1);
>>>>> }
>>>>>
>>>>> i = 0;
>>>>> - for_each_online_cpu(cpu)
>>>>> + for_each_online_cpu(cpu) {
>>>>> + if (cpu == hcpu)
>>>>> + continue;
>>>>> *per_cpu_ptr(vi->vq_index, cpu) =
>>>>> ++i % vi->curr_queue_pairs;
>>>>> + }
>>>>>
>>>> Some questions here:
>>>>
>>>> - Do we need to reset the affinity of the queue here, like this?
>>>>
>>>> virtqueue_set_affinity(vi->sq[*per_cpu_ptr(vi->vq_index, hcpu)].vq, -1);
>>>> virtqueue_set_affinity(vi->rq[*per_cpu_ptr(vi->vq_index, hcpu)].vq, -1);
>>> I think not; we are going to unset the affinity of all the set queues,
>>> including hcpu.
>>>
>>>> - Looks like we also need to reset the percpu index when
>>>> vi->affinity_hint_set is false.
>>> Yes, I'll follow this and the comment on [1/3].
>>>
>>>> - Do we really need this reset? Consider that we're going to reset the
>>>> percpu index in CPU_DEAD?
>>> I think resetting at CPU_DOWN_PREPARE can avoid selecting the wrong queue
>>> on the dying CPU.
>> I didn't understand this. What does 'wrong queue' mean here? It looks like
>> you didn't change the preferable queue of the dying CPU and just changed
>> all the others.
> How about setting the vq index to -1 on hcpu when doing DOWN_PREPARE,
> so that it selects txq 0 when the CPU is dying?
Looks safe. So it seems what you're trying to solve here is the race
between cpu hotplug and virtnet_set_channels(). A possibly better
solution is to serialize them by also protecting virtnet_set_queues()
with get_online_cpus(). After this, we can make sure the number of
channels is not changed during cpu hotplug, so there looks to be no
need to reset the preferable queues in DOWN_PREPARE.
What's your opinion?
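A sketch of that suggestion (virtnet_set_queues() itself is not shown in
this thread, so its body here, including the command helper, is assumed):

    static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
    {
            int err;

            /* Serialize against cpu hotplug: no CPU can come or go while
             * the queue count and the affinity hints are being changed.
             */
            get_online_cpus();
            err = virtnet_send_queue_pairs_cmd(vi, queue_pairs); /* assumed */
            if (!err) {
                    vi->curr_queue_pairs = queue_pairs;
                    virtnet_set_affinity(vi);
            }
            put_online_cpus();

            return err;
    }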
Thanks
>
> Thanks,
> Wanlong Gao
>
>>> Thanks,
>>> Wanlong Gao
>>>
>>>> Thanks
>>>>> vi->affinity_hint_set = false;
>>>>> }
>>>>> }
>>>>>
>>>>> +static void virtnet_set_affinity(struct virtnet_info *vi)
>>>>> +{
>>>>> + int i;
>>>>> + int cpu;
>>>>> +
>>>>> + /* In multiqueue mode, when the number of cpu is equal to the number of
>>>>> + * queue pairs, we let the queue pairs to be private to one cpu by
>>>>> + * setting the affinity hint to eliminate the contention.
>>>>> + */
>>>>> + if (vi->curr_queue_pairs == 1 ||
>>>>> + vi->max_queue_pairs != num_online_cpus()) {
>>>>> + if (vi->affinity_hint_set)
>>>>> + virtnet_clean_affinity(vi, -1);
>>>>> + else
>>>>> + return;
>>>>> + }
>>>>> +
>>>>> + i = 0;
>>>>> + for_each_online_cpu(cpu) {
>>>>> + virtqueue_set_affinity(vi->rq[i].vq, cpu);
>>>>> + virtqueue_set_affinity(vi->sq[i].vq, cpu);
>>>>> + *per_cpu_ptr(vi->vq_index, cpu) = i;
>>>>> + i++;
>>>>> + }
>>>>> +
>>>>> + vi->affinity_hint_set = true;
>>>>> +}
>>>>> +
>>>>> static void virtnet_get_ringparam(struct net_device *dev,
>>>>> struct ethtool_ringparam *ring)
>>>>> {
>>>>> @@ -1105,7 +1114,7 @@ static int virtnet_set_channels(struct net_device *dev,
>>>>> netif_set_real_num_rx_queues(dev, queue_pairs);
>>>>>
>>>>> get_online_cpus();
>>>>> - virtnet_set_affinity(vi, true);
>>>>> + virtnet_set_affinity(vi);
>>>>> put_online_cpus();
>>>>> }
>>>>>
>>>>> @@ -1274,7 +1283,7 @@ static void virtnet_del_vqs(struct virtnet_info *vi)
>>>>> {
>>>>> struct virtio_device *vdev = vi->vdev;
>>>>>
>>>>> - virtnet_set_affinity(vi, false);
>>>>> + virtnet_clean_affinity(vi, -1);
>>>>>
>>>>> vdev->config->del_vqs(vdev);
>>>>>
>>>>> @@ -1398,7 +1407,7 @@ static int init_vqs(struct virtnet_info *vi)
>>>>> goto err_free;
>>>>>
>>>>> get_online_cpus();
>>>>> - virtnet_set_affinity(vi, true);
>>>>> + virtnet_set_affinity(vi);
>>>>> put_online_cpus();
>>>>>
>>>>> return 0;
>>
* Re: [PATCH V6 2/3] virtio-net: split out clean affinity function
2013-01-25 6:12 ` Jason Wang
@ 2013-01-25 6:42 ` Wanlong Gao
2013-01-25 7:04 ` Jason Wang
0 siblings, 1 reply; 15+ messages in thread
From: Wanlong Gao @ 2013-01-25 6:42 UTC (permalink / raw)
To: Jason Wang
Cc: linux-kernel, Rusty Russell, Michael S. Tsirkin, Eric Dumazet,
virtualization, netdev, Wanlong Gao
On 01/25/2013 02:12 PM, Jason Wang wrote:
> On 01/25/2013 01:40 PM, Wanlong Gao wrote:
>> On 01/25/2013 01:13 PM, Jason Wang wrote:
>>> On 01/25/2013 12:20 PM, Wanlong Gao wrote:
>>>> On 01/25/2013 11:28 AM, Jason Wang wrote:
>>>>> On 01/21/2013 07:25 PM, Wanlong Gao wrote:
>>>>>> Split out the clean affinity function to virtnet_clean_affinity().
>>>>>>
>>>>>> Cc: Rusty Russell <rusty@rustcorp.com.au>
>>>>>> Cc: "Michael S. Tsirkin" <mst@redhat.com>
>>>>>> Cc: Jason Wang <jasowang@redhat.com>
>>>>>> Cc: Eric Dumazet <erdnetdev@gmail.com>
>>>>>> Cc: virtualization@lists.linux-foundation.org
>>>>>> Cc: netdev@vger.kernel.org
>>>>>> Signed-off-by: Wanlong Gao <gaowanlong@cn.fujitsu.com>
>>>>>> ---
>>>>>> V5->V6: NEW
>>>>>>
>>>>>> drivers/net/virtio_net.c | 67 +++++++++++++++++++++++++++---------------------
>>>>>> 1 file changed, 38 insertions(+), 29 deletions(-)
>>>>>>
>>>>>> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
>>>>>> index 70cd957..1a35a8c 100644
>>>>>> --- a/drivers/net/virtio_net.c
>>>>>> +++ b/drivers/net/virtio_net.c
>>>>>> @@ -1016,48 +1016,57 @@ static int virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
>>>>>> return 0;
>>>>>> }
>>>>>>
>>>>>> -static void virtnet_set_affinity(struct virtnet_info *vi, bool set)
>>>>>> +static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu)
>>>>>> {
>>>>>> int i;
>>>>>> int cpu;
>>>>>>
>>>>>> - /* In multiqueue mode, when the number of cpu is equal to the number of
>>>>>> - * queue pairs, we let the queue pairs to be private to one cpu by
>>>>>> - * setting the affinity hint to eliminate the contention.
>>>>>> - */
>>>>>> - if ((vi->curr_queue_pairs == 1 ||
>>>>>> - vi->max_queue_pairs != num_online_cpus()) && set) {
>>>>>> - if (vi->affinity_hint_set)
>>>>>> - set = false;
>>>>>> - else
>>>>>> - return;
>>>>>> - }
>>>>>> -
>>>>>> - if (set) {
>>>>>> - i = 0;
>>>>>> - for_each_online_cpu(cpu) {
>>>>>> - virtqueue_set_affinity(vi->rq[i].vq, cpu);
>>>>>> - virtqueue_set_affinity(vi->sq[i].vq, cpu);
>>>>>> - *per_cpu_ptr(vi->vq_index, cpu) = i;
>>>>>> - i++;
>>>>>> - }
>>>>>> -
>>>>>> - vi->affinity_hint_set = true;
>>>>>> - } else {
>>>>>> - for(i = 0; i < vi->max_queue_pairs; i++) {
>>>>>> + if (vi->affinity_hint_set) {
>>>>>> + for (i = 0; i < vi->max_queue_pairs; i++) {
>>>>>> virtqueue_set_affinity(vi->rq[i].vq, -1);
>>>>>> virtqueue_set_affinity(vi->sq[i].vq, -1);
>>>>>> }
>>>>>>
>>>>>> i = 0;
>>>>>> - for_each_online_cpu(cpu)
>>>>>> + for_each_online_cpu(cpu) {
>>>>>> + if (cpu == hcpu)
>>>>>> + continue;
>>>>>> *per_cpu_ptr(vi->vq_index, cpu) =
>>>>>> ++i % vi->curr_queue_pairs;
>>>>>> + }
>>>>>>
>>>>> Some questions here:
>>>>>
>>>>> - Do we need to reset the affinity of the queue here, like this?
>>>>>
>>>>> virtqueue_set_affinity(vi->sq[*per_cpu_ptr(vi->vq_index, hcpu)].vq, -1);
>>>>> virtqueue_set_affinity(vi->rq[*per_cpu_ptr(vi->vq_index, hcpu)].vq, -1);
>>>> I think not; we are going to unset the affinity of all the set queues,
>>>> including hcpu.
>>>>
>>>>> - Looks like we also need to reset the percpu index when
>>>>> vi->affinity_hint_set is false.
>>>> Yes, I'll follow this and the comment on [1/3].
>>>>
>>>>> - Do we really need this reset? Consider that we're going to reset the
>>>>> percpu index in CPU_DEAD?
>>>> I think resetting at CPU_DOWN_PREPARE can avoid selecting the wrong queue
>>>> on the dying CPU.
>>> I didn't understand this. What does 'wrong queue' mean here? It looks like
>>> you didn't change the preferable queue of the dying CPU and just changed
>>> all the others.
>> How about setting the vq index to -1 on hcpu when doing DOWN_PREPARE,
>> so that it selects txq 0 when the CPU is dying?
>
> Looks safe. So it seems what you're trying to solve here is the race
> between cpu hotplug and virtnet_set_channels(). A possibly better
> solution is to serialize them by also protecting virtnet_set_queues()
> with get_online_cpus(). After this, we can make sure the number of
> channels is not changed during cpu hotplug, so there looks to be no
> need to reset the preferable queues in DOWN_PREPARE.
>
> What's your opinion?
IMHO, serializing every time will take the lock and may slow down this
path, while the hot-unplug path is much colder. So I prefer resetting the
preferable queues in DOWN_PREPARE rather than serializing them. Agree?
Thanks,
Wanlong Gao
>
> Thanks
>>
>> Thanks,
>> Wanlong Gao
>>
>>>> Thanks,
>>>> Wanlong Gao
>>>>
>>>>> Thanks
>>>>>> vi->affinity_hint_set = false;
>>>>>> }
>>>>>> }
>>>>>>
>>>>>> +static void virtnet_set_affinity(struct virtnet_info *vi)
>>>>>> +{
>>>>>> + int i;
>>>>>> + int cpu;
>>>>>> +
>>>>>> + /* In multiqueue mode, when the number of cpu is equal to the number of
>>>>>> + * queue pairs, we let the queue pairs to be private to one cpu by
>>>>>> + * setting the affinity hint to eliminate the contention.
>>>>>> + */
>>>>>> + if (vi->curr_queue_pairs == 1 ||
>>>>>> + vi->max_queue_pairs != num_online_cpus()) {
>>>>>> + if (vi->affinity_hint_set)
>>>>>> + virtnet_clean_affinity(vi, -1);
>>>>>> + else
>>>>>> + return;
>>>>>> + }
>>>>>> +
>>>>>> + i = 0;
>>>>>> + for_each_online_cpu(cpu) {
>>>>>> + virtqueue_set_affinity(vi->rq[i].vq, cpu);
>>>>>> + virtqueue_set_affinity(vi->sq[i].vq, cpu);
>>>>>> + *per_cpu_ptr(vi->vq_index, cpu) = i;
>>>>>> + i++;
>>>>>> + }
>>>>>> +
>>>>>> + vi->affinity_hint_set = true;
>>>>>> +}
>>>>>> +
>>>>>> static void virtnet_get_ringparam(struct net_device *dev,
>>>>>> struct ethtool_ringparam *ring)
>>>>>> {
>>>>>> @@ -1105,7 +1114,7 @@ static int virtnet_set_channels(struct net_device *dev,
>>>>>> netif_set_real_num_rx_queues(dev, queue_pairs);
>>>>>>
>>>>>> get_online_cpus();
>>>>>> - virtnet_set_affinity(vi, true);
>>>>>> + virtnet_set_affinity(vi);
>>>>>> put_online_cpus();
>>>>>> }
>>>>>>
>>>>>> @@ -1274,7 +1283,7 @@ static void virtnet_del_vqs(struct virtnet_info *vi)
>>>>>> {
>>>>>> struct virtio_device *vdev = vi->vdev;
>>>>>>
>>>>>> - virtnet_set_affinity(vi, false);
>>>>>> + virtnet_clean_affinity(vi, -1);
>>>>>>
>>>>>> vdev->config->del_vqs(vdev);
>>>>>>
>>>>>> @@ -1398,7 +1407,7 @@ static int init_vqs(struct virtnet_info *vi)
>>>>>> goto err_free;
>>>>>>
>>>>>> get_online_cpus();
>>>>>> - virtnet_set_affinity(vi, true);
>>>>>> + virtnet_set_affinity(vi);
>>>>>> put_online_cpus();
>>>>>>
>>>>>> return 0;
>>>
>
>
* Re: [PATCH V6 2/3] virtio-net: split out clean affinity function
2013-01-25 6:42 ` Wanlong Gao
@ 2013-01-25 7:04 ` Jason Wang
2013-01-25 7:22 ` Wanlong Gao
0 siblings, 1 reply; 15+ messages in thread
From: Jason Wang @ 2013-01-25 7:04 UTC (permalink / raw)
To: gaowanlong
Cc: Michael S. Tsirkin, netdev, linux-kernel, virtualization,
Eric Dumazet
On 01/25/2013 02:42 PM, Wanlong Gao wrote:
> On 01/25/2013 02:12 PM, Jason Wang wrote:
>> On 01/25/2013 01:40 PM, Wanlong Gao wrote:
>>> On 01/25/2013 01:13 PM, Jason Wang wrote:
>>>> On 01/25/2013 12:20 PM, Wanlong Gao wrote:
>>>>> On 01/25/2013 11:28 AM, Jason Wang wrote:
>>>>>> On 01/21/2013 07:25 PM, Wanlong Gao wrote:
>>>>>>> Split out the clean affinity function to virtnet_clean_affinity().
>>>>>>>
>>>>>>> Cc: Rusty Russell <rusty@rustcorp.com.au>
>>>>>>> Cc: "Michael S. Tsirkin" <mst@redhat.com>
>>>>>>> Cc: Jason Wang <jasowang@redhat.com>
>>>>>>> Cc: Eric Dumazet <erdnetdev@gmail.com>
>>>>>>> Cc: virtualization@lists.linux-foundation.org
>>>>>>> Cc: netdev@vger.kernel.org
>>>>>>> Signed-off-by: Wanlong Gao <gaowanlong@cn.fujitsu.com>
>>>>>>> ---
>>>>>>> V5->V6: NEW
>>>>>>>
>>>>>>> drivers/net/virtio_net.c | 67 +++++++++++++++++++++++++++---------------------
>>>>>>> 1 file changed, 38 insertions(+), 29 deletions(-)
>>>>>>>
>>>>>>> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
>>>>>>> index 70cd957..1a35a8c 100644
>>>>>>> --- a/drivers/net/virtio_net.c
>>>>>>> +++ b/drivers/net/virtio_net.c
>>>>>>> @@ -1016,48 +1016,57 @@ static int virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
>>>>>>> return 0;
>>>>>>> }
>>>>>>>
>>>>>>> -static void virtnet_set_affinity(struct virtnet_info *vi, bool set)
>>>>>>> +static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu)
>>>>>>> {
>>>>>>> int i;
>>>>>>> int cpu;
>>>>>>>
>>>>>>> - /* In multiqueue mode, when the number of cpu is equal to the number of
>>>>>>> - * queue pairs, we let the queue pairs to be private to one cpu by
>>>>>>> - * setting the affinity hint to eliminate the contention.
>>>>>>> - */
>>>>>>> - if ((vi->curr_queue_pairs == 1 ||
>>>>>>> - vi->max_queue_pairs != num_online_cpus()) && set) {
>>>>>>> - if (vi->affinity_hint_set)
>>>>>>> - set = false;
>>>>>>> - else
>>>>>>> - return;
>>>>>>> - }
>>>>>>> -
>>>>>>> - if (set) {
>>>>>>> - i = 0;
>>>>>>> - for_each_online_cpu(cpu) {
>>>>>>> - virtqueue_set_affinity(vi->rq[i].vq, cpu);
>>>>>>> - virtqueue_set_affinity(vi->sq[i].vq, cpu);
>>>>>>> - *per_cpu_ptr(vi->vq_index, cpu) = i;
>>>>>>> - i++;
>>>>>>> - }
>>>>>>> -
>>>>>>> - vi->affinity_hint_set = true;
>>>>>>> - } else {
>>>>>>> - for(i = 0; i < vi->max_queue_pairs; i++) {
>>>>>>> + if (vi->affinity_hint_set) {
>>>>>>> + for (i = 0; i < vi->max_queue_pairs; i++) {
>>>>>>> virtqueue_set_affinity(vi->rq[i].vq, -1);
>>>>>>> virtqueue_set_affinity(vi->sq[i].vq, -1);
>>>>>>> }
>>>>>>>
>>>>>>> i = 0;
>>>>>>> - for_each_online_cpu(cpu)
>>>>>>> + for_each_online_cpu(cpu) {
>>>>>>> + if (cpu == hcpu)
>>>>>>> + continue;
>>>>>>> *per_cpu_ptr(vi->vq_index, cpu) =
>>>>>>> ++i % vi->curr_queue_pairs;
>>>>>>> + }
>>>>>>>
>>>>>> Some questions here:
>>>>>>
>>>>>> - Do we need to reset the affinity of the queue here, like this?
>>>>>>
>>>>>> virtqueue_set_affinity(vi->sq[*per_cpu_ptr(vi->vq_index, hcpu)].vq, -1);
>>>>>> virtqueue_set_affinity(vi->rq[*per_cpu_ptr(vi->vq_index, hcpu)].vq, -1);
>>>>> I think not; we are going to unset the affinity of all the set queues,
>>>>> including hcpu.
>>>>>
>>>>>> - Looks like we also need to reset the percpu index when
>>>>>> vi->affinity_hint_set is false.
>>>>> Yes, I'll follow this and the comment on [1/3].
>>>>>
>>>>>> - Do we really need this reset? Consider that we're going to reset the
>>>>>> percpu index in CPU_DEAD?
>>>>> I think resetting at CPU_DOWN_PREPARE can avoid selecting the wrong queue
>>>>> on the dying CPU.
>>>> I didn't understand this. What does 'wrong queue' mean here? It looks like
>>>> you didn't change the preferable queue of the dying CPU and just changed
>>>> all the others.
>>> How about setting the vq index to -1 on hcpu when doing DOWN_PREPARE,
>>> so that it selects txq 0 when the CPU is dying?
>> Looks safe. So it seems what you're trying to solve here is the race
>> between cpu hotplug and virtnet_set_channels(). A possibly better
>> solution is to serialize them by also protecting virtnet_set_queues()
>> with get_online_cpus(). After this, we can make sure the number of
>> channels is not changed during cpu hotplug, so there looks to be no
>> need to reset the preferable queues in DOWN_PREPARE.
>>
>> What's your opinion?
> IMHO, serializing every time will take the lock and may slow down this
> path, while the hot-unplug path is much colder. So I prefer resetting the
> preferable queues in DOWN_PREPARE rather than serializing them. Agree?
I think it's ok since we're in the control path. And the point is that
while you're trying to reset the affinity / preferable queues in the cpu
hotplug callback, there may be another request in virtnet_set_channels()
changing the number of queues, so the result of cpus == queues may be
out of date. Anyway, you need some synchronization.
>
> Thanks,
> Wanlong Gao
>
>> Thanks
>>> Thanks,
>>> Wanlong Gao
>>>
>>>>> Thanks,
>>>>> Wanlong Gao
>>>>>
>>>>>> Thanks
>>>>>>> vi->affinity_hint_set = false;
>>>>>>> }
>>>>>>> }
>>>>>>>
>>>>>>> +static void virtnet_set_affinity(struct virtnet_info *vi)
>>>>>>> +{
>>>>>>> + int i;
>>>>>>> + int cpu;
>>>>>>> +
>>>>>>> + /* In multiqueue mode, when the number of cpu is equal to the number of
>>>>>>> + * queue pairs, we let the queue pairs to be private to one cpu by
>>>>>>> + * setting the affinity hint to eliminate the contention.
>>>>>>> + */
>>>>>>> + if (vi->curr_queue_pairs == 1 ||
>>>>>>> + vi->max_queue_pairs != num_online_cpus()) {
>>>>>>> + if (vi->affinity_hint_set)
>>>>>>> + virtnet_clean_affinity(vi, -1);
>>>>>>> + else
>>>>>>> + return;
>>>>>>> + }
>>>>>>> +
>>>>>>> + i = 0;
>>>>>>> + for_each_online_cpu(cpu) {
>>>>>>> + virtqueue_set_affinity(vi->rq[i].vq, cpu);
>>>>>>> + virtqueue_set_affinity(vi->sq[i].vq, cpu);
>>>>>>> + *per_cpu_ptr(vi->vq_index, cpu) = i;
>>>>>>> + i++;
>>>>>>> + }
>>>>>>> +
>>>>>>> + vi->affinity_hint_set = true;
>>>>>>> +}
>>>>>>> +
>>>>>>> static void virtnet_get_ringparam(struct net_device *dev,
>>>>>>> struct ethtool_ringparam *ring)
>>>>>>> {
>>>>>>> @@ -1105,7 +1114,7 @@ static int virtnet_set_channels(struct net_device *dev,
>>>>>>> netif_set_real_num_rx_queues(dev, queue_pairs);
>>>>>>>
>>>>>>> get_online_cpus();
>>>>>>> - virtnet_set_affinity(vi, true);
>>>>>>> + virtnet_set_affinity(vi);
>>>>>>> put_online_cpus();
>>>>>>> }
>>>>>>>
>>>>>>> @@ -1274,7 +1283,7 @@ static void virtnet_del_vqs(struct virtnet_info *vi)
>>>>>>> {
>>>>>>> struct virtio_device *vdev = vi->vdev;
>>>>>>>
>>>>>>> - virtnet_set_affinity(vi, false);
>>>>>>> + virtnet_clean_affinity(vi, -1);
>>>>>>>
>>>>>>> vdev->config->del_vqs(vdev);
>>>>>>>
>>>>>>> @@ -1398,7 +1407,7 @@ static int init_vqs(struct virtnet_info *vi)
>>>>>>> goto err_free;
>>>>>>>
>>>>>>> get_online_cpus();
>>>>>>> - virtnet_set_affinity(vi, true);
>>>>>>> + virtnet_set_affinity(vi);
>>>>>>> put_online_cpus();
>>>>>>>
>>>>>>> return 0;
>>> --
>>> To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
>>> the body of a message to majordomo@vger.kernel.org
>>> More majordomo info at http://vger.kernel.org/majordomo-info.html
>>> Please read the FAQ at http://www.tux.org/lkml/
>>
* Re: [PATCH V6 2/3] virtio-net: split out clean affinity function
2013-01-25 7:04 ` Jason Wang
@ 2013-01-25 7:22 ` Wanlong Gao
0 siblings, 0 replies; 15+ messages in thread
From: Wanlong Gao @ 2013-01-25 7:22 UTC (permalink / raw)
To: Jason Wang
Cc: Michael S. Tsirkin, netdev, linux-kernel, virtualization,
Eric Dumazet
On 01/25/2013 03:04 PM, Jason Wang wrote:
> On 01/25/2013 02:42 PM, Wanlong Gao wrote:
>> On 01/25/2013 02:12 PM, Jason Wang wrote:
>>> On 01/25/2013 01:40 PM, Wanlong Gao wrote:
>>>> On 01/25/2013 01:13 PM, Jason Wang wrote:
>>>>> On 01/25/2013 12:20 PM, Wanlong Gao wrote:
>>>>>> On 01/25/2013 11:28 AM, Jason Wang wrote:
>>>>>>> On 01/21/2013 07:25 PM, Wanlong Gao wrote:
>>>>>>>> Split out the clean affinity function to virtnet_clean_affinity().
>>>>>>>>
>>>>>>>> Cc: Rusty Russell <rusty@rustcorp.com.au>
>>>>>>>> Cc: "Michael S. Tsirkin" <mst@redhat.com>
>>>>>>>> Cc: Jason Wang <jasowang@redhat.com>
>>>>>>>> Cc: Eric Dumazet <erdnetdev@gmail.com>
>>>>>>>> Cc: virtualization@lists.linux-foundation.org
>>>>>>>> Cc: netdev@vger.kernel.org
>>>>>>>> Signed-off-by: Wanlong Gao <gaowanlong@cn.fujitsu.com>
>>>>>>>> ---
>>>>>>>> V5->V6: NEW
>>>>>>>>
>>>>>>>> drivers/net/virtio_net.c | 67 +++++++++++++++++++++++++++---------------------
>>>>>>>> 1 file changed, 38 insertions(+), 29 deletions(-)
>>>>>>>>
>>>>>>>> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
>>>>>>>> index 70cd957..1a35a8c 100644
>>>>>>>> --- a/drivers/net/virtio_net.c
>>>>>>>> +++ b/drivers/net/virtio_net.c
>>>>>>>> @@ -1016,48 +1016,57 @@ static int virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
>>>>>>>> return 0;
>>>>>>>> }
>>>>>>>>
>>>>>>>> -static void virtnet_set_affinity(struct virtnet_info *vi, bool set)
>>>>>>>> +static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu)
>>>>>>>> {
>>>>>>>> int i;
>>>>>>>> int cpu;
>>>>>>>>
>>>>>>>> - /* In multiqueue mode, when the number of cpu is equal to the number of
>>>>>>>> - * queue pairs, we let the queue pairs to be private to one cpu by
>>>>>>>> - * setting the affinity hint to eliminate the contention.
>>>>>>>> - */
>>>>>>>> - if ((vi->curr_queue_pairs == 1 ||
>>>>>>>> - vi->max_queue_pairs != num_online_cpus()) && set) {
>>>>>>>> - if (vi->affinity_hint_set)
>>>>>>>> - set = false;
>>>>>>>> - else
>>>>>>>> - return;
>>>>>>>> - }
>>>>>>>> -
>>>>>>>> - if (set) {
>>>>>>>> - i = 0;
>>>>>>>> - for_each_online_cpu(cpu) {
>>>>>>>> - virtqueue_set_affinity(vi->rq[i].vq, cpu);
>>>>>>>> - virtqueue_set_affinity(vi->sq[i].vq, cpu);
>>>>>>>> - *per_cpu_ptr(vi->vq_index, cpu) = i;
>>>>>>>> - i++;
>>>>>>>> - }
>>>>>>>> -
>>>>>>>> - vi->affinity_hint_set = true;
>>>>>>>> - } else {
>>>>>>>> - for(i = 0; i < vi->max_queue_pairs; i++) {
>>>>>>>> + if (vi->affinity_hint_set) {
>>>>>>>> + for (i = 0; i < vi->max_queue_pairs; i++) {
>>>>>>>> virtqueue_set_affinity(vi->rq[i].vq, -1);
>>>>>>>> virtqueue_set_affinity(vi->sq[i].vq, -1);
>>>>>>>> }
>>>>>>>>
>>>>>>>> i = 0;
>>>>>>>> - for_each_online_cpu(cpu)
>>>>>>>> + for_each_online_cpu(cpu) {
>>>>>>>> + if (cpu == hcpu)
>>>>>>>> + continue;
>>>>>>>> *per_cpu_ptr(vi->vq_index, cpu) =
>>>>>>>> ++i % vi->curr_queue_pairs;
>>>>>>>> + }
>>>>>>>>
>>>>>>> Some questions here:
>>>>>>>
>>>>>>> - Do we need to reset the affinity of the queue here, like this?
>>>>>>>
>>>>>>> virtqueue_set_affinity(vi->sq[*per_cpu_ptr(vi->vq_index, hcpu)].vq, -1);
>>>>>>> virtqueue_set_affinity(vi->rq[*per_cpu_ptr(vi->vq_index, hcpu)].vq, -1);
>>>>>> I think not; we are going to unset the affinity of all the set queues,
>>>>>> including hcpu.
>>>>>>
>>>>>>> - Looks like we also need to reset the percpu index when
>>>>>>> vi->affinity_hint_set is false.
>>>>>> Yes, I'll follow this and the comment on [1/3].
>>>>>>
>>>>>>> - Do we really need this reset? Consider that we're going to reset the
>>>>>>> percpu index in CPU_DEAD?
>>>>>> I think resetting at CPU_DOWN_PREPARE can avoid selecting the wrong queue
>>>>>> on the dying CPU.
>>>>> I didn't understand this. What does 'wrong queue' mean here? It looks like
>>>>> you didn't change the preferable queue of the dying CPU and just changed
>>>>> all the others.
>>>> How about setting the vq index to -1 on hcpu when doing DOWN_PREPARE,
>>>> so that it selects txq 0 when the CPU is dying?
>>> Looks safe. So it seems what you're trying to solve here is the race
>>> between cpu hotplug and virtnet_set_channels(). A possibly better
>>> solution is to serialize them by also protecting virtnet_set_queues()
>>> with get_online_cpus(). After this, we can make sure the number of
>>> channels is not changed during cpu hotplug, so there looks to be no
>>> need to reset the preferable queues in DOWN_PREPARE.
>>>
>>> What's your opinion?
>> IMHO, serializing every time will take the lock and may slow down this
>> path, while the hot-unplug path is much colder. So I prefer resetting the
>> preferable queues in DOWN_PREPARE rather than serializing them. Agree?
>
> I think it's ok since we're in the control path. And the point is that
> while you're trying to reset the affinity / preferable queues in the cpu
> hotplug callback, there may be another request in virtnet_set_channels()
> changing the number of queues, so the result of cpus == queues may be
> out of date. Anyway, you need some synchronization.
Agreed; I will add {get|put}_online_cpus to serialize this, thank you.
Regards,
Wanlong Gao
>
>>
>> Thanks,
>> Wanlong Gao
>>
>>> Thanks
>>>> Thanks,
>>>> Wanlong Gao
>>>>
>>>>>> Thanks,
>>>>>> Wanlong Gao
>>>>>>
>>>>>>> Thanks
>>>>>>>> vi->affinity_hint_set = false;
>>>>>>>> }
>>>>>>>> }
>>>>>>>>
>>>>>>>> +static void virtnet_set_affinity(struct virtnet_info *vi)
>>>>>>>> +{
>>>>>>>> + int i;
>>>>>>>> + int cpu;
>>>>>>>> +
>>>>>>>> + /* In multiqueue mode, when the number of cpu is equal to the number of
>>>>>>>> + * queue pairs, we let the queue pairs to be private to one cpu by
>>>>>>>> + * setting the affinity hint to eliminate the contention.
>>>>>>>> + */
>>>>>>>> + if (vi->curr_queue_pairs == 1 ||
>>>>>>>> + vi->max_queue_pairs != num_online_cpus()) {
>>>>>>>> + if (vi->affinity_hint_set)
>>>>>>>> + virtnet_clean_affinity(vi, -1);
>>>>>>>> + else
>>>>>>>> + return;
>>>>>>>> + }
>>>>>>>> +
>>>>>>>> + i = 0;
>>>>>>>> + for_each_online_cpu(cpu) {
>>>>>>>> + virtqueue_set_affinity(vi->rq[i].vq, cpu);
>>>>>>>> + virtqueue_set_affinity(vi->sq[i].vq, cpu);
>>>>>>>> + *per_cpu_ptr(vi->vq_index, cpu) = i;
>>>>>>>> + i++;
>>>>>>>> + }
>>>>>>>> +
>>>>>>>> + vi->affinity_hint_set = true;
>>>>>>>> +}
>>>>>>>> +
>>>>>>>> static void virtnet_get_ringparam(struct net_device *dev,
>>>>>>>> struct ethtool_ringparam *ring)
>>>>>>>> {
>>>>>>>> @@ -1105,7 +1114,7 @@ static int virtnet_set_channels(struct net_device *dev,
>>>>>>>> netif_set_real_num_rx_queues(dev, queue_pairs);
>>>>>>>>
>>>>>>>> get_online_cpus();
>>>>>>>> - virtnet_set_affinity(vi, true);
>>>>>>>> + virtnet_set_affinity(vi);
>>>>>>>> put_online_cpus();
>>>>>>>> }
>>>>>>>>
>>>>>>>> @@ -1274,7 +1283,7 @@ static void virtnet_del_vqs(struct virtnet_info *vi)
>>>>>>>> {
>>>>>>>> struct virtio_device *vdev = vi->vdev;
>>>>>>>>
>>>>>>>> - virtnet_set_affinity(vi, false);
>>>>>>>> + virtnet_clean_affinity(vi, -1);
>>>>>>>>
>>>>>>>> vdev->config->del_vqs(vdev);
>>>>>>>>
>>>>>>>> @@ -1398,7 +1407,7 @@ static int init_vqs(struct virtnet_info *vi)
>>>>>>>> goto err_free;
>>>>>>>>
>>>>>>>> get_online_cpus();
>>>>>>>> - virtnet_set_affinity(vi, true);
>>>>>>>> + virtnet_set_affinity(vi);
>>>>>>>> put_online_cpus();
>>>>>>>>
>>>>>>>> return 0;
>>>
>
>
* [PATCH V6 3/3] virtio-net: reset virtqueue affinity when doing cpu hotplug
2013-01-21 11:25 [PATCH V6 1/3] virtio-net: fix the set affinity bug when CPU IDs are not consecutive Wanlong Gao
2013-01-21 11:25 ` [PATCH V6 2/3] virtio-net: split out clean affinity function Wanlong Gao
@ 2013-01-21 11:25 ` Wanlong Gao
2013-01-22 1:12 ` [PATCH V6 1/3] virtio-net: fix the set affinity bug when CPU IDs are not consecutive Rusty Russell
` (2 subsequent siblings)
4 siblings, 0 replies; 15+ messages in thread
From: Wanlong Gao @ 2013-01-21 11:25 UTC (permalink / raw)
To: linux-kernel
Cc: Rusty Russell, Michael S. Tsirkin, Jason Wang, Eric Dumazet,
virtualization, netdev, Wanlong Gao
Add a cpu notifier to virtio-net, so that we can reset the
virtqueue affinity when cpu hotplug happens. This improves
performance by re-setting or cleaning the virtqueue affinity
after a cpu is added or removed.
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: "Michael S. Tsirkin" <mst@redhat.com>
Cc: Jason Wang <jasowang@redhat.com>
Cc: Eric Dumazet <erdnetdev@gmail.com>
Cc: virtualization@lists.linux-foundation.org
Cc: netdev@vger.kernel.org
Signed-off-by: Wanlong Gao <gaowanlong@cn.fujitsu.com>
---
V5->V6:
deal with CPU_DOWN_PREPARE separately by just cleaning affinity
V4->V5:
New method to deal with the cpu hotplug actions (Rusty)
drivers/net/virtio_net.c | 33 +++++++++++++++++++++++++++++++++
1 file changed, 33 insertions(+)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 1a35a8c..1d93999 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -26,6 +26,7 @@
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
+#include <linux/cpu.h>
static int napi_weight = 128;
module_param(napi_weight, int, 0444);
@@ -126,6 +127,9 @@ struct virtnet_info {
/* Per-cpu variable to show the mapping from CPU to virtqueue */
int __percpu *vq_index;
+
+ /* CPU hot plug notifier */
+ struct notifier_block nb;
};
struct skb_vnet_hdr {
@@ -1067,6 +1071,26 @@ static void virtnet_set_affinity(struct virtnet_info *vi)
vi->affinity_hint_set = true;
}
+static int virtnet_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ struct virtnet_info *vi = container_of(nfb, struct virtnet_info, nb);
+
+ switch(action & ~CPU_TASKS_FROZEN) {
+ case CPU_ONLINE:
+ case CPU_DOWN_FAILED:
+ case CPU_DEAD:
+ virtnet_set_affinity(vi);
+ break;
+ case CPU_DOWN_PREPARE:
+ virtnet_clean_affinity(vi, (long)hcpu);
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_OK;
+}
+
static void virtnet_get_ringparam(struct net_device *dev,
struct ethtool_ringparam *ring)
{
@@ -1541,6 +1565,13 @@ static int virtnet_probe(struct virtio_device *vdev)
}
}
+ vi->nb.notifier_call = &virtnet_cpu_callback;
+ err = register_hotcpu_notifier(&vi->nb);
+ if (err) {
+ pr_debug("virtio_net: registering cpu notifier failed\n");
+ goto free_recv_bufs;
+ }
+
/* Assume link up if device can't report link status,
otherwise get link status from config. */
if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
@@ -1587,6 +1618,8 @@ static void virtnet_remove(struct virtio_device *vdev)
{
struct virtnet_info *vi = vdev->priv;
+ unregister_hotcpu_notifier(&vi->nb);
+
/* Prevent config work handler from accessing the device. */
mutex_lock(&vi->config_lock);
vi->config_enable = false;
--
1.8.1
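For a single CPU unplug, the callback above runs twice: at CPU_DOWN_PREPARE,
while the CPU is still online, virtnet_clean_affinity() drops all affinity
hints and re-spreads the per-cpu vq_index over the remaining CPUs, skipping
the dying one; at CPU_DEAD, virtnet_set_affinity() either re-establishes the
1:1 hints (when the queue pairs again match the online CPUs) or leaves the
round-robin fallback in place. CPU_ONLINE and CPU_DOWN_FAILED likewise re-run
virtnet_set_affinity() once the online set is stable again.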
* Re: [PATCH V6 1/3] virtio-net: fix the set affinity bug when CPU IDs are not consecutive
2013-01-21 11:25 [PATCH V6 1/3] virtio-net: fix the set affinity bug when CPU IDs are not consecutive Wanlong Gao
2013-01-21 11:25 ` [PATCH V6 2/3] virtio-net: split out clean affinity function Wanlong Gao
2013-01-21 11:25 ` [PATCH V6 3/3] virtio-net: reset virtqueue affinity when doing cpu hotplug Wanlong Gao
@ 2013-01-22 1:12 ` Rusty Russell
2013-01-24 2:28 ` Wanlong Gao
2013-01-24 17:19 ` Michael S. Tsirkin
2013-01-25 3:26 ` Jason Wang
4 siblings, 1 reply; 15+ messages in thread
From: Rusty Russell @ 2013-01-22 1:12 UTC (permalink / raw)
To: Wanlong Gao, linux-kernel
Cc: Michael S. Tsirkin, Jason Wang, Eric Dumazet, virtualization,
netdev, Wanlong Gao
Wanlong Gao <gaowanlong@cn.fujitsu.com> writes:
> As Michael mentioned, set affinity and select queue will not work very
> well when CPU IDs are not consecutive; this can happen with hot unplug.
> Fix this bug by traversing the online CPUs, and create a per-cpu variable
> to find the mapping from CPU to the preferable virtqueue.
This series looks fairly sane at a glance, to me, but MST is the Ack you
need.
Thanks,
Rusty.
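To make the quoted bug concrete: the pre-patch loop (visible as the
removed lines in the diff quoted later in this thread) pinned queue i to
CPU i, which silently assumes the online CPU IDs are 0..n-1. A sketch of
what goes wrong, assuming CPU 1 has been hot-unplugged so the online set
is {0, 2, 3}:

    /* Pre-patch affinity loop, sketched: the queue index doubles as
     * the CPU ID. */
    for (i = 0; i < vi->max_queue_pairs; i++) {
            virtqueue_set_affinity(vi->rq[i].vq, i); /* i may be offline */
            virtqueue_set_affinity(vi->sq[i].vq, i);
    }
    /* With online CPUs {0, 2, 3} and 3 queue pairs: queue 1 is hinted
     * at the offline CPU 1, CPUs 2 and 3 get no dedicated queue, and
     * the tx path wraps smp_processor_id() == 3 back onto queue 0,
     * so the intended one-CPU-per-queue-pair pairing is lost. */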
* Re: [PATCH V6 1/3] virtio-net: fix the set affinity bug when CPU IDs are not consecutive
2013-01-22 1:12 ` [PATCH V6 1/3] virtio-net: fix the set affinity bug when CPU IDs are not consecutive Rusty Russell
@ 2013-01-24 2:28 ` Wanlong Gao
0 siblings, 0 replies; 15+ messages in thread
From: Wanlong Gao @ 2013-01-24 2:28 UTC (permalink / raw)
To: Michael S. Tsirkin
Cc: Rusty Russell, linux-kernel, Jason Wang, Eric Dumazet,
virtualization, netdev
On 01/22/2013 09:12 AM, Rusty Russell wrote:
> Wanlong Gao <gaowanlong@cn.fujitsu.com> writes:
>
>> As Michael mentioned, set affinity and select queue will not work very
>> well when CPU IDs are not consecutive; this can happen with hot unplug.
>> Fix this bug by traversing the online CPUs, and create a per-cpu variable
>> to find the mapping from CPU to the preferable virtqueue.
>
> This series looks fairly sane at a glance, to me, but MST is the Ack you
> need.
Hi Michael,
Any comments?
Thanks,
Wanlong Gao
>
> Thanks,
> Rusty.
>
* Re: [PATCH V6 1/3] virtio-net: fix the set affinity bug when CPU IDs are not consecutive
2013-01-21 11:25 [PATCH V6 1/3] virtio-net: fix the set affinity bug when CPU IDs are not consecutive Wanlong Gao
` (2 preceding siblings ...)
2013-01-22 1:12 ` [PATCH V6 1/3] virtio-net: fix the set affinity bug when CPU IDs are not consecutive Rusty Russell
@ 2013-01-24 17:19 ` Michael S. Tsirkin
2013-01-25 3:26 ` Jason Wang
4 siblings, 0 replies; 15+ messages in thread
From: Michael S. Tsirkin @ 2013-01-24 17:19 UTC (permalink / raw)
To: Wanlong Gao; +Cc: netdev, linux-kernel, virtualization, Eric Dumazet
On Mon, Jan 21, 2013 at 07:25:22PM +0800, Wanlong Gao wrote:
> As Michael mentioned, set affinity and select queue will not work very
> well when CPU IDs are not consecutive; this can happen with hot unplug.
> Fix this bug by traversing the online CPUs, and create a per-cpu variable
> to find the mapping from CPU to the preferable virtqueue.
>
> Cc: Rusty Russell <rusty@rustcorp.com.au>
> Cc: "Michael S. Tsirkin" <mst@redhat.com>
> Cc: Jason Wang <jasowang@redhat.com>
> Cc: Eric Dumazet <erdnetdev@gmail.com>
> Cc: virtualization@lists.linux-foundation.org
> Cc: netdev@vger.kernel.org
> Signed-off-by: Wanlong Gao <gaowanlong@cn.fujitsu.com>
For the series:
Acked-by: Michael S. Tsirkin <mst@redhat.com>
> ---
> V5->V6:
> remove {get|put}_online_cpus from virtnet_del_vqs (Jason)
> V4->V5:
> Add get/put_online_cpus to avoid CPUs go up and down during operations (Rusty)
>
> V3->V4:
> move vq_index into virtnet_info (Jason)
> change the mapping value when not setting affinity (Jason)
> address the comments about select_queue (Rusty)
>
> drivers/net/virtio_net.c | 58 +++++++++++++++++++++++++++++++++++++++---------
> 1 file changed, 47 insertions(+), 11 deletions(-)
>
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index a6fcf15..70cd957 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -123,6 +123,9 @@ struct virtnet_info {
>
> /* Does the affinity hint is set for virtqueues? */
> bool affinity_hint_set;
> +
> + /* Per-cpu variable to show the mapping from CPU to virtqueue */
> + int __percpu *vq_index;
> };
>
> struct skb_vnet_hdr {
> @@ -1016,6 +1019,7 @@ static int virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
> static void virtnet_set_affinity(struct virtnet_info *vi, bool set)
> {
> int i;
> + int cpu;
>
> /* In multiqueue mode, when the number of cpu is equal to the number of
> * queue pairs, we let the queue pairs to be private to one cpu by
> @@ -1029,16 +1033,29 @@ static void virtnet_set_affinity(struct virtnet_info *vi, bool set)
> return;
> }
>
> - for (i = 0; i < vi->max_queue_pairs; i++) {
> - int cpu = set ? i : -1;
> - virtqueue_set_affinity(vi->rq[i].vq, cpu);
> - virtqueue_set_affinity(vi->sq[i].vq, cpu);
> - }
> + if (set) {
> + i = 0;
> + for_each_online_cpu(cpu) {
> + virtqueue_set_affinity(vi->rq[i].vq, cpu);
> + virtqueue_set_affinity(vi->sq[i].vq, cpu);
> + *per_cpu_ptr(vi->vq_index, cpu) = i;
> + i++;
> + }
>
> - if (set)
> vi->affinity_hint_set = true;
> - else
> + } else {
> + for(i = 0; i < vi->max_queue_pairs; i++) {
> + virtqueue_set_affinity(vi->rq[i].vq, -1);
> + virtqueue_set_affinity(vi->sq[i].vq, -1);
> + }
> +
> + i = 0;
> + for_each_online_cpu(cpu)
> + *per_cpu_ptr(vi->vq_index, cpu) =
> + ++i % vi->curr_queue_pairs;
> +
> vi->affinity_hint_set = false;
> + }
> }
>
> static void virtnet_get_ringparam(struct net_device *dev,
> @@ -1087,7 +1104,9 @@ static int virtnet_set_channels(struct net_device *dev,
> netif_set_real_num_tx_queues(dev, queue_pairs);
> netif_set_real_num_rx_queues(dev, queue_pairs);
>
> + get_online_cpus();
> virtnet_set_affinity(vi, true);
> + put_online_cpus();
> }
>
> return err;
> @@ -1127,12 +1146,19 @@ static int virtnet_change_mtu(struct net_device *dev, int new_mtu)
>
> /* To avoid contending a lock hold by a vcpu who would exit to host, select the
> * txq based on the processor id.
> - * TODO: handle cpu hotplug.
> */
> static u16 virtnet_select_queue(struct net_device *dev, struct sk_buff *skb)
> {
> - int txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) :
> - smp_processor_id();
> + int txq;
> + struct virtnet_info *vi = netdev_priv(dev);
> +
> + if (skb_rx_queue_recorded(skb)) {
> + txq = skb_get_rx_queue(skb);
> + } else {
> + txq = *__this_cpu_ptr(vi->vq_index);
> + if (txq == -1)
> + txq = 0;
> + }
>
> while (unlikely(txq >= dev->real_num_tx_queues))
> txq -= dev->real_num_tx_queues;
> @@ -1371,7 +1397,10 @@ static int init_vqs(struct virtnet_info *vi)
> if (ret)
> goto err_free;
>
> + get_online_cpus();
> virtnet_set_affinity(vi, true);
> + put_online_cpus();
> +
> return 0;
>
> err_free:
> @@ -1453,6 +1482,10 @@ static int virtnet_probe(struct virtio_device *vdev)
> if (vi->stats == NULL)
> goto free;
>
> + vi->vq_index = alloc_percpu(int);
> + if (vi->vq_index == NULL)
> + goto free_stats;
> +
> mutex_init(&vi->config_lock);
> vi->config_enable = true;
> INIT_WORK(&vi->config_work, virtnet_config_changed_work);
> @@ -1476,7 +1509,7 @@ static int virtnet_probe(struct virtio_device *vdev)
> /* Allocate/initialize the rx/tx queues, and invoke find_vqs */
> err = init_vqs(vi);
> if (err)
> - goto free_stats;
> + goto free_index;
>
> netif_set_real_num_tx_queues(dev, 1);
> netif_set_real_num_rx_queues(dev, 1);
> @@ -1520,6 +1553,8 @@ free_recv_bufs:
> free_vqs:
> cancel_delayed_work_sync(&vi->refill);
> virtnet_del_vqs(vi);
> +free_index:
> + free_percpu(vi->vq_index);
> free_stats:
> free_percpu(vi->stats);
> free:
> @@ -1554,6 +1589,7 @@ static void virtnet_remove(struct virtio_device *vdev)
>
> flush_work(&vi->config_work);
>
> + free_percpu(vi->vq_index);
> free_percpu(vi->stats);
> free_netdev(vi->dev);
> }
> --
> 1.8.1
* Re: [PATCH V6 1/3] virtio-net: fix the set affinity bug when CPU IDs are not consecutive
2013-01-21 11:25 [PATCH V6 1/3] virtio-net: fix the set affinity bug when CPU IDs are not consecutive Wanlong Gao
` (3 preceding siblings ...)
2013-01-24 17:19 ` Michael S. Tsirkin
@ 2013-01-25 3:26 ` Jason Wang
4 siblings, 0 replies; 15+ messages in thread
From: Jason Wang @ 2013-01-25 3:26 UTC (permalink / raw)
To: Wanlong Gao
Cc: Michael S. Tsirkin, netdev, linux-kernel, virtualization,
Eric Dumazet
On 01/21/2013 07:25 PM, Wanlong Gao wrote:
> As Michael mentioned, set affinity and select queue will not work very
> well when CPU IDs are not consecutive; this can happen with hot unplug.
> Fix this bug by traversing the online CPUs, and create a per-cpu variable
> to find the mapping from CPU to the preferable virtqueue.
>
> Cc: Rusty Russell <rusty@rustcorp.com.au>
> Cc: "Michael S. Tsirkin" <mst@redhat.com>
> Cc: Jason Wang <jasowang@redhat.com>
> Cc: Eric Dumazet <erdnetdev@gmail.com>
> Cc: virtualization@lists.linux-foundation.org
> Cc: netdev@vger.kernel.org
> Signed-off-by: Wanlong Gao <gaowanlong@cn.fujitsu.com>
> ---
> V5->V6:
> remove {get|put}_online_cpus from virtnet_del_vqs (Jason)
> V4->V5:
> Add get/put_online_cpus to avoid CPUs go up and down during operations (Rusty)
>
> V3->V4:
> move vq_index into virtnet_info (Jason)
> change the mapping value when not setting affinity (Jason)
> address the comments about select_queue (Rusty)
>
> drivers/net/virtio_net.c | 58 +++++++++++++++++++++++++++++++++++++++---------
> 1 file changed, 47 insertions(+), 11 deletions(-)
>
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index a6fcf15..70cd957 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -123,6 +123,9 @@ struct virtnet_info {
>
> /* Does the affinity hint is set for virtqueues? */
> bool affinity_hint_set;
> +
> + /* Per-cpu variable to show the mapping from CPU to virtqueue */
> + int __percpu *vq_index;
> };
>
> struct skb_vnet_hdr {
> @@ -1016,6 +1019,7 @@ static int virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
> static void virtnet_set_affinity(struct virtnet_info *vi, bool set)
> {
> int i;
> + int cpu;
>
> /* In multiqueue mode, when the number of cpu is equal to the number of
> * queue pairs, we let the queue pairs to be private to one cpu by
> @@ -1029,16 +1033,29 @@ static void virtnet_set_affinity(struct virtnet_info *vi, bool set)
> return;
> }
>
> - for (i = 0; i < vi->max_queue_pairs; i++) {
> - int cpu = set ? i : -1;
> - virtqueue_set_affinity(vi->rq[i].vq, cpu);
> - virtqueue_set_affinity(vi->sq[i].vq, cpu);
> - }
> + if (set) {
> + i = 0;
> + for_each_online_cpu(cpu) {
> + virtqueue_set_affinity(vi->rq[i].vq, cpu);
> + virtqueue_set_affinity(vi->sq[i].vq, cpu);
> + *per_cpu_ptr(vi->vq_index, cpu) = i;
> + i++;
> + }
>
> - if (set)
> vi->affinity_hint_set = true;
> - else
> + } else {
> + for(i = 0; i < vi->max_queue_pairs; i++) {
> + virtqueue_set_affinity(vi->rq[i].vq, -1);
> + virtqueue_set_affinity(vi->sq[i].vq, -1);
> + }
> +
> + i = 0;
> + for_each_online_cpu(cpu)
> + *per_cpu_ptr(vi->vq_index, cpu) =
> + ++i % vi->curr_queue_pairs;
> +
> vi->affinity_hint_set = false;
> + }
> }
This looks wrong: since you always choose the txq based on the per-cpu
index, I think the per-cpu index should be set unconditionally, even
when cpus != queues. Consider that you may boot a guest with 4 vcpus
and 2 queues; you need to initialize it in that case as well.
Otherwise, txq 0 may always be selected.
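Concretely, something like this hypothetical variant (a sketch of the
suggestion, not code from the posted series) would populate the map for
every online CPU regardless of whether the 1:1 affinity hint is in
effect:

    /* Hypothetical: always give each online CPU a round-robin txq, so
     * a guest with 4 vcpus and 2 queue pairs maps CPUs to queues
     * 0,1,0,1 instead of leaving the index unset and collapsing onto
     * txq 0. */
    i = 0;
    for_each_online_cpu(cpu)
            *per_cpu_ptr(vi->vq_index, cpu) = i++ % vi->curr_queue_pairs;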
>
> static void virtnet_get_ringparam(struct net_device *dev,
> @@ -1087,7 +1104,9 @@ static int virtnet_set_channels(struct net_device *dev,
> netif_set_real_num_tx_queues(dev, queue_pairs);
> netif_set_real_num_rx_queues(dev, queue_pairs);
>
> + get_online_cpus();
> virtnet_set_affinity(vi, true);
> + put_online_cpus();
> }
>
> return err;
> @@ -1127,12 +1146,19 @@ static int virtnet_change_mtu(struct net_device *dev, int new_mtu)
>
> /* To avoid contending a lock hold by a vcpu who would exit to host, select the
> * txq based on the processor id.
> - * TODO: handle cpu hotplug.
> */
> static u16 virtnet_select_queue(struct net_device *dev, struct sk_buff *skb)
> {
> - int txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) :
> - smp_processor_id();
> + int txq;
> + struct virtnet_info *vi = netdev_priv(dev);
> +
> + if (skb_rx_queue_recorded(skb)) {
> + txq = skb_get_rx_queue(skb);
> + } else {
> + txq = *__this_cpu_ptr(vi->vq_index);
> + if (txq == -1)
> + txq = 0;
> + }
>
> while (unlikely(txq >= dev->real_num_tx_queues))
> txq -= dev->real_num_tx_queues;
> @@ -1371,7 +1397,10 @@ static int init_vqs(struct virtnet_info *vi)
> if (ret)
> goto err_free;
>
> + get_online_cpus();
> virtnet_set_affinity(vi, true);
> + put_online_cpus();
> +
> return 0;
>
> err_free:
> @@ -1453,6 +1482,10 @@ static int virtnet_probe(struct virtio_device *vdev)
> if (vi->stats == NULL)
> goto free;
>
> + vi->vq_index = alloc_percpu(int);
> + if (vi->vq_index == NULL)
> + goto free_stats;
> +
> mutex_init(&vi->config_lock);
> vi->config_enable = true;
> INIT_WORK(&vi->config_work, virtnet_config_changed_work);
> @@ -1476,7 +1509,7 @@ static int virtnet_probe(struct virtio_device *vdev)
> /* Allocate/initialize the rx/tx queues, and invoke find_vqs */
> err = init_vqs(vi);
> if (err)
> - goto free_stats;
> + goto free_index;
>
> netif_set_real_num_tx_queues(dev, 1);
> netif_set_real_num_rx_queues(dev, 1);
> @@ -1520,6 +1553,8 @@ free_recv_bufs:
> free_vqs:
> cancel_delayed_work_sync(&vi->refill);
> virtnet_del_vqs(vi);
> +free_index:
> + free_percpu(vi->vq_index);
> free_stats:
> free_percpu(vi->stats);
> free:
> @@ -1554,6 +1589,7 @@ static void virtnet_remove(struct virtio_device *vdev)
>
> flush_work(&vi->config_work);
>
> + free_percpu(vi->vq_index);
> free_percpu(vi->stats);
> free_netdev(vi->dev);
> }