From mboxrd@z Thu Jan 1 00:00:00 1970 From: Rusty Russell Subject: Re: [PATCH v2] virtio-net: fill only rx queues which are being used Date: Mon, 29 Apr 2013 12:00:31 +0930 Message-ID: <87vc76cblk.fsf@rustcorp.com.au> References: <1366733864-32297-1-git-send-email-sasha.levin@oracle.com> Mime-Version: 1.0 Content-Type: text/plain; charset=us-ascii Cc: penberg@kernel.org, will.deacon@arm.com, marc.zyngier@arm.com, kvm@vger.kernel.org, asias@redhat.com, jasowang@redhat.com, Sasha Levin To: Sasha Levin , mst@redhat.com Return-path: Received: from ozlabs.org ([203.10.76.45]:49668 "EHLO ozlabs.org" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1756956Ab3D2DSV (ORCPT ); Sun, 28 Apr 2013 23:18:21 -0400 In-Reply-To: <1366733864-32297-1-git-send-email-sasha.levin@oracle.com> Sender: kvm-owner@vger.kernel.org List-ID: Sasha Levin writes: > Due to MQ support we may allocate a whole bunch of rx queues but > never use them. With this patch we'll save the space used by > the receive buffers until they are actually in use: Thanks, applied! Cheers, Rusty. 
> > sh-4.2# free -h > total used free shared buffers cached > Mem: 490M 35M 455M 0B 0B 4.1M > -/+ buffers/cache: 31M 459M > Swap: 0B 0B 0B > sh-4.2# ethtool -L eth0 combined 8 > sh-4.2# free -h > total used free shared buffers cached > Mem: 490M 162M 327M 0B 0B 4.1M > -/+ buffers/cache: 158M 331M > Swap: 0B 0B 0B > > Signed-off-by: Sasha Levin > --- > drivers/net/virtio_net.c | 15 ++++++++++----- > 1 file changed, 10 insertions(+), 5 deletions(-) > > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c > index 6bfc511..196e721 100644 > --- a/drivers/net/virtio_net.c > +++ b/drivers/net/virtio_net.c > @@ -581,7 +581,7 @@ static void refill_work(struct work_struct *work) > bool still_empty; > int i; > > - for (i = 0; i < vi->max_queue_pairs; i++) { > + for (i = 0; i < vi->curr_queue_pairs; i++) { > struct receive_queue *rq = &vi->rq[i]; > > napi_disable(&rq->napi); > @@ -636,7 +636,7 @@ static int virtnet_open(struct net_device *dev) > struct virtnet_info *vi = netdev_priv(dev); > int i; > > - for (i = 0; i < vi->max_queue_pairs; i++) { > + for (i = 0; i < vi->curr_queue_pairs; i++) { > /* Make sure we have some buffers: if oom use wq. 
*/ > if (!try_fill_recv(&vi->rq[i], GFP_KERNEL)) > schedule_delayed_work(&vi->refill, 0); > @@ -900,6 +900,7 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) > struct scatterlist sg; > struct virtio_net_ctrl_mq s; > struct net_device *dev = vi->dev; > + int i; > > if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ)) > return 0; > @@ -912,8 +913,12 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) > dev_warn(&dev->dev, "Fail to set num of queue pairs to %d\n", > queue_pairs); > return -EINVAL; > - } else > + } else { > + for (i = vi->curr_queue_pairs; i < queue_pairs; i++) > + if (!try_fill_recv(&vi->rq[i], GFP_KERNEL)) > + schedule_delayed_work(&vi->refill, 0); > vi->curr_queue_pairs = queue_pairs; > + } > > return 0; > } > @@ -1568,7 +1573,7 @@ static int virtnet_probe(struct virtio_device *vdev) > } > > /* Last of all, set up some receive buffers. */ > - for (i = 0; i < vi->max_queue_pairs; i++) { > + for (i = 0; i < vi->curr_queue_pairs; i++) { > try_fill_recv(&vi->rq[i], GFP_KERNEL); > > /* If we didn't even get one input buffer, we're useless. */ > @@ -1692,7 +1697,7 @@ static int virtnet_restore(struct virtio_device *vdev) > > netif_device_attach(vi->dev); > > - for (i = 0; i < vi->max_queue_pairs; i++) > + for (i = 0; i < vi->curr_queue_pairs; i++) > if (!try_fill_recv(&vi->rq[i], GFP_KERNEL)) > schedule_delayed_work(&vi->refill, 0); > > -- > 1.8.2.1