From: Christian Borntraeger
Subject: Re: [Qemu-devel] [PATCH] hw/net/virtio-net: Allocating Large sized arrays to heap
Date: Tue, 26 Apr 2016 10:49:15 +0200
Message-ID: <571F2B8B.8030505@de.ibm.com>
In-Reply-To: <1461657924-1933-1-git-send-email-zhoujie2011@cn.fujitsu.com>
References: <1461657924-1933-1-git-send-email-zhoujie2011@cn.fujitsu.com>
To: Zhou Jie, qemu-devel@nongnu.org
Cc: qemu-trivial@nongnu.org, mst@redhat.com

On 04/26/2016 10:05 AM, Zhou Jie wrote:
> virtio_net_flush_tx has a huge stack usage of 16392 bytes approx.
> Moving large arrays to heap to reduce stack usage.
>
> Signed-off-by: Zhou Jie
> ---
>  hw/net/virtio-net.c | 15 +++++++++++----
>  1 file changed, 11 insertions(+), 4 deletions(-)
>
> diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
> index 5798f87..cab7bbc 100644
> --- a/hw/net/virtio-net.c
> +++ b/hw/net/virtio-net.c
> @@ -1213,6 +1213,7 @@ static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
>      VirtIONet *n = q->n;
>      VirtIODevice *vdev = VIRTIO_DEVICE(n);
>      VirtQueueElement *elem;
> +    struct iovec *sg = NULL, *sg2 = NULL;
>      int32_t num_packets = 0;
>      int queue_index = vq2q(virtio_get_queue_index(q->tx_vq));
>      if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
> @@ -1224,10 +1225,12 @@ static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
>          return num_packets;
>      }
>
> +    sg = g_new(struct iovec, VIRTQUEUE_MAX_SIZE);
> +    sg2 = g_new(struct iovec, VIRTQUEUE_MAX_SIZE + 1);

As I said in another mail, 16k is usually perfectly fine for a userspace
stack, and doing allocations in a hot path might actually hurt performance.
Unless we have a real problem (e.g. a very long call stack on a small
thread stack) I would prefer not to change this. Have you seen a real
problem caused by this?
>      for (;;) {
>          ssize_t ret;
>          unsigned int out_num;
> -        struct iovec sg[VIRTQUEUE_MAX_SIZE], sg2[VIRTQUEUE_MAX_SIZE + 1], *out_sg;
> +        struct iovec *out_sg;
>          struct virtio_net_hdr_mrg_rxbuf mhdr;
>
>          elem = virtqueue_pop(q->tx_vq, sizeof(VirtQueueElement));
> @@ -1252,7 +1255,7 @@ static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
>                  virtio_net_hdr_swap(vdev, (void *) &mhdr);
>                  sg2[0].iov_base = &mhdr;
>                  sg2[0].iov_len = n->guest_hdr_len;
> -                out_num = iov_copy(&sg2[1], ARRAY_SIZE(sg2) - 1,
> +                out_num = iov_copy(&sg2[1], VIRTQUEUE_MAX_SIZE,
>                                     out_sg, out_num,
>                                     n->guest_hdr_len, -1);
>                  if (out_num == VIRTQUEUE_MAX_SIZE) {
> @@ -1269,10 +1272,10 @@
>           */
>          assert(n->host_hdr_len <= n->guest_hdr_len);
>          if (n->host_hdr_len != n->guest_hdr_len) {
> -            unsigned sg_num = iov_copy(sg, ARRAY_SIZE(sg),
> +            unsigned sg_num = iov_copy(sg, VIRTQUEUE_MAX_SIZE,
>                                         out_sg, out_num,
>                                         0, n->host_hdr_len);
> -            sg_num += iov_copy(sg + sg_num, ARRAY_SIZE(sg) - sg_num,
> +            sg_num += iov_copy(sg + sg_num, VIRTQUEUE_MAX_SIZE - sg_num,
>                                out_sg, out_num,
>                                n->guest_hdr_len, -1);
>              out_num = sg_num;
> @@ -1284,6 +1287,8 @@ static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
>          if (ret == 0) {
>              virtio_queue_set_notification(q->tx_vq, 0);
>              q->async_tx.elem = elem;
> +            g_free(sg);
> +            g_free(sg2);
>              return -EBUSY;
>          }
>
> @@ -1296,6 +1301,8 @@ drop:
>              break;
>          }
>      }
> +    g_free(sg);
> +    g_free(sg2);
>      return num_packets;
>  }
>
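For reference, a minimal standalone sketch (not QEMU code, just an illustration)
of where the 16k figure comes from, assuming VIRTQUEUE_MAX_SIZE is 1024 and a
64-bit host where struct iovec is 16 bytes:

#include <stdio.h>
#include <sys/uio.h>

#define VIRTQUEUE_MAX_SIZE 1024   /* assumed value, matches QEMU's definition */

int main(void)
{
    /* sizes of the two scratch arrays used in virtio_net_flush_tx() */
    size_t sg_bytes  = sizeof(struct iovec[VIRTQUEUE_MAX_SIZE]);
    size_t sg2_bytes = sizeof(struct iovec[VIRTQUEUE_MAX_SIZE + 1]);

    /* each array alone is roughly 16 KiB on a 64-bit host */
    printf("sg:  %zu bytes\n", sg_bytes);
    printf("sg2: %zu bytes\n", sg2_bytes);
    return 0;
}

So each scratch array is about 16 KiB, which is a large single stack frame but
still small compared to the default thread stack of several megabytes, which is
why I would only move it off the stack if there is a demonstrated problem.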