From mboxrd@z Thu Jan  1 00:00:00 1970
Received: from mailman by lists.gnu.org with tmda-scanned (Exim 4.43)
	id 1Lncbp-000165-Ck
	for qemu-devel@nongnu.org; Sat, 28 Mar 2009 13:46:17 -0400
Received: from exim by lists.gnu.org with spam-scanned (Exim 4.43)
	id 1Lncbo-00015R-GW
	for qemu-devel@nongnu.org; Sat, 28 Mar 2009 13:46:16 -0400
Received: from [199.232.76.173] (port=48773 helo=monty-python.gnu.org)
	by lists.gnu.org with esmtp (Exim 4.43)
	id 1Lncbo-00015E-9G
	for qemu-devel@nongnu.org; Sat, 28 Mar 2009 13:46:16 -0400
Received: from savannah.gnu.org ([199.232.41.3]:43365 helo=sv.gnu.org)
	by monty-python.gnu.org with esmtps (TLS-1.0:RSA_AES_256_CBC_SHA1:32)
	(Exim 4.60) (envelope-from ) id 1Lncbn-0000eg-Tc
	for qemu-devel@nongnu.org; Sat, 28 Mar 2009 13:46:16 -0400
Received: from cvs.savannah.gnu.org ([199.232.41.69]) by sv.gnu.org
	with esmtp (Exim 4.69) (envelope-from ) id 1Lncbn-0008JY-AL
	for qemu-devel@nongnu.org; Sat, 28 Mar 2009 17:46:15 +0000
Received: from aliguori by cvs.savannah.gnu.org with local (Exim 4.69)
	(envelope-from ) id 1Lncbm-0008JT-Um
	for qemu-devel@nongnu.org; Sat, 28 Mar 2009 17:46:15 +0000
MIME-Version: 1.0
Errors-To: aliguori
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
From: Anthony Liguori
Message-Id: 
Date: Sat, 28 Mar 2009 17:46:14 +0000
Subject: [Qemu-devel] [6903] virtio-blk: use generic vectored I/O APIs
	(Christoph Hellwig)
Reply-To: qemu-devel@nongnu.org
List-Id: qemu-devel.nongnu.org
List-Unsubscribe: ,
List-Archive: 
List-Post: 
List-Help: 
List-Subscribe: ,
To: qemu-devel@nongnu.org

Revision: 6903
          http://svn.sv.gnu.org/viewvc/?view=rev&root=qemu&revision=6903
Author:   aliguori
Date:     2009-03-28 17:46:14 +0000 (Sat, 28 Mar 2009)

Log Message:
-----------
virtio-blk: use generic vectored I/O APIs (Christoph Hellwig)

Use the generic bdrv_aio_readv/bdrv_aio_writev APIs instead of linearizing
buffers directly.  This enables using the future native preadv/pwritev
support.
Signed-off-by: Christoph Hellwig
Signed-off-by: Anthony Liguori

Modified Paths:
--------------
    trunk/hw/virtio-blk.c

Modified: trunk/hw/virtio-blk.c
===================================================================
--- trunk/hw/virtio-blk.c	2009-03-28 17:46:10 UTC (rev 6902)
+++ trunk/hw/virtio-blk.c	2009-03-28 17:46:14 UTC (rev 6903)
@@ -35,8 +35,7 @@
     VirtQueueElement elem;
     struct virtio_blk_inhdr *in;
     struct virtio_blk_outhdr *out;
-    size_t size;
-    uint8_t *buffer;
+    QEMUIOVector qiov;
     struct VirtIOBlockReq *next;
 } VirtIOBlockReq;
 
@@ -45,10 +44,9 @@
     VirtIOBlock *s = req->dev;
 
     req->in->status = status;
-    virtqueue_push(s->vq, &req->elem, req->size + sizeof(*req->in));
+    virtqueue_push(s->vq, &req->elem, req->qiov.size + sizeof(*req->in));
     virtio_notify(&s->vdev, s->vq);
 
-    qemu_free(req->buffer);
     qemu_free(req);
 }
 
@@ -76,24 +74,7 @@
 {
     VirtIOBlockReq *req = opaque;
 
-    /* Copy read data to the guest */
-    if (!ret && !(req->out->type & VIRTIO_BLK_T_OUT)) {
-        size_t offset = 0;
-        int i;
-
-        for (i = 0; i < req->elem.in_num - 1; i++) {
-            size_t len;
-
-            /* Be pretty defensive wrt malicious guests */
-            len = MIN(req->elem.in_sg[i].iov_len,
-                      req->size - offset);
-
-            memcpy(req->elem.in_sg[i].iov_base,
-                   req->buffer + offset,
-                   len);
-            offset += len;
-        }
-    } else if (ret && (req->out->type & VIRTIO_BLK_T_OUT)) {
+    if (ret && (req->out->type & VIRTIO_BLK_T_OUT)) {
         if (virtio_blk_handle_write_error(req, -ret))
             return;
     }
@@ -122,39 +103,16 @@
     return req;
 }
 
-static int virtio_blk_handle_write(VirtIOBlockReq *req)
+static void virtio_blk_handle_write(VirtIOBlockReq *req)
 {
-    if (!req->buffer) {
-        size_t offset = 0;
-        int i;
+    bdrv_aio_writev(req->dev->bs, req->out->sector, &req->qiov,
+                    req->qiov.size / 512, virtio_blk_rw_complete, req);
+}
 
-        for (i = 1; i < req->elem.out_num; i++)
-            req->size += req->elem.out_sg[i].iov_len;
-
-        req->buffer = qemu_memalign(512, req->size);
-        if (req->buffer == NULL) {
-            qemu_free(req);
-            return -1;
-        }
-
-        /* We copy the data from the SG list to avoid splitting up the request.
-           This helps performance a lot until we can pass full sg lists as AIO
-           operations */
-        for (i = 1; i < req->elem.out_num; i++) {
-            size_t len;
-
-            len = MIN(req->elem.out_sg[i].iov_len,
-                      req->size - offset);
-            memcpy(req->buffer + offset,
-                   req->elem.out_sg[i].iov_base,
-                   len);
-            offset += len;
-        }
-    }
-
-    bdrv_aio_write(req->dev->bs, req->out->sector, req->buffer, req->size / 512,
-                   virtio_blk_rw_complete, req);
-    return 0;
+static void virtio_blk_handle_read(VirtIOBlockReq *req)
+{
+    bdrv_aio_readv(req->dev->bs, req->out->sector, &req->qiov,
+                   req->qiov.size / 512, virtio_blk_rw_complete, req);
 }
 
 static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
@@ -163,8 +121,6 @@
     VirtIOBlockReq *req;
     while ((req = virtio_blk_get_request(s))) {
-        int i;
-
         if (req->elem.out_num < 1 || req->elem.in_num < 1) {
             fprintf(stderr, "virtio-blk missing headers\n");
             exit(1);
         }
@@ -187,23 +143,13 @@
             virtio_notify(vdev, vq);
             qemu_free(req);
         } else if (req->out->type & VIRTIO_BLK_T_OUT) {
-            if (virtio_blk_handle_write(req) < 0)
-                break;
+            qemu_iovec_init_external(&req->qiov, &req->elem.out_sg[1],
+                                     req->elem.out_num - 1);
+            virtio_blk_handle_write(req);
         } else {
-            for (i = 0; i < req->elem.in_num - 1; i++)
-                req->size += req->elem.in_sg[i].iov_len;
-
-            req->buffer = qemu_memalign(512, req->size);
-            if (req->buffer == NULL) {
-                qemu_free(req);
-                break;
-            }
-
-            bdrv_aio_read(s->bs, req->out->sector,
-                          req->buffer,
-                          req->size / 512,
-                          virtio_blk_rw_complete,
-                          req);
+            qemu_iovec_init_external(&req->qiov, &req->elem.in_sg[0],
+                                     req->elem.in_num - 1);
+            virtio_blk_handle_read(req);
         }
     }
     /*
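For readers who want the idea in the log message above without the diff context, here is a minimal, self-contained sketch in plain POSIX C. It is NOT QEMU code: the file path, segment data, and NSEG constant are invented for illustration, and only write()/writev() and struct iovec are standard. It shows why a scatter-gather list can be submitted without the bounce buffer and memcpy() loops this commit removes.

/*
 * iovec-demo.c -- illustration only, NOT QEMU code.
 *
 * Contrast of the two submission styles touched by r6903: the old path
 * linearizes a scatter-gather list into one bounce buffer before a single
 * write(); the new path hands the segments to the kernel as a struct iovec
 * array via writev() (the userspace analogue of what vectored block-layer
 * APIs and native preadv/pwritev make possible inside QEMU).
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/uio.h>

#define NSEG 3

int main(void)
{
    /* Stand-ins for a request's out_sg[] segments (made-up data). */
    const char *seg[NSEG] = { "first segment ", "second segment ", "third segment\n" };
    struct iovec iov[NSEG];
    char *bounce;
    size_t total = 0, off = 0;
    int fd, i;

    fd = open("/tmp/iovec-demo.out", O_CREAT | O_TRUNC | O_WRONLY, 0644);
    if (fd < 0) {
        perror("open");
        return 1;
    }

    /* Old style: size the request, allocate a bounce buffer, copy every
     * segment into it, then issue one flat write(). */
    for (i = 0; i < NSEG; i++)
        total += strlen(seg[i]);
    bounce = malloc(total);
    for (i = 0; i < NSEG; i++) {
        memcpy(bounce + off, seg[i], strlen(seg[i]));
        off += strlen(seg[i]);
    }
    if (write(fd, bounce, total) < 0)
        perror("write");
    free(bounce);

    /* New style: describe the segments and let the kernel walk them --
     * no copy, no extra allocation, still one system call. */
    for (i = 0; i < NSEG; i++) {
        iov[i].iov_base = (void *)seg[i];
        iov[i].iov_len = strlen(seg[i]);
    }
    if (writev(fd, iov, NSEG) < 0)
        perror("writev");

    close(fd);
    return 0;
}

In the patch itself the equivalent mapping happens without any copy: qemu_iovec_init_external() points a QEMUIOVector at the request's existing in_sg/out_sg array, and bdrv_aio_readv()/bdrv_aio_writev() carry that vector down the block layer, which is what allows a later native preadv/pwritev backend to service the request in one vectored call, as the log message notes.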