* [Qemu-devel] [PATCH 0/2] virtio-blk: Switch to blk_aio_ioctl
From: Fam Zheng @ 2015-01-20 3:28 UTC
To: qemu-devel; +Cc: Kevin Wolf, Paolo Bonzini, Stefan Hajnoczi
Users have complained that the guest becomes unresponsive when an ioctl
blocks, for example because the connection to the backend is lost. This
series changes the SCSI request processing of virtio-blk to be
asynchronous.
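In short, the synchronous call in virtio_blk_handle_scsi_req(),

    status = blk_ioctl(blk->blk, SG_IO, &hdr);

becomes an asynchronous submission whose completion runs from a
callback (excerpted from patch 2, which has the full context):

    blk_aio_ioctl(blk->blk, SG_IO, &ioctl_req->hdr,
                  virtio_blk_ioctl_complete, ioctl_req);
    return -EINPROGRESS;

so the submitting thread no longer waits for the host ioctl to finish.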
Fam Zheng (2):
virtio-blk: Pass req to virtio_blk_handle_scsi_req
virtio-blk: Use blk_aio_ioctl
hw/block/virtio-blk.c | 134 ++++++++++++++++++++++++++---------------
include/hw/virtio/virtio-blk.h | 3 -
2 files changed, 84 insertions(+), 53 deletions(-)
--
1.9.3
* [Qemu-devel] [PATCH 1/2] virtio-blk: Pass req to virtio_blk_handle_scsi_req
From: Fam Zheng @ 2015-01-20 3:28 UTC
To: qemu-devel; +Cc: Kevin Wolf, Paolo Bonzini, Stefan Hajnoczi
This prepares for calling blk_aio_ioctl, whose completion callback will
need the whole request. Also make the function static, as no other file
needs it.
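Concretely (see the diff below), the prototype changes from

    int virtio_blk_handle_scsi_req(VirtIOBlock *blk,
                                   VirtQueueElement *elem);

to

    static int virtio_blk_handle_scsi_req(VirtIOBlockReq *req);

Both blk and elem stay reachable through req->dev and req->elem, and a
completion callback only needs the req pointer.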
Signed-off-by: Fam Zheng <famz@redhat.com>
---
hw/block/virtio-blk.c | 9 +++++----
include/hw/virtio/virtio-blk.h | 3 ---
2 files changed, 5 insertions(+), 7 deletions(-)
diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
index b19b102..60cb1d8 100644
--- a/hw/block/virtio-blk.c
+++ b/hw/block/virtio-blk.c
@@ -127,12 +127,13 @@ static VirtIOBlockReq *virtio_blk_get_request(VirtIOBlock *s)
return req;
}
-int virtio_blk_handle_scsi_req(VirtIOBlock *blk,
- VirtQueueElement *elem)
+static int virtio_blk_handle_scsi_req(VirtIOBlockReq *req)
{
int status = VIRTIO_BLK_S_OK;
struct virtio_scsi_inhdr *scsi = NULL;
- VirtIODevice *vdev = VIRTIO_DEVICE(blk);
+ VirtIODevice *vdev = VIRTIO_DEVICE(req->dev);
+ VirtQueueElement *elem = &req->elem;
+ VirtIOBlock *blk = req->dev;
#ifdef __linux__
int i;
@@ -252,7 +253,7 @@ static void virtio_blk_handle_scsi(VirtIOBlockReq *req)
{
int status;
- status = virtio_blk_handle_scsi_req(req->dev, &req->elem);
+ status = virtio_blk_handle_scsi_req(req);
virtio_blk_req_complete(req, status);
virtio_blk_free_request(req);
}
diff --git a/include/hw/virtio/virtio-blk.h b/include/hw/virtio/virtio-blk.h
index 3979dc4..4652b70 100644
--- a/include/hw/virtio/virtio-blk.h
+++ b/include/hw/virtio/virtio-blk.h
@@ -153,9 +153,6 @@ VirtIOBlockReq *virtio_blk_alloc_request(VirtIOBlock *s);
void virtio_blk_free_request(VirtIOBlockReq *req);
-int virtio_blk_handle_scsi_req(VirtIOBlock *blk,
- VirtQueueElement *elem);
-
void virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb);
void virtio_submit_multiwrite(BlockBackend *blk, MultiReqBuffer *mrb);
--
1.9.3
* [Qemu-devel] [PATCH 2/2] virtio-blk: Use blk_aio_ioctl
From: Fam Zheng @ 2015-01-20 3:28 UTC
To: qemu-devel; +Cc: Kevin Wolf, Paolo Bonzini, Stefan Hajnoczi
Use the asynchronous ioctl interface, so that a long-running ioctl no
longer makes the VM unresponsive.
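The handler now submits SG_IO with blk_aio_ioctl() and returns
-EINPROGRESS; the caller completes the request synchronously only for
other status values, while virtio_blk_ioctl_complete() finishes the
asynchronous case. In outline (from the diff below):

    status = virtio_blk_handle_scsi_req(req);
    if (status != -EINPROGRESS) {
        virtio_blk_req_complete(req, status);
        virtio_blk_free_request(req);
    }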
Signed-off-by: Fam Zheng <famz@redhat.com>
---
hw/block/virtio-blk.c | 125 +++++++++++++++++++++++++++++++-------------------
1 file changed, 79 insertions(+), 46 deletions(-)
diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
index 60cb1d8..4032fca 100644
--- a/hw/block/virtio-blk.c
+++ b/hw/block/virtio-blk.c
@@ -115,6 +115,56 @@ static void virtio_blk_flush_complete(void *opaque, int ret)
virtio_blk_free_request(req);
}
+#ifdef __linux__
+
+typedef struct {
+ VirtIOBlockReq *req;
+ struct sg_io_hdr hdr;
+} VirtIOBlockIoctlReq;
+
+static void virtio_blk_ioctl_complete(void *opaque, int status)
+{
+ VirtIOBlockIoctlReq *ioctl_req = opaque;
+ VirtIOBlockReq *req = ioctl_req->req;
+ VirtIODevice *vdev = VIRTIO_DEVICE(req->dev);
+ struct virtio_scsi_inhdr *scsi;
+ struct sg_io_hdr *hdr;
+
+ scsi = (void *)req->elem.in_sg[req->elem.in_num - 2].iov_base;
+
+ if (status) {
+ status = VIRTIO_BLK_S_UNSUPP;
+ virtio_stl_p(vdev, &scsi->errors, 255);
+ goto out;
+ }
+
+ hdr = &ioctl_req->hdr;
+ /*
+ * From SCSI-Generic-HOWTO: "Some lower level drivers (e.g. ide-scsi)
+ * clear the masked_status field [hence status gets cleared too, see
+ * block/scsi_ioctl.c] even when a CHECK_CONDITION or COMMAND_TERMINATED
+ * status has occurred. However they do set DRIVER_SENSE in driver_status
+ * field. Also a (sb_len_wr > 0) indicates there is a sense buffer.
+ */
+ if (hdr->status == 0 && hdr->sb_len_wr > 0) {
+ hdr->status = CHECK_CONDITION;
+ }
+
+ virtio_stl_p(vdev, &scsi->errors,
+ hdr->status | (hdr->msg_status << 8) |
+ (hdr->host_status << 16) | (hdr->driver_status << 24));
+ virtio_stl_p(vdev, &scsi->residual, hdr->resid);
+ virtio_stl_p(vdev, &scsi->sense_len, hdr->sb_len_wr);
+ virtio_stl_p(vdev, &scsi->data_len, hdr->dxfer_len);
+
+out:
+ virtio_blk_req_complete(req, status);
+ virtio_blk_free_request(req);
+ g_free(ioctl_req);
+}
+
+#endif
+
static VirtIOBlockReq *virtio_blk_get_request(VirtIOBlock *s)
{
VirtIOBlockReq *req = virtio_blk_alloc_request(s);
@@ -137,7 +187,7 @@ static int virtio_blk_handle_scsi_req(VirtIOBlockReq *req)
#ifdef __linux__
int i;
- struct sg_io_hdr hdr;
+ VirtIOBlockIoctlReq *ioctl_req;
#endif
/*
@@ -172,71 +222,52 @@ static int virtio_blk_handle_scsi_req(VirtIOBlockReq *req)
}
#ifdef __linux__
- memset(&hdr, 0, sizeof(struct sg_io_hdr));
- hdr.interface_id = 'S';
- hdr.cmd_len = elem->out_sg[1].iov_len;
- hdr.cmdp = elem->out_sg[1].iov_base;
- hdr.dxfer_len = 0;
+ ioctl_req = g_new0(VirtIOBlockIoctlReq, 1);
+ ioctl_req->req = req;
+ ioctl_req->hdr.interface_id = 'S';
+ ioctl_req->hdr.cmd_len = elem->out_sg[1].iov_len;
+ ioctl_req->hdr.cmdp = elem->out_sg[1].iov_base;
+ ioctl_req->hdr.dxfer_len = 0;
if (elem->out_num > 2) {
/*
* If there are more than the minimally required 2 output segments
* there is write payload starting from the third iovec.
*/
- hdr.dxfer_direction = SG_DXFER_TO_DEV;
- hdr.iovec_count = elem->out_num - 2;
+ ioctl_req->hdr.dxfer_direction = SG_DXFER_TO_DEV;
+ ioctl_req->hdr.iovec_count = elem->out_num - 2;
- for (i = 0; i < hdr.iovec_count; i++)
- hdr.dxfer_len += elem->out_sg[i + 2].iov_len;
+ for (i = 0; i < ioctl_req->hdr.iovec_count; i++) {
+ ioctl_req->hdr.dxfer_len += elem->out_sg[i + 2].iov_len;
+ }
- hdr.dxferp = elem->out_sg + 2;
+ ioctl_req->hdr.dxferp = elem->out_sg + 2;
} else if (elem->in_num > 3) {
/*
* If we have more than 3 input segments the guest wants to actually
* read data.
*/
- hdr.dxfer_direction = SG_DXFER_FROM_DEV;
- hdr.iovec_count = elem->in_num - 3;
- for (i = 0; i < hdr.iovec_count; i++)
- hdr.dxfer_len += elem->in_sg[i].iov_len;
+ ioctl_req->hdr.dxfer_direction = SG_DXFER_FROM_DEV;
+ ioctl_req->hdr.iovec_count = elem->in_num - 3;
+ for (i = 0; i < ioctl_req->hdr.iovec_count; i++) {
+ ioctl_req->hdr.dxfer_len += elem->in_sg[i].iov_len;
+ }
- hdr.dxferp = elem->in_sg;
+ ioctl_req->hdr.dxferp = elem->in_sg;
} else {
/*
* Some SCSI commands don't actually transfer any data.
*/
- hdr.dxfer_direction = SG_DXFER_NONE;
+ ioctl_req->hdr.dxfer_direction = SG_DXFER_NONE;
}
- hdr.sbp = elem->in_sg[elem->in_num - 3].iov_base;
- hdr.mx_sb_len = elem->in_sg[elem->in_num - 3].iov_len;
-
- status = blk_ioctl(blk->blk, SG_IO, &hdr);
- if (status) {
- status = VIRTIO_BLK_S_UNSUPP;
- goto fail;
- }
+ ioctl_req->hdr.sbp = elem->in_sg[elem->in_num - 3].iov_base;
+ ioctl_req->hdr.mx_sb_len = elem->in_sg[elem->in_num - 3].iov_len;
- /*
- * From SCSI-Generic-HOWTO: "Some lower level drivers (e.g. ide-scsi)
- * clear the masked_status field [hence status gets cleared too, see
- * block/scsi_ioctl.c] even when a CHECK_CONDITION or COMMAND_TERMINATED
- * status has occurred. However they do set DRIVER_SENSE in driver_status
- * field. Also a (sb_len_wr > 0) indicates there is a sense buffer.
- */
- if (hdr.status == 0 && hdr.sb_len_wr > 0) {
- hdr.status = CHECK_CONDITION;
- }
-
- virtio_stl_p(vdev, &scsi->errors,
- hdr.status | (hdr.msg_status << 8) |
- (hdr.host_status << 16) | (hdr.driver_status << 24));
- virtio_stl_p(vdev, &scsi->residual, hdr.resid);
- virtio_stl_p(vdev, &scsi->sense_len, hdr.sb_len_wr);
- virtio_stl_p(vdev, &scsi->data_len, hdr.dxfer_len);
-
- return status;
+ blk_aio_ioctl(blk->blk, SG_IO, &ioctl_req->hdr,
+ virtio_blk_ioctl_complete, ioctl_req);
+ return -EINPROGRESS;
#else
abort();
#endif
@@ -254,8 +285,10 @@ static void virtio_blk_handle_scsi(VirtIOBlockReq *req)
int status;
status = virtio_blk_handle_scsi_req(req);
- virtio_blk_req_complete(req, status);
- virtio_blk_free_request(req);
+ if (status != -EINPROGRESS) {
+ virtio_blk_req_complete(req, status);
+ virtio_blk_free_request(req);
+ }
}
void virtio_submit_multiwrite(BlockBackend *blk, MultiReqBuffer *mrb)
--
1.9.3
* Re: [Qemu-devel] [PATCH 0/2] virtio-blk: Switch to blk_aio_ioctl
From: Paolo Bonzini @ 2015-01-20 10:13 UTC
To: Fam Zheng, qemu-devel; +Cc: Kevin Wolf, Stefan Hajnoczi
On 20/01/2015 04:28, Fam Zheng wrote:
> Users have complained that the guest becomes unresponsive when an ioctl
> blocks, for example because the connection to the backend is lost. This
> series changes the SCSI request processing of virtio-blk to be
> asynchronous.
>
>
>
> Fam Zheng (2):
> virtio-blk: Pass req to virtio_blk_handle_scsi_req
> virtio-blk: Use blk_aio_ioctl
>
> hw/block/virtio-blk.c | 134 ++++++++++++++++++++++++++---------------
> include/hw/virtio/virtio-blk.h | 3 -
> 2 files changed, 84 insertions(+), 53 deletions(-)
>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
* Re: [Qemu-devel] [PATCH 0/2] virtio-blk: Switch to blk_aio_ioctl
From: Kevin Wolf @ 2015-01-22 18:04 UTC
To: Fam Zheng; +Cc: Paolo Bonzini, qemu-devel, Stefan Hajnoczi
On 20.01.2015 04:28, Fam Zheng wrote:
> Users have complained that the guest becomes unresponsive when an ioctl
> blocks, for example because the connection to the backend is lost. This
> series changes the SCSI request processing of virtio-blk to be
> asynchronous.
Thanks, applied to the block branch.
Kevin