From: Fam Zheng <famz@redhat.com>
To: qemu-devel@nongnu.org
Cc: Kevin Wolf <kwolf@redhat.com>,
Paolo Bonzini <pbonzini@redhat.com>,
Stefan Hajnoczi <stefanha@redhat.com>
Subject: [Qemu-devel] [PATCH 3/3] virtio-scsi: Handle TMF request cancellation asynchronously
Date: Thu, 18 Sep 2014 10:36:39 +0800
Message-ID: <1411007799-23199-4-git-send-email-famz@redhat.com>
In-Reply-To: <1411007799-23199-1-git-send-email-famz@redhat.com>
For VIRTIO_SCSI_T_TMF_ABORT_TASK and VIRTIO_SCSI_T_TMF_ABORT_TASK_SET, use
scsi_req_cancel_async to start the cancellation. In virtio_scsi_handle_ctrl,
don't complete such a request right away: completion is deferred until the
virtio_scsi_cancel_dep_complete callback runs, i.e. once all the cancelled
commands have finished.
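A simplified sketch of the resulting control-path flow (error handling and
the event/AN request types are elided; see the diff below for the actual
code):

static void virtio_scsi_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOSCSI *s = (VirtIOSCSI *)vdev;
    VirtIOSCSIReq *req;

    while ((req = virtio_scsi_pop_req(s, vq))) {
        /* virtio_scsi_do_tmf() now returns false when it has started an
         * asynchronous cancellation via scsi_req_cancel_async(). */
        if (virtio_scsi_do_tmf(s, req)) {
            virtio_scsi_complete_req(req);
        }
        /* Otherwise the request is completed later, from the new
         * cancel_dep_complete callback (virtio_scsi_cancel_dep_complete),
         * once the cancelled commands have finished. */
    }
}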
Signed-off-by: Fam Zheng <famz@redhat.com>
---
hw/scsi/virtio-scsi.c | 45 ++++++++++++++++++++++++++++++++++++++-------
1 file changed, 38 insertions(+), 7 deletions(-)
diff --git a/hw/scsi/virtio-scsi.c b/hw/scsi/virtio-scsi.c
index 86aba88..323140e 100644
--- a/hw/scsi/virtio-scsi.c
+++ b/hw/scsi/virtio-scsi.c
@@ -226,12 +226,27 @@ static void *virtio_scsi_load_request(QEMUFile *f, SCSIRequest *sreq)
return req;
}
-static void virtio_scsi_do_tmf(VirtIOSCSI *s, VirtIOSCSIReq *req)
+static void virtio_scsi_cancel_req(SCSIDevice *d, SCSIRequest *req,
+ VirtIOSCSIReq *tmf_req)
+{
+ if (!tmf_req->sreq) {
+ /* Allocate a dummy sreq to keep track of the dependency between
+ * the TMF request and the commands being cancelled. */
+ tmf_req->sreq = scsi_req_alloc(NULL, NULL, 0, 0, req);
+ }
+ scsi_req_cancel_async(req, tmf_req->sreq);
+}
+
+/* Return true if the request is ready to be completed and returned to the
+ * guest; false if the request will be completed later (by some other event),
+ * for example in the case of async cancellation. */
+static bool virtio_scsi_do_tmf(VirtIOSCSI *s, VirtIOSCSIReq *req)
{
SCSIDevice *d = virtio_scsi_device_find(s, req->req.tmf.lun);
SCSIRequest *r, *next;
BusChild *kid;
int target;
+ bool ret = true;
/* Here VIRTIO_SCSI_S_OK means "FUNCTION COMPLETE". */
req->resp.tmf.response = VIRTIO_SCSI_S_OK;
@@ -264,7 +279,8 @@ static void virtio_scsi_do_tmf(VirtIOSCSI *s, VirtIOSCSIReq *req)
*/
req->resp.tmf.response = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
} else {
- scsi_req_cancel(r);
+ virtio_scsi_cancel_req(d, r, req);
+ ret = false;
}
}
break;
@@ -299,7 +315,8 @@ static void virtio_scsi_do_tmf(VirtIOSCSI *s, VirtIOSCSIReq *req)
req->resp.tmf.response = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
break;
} else {
- scsi_req_cancel(r);
+ virtio_scsi_cancel_req(d, r, req);
+ ret = false;
}
}
}
@@ -323,20 +340,22 @@ static void virtio_scsi_do_tmf(VirtIOSCSI *s, VirtIOSCSIReq *req)
break;
}
- return;
+ return ret;
incorrect_lun:
req->resp.tmf.response = VIRTIO_SCSI_S_INCORRECT_LUN;
- return;
+ return ret;
fail:
req->resp.tmf.response = VIRTIO_SCSI_S_BAD_TARGET;
+ return ret;
}
static void virtio_scsi_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
VirtIOSCSI *s = (VirtIOSCSI *)vdev;
VirtIOSCSIReq *req;
while ((req = virtio_scsi_pop_req(s, vq))) {
int type;
+ bool should_complete = true;
@@ -353,7 +372,7 @@ static void virtio_scsi_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
sizeof(VirtIOSCSICtrlTMFResp)) < 0) {
virtio_scsi_bad_req();
} else {
- virtio_scsi_do_tmf(s, req);
+ should_complete = virtio_scsi_do_tmf(s, req);
}
} else if (req->req.tmf.type == VIRTIO_SCSI_T_AN_QUERY ||
@@ -366,7 +385,9 @@ static void virtio_scsi_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
req->resp.an.response = VIRTIO_SCSI_S_OK;
}
}
- virtio_scsi_complete_req(req);
+ if (should_complete) {
+ virtio_scsi_complete_req(req);
+ }
}
}
@@ -437,6 +458,15 @@ static QEMUSGList *virtio_scsi_get_sg_list(SCSIRequest *r)
return &req->qsgl;
}
+static void virtio_scsi_cancel_dep_complete(SCSIRequest *r)
+{
+ VirtIOSCSIReq *req = r->hba_private;
+
+ scsi_req_unref(req->sreq);
+ req->sreq = NULL;
+ virtio_scsi_complete_req(req);
+}
+
static void virtio_scsi_request_cancelled(SCSIRequest *r)
{
VirtIOSCSIReq *req = r->hba_private;
@@ -679,6 +709,7 @@ static struct SCSIBusInfo virtio_scsi_scsi_info = {
.complete = virtio_scsi_command_complete,
.cancel = virtio_scsi_request_cancelled,
+ .cancel_dep_complete = virtio_scsi_cancel_dep_complete,
.change = virtio_scsi_change,
.hotplug = virtio_scsi_hotplug,
.hot_unplug = virtio_scsi_hot_unplug,
--
1.9.3