From: Stefan Hajnoczi <stefanha@redhat.com>
To: qemu-devel@nongnu.org
Cc: Peter Maydell <peter.maydell@linaro.org>,
Fam Zheng <famz@redhat.com>,
Stefan Hajnoczi <stefanha@redhat.com>
Subject: [Qemu-devel] [PULL 29/42] virtio-blk: Factor out virtio_blk_handle_scsi_req from virtio_blk_handle_scsi
Date: Fri, 6 Jun 2014 18:13:50 +0200
Message-ID: <1402071243-16702-30-git-send-email-stefanha@redhat.com>
In-Reply-To: <1402071243-16702-1-git-send-email-stefanha@redhat.com>
From: Fam Zheng <famz@redhat.com>
The common logic for processing a SCSI request in a VirtQueueElement is
extracted into a function so that it can be shared with dataplane.

This makes VirtIOBlockReq.scsi unused, so drop it.
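
For illustration, a dataplane request handler could reuse the exported
helper along these lines (a minimal sketch only; the handle_scsi_cmd()
name and the complete_request() completion helper are assumptions for
illustration, not part of this patch):

    /* Sketch: process an SG_IO request from a dataplane context by
     * calling the shared helper and then completing the request.
     * complete_request() stands in for whatever completion path the
     * caller uses. */
    static void handle_scsi_cmd(VirtIOBlock *s, VirtQueueElement *elem)
    {
        int status;

        /* Shared SG_IO handling, now independent of VirtIOBlockReq */
        status = virtio_blk_handle_scsi_req(s, elem);

        /* Report the virtio-blk status back to the guest */
        complete_request(s, elem, status);
    }
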
Signed-off-by: Fam Zheng <famz@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
---
hw/block/virtio-blk.c | 75 +++++++++++++++++++++++-------------------
include/hw/virtio/virtio-blk.h | 3 ++
2 files changed, 44 insertions(+), 34 deletions(-)
diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
index 5e9433d..0b1446e 100644
--- a/hw/block/virtio-blk.c
+++ b/hw/block/virtio-blk.c
@@ -33,7 +33,6 @@ typedef struct VirtIOBlockReq
VirtQueueElement elem;
struct virtio_blk_inhdr *in;
struct virtio_blk_outhdr *out;
- struct virtio_scsi_inhdr *scsi;
QEMUIOVector qiov;
struct VirtIOBlockReq *next;
BlockAcctCookie acct;
@@ -125,13 +124,15 @@ static VirtIOBlockReq *virtio_blk_get_request(VirtIOBlock *s)
return req;
}
-static void virtio_blk_handle_scsi(VirtIOBlockReq *req)
+int virtio_blk_handle_scsi_req(VirtIOBlock *blk,
+ VirtQueueElement *elem)
{
+ int status = VIRTIO_BLK_S_OK;
+ struct virtio_scsi_inhdr *scsi = NULL;
#ifdef __linux__
- int ret;
int i;
+ struct sg_io_hdr hdr;
#endif
- int status = VIRTIO_BLK_S_OK;
/*
* We require at least one output segment each for the virtio_blk_outhdr
@@ -140,19 +141,18 @@ static void virtio_blk_handle_scsi(VirtIOBlockReq *req)
* We also at least require the virtio_blk_inhdr, the virtio_scsi_inhdr
* and the sense buffer pointer in the input segments.
*/
- if (req->elem.out_num < 2 || req->elem.in_num < 3) {
- virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
- g_free(req);
- return;
+ if (elem->out_num < 2 || elem->in_num < 3) {
+ status = VIRTIO_BLK_S_IOERR;
+ goto fail;
}
/*
* The scsi inhdr is placed in the second-to-last input segment, just
* before the regular inhdr.
*/
- req->scsi = (void *)req->elem.in_sg[req->elem.in_num - 2].iov_base;
+ scsi = (void *)elem->in_sg[elem->in_num - 2].iov_base;
- if (!req->dev->blk.scsi) {
+ if (!blk->blk.scsi) {
status = VIRTIO_BLK_S_UNSUPP;
goto fail;
}
@@ -160,43 +160,42 @@ static void virtio_blk_handle_scsi(VirtIOBlockReq *req)
/*
* No support for bidirection commands yet.
*/
- if (req->elem.out_num > 2 && req->elem.in_num > 3) {
+ if (elem->out_num > 2 && elem->in_num > 3) {
status = VIRTIO_BLK_S_UNSUPP;
goto fail;
}
#ifdef __linux__
- struct sg_io_hdr hdr;
memset(&hdr, 0, sizeof(struct sg_io_hdr));
hdr.interface_id = 'S';
- hdr.cmd_len = req->elem.out_sg[1].iov_len;
- hdr.cmdp = req->elem.out_sg[1].iov_base;
+ hdr.cmd_len = elem->out_sg[1].iov_len;
+ hdr.cmdp = elem->out_sg[1].iov_base;
hdr.dxfer_len = 0;
- if (req->elem.out_num > 2) {
+ if (elem->out_num > 2) {
/*
* If there are more than the minimally required 2 output segments
* there is write payload starting from the third iovec.
*/
hdr.dxfer_direction = SG_DXFER_TO_DEV;
- hdr.iovec_count = req->elem.out_num - 2;
+ hdr.iovec_count = elem->out_num - 2;
for (i = 0; i < hdr.iovec_count; i++)
- hdr.dxfer_len += req->elem.out_sg[i + 2].iov_len;
+ hdr.dxfer_len += elem->out_sg[i + 2].iov_len;
- hdr.dxferp = req->elem.out_sg + 2;
+ hdr.dxferp = elem->out_sg + 2;
- } else if (req->elem.in_num > 3) {
+ } else if (elem->in_num > 3) {
/*
* If we have more than 3 input segments the guest wants to actually
* read data.
*/
hdr.dxfer_direction = SG_DXFER_FROM_DEV;
- hdr.iovec_count = req->elem.in_num - 3;
+ hdr.iovec_count = elem->in_num - 3;
for (i = 0; i < hdr.iovec_count; i++)
- hdr.dxfer_len += req->elem.in_sg[i].iov_len;
+ hdr.dxfer_len += elem->in_sg[i].iov_len;
- hdr.dxferp = req->elem.in_sg;
+ hdr.dxferp = elem->in_sg;
} else {
/*
* Some SCSI commands don't actually transfer any data.
@@ -204,11 +203,11 @@ static void virtio_blk_handle_scsi(VirtIOBlockReq *req)
hdr.dxfer_direction = SG_DXFER_NONE;
}
- hdr.sbp = req->elem.in_sg[req->elem.in_num - 3].iov_base;
- hdr.mx_sb_len = req->elem.in_sg[req->elem.in_num - 3].iov_len;
+ hdr.sbp = elem->in_sg[elem->in_num - 3].iov_base;
+ hdr.mx_sb_len = elem->in_sg[elem->in_num - 3].iov_len;
- ret = bdrv_ioctl(req->dev->bs, SG_IO, &hdr);
- if (ret) {
+ status = bdrv_ioctl(blk->bs, SG_IO, &hdr);
+ if (status) {
status = VIRTIO_BLK_S_UNSUPP;
goto fail;
}
@@ -224,23 +223,31 @@ static void virtio_blk_handle_scsi(VirtIOBlockReq *req)
hdr.status = CHECK_CONDITION;
}
- stl_p(&req->scsi->errors,
+ stl_p(&scsi->errors,
hdr.status | (hdr.msg_status << 8) |
(hdr.host_status << 16) | (hdr.driver_status << 24));
- stl_p(&req->scsi->residual, hdr.resid);
- stl_p(&req->scsi->sense_len, hdr.sb_len_wr);
- stl_p(&req->scsi->data_len, hdr.dxfer_len);
+ stl_p(&scsi->residual, hdr.resid);
+ stl_p(&scsi->sense_len, hdr.sb_len_wr);
+ stl_p(&scsi->data_len, hdr.dxfer_len);
- virtio_blk_req_complete(req, status);
- g_free(req);
- return;
+ return status;
#else
abort();
#endif
fail:
/* Just put anything nonzero so that the ioctl fails in the guest. */
- stl_p(&req->scsi->errors, 255);
+ if (scsi) {
+ stl_p(&scsi->errors, 255);
+ }
+ return status;
+}
+
+static void virtio_blk_handle_scsi(VirtIOBlockReq *req)
+{
+ int status;
+
+ status = virtio_blk_handle_scsi_req(req->dev, &req->elem);
virtio_blk_req_complete(req, status);
g_free(req);
}
diff --git a/include/hw/virtio/virtio-blk.h b/include/hw/virtio/virtio-blk.h
index e4c41ff..4bc9b54 100644
--- a/include/hw/virtio/virtio-blk.h
+++ b/include/hw/virtio/virtio-blk.h
@@ -155,4 +155,7 @@ typedef struct VirtIOBlock {
void virtio_blk_set_conf(DeviceState *dev, VirtIOBlkConf *blk);
+int virtio_blk_handle_scsi_req(VirtIOBlock *blk,
+ VirtQueueElement *elem);
+
#endif
--
1.9.3