From: Paolo Bonzini <pbonzini@redhat.com>
To: qemu-devel@nongnu.org
Subject: [Qemu-devel] [RFC PATCH 9/9] pvscsi: implement s/g operation without a bounce buffer
Date: Mon, 6 Jun 2011 18:27:00 +0200
Message-ID: <1307377620-7538-10-git-send-email-pbonzini@redhat.com>
In-Reply-To: <1307377620-7538-1-git-send-email-pbonzini@redhat.com>
This implements the new get_iovec/unmap_iovec callbacks in the pvscsi
device.  get_iovec maps the request's data buffer, or each element of the
guest's scatter/gather list, directly into host memory and appends the
mappings to the request's QEMUIOVector, so the SCSI layer can perform the
transfer without a bounce buffer.  unmap_iovec releases the mappings once
the transfer completes and accounts the transferred bytes in the completion
descriptor.  A new "sg" bit property (default on) allows disabling the
direct-mapping path.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
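[Not part of the commit message: the direct-mapping path is governed by the
"sg" bit property added at the end of the patch; presumably it can be turned
off per device with something along the lines of

    -device <pvscsi device name>,sg=off

to force the existing bounce-buffer path.  The exact device name comes from
pvscsi_info and is not visible in the hunks below, so the spelling above is
only illustrative.]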
hw/vmw_pvscsi.c | 115 ++++++++++++++++++++++++++++++++++++++++++++++++++++++-
1 files changed, 114 insertions(+), 1 deletions(-)
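
For review purposes, here is a rough sketch of the mapping contract that
pvscsi_iovec_add relies on, written against the stock
cpu_physical_memory_map() API.  It only illustrates the idea and is not the
helper the patch actually calls: cpu_physical_memory_map_fast() is not part
of this patch and takes no direction argument, so the is_write parameter and
the fallback comment below are assumptions.  The sketch also assumes the
headers already included by hw/vmw_pvscsi.c.

/*
 * Illustrative stand-in for the mapping step in pvscsi_iovec_add():
 * map the guest-physical range piecewise and append each mapped chunk
 * to the request's QEMUIOVector.
 */
static bool iovec_add_sketch(QEMUIOVector *qiov, target_phys_addr_t addr,
                             uint64_t len, int is_write)
{
    while (len) {
        target_phys_addr_t n = len;
        /* Map as much of the range as is contiguous in host memory. */
        void *buf = cpu_physical_memory_map(addr, &n, is_write);
        if (!buf) {
            /* Could not map; the device would fall back to bouncing. */
            return false;
        }
        qemu_iovec_add(qiov, buf, n);
        addr += n;
        len -= n;
    }
    return true;
}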
diff --git a/hw/vmw_pvscsi.c b/hw/vmw_pvscsi.c
index 1cb6715..ac16e52 100644
--- a/hw/vmw_pvscsi.c
+++ b/hw/vmw_pvscsi.c
@@ -49,6 +49,7 @@ typedef struct {
     QEMUBH *complete_reqs_bh;
     int mmio_io_addr;
+    uint32_t use_iovec;
     /* zeroed on reset */
     uint32_t cmd_latch;
@@ -387,6 +388,112 @@ static bool pvscsi_check_addresses(PVSCSIRequest *p)
     }
 }
+static bool pvscsi_iovec_add(PVSCSIRequest *p, target_phys_addr_t addr,
+                             uint64_t len)
+{
+    while (len) {
+        target_phys_addr_t n = len;
+        uint8_t *buf = cpu_physical_memory_map_fast(addr, &n);
+        if (!buf) {
+            return false;
+        }
+        qemu_iovec_add(&p->sreq->qiov, buf, n);
+        addr += n;
+        len -= n;
+    }
+    return true;
+}
+
+static bool pvscsi_get_sg_list_iovec(PVSCSIRequest *p, uint64_t len)
+{
+    int n;
+    PVSCSISGState sg = p->sg;
+
+    while (len) {
+        while (!sg.resid) {
+            pvscsi_get_next_sg_elem(&sg);
+            trace_pvscsi_sg_elem(p->req.context, sg.dataAddr, sg.resid);
+        }
+        assert(len > 0);
+        n = MIN((unsigned) len, sg.resid);
+        if (n) {
+            if (!pvscsi_iovec_add(p, sg.dataAddr, n)) {
+                return false;
+            }
+        }
+
+        sg.dataAddr += n;
+        len -= n;
+        sg.resid -= n;
+    }
+    return true;
+}
+
+static void pvscsi_get_iovec(SCSIRequest *req, uint64_t len)
+{
+    PVSCSIState *s = DO_UPCAST(PVSCSIState, dev.qdev, req->bus->qbus.parent);
+    PVSCSIRequest *p = pvscsi_find_request(s, req);
+    bool ok;
+
+    if (!s->use_iovec) {
+        return;
+    }
+    if (p->req.flags & PVSCSI_FLAG_CMD_WITH_SG_LIST) {
+        ok = pvscsi_get_sg_list_iovec(p, len);
+    } else {
+        ok = pvscsi_iovec_add(p, p->req.dataAddr, MIN(len, p->req.dataLen));
+    }
+    if (!ok) {
+        qemu_iovec_reset(&p->sreq->qiov);
+    }
+}
+
+/* Callback to unmap the iovec once the SCSI layer completes a transfer. */
+static void pvscsi_unmap_iovec(SCSIRequest *req, uint64_t len)
+{
+    PVSCSIState *s = DO_UPCAST(PVSCSIState, dev.qdev, req->bus->qbus.parent);
+    PVSCSIRequest *p = pvscsi_find_request(s, req);
+    QEMUIOVector *qiov = &req->qiov;
+    int to_host;
+    int i;
+
+    if (!p) {
+        fprintf(stderr, "PVSCSI: Can't find request for tag 0x%x\n", req->tag);
+        return;
+    }
+    to_host = (p->req.flags & PVSCSI_FLAG_CMD_DIR_TOHOST) != 0;
+
+    trace_pvscsi_transfer_data(p->req.context, len);
+    if (!len) {
+        /* Short transfer. */
+        p->cmp.hostStatus = BTSTAT_DATARUN;
+        scsi_req_cancel(req);
+        return;
+    }
+
+    for (i = 0; i < qiov->niov; i++) {
+        uint64_t n = qiov->iov[i].iov_len;
+        uint64_t access_len = MIN(len, n);
+
+        cpu_physical_memory_unmap(qiov->iov[i].iov_base, n,
+                                  to_host, access_len);
+        if (p->req.flags & PVSCSI_FLAG_CMD_WITH_SG_LIST) {
+            while (!p->sg.resid) {
+                pvscsi_get_next_sg_elem(&p->sg);
+            }
+            assert(n <= p->sg.resid);
+            p->sg.dataAddr += n;
+            p->sg.resid -= n;
+        }
+
+        assert(access_len <= p->resid);
+        p->cmp.dataLen += access_len;
+        p->resid -= access_len;
+        len -= access_len;
+    }
+
+    scsi_req_continue(req);
+}
+
 /* Callback to indicate that the SCSI layer has completed a transfer. */
 static void pvscsi_transfer_data(SCSIRequest *req, uint32_t len)
 {
@@ -837,7 +944,9 @@ static int pvscsi_uninit(PCIDevice *d)
 static struct SCSIBusOps pvscsi_scsi_ops = {
     .transfer_data = pvscsi_transfer_data,
     .complete = pvscsi_command_complete,
-    .cancel = pvscsi_request_cancelled
+    .cancel = pvscsi_request_cancelled,
+    .get_iovec = pvscsi_get_iovec,
+    .unmap_iovec = pvscsi_unmap_iovec
 };
 
 static int pvscsi_init(PCIDevice *dev)
@@ -891,6 +1000,10 @@ static PCIDeviceInfo pvscsi_info = {
     .qdev.reset = pvscsi_reset,
     .init = pvscsi_init,
     .exit = pvscsi_uninit,
+    .qdev.props = (Property[]) {
+        DEFINE_PROP_BIT("sg", PVSCSIState, use_iovec, 0, true),
+        DEFINE_PROP_END_OF_LIST(),
+    },
 };
 
 static void vmw_pvscsi_register_devices(void)
--
1.7.4.4