From: Peter Lieven <pl@kamp.de>
To: qemu-devel@nongnu.org, qemu-block@nongnu.org
Cc: kwolf@redhat.com, stefanha@gmail.com, jcody@redhat.com,
jsnow@redhat.com, Peter Lieven <pl@kamp.de>
Subject: [Qemu-devel] [PATCH 3/5] ide: add support for cancelable read requests
Date: Mon, 21 Sep 2015 14:25:26 +0200
Message-ID: <1442838328-23117-4-git-send-email-pl@kamp.de>
In-Reply-To: <1442838328-23117-1-git-send-email-pl@kamp.de>

This patch adds a new aio readv compatible function which copies all
data through a bounce buffer. The benefit is that these requests can be
flagged as canceled, which avoids guest memory corruption when a
canceled request is completed by the backend at a later stage.

If an IDE protocol wants to use this function, it has to pipe all read
requests through ide_readv_cancelable; it may then enable
requests_cancelable in the IDEState.

If this flag is enabled, we can avoid the blocking blk_drain_all in
case of a BMDMA reset.

Currently only read operations are cancelable, so this logic can only
be used for read-only devices.
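
For illustration, a protocol would opt in roughly as in the following
sketch. The example_protocol_* names and the blk_is_read_only() gating
are illustrative assumptions, not part of this patch (the real wiring
for ATAPI follows in patch 4/5):

    /* Hypothetical sketch only: example_protocol_init/_read are made-up
     * names; ide_readv_cancelable() and requests_cancelable are the only
     * pieces introduced by this series. */
    static void example_protocol_init(IDEState *s)
    {
        /* Writes cannot be canceled safely yet, so only read-only
         * devices may advertise cancelable requests. */
        if (blk_is_read_only(s->blk)) {
            s->requests_cancelable = true;
        }
    }

    static BlockAIOCB *example_protocol_read(IDEState *s, int64_t sector_num,
                                             QEMUIOVector *qiov, int nb_sectors,
                                             BlockCompletionFunc *cb, void *opaque)
    {
        /* Route every read through the bounce-buffered helper so a
         * BMDMA reset can flag it as canceled instead of draining. */
        return ide_readv_cancelable(s, sector_num, qiov, nb_sectors,
                                    cb, opaque);
    }
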
Signed-off-by: Peter Lieven <pl@kamp.de>
---
 hw/ide/core.c     | 54 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
 hw/ide/internal.h | 16 ++++++++++++++++
 hw/ide/pci.c      | 42 ++++++++++++++++++++++++++++--------------
 3 files changed, 98 insertions(+), 14 deletions(-)

diff --git a/hw/ide/core.c b/hw/ide/core.c
index 317406d..24547ce 100644
--- a/hw/ide/core.c
+++ b/hw/ide/core.c
@@ -561,6 +561,59 @@ static bool ide_sect_range_ok(IDEState *s,
     return true;
 }
 
+static void ide_readv_cancelable_cb(void *opaque, int ret)
+{
+    IDECancelableRequest *req = opaque;
+    if (!req->canceled) {
+        if (!ret) {
+            qemu_iovec_from_buf(req->org_qiov, 0, req->buf, req->org_qiov->size);
+        }
+        req->org_cb(req->org_opaque, ret);
+    }
+    QLIST_REMOVE(req, list);
+    qemu_vfree(req->buf);
+    qemu_iovec_destroy(&req->qiov);
+    g_free(req);
+}
+
+#define MAX_CANCELABLE_REQS 16
+
+BlockAIOCB *ide_readv_cancelable(IDEState *s, int64_t sector_num,
+                                 QEMUIOVector *iov, int nb_sectors,
+                                 BlockCompletionFunc *cb, void *opaque)
+{
+    BlockAIOCB *aioreq;
+    IDECancelableRequest *req;
+    int c = 0;
+
+    QLIST_FOREACH(req, &s->cancelable_requests, list) {
+        c++;
+    }
+    if (c >= MAX_CANCELABLE_REQS) {
+        return NULL;
+    }
+
+    req = g_new0(IDECancelableRequest, 1);
+    qemu_iovec_init(&req->qiov, 1);
+    req->buf = qemu_blockalign(blk_bs(s->blk), iov->size);
+    qemu_iovec_add(&req->qiov, req->buf, iov->size);
+    req->org_qiov = iov;
+    req->org_cb = cb;
+    req->org_opaque = opaque;
+
+    aioreq = blk_aio_readv(s->blk, sector_num, &req->qiov, nb_sectors,
+                           ide_readv_cancelable_cb, req);
+    if (aioreq == NULL) {
+        qemu_vfree(req->buf);
+        qemu_iovec_destroy(&req->qiov);
+        g_free(req);
+    } else {
+        QLIST_INSERT_HEAD(&s->cancelable_requests, req, list);
+    }
+
+    return aioreq;
+}
+
 static void ide_sector_read(IDEState *s);
 
 static void ide_sector_read_cb(void *opaque, int ret)
@@ -805,6 +858,7 @@ void ide_start_dma(IDEState *s, BlockCompletionFunc *cb)
     s->bus->retry_unit = s->unit;
     s->bus->retry_sector_num = ide_get_sector(s);
     s->bus->retry_nsector = s->nsector;
+    s->bus->s = s;
     if (s->bus->dma->ops->start_dma) {
         s->bus->dma->ops->start_dma(s->bus->dma, s, cb);
     }
diff --git a/hw/ide/internal.h b/hw/ide/internal.h
index 05e93ff..ad188c2 100644
--- a/hw/ide/internal.h
+++ b/hw/ide/internal.h
@@ -343,6 +343,16 @@ enum ide_dma_cmd {
 #define ide_cmd_is_read(s) \
     ((s)->dma_cmd == IDE_DMA_READ)
 
+typedef struct IDECancelableRequest {
+    QLIST_ENTRY(IDECancelableRequest) list;
+    QEMUIOVector qiov;
+    uint8_t *buf;
+    QEMUIOVector *org_qiov;
+    BlockCompletionFunc *org_cb;
+    void *org_opaque;
+    bool canceled;
+} IDECancelableRequest;
+
 /* NOTE: IDEState represents in fact one drive */
 struct IDEState {
     IDEBus *bus;
@@ -396,6 +406,8 @@ struct IDEState {
     BlockAIOCB *pio_aiocb;
     struct iovec iov;
     QEMUIOVector qiov;
+    QLIST_HEAD(, IDECancelableRequest) cancelable_requests;
+    bool requests_cancelable;
     /* ATA DMA state */
     int32_t io_buffer_offset;
     int32_t io_buffer_size;
@@ -468,6 +480,7 @@ struct IDEBus {
     uint8_t retry_unit;
     int64_t retry_sector_num;
     uint32_t retry_nsector;
+    IDEState *s;
 };
 
 #define TYPE_IDE_DEVICE "ide-device"
@@ -572,6 +585,9 @@ void ide_set_inactive(IDEState *s, bool more);
 BlockAIOCB *ide_issue_trim(BlockBackend *blk,
         int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
         BlockCompletionFunc *cb, void *opaque);
+BlockAIOCB *ide_readv_cancelable(IDEState *s, int64_t sector_num,
+        QEMUIOVector *iov, int nb_sectors,
+        BlockCompletionFunc *cb, void *opaque);
 
 /* hw/ide/atapi.c */
 void ide_atapi_cmd(IDEState *s);
diff --git a/hw/ide/pci.c b/hw/ide/pci.c
index d31ff88..5587183 100644
--- a/hw/ide/pci.c
+++ b/hw/ide/pci.c
@@ -240,21 +240,35 @@ void bmdma_cmd_writeb(BMDMAState *bm, uint32_t val)
     /* Ignore writes to SSBM if it keeps the old value */
     if ((val & BM_CMD_START) != (bm->cmd & BM_CMD_START)) {
         if (!(val & BM_CMD_START)) {
-            /*
-             * We can't cancel Scatter Gather DMA in the middle of the
-             * operation or a partial (not full) DMA transfer would reach
-             * the storage so we wait for completion instead (we beahve
-             * like if the DMA was completed by the time the guest trying
-             * to cancel dma with bmdma_cmd_writeb with BM_CMD_START not
-             * set).
-             *
-             * In the future we'll be able to safely cancel the I/O if the
-             * whole DMA operation will be submitted to disk with a single
-             * aio operation with preadv/pwritev.
-             */
             if (bm->bus->dma->aiocb) {
-                blk_drain_all();
-                assert(bm->bus->dma->aiocb == NULL);
+                if (bm->bus->s && bm->bus->s->requests_cancelable) {
+                    /*
+                     * If the IDE protocol in use supports request
+                     * cancellation, we can flag all outstanding requests
+                     * as canceled here and disable DMA. The protocol must
+                     * route all read operations through
+                     * ide_readv_cancelable before enabling this code
+                     * path. Currently this is only supported for
+                     * read-only devices.
+                     */
+                    IDECancelableRequest *req;
+                    QLIST_FOREACH(req, &bm->bus->s->cancelable_requests, list) {
+                        if (!req->canceled) {
+                            req->org_cb(req->org_opaque, -ECANCELED);
+                        }
+                        req->canceled = true;
+                    }
+                } else {
+                    /*
+                     * We can't cancel Scatter Gather DMA in the middle of
+                     * the operation or a partial (not full) DMA transfer
+                     * would reach the storage, so we wait for completion
+                     * instead (we behave as if the DMA had completed by
+                     * the time the guest tried to cancel it with a
+                     * bmdma_cmd_writeb clearing BM_CMD_START).
+                     */
+                    blk_drain_all();
+                    assert(bm->bus->dma->aiocb == NULL);
+                }
             }
             bm->status &= ~BM_STATUS_DMAING;
         } else {
--
1.9.1
Thread overview: 32+ messages
2015-09-21 12:25 [Qemu-devel] [PATCH 0/5] ide: avoid main-loop hang on CDROM/NFS failure Peter Lieven
2015-09-21 12:25 ` [Qemu-devel] [PATCH 1/5] ide/atapi: make PIO read requests async Peter Lieven
2015-10-02 21:02 ` John Snow
2015-10-05 21:15 ` John Snow
2015-10-06 8:46 ` Peter Lieven
2015-10-06 12:08 ` Peter Lieven
2015-10-07 16:42 ` John Snow
2015-10-07 18:53 ` Peter Lieven
2015-10-08 12:06 ` Peter Lieven
2015-10-08 16:44 ` John Snow
2015-10-09 8:21 ` Kevin Wolf
2015-10-09 11:18 ` Peter Lieven
2015-10-09 16:32 ` John Snow
2015-10-14 18:19 ` Peter Lieven
2015-10-14 18:21 ` John Snow
2015-10-16 10:56 ` Peter Lieven
2015-10-06 8:57 ` Kevin Wolf
2015-10-06 9:20 ` Peter Lieven
2015-10-06 17:07 ` John Snow
2015-10-06 17:12 ` Peter Lieven
2015-10-06 17:56 ` John Snow
2015-10-06 18:31 ` Peter Lieven
2015-10-06 18:34 ` John Snow
2015-10-06 15:54 ` John Snow
2015-10-07 7:28 ` Kevin Wolf
2015-10-06 13:05 ` Laszlo Ersek
2015-09-21 12:25 ` [Qemu-devel] [PATCH 2/5] ide/atapi: blk_aio_readv may return NULL Peter Lieven
2015-09-21 12:25 ` Peter Lieven [this message]
2015-09-21 12:25 ` [Qemu-devel] [PATCH 4/5] ide/atapi: enable cancelable requests Peter Lieven
2015-09-21 12:25 ` [Qemu-devel] [PATCH 5/5] block/nfs: cache allocated filesize for read-only files Peter Lieven
2015-09-21 20:58 ` [Qemu-devel] [PATCH 0/5] ide: avoid main-loop hang on CDROM/NFS failure John Snow
2015-09-21 21:22 ` Peter Lieven