From: Paolo Bonzini <pbonzini@redhat.com>
To: qemu-devel@nongnu.org
Subject: [Qemu-devel] [PATCH v2 08/15] block: add bdrv_co_discard and bdrv_aio_discard support
Date: Fri, 16 Sep 2011 16:25:45 +0200
Message-ID: <1316183152-5481-9-git-send-email-pbonzini@redhat.com>
In-Reply-To: <1316183152-5481-1-git-send-email-pbonzini@redhat.com>

Add discard support in both coroutine (bdrv_co_discard) and AIO
(bdrv_aio_discard) flavors.  As was done for flush earlier in this
series, bdrv_register() emulates whichever variants a driver does not
implement natively, so synchronous, AIO and coroutine callers all work
no matter which single interface the driver provides.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
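Reviewer note, not for the commit message: a minimal sketch of how a
driver would hook into this.  mydrv_co_discard, bdrv_mydrv,
mydrv_discard_cb and mydrv_trim_start are made-up names for
illustration; a real driver would forward the request to its backing
store.  A driver need implement only one of the three discard
interfaces; bdrv_register() fills in the other two by emulation.

    /* Hypothetical driver: provides only the coroutine variant.
     * bdrv_register() then sets .bdrv_aio_discard to
     * bdrv_co_aio_discard_em and .bdrv_discard to bdrv_discard_em. */
    static int coroutine_fn mydrv_co_discard(BlockDriverState *bs,
                                             int64_t sector_num,
                                             int nb_sectors)
    {
        /* Discard is advisory; a driver that cannot unmap the
         * range may simply report success. */
        return 0;
    }

    static BlockDriver bdrv_mydrv = {
        .format_name     = "mydrv",
        .bdrv_co_discard = mydrv_co_discard,
        /* .bdrv_aio_discard and .bdrv_discard left NULL on purpose */
    };

Callers can then pick whichever entry point fits their context, e.g.:

    static void mydrv_discard_cb(void *opaque, int ret)
    {
        /* ret is 0 on success, a negative errno on failure */
    }

    static void mydrv_trim_start(BlockDriverState *bs)
    {
        BlockDriverAIOCB *acb;

        acb = bdrv_aio_discard(bs, 0, 16, mydrv_discard_cb, NULL);
        if (acb == NULL) {
            /* rejected up front: no medium, read-only device, or
             * the range failed bdrv_check_request() */
        }
    }
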
 block.c      |  140 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++-
 block.h      |    3 +
 block_int.h  |    9 +++-
 trace-events |    1 +
 4 files changed, 148 insertions(+), 5 deletions(-)

diff --git a/block.c b/block.c
index f4b9089..7853982 100644
--- a/block.c
+++ b/block.c
@@ -53,6 +53,9 @@ static BlockDriverAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
         BlockDriverCompletionFunc *cb, void *opaque);
 static BlockDriverAIOCB *bdrv_aio_flush_em(BlockDriverState *bs,
         BlockDriverCompletionFunc *cb, void *opaque);
+static BlockDriverAIOCB *bdrv_aio_discard_em(BlockDriverState *bs,
+        int64_t sector_num, int nb_sectors,
+        BlockDriverCompletionFunc *cb, void *opaque);
 static BlockDriverAIOCB *bdrv_aio_noop_em(BlockDriverState *bs,
         BlockDriverCompletionFunc *cb, void *opaque);
 static int bdrv_read_em(BlockDriverState *bs, int64_t sector_num,
@@ -60,6 +63,8 @@ static int bdrv_read_em(BlockDriverState *bs, int64_t sector_num,
 static int bdrv_write_em(BlockDriverState *bs, int64_t sector_num,
                          const uint8_t *buf, int nb_sectors);
 static int bdrv_flush_em(BlockDriverState *bs);
+static int bdrv_discard_em(BlockDriverState *bs, int64_t sector_num,
+                           int nb_sectors);
 static BlockDriverAIOCB *bdrv_co_aio_readv_em(BlockDriverState *bs,
         int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
         BlockDriverCompletionFunc *cb, void *opaque);
@@ -68,6 +73,9 @@ static BlockDriverAIOCB *bdrv_co_aio_writev_em(BlockDriverState *bs,
         BlockDriverCompletionFunc *cb, void *opaque);
 static BlockDriverAIOCB *bdrv_co_aio_flush_em(BlockDriverState *bs,
         BlockDriverCompletionFunc *cb, void *opaque);
+static BlockDriverAIOCB *bdrv_co_aio_discard_em(BlockDriverState *bs,
+        int64_t sector_num, int nb_sectors,
+        BlockDriverCompletionFunc *cb, void *opaque);
 static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
                                          int64_t sector_num, int nb_sectors,
                                          QEMUIOVector *iov);
@@ -75,6 +83,8 @@ static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
                                          int64_t sector_num, int nb_sectors,
                                          QEMUIOVector *iov);
 static int coroutine_fn bdrv_co_flush_em(BlockDriverState *bs);
+static int coroutine_fn bdrv_co_discard_em(BlockDriverState *bs,
+        int64_t sector_num, int nb_sectors);
 
 static QTAILQ_HEAD(, BlockDriverState) bdrv_states =
     QTAILQ_HEAD_INITIALIZER(bdrv_states);
@@ -209,6 +219,14 @@ void bdrv_register(BlockDriver *bdrv)
         bdrv->bdrv_aio_flush = bdrv_aio_flush_em;
     }
 
+    if (bdrv->bdrv_co_discard) {
+        bdrv->bdrv_aio_discard = bdrv_co_aio_discard_em;
+    } else if (bdrv->bdrv_aio_discard) {
+        bdrv->bdrv_co_discard = bdrv_co_discard_em;
+    } else {
+        bdrv->bdrv_aio_discard = bdrv_aio_discard_em;
+    }
+
     /* add synchronous IO emulation layer */
     if (!bdrv->bdrv_read) {
         bdrv->bdrv_read = bdrv_read_em;
@@ -217,6 +235,9 @@ void bdrv_register(BlockDriver *bdrv)
     if (!bdrv->bdrv_flush) {
         bdrv->bdrv_flush = bdrv_flush_em;
     }
+    if (!bdrv->bdrv_discard) {
+        bdrv->bdrv_discard = bdrv_discard_em;
+    }
 
     QLIST_INSERT_HEAD(&bdrv_drivers, bdrv, list);
 }
@@ -1791,10 +1812,18 @@ int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors)
     if (!bs->drv) {
         return -ENOMEDIUM;
     }
-    if (!bs->drv->bdrv_discard) {
-        return 0;
+    if (bdrv_check_request(bs, sector_num, nb_sectors)) {
+        return -EIO;
+    }
+    if (bs->drv->bdrv_co_discard && qemu_in_coroutine()) {
+        return bs->drv->bdrv_co_discard(bs, sector_num, nb_sectors);
     }
-    return bs->drv->bdrv_discard(bs, sector_num, nb_sectors);
+
+    if (bs->drv->bdrv_discard) {
+        return bs->drv->bdrv_discard(bs, sector_num, nb_sectors);
+    }
+
+    return 0;
 }
 
 /*
@@ -2656,6 +2685,24 @@ BlockDriverAIOCB *bdrv_aio_flush(BlockDriverState *bs,
     return drv->bdrv_aio_flush(bs, cb, opaque);
 }
 
+BlockDriverAIOCB *bdrv_aio_discard(BlockDriverState *bs,
+        int64_t sector_num, int nb_sectors,
+        BlockDriverCompletionFunc *cb, void *opaque)
+{
+    trace_bdrv_aio_discard(bs, sector_num, nb_sectors, opaque);
+
+    if (!bs->drv) {
+        return NULL;
+    }
+    if (bs->read_only) {
+        return NULL;
+    }
+    if (bdrv_check_request(bs, sector_num, nb_sectors)) {
+        return NULL;
+    }
+    return bs->drv->bdrv_aio_discard(bs, sector_num, nb_sectors, cb, opaque);
+}
+
 void bdrv_aio_cancel(BlockDriverAIOCB *acb)
 {
     acb->pool->cancel(acb);
@@ -2873,6 +2920,52 @@ static BlockDriverAIOCB *bdrv_aio_flush_em(BlockDriverState *bs,
     return &acb->common;
 }
 
+static void coroutine_fn bdrv_co_discard(void *opaque)
+{
+    BlockDriverAIOCBCoroutine *acb = opaque;
+    BlockDriverState *bs = acb->common.bs;
+
+    acb->req.error = bs->drv->bdrv_co_discard(bs, acb->req.sector,
+                                              acb->req.nb_sectors);
+    acb->bh = qemu_bh_new(bdrv_co_em_bh, acb);
+    qemu_bh_schedule(acb->bh);
+}
+
+static BlockDriverAIOCB *bdrv_co_aio_discard_em(BlockDriverState *bs,
+        int64_t sector_num, int nb_sectors,
+        BlockDriverCompletionFunc *cb, void *opaque)
+{
+    Coroutine *co;
+    BlockDriverAIOCBCoroutine *acb;
+
+    acb = qemu_aio_get(&bdrv_em_co_aio_pool, bs, cb, opaque);
+    acb->req.sector = sector_num;
+    acb->req.nb_sectors = nb_sectors;
+    co = qemu_coroutine_create(bdrv_co_discard);
+    qemu_coroutine_enter(co, acb);
+
+    return &acb->common;
+}
+
+static BlockDriverAIOCB *bdrv_aio_discard_em(BlockDriverState *bs,
+        int64_t sector_num, int nb_sectors,
+        BlockDriverCompletionFunc *cb, void *opaque)
+{
+    BlockDriverAIOCBSync *acb;
+
+    acb = qemu_aio_get(&bdrv_em_aio_pool, bs, cb, opaque);
+    acb->is_write = 1; /* don't bounce in the completion handler */
+    acb->qiov = NULL;
+    acb->bounce = NULL;
+
+    if (!acb->bh) {
+        acb->bh = qemu_bh_new(bdrv_aio_bh_cb, acb);
+    }
+    acb->ret = bdrv_discard(bs, sector_num, nb_sectors);
+    qemu_bh_schedule(acb->bh);
+    return &acb->common;
+}
+
 static BlockDriverAIOCB *bdrv_aio_noop_em(BlockDriverState *bs,
         BlockDriverCompletionFunc *cb, void *opaque)
 {
@@ -2975,6 +3068,30 @@ fail:
     return async_ret;
 }
 
+static int bdrv_discard_em(BlockDriverState *bs, int64_t sector_num,
+                           int nb_sectors)
+
+{
+    int async_ret;
+    BlockDriverAIOCB *acb;
+
+    async_ret = NOT_DONE;
+    acb = bdrv_aio_discard(bs, sector_num, nb_sectors,
+                           bdrv_em_cb, &async_ret);
+    if (acb == NULL) {
+        async_ret = -1;
+        goto fail;
+    }
+
+    while (async_ret == NOT_DONE) {
+        qemu_aio_wait();
+    }
+
+
+fail:
+    return async_ret;
+}
+
 void bdrv_init(void)
 {
     module_call_init(MODULE_INIT_BLOCK);
@@ -3083,6 +3200,23 @@ static int coroutine_fn bdrv_co_flush_em(BlockDriverState *bs)
     return co.ret;
 }
 
+static int coroutine_fn bdrv_co_discard_em(BlockDriverState *bs,
+        int64_t sector_num, int nb_sectors)
+{
+    CoroutineIOCompletion co = {
+        .coroutine = qemu_coroutine_self(),
+    };
+    BlockDriverAIOCB *acb;
+
+    acb = bdrv_aio_discard(bs, sector_num, nb_sectors,
+                           bdrv_co_io_em_complete, &co);
+    if (!acb) {
+        return -EIO;
+    }
+    qemu_coroutine_yield();
+    return co.ret;
+}
+
 /**************************************************************/
 /* removable device support */
 
diff --git a/block.h b/block.h
index 16bfa0a..94cd395 100644
--- a/block.h
+++ b/block.h
@@ -156,6 +156,9 @@ BlockDriverAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,
                                   BlockDriverCompletionFunc *cb, void *opaque);
 BlockDriverAIOCB *bdrv_aio_flush(BlockDriverState *bs,
                                  BlockDriverCompletionFunc *cb, void *opaque);
+BlockDriverAIOCB *bdrv_aio_discard(BlockDriverState *bs,
+                                   int64_t sector_num, int nb_sectors,
+                                   BlockDriverCompletionFunc *cb, void *opaque);
 void bdrv_aio_cancel(BlockDriverAIOCB *acb);
 
 typedef struct BlockRequest {
diff --git a/block_int.h b/block_int.h
index bb39b0b..4222bda 100644
--- a/block_int.h
+++ b/block_int.h
@@ -63,6 +63,8 @@ struct BlockDriver {
     void (*bdrv_close)(BlockDriverState *bs);
     int (*bdrv_create)(const char *filename, QEMUOptionParameter *options);
     int (*bdrv_flush)(BlockDriverState *bs);
+    int (*bdrv_discard)(BlockDriverState *bs, int64_t sector_num,
+                        int nb_sectors);
     int (*bdrv_is_allocated)(BlockDriverState *bs, int64_t sector_num,
                              int nb_sectors, int *pnum);
     int (*bdrv_set_key)(BlockDriverState *bs, const char *key);
@@ -76,14 +78,17 @@ struct BlockDriver {
         BlockDriverCompletionFunc *cb, void *opaque);
     BlockDriverAIOCB *(*bdrv_aio_flush)(BlockDriverState *bs,
         BlockDriverCompletionFunc *cb, void *opaque);
-    int (*bdrv_discard)(BlockDriverState *bs, int64_t sector_num,
-                        int nb_sectors);
+    BlockDriverAIOCB *(*bdrv_aio_discard)(BlockDriverState *bs,
+        int64_t sector_num, int nb_sectors,
+        BlockDriverCompletionFunc *cb, void *opaque);
 
     int coroutine_fn (*bdrv_co_readv)(BlockDriverState *bs,
         int64_t sector_num, int nb_sectors, QEMUIOVector *qiov);
     int coroutine_fn (*bdrv_co_writev)(BlockDriverState *bs,
         int64_t sector_num, int nb_sectors, QEMUIOVector *qiov);
     int coroutine_fn (*bdrv_co_flush)(BlockDriverState *bs);
+    int coroutine_fn (*bdrv_co_discard)(BlockDriverState *bs,
+        int64_t sector_num, int nb_sectors);
 
     int (*bdrv_aio_multiwrite)(BlockDriverState *bs, BlockRequest *reqs,
         int num_reqs);
diff --git a/trace-events b/trace-events
index fe64684..2dcfb9c 100644
--- a/trace-events
+++ b/trace-events
@@ -59,6 +59,7 @@ multiwrite_cb(void *mcb, int ret) "mcb %p ret %d"
 bdrv_aio_multiwrite(void *mcb, int num_callbacks, int num_reqs) "mcb %p num_callbacks %d num_reqs %d"
 bdrv_aio_multiwrite_earlyfail(void *mcb) "mcb %p"
 bdrv_aio_multiwrite_latefail(void *mcb, int i) "mcb %p i %d"
+bdrv_aio_discard(void *bs, int64_t sector_num, int nb_sectors, void *opaque) "bs %p sector_num %"PRId64" nb_sectors %d opaque %p"
 bdrv_aio_flush(void *bs, void *opaque) "bs %p opaque %p"
 bdrv_aio_readv(void *bs, int64_t sector_num, int nb_sectors, void *opaque) "bs %p sector_num %"PRId64" nb_sectors %d opaque %p"
 bdrv_aio_writev(void *bs, int64_t sector_num, int nb_sectors, void *opaque) "bs %p sector_num %"PRId64" nb_sectors %d opaque %p"
-- 
1.7.6
