From: Maxim Levitsky <mlevitsk@redhat.com>
To: qemu-devel@nongnu.org
Cc: Kevin Wolf <kwolf@redhat.com>, Fam Zheng <fam@euphon.net>,
qemu-block@nongnu.org, Max Reitz <mreitz@redhat.com>,
Paolo Bonzini <pbonzini@redhat.com>,
Maxim Levitsky <mlevitsk@redhat.com>,
John Snow <jsnow@redhat.com>
Subject: [Qemu-devel] [PATCH v4] block/nvme: add support for discard
Date: Wed, 3 Jul 2019 19:07:54 +0300
Message-ID: <20190703160754.12361-1-mlevitsk@redhat.com>
In-Reply-To: <20190703155944.9637-7-mlevitsk@redhat.com>
Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
---
 block/nvme.c       | 81 ++++++++++++++++++++++++++++++++++++++++++++++
 block/trace-events |  2 ++
 2 files changed, 83 insertions(+)
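
As a quick smoke test, qemu-io's discard command can be pointed at the
userspace NVMe driver directly; the PCI address and namespace below are
only placeholders for whatever VFIO-bound controller is available on the
test machine:

    qemu-io -c 'discard 0 1M' nvme://0000:01:00.0/1

On controllers that do not advertise NVME_ONCS_DSM the request now fails
with -ENOTSUP before anything is sent to the device.
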
diff --git a/block/nvme.c b/block/nvme.c
index 02e0846643..96a715dcc1 100644
--- a/block/nvme.c
+++ b/block/nvme.c
@@ -111,6 +111,7 @@ typedef struct {
     bool plugged;

     bool supports_write_zeros;
+    bool supports_discard;

     CoMutex dma_map_lock;
     CoQueue dma_flush_queue;
@@ -460,6 +461,7 @@ static void nvme_identify(BlockDriverState *bs, int namespace, Error **errp)
                           s->page_size / sizeof(uint64_t) * s->page_size);

     s->supports_write_zeros = (idctrl->oncs & NVME_ONCS_WRITE_ZEROS) != 0;
+    s->supports_discard = (idctrl->oncs & NVME_ONCS_DSM) != 0;

     memset(resp, 0, 4096);
@@ -1149,6 +1151,84 @@ static coroutine_fn int nvme_co_pwrite_zeroes(BlockDriverState *bs,
 }

+static int coroutine_fn nvme_co_pdiscard(BlockDriverState *bs,
+                                         int64_t offset,
+                                         int bytes)
+{
+    BDRVNVMeState *s = bs->opaque;
+    NVMeQueuePair *ioq = s->queues[1];
+    NVMeRequest *req;
+    NvmeDsmRange *buf;
+    QEMUIOVector local_qiov;
+    int r;
+
+    NvmeCmd cmd = {
+        .opcode = NVME_CMD_DSM,
+        .nsid = cpu_to_le32(s->nsid),
+        .cdw10 = 0, /* number of ranges - 0 based */
+        .cdw11 = cpu_to_le32(1 << 2), /* deallocate bit */
+    };
+
+    NVMeCoData data = {
+        .ctx = bdrv_get_aio_context(bs),
+        .ret = -EINPROGRESS,
+    };
+
+    if (!s->supports_discard) {
+        return -ENOTSUP;
+    }
+
+    assert(s->nr_queues > 1);
+
+    buf = qemu_try_blockalign0(bs, 4096);
+    if (!buf) {
+        return -ENOMEM;
+    }
+
+    buf->nlb = cpu_to_le32(bytes >> s->blkshift);
+    buf->slba = cpu_to_le64(offset >> s->blkshift);
+    buf->cattr = 0;
+
+    qemu_iovec_init(&local_qiov, 1);
+    qemu_iovec_add(&local_qiov, buf, 4096);
+
+    req = nvme_get_free_req(ioq);
+    assert(req);
+
+    qemu_co_mutex_lock(&s->dma_map_lock);
+    r = nvme_cmd_map_qiov(bs, &cmd, req, &local_qiov);
+    qemu_co_mutex_unlock(&s->dma_map_lock);
+
+    if (r) {
+        req->busy = false;
+        goto out;
+    }
+
+    trace_nvme_dsm(s, offset, bytes);
+
+    nvme_submit_command(s, ioq, req, &cmd, nvme_rw_cb, &data);
+
+    data.co = qemu_coroutine_self();
+    while (data.ret == -EINPROGRESS) {
+        qemu_coroutine_yield();
+    }
+
+    qemu_co_mutex_lock(&s->dma_map_lock);
+    r = nvme_cmd_unmap_qiov(bs, &local_qiov);
+    qemu_co_mutex_unlock(&s->dma_map_lock);
+    if (r) {
+        goto out;
+    }
+
+    trace_nvme_dsm_done(s, offset, bytes, data.ret);
+    r = data.ret;
+
+out:
+    qemu_iovec_destroy(&local_qiov);
+    qemu_vfree(buf);
+    return r;
+}
+
static int nvme_reopen_prepare(BDRVReopenState *reopen_state,
BlockReopenQueue *queue, Error **errp)
{
@@ -1363,6 +1443,7 @@ static BlockDriver bdrv_nvme = {
     .bdrv_co_pwritev          = nvme_co_pwritev,
     .bdrv_co_pwrite_zeroes    = nvme_co_pwrite_zeroes,
+    .bdrv_co_pdiscard         = nvme_co_pdiscard,
     .bdrv_co_flush_to_disk    = nvme_co_flush,
     .bdrv_reopen_prepare      = nvme_reopen_prepare,
diff --git a/block/trace-events b/block/trace-events
index 12f363bb44..f763f79d99 100644
--- a/block/trace-events
+++ b/block/trace-events
@@ -152,6 +152,8 @@ nvme_write_zeros(void *s, uint64_t offset, uint64_t bytes, int flags) "s %p offs
nvme_qiov_unaligned(const void *qiov, int n, void *base, size_t size, int align) "qiov %p n %d base %p size 0x%zx align 0x%x"
nvme_prw_buffered(void *s, uint64_t offset, uint64_t bytes, int niov, int is_write) "s %p offset %"PRId64" bytes %"PRId64" niov %d is_write %d"
nvme_rw_done(void *s, int is_write, uint64_t offset, uint64_t bytes, int ret) "s %p is_write %d offset %"PRId64" bytes %"PRId64" ret %d"
+nvme_dsm(void *s, uint64_t offset, uint64_t bytes) "s %p offset %"PRId64" bytes %"PRId64""
+nvme_dsm_done(void *s, uint64_t offset, uint64_t bytes, int ret) "s %p offset %"PRId64" bytes %"PRId64" ret %d"
nvme_dma_map_flush(void *s) "s %p"
nvme_free_req_queue_wait(void *q) "q %p"
nvme_cmd_map_qiov(void *s, void *cmd, void *req, void *qiov, int entries) "s %p cmd %p req %p qiov %p entries %d"
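
For debugging, the two new trace points can be enabled from qemu-io as
well (same placeholder device as above, assuming the usual --trace
option shared by the qemu tools):

    qemu-io --trace 'nvme_dsm*' -c 'discard 0 1M' nvme://0000:01:00.0/1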
--
2.17.2