From mboxrd@z Thu Jan 1 00:00:00 1970 Received: from eggs.gnu.org ([2001:4830:134:3::10]:50539) by lists.gnu.org with esmtp (Exim 4.71) (envelope-from ) id 1ZF2J7-0007fh-A9 for qemu-devel@nongnu.org; Tue, 14 Jul 2015 11:39:48 -0400 Received: from Debian-exim by eggs.gnu.org with spam-scanned (Exim 4.71) (envelope-from ) id 1ZF2J6-0004pE-83 for qemu-devel@nongnu.org; Tue, 14 Jul 2015 11:39:45 -0400 From: Kevin Wolf Date: Tue, 14 Jul 2015 17:39:22 +0200 Message-Id: <1436888372-27871-2-git-send-email-kwolf@redhat.com> In-Reply-To: <1436888372-27871-1-git-send-email-kwolf@redhat.com> References: <1436888372-27871-1-git-send-email-kwolf@redhat.com> Subject: [Qemu-devel] [PULL 01/11] nvme: implement the Flush command List-Id: List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , To: qemu-block@nongnu.org Cc: kwolf@redhat.com, qemu-devel@nongnu.org From: Christoph Hellwig Implement a real flush instead of faking it. This is especially important as QEMU assumes write-back caching by default and thus requires a working cache flush operation for data integrity. 
Signed-off-by: Christoph Hellwig Acked-by: Keith Busch Signed-off-by: Kevin Wolf --- hw/block/nvme.c | 19 ++++++++++++++++--- hw/block/nvme.h | 1 + 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/hw/block/nvme.c b/hw/block/nvme.c index c6a6a0e..dc9caf0 100644 --- a/hw/block/nvme.c +++ b/hw/block/nvme.c @@ -207,11 +207,23 @@ static void nvme_rw_cb(void *opaque, int ret) } else { req->status = NVME_INTERNAL_DEV_ERROR; } - - qemu_sglist_destroy(&req->qsg); + if (req->has_sg) { + qemu_sglist_destroy(&req->qsg); + } nvme_enqueue_req_completion(cq, req); } +static uint16_t nvme_flush(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd, + NvmeRequest *req) +{ + req->has_sg = false; + block_acct_start(blk_get_stats(n->conf.blk), &req->acct, 0, + BLOCK_ACCT_FLUSH); + req->aiocb = blk_aio_flush(n->conf.blk, nvme_rw_cb, req); + + return NVME_NO_COMPLETE; +} + static uint16_t nvme_rw(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd, NvmeRequest *req) { @@ -235,6 +247,7 @@ static uint16_t nvme_rw(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd, } assert((nlb << data_shift) == req->qsg.size); + req->has_sg = true; dma_acct_start(n->conf.blk, &req->acct, &req->qsg, is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ); req->aiocb = is_write ? @@ -256,7 +269,7 @@ static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req) ns = &n->namespaces[nsid - 1]; switch (cmd->opcode) { case NVME_CMD_FLUSH: - return NVME_SUCCESS; + return nvme_flush(n, ns, cmd, req); case NVME_CMD_WRITE: case NVME_CMD_READ: return nvme_rw(n, ns, cmd, req); diff --git a/hw/block/nvme.h b/hw/block/nvme.h index b6ccb65..bf3a3cc 100644 --- a/hw/block/nvme.h +++ b/hw/block/nvme.h @@ -638,6 +638,7 @@ typedef struct NvmeRequest { struct NvmeSQueue *sq; BlockAIOCB *aiocb; uint16_t status; + bool has_sg; NvmeCqe cqe; BlockAcctCookie acct; QEMUSGList qsg; -- 1.8.3.1