* [PATCH v7 01/22] hw/block/nvme: fix pci doorbell size calculation
2020-06-09 19:03 [PATCH v7 00/22] nvme: small fixes, refactoring and cleanups Klaus Jensen
@ 2020-06-09 19:03 ` Klaus Jensen
2020-06-09 19:03 ` [PATCH v7 02/22] hw/block/nvme: rename trace events to pci_nvme Klaus Jensen
` (21 subsequent siblings)
22 siblings, 0 replies; 24+ messages in thread
From: Klaus Jensen @ 2020-06-09 19:03 UTC (permalink / raw)
To: qemu-block
Cc: Kevin Wolf, Klaus Jensen, qemu-devel, Max Reitz, Klaus Jensen,
Keith Busch, Javier Gonzalez, Maxim Levitsky,
Philippe Mathieu-Daudé
From: Klaus Jensen <k.jensen@samsung.com>
The size of the BAR is 0x1000 (main registers) + 8 bytes for each
queue. Currently, the size of the BAR is calculated like so:
n->reg_size = pow2ceil(0x1004 + 2 * (n->num_queues + 1) * 4);
Since the 'num_queues' parameter already accounts for the admin queue,
it does not need to be incremented by one, and the base size should be
0x1000, not 0x1004:
n->reg_size = pow2ceil(0x1000 + 2 * n->num_queues * 4);
Thus, with the default value of num_queues (64), we set aside room for
1 admin queue and 63 I/O queues (4 bytes per doorbell, 2 doorbells per
queue).
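As a quick sanity check (illustration only, not part of the patch; the
local pow2ceil below merely stands in for QEMU's helper of the same
name), the default configuration works out to a 0x2000 byte BAR:

    #include <inttypes.h>
    #include <stdio.h>

    /* Stand-in for QEMU's pow2ceil(): round up to the next power of two. */
    static uint64_t pow2ceil(uint64_t value)
    {
        uint64_t n = 1;

        while (n < value) {
            n <<= 1;
        }
        return n;
    }

    int main(void)
    {
        const uint64_t reg_size = 0x1000; /* main register block       */
        const uint64_t db_size = 4;       /* bytes per doorbell        */
        const uint32_t num_queues = 64;   /* default: 1 admin + 63 I/O */

        /* two doorbells (SQ tail and CQ head) per queue pair */
        uint64_t bar = pow2ceil(reg_size + 2 * num_queues * db_size);

        printf("BAR size: 0x%" PRIx64 "\n", bar); /* 0x1200, rounded up to 0x2000 */
        return 0;
    }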
Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
Reviewed-by: Keith Busch <kbusch@kernel.org>
Message-Id: <20200514044611.734782-2-its@irrelevant.dk>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
hw/block/nvme.c | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/hw/block/nvme.c b/hw/block/nvme.c
index a21eeca2fbf9..c1476e8b2a60 100644
--- a/hw/block/nvme.c
+++ b/hw/block/nvme.c
@@ -53,6 +53,9 @@
#include "trace.h"
#include "nvme.h"
+#define NVME_REG_SIZE 0x1000
+#define NVME_DB_SIZE 4
+
#define NVME_GUEST_ERR(trace, fmt, ...) \
do { \
(trace_##trace)(__VA_ARGS__); \
@@ -1401,7 +1404,9 @@ static void nvme_realize(PCIDevice *pci_dev, Error **errp)
pcie_endpoint_cap_init(pci_dev, 0x80);
n->num_namespaces = 1;
- n->reg_size = pow2ceil(0x1004 + 2 * (n->num_queues + 1) * 4);
+
+ /* num_queues is really number of pairs, so each has two doorbells */
+ n->reg_size = pow2ceil(NVME_REG_SIZE + 2 * n->num_queues * NVME_DB_SIZE);
n->ns_size = bs_size / (uint64_t)n->num_namespaces;
n->namespaces = g_new0(NvmeNamespace, n->num_namespaces);
--
2.27.0
^ permalink raw reply related [flat|nested] 24+ messages in thread
* [PATCH v7 02/22] hw/block/nvme: rename trace events to pci_nvme
2020-06-09 19:03 [PATCH v7 00/22] nvme: small fixes, refactoring and cleanups Klaus Jensen
2020-06-09 19:03 ` [PATCH v7 01/22] hw/block/nvme: fix pci doorbell size calculation Klaus Jensen
@ 2020-06-09 19:03 ` Klaus Jensen
2020-06-09 19:03 ` [PATCH v7 03/22] hw/block/nvme: remove superfluous breaks Klaus Jensen
` (20 subsequent siblings)
22 siblings, 0 replies; 24+ messages in thread
From: Klaus Jensen @ 2020-06-09 19:03 UTC (permalink / raw)
To: qemu-block
Cc: Kevin Wolf, Klaus Jensen, qemu-devel, Max Reitz, Klaus Jensen,
Keith Busch, Javier Gonzalez, Maxim Levitsky,
Philippe Mathieu-Daudé
From: Klaus Jensen <k.jensen@samsung.com>
Change the prefix of all nvme device-related trace events to 'pci_nvme'
so they do not clash with trace events from the nvme block driver.
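For context, QEMU's tracetool turns each line in hw/block/trace-events
into a trace_<name>() helper, so the rename has to touch the declaration
and every call site in lockstep. A hand-written sketch of the pattern
for one of the renamed events (the real generated helper also dispatches
to the configured trace backends):

    #include <stdint.h>
    #include <stdio.h>

    /* Sketch only: mimics the "log" backend output for the renamed event. */
    static inline void trace_pci_nvme_irq_msix(uint32_t vector)
    {
        fprintf(stderr, "pci_nvme_irq_msix raising MSI-X IRQ vector %u\n", vector);
    }

    int main(void)
    {
        /* call sites keep the trace_ prefix plus the new pci_nvme_ name */
        trace_pci_nvme_irq_msix(3);
        return 0;
    }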
Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
Reviewed-by: Keith Busch <kbusch@kernel.org>
Message-Id: <20200514044611.734782-3-its@irrelevant.dk>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
hw/block/nvme.c | 198 +++++++++++++++++++++---------------------
hw/block/trace-events | 180 +++++++++++++++++++-------------------
2 files changed, 188 insertions(+), 190 deletions(-)
diff --git a/hw/block/nvme.c b/hw/block/nvme.c
index c1476e8b2a60..e8f5c5ab829b 100644
--- a/hw/block/nvme.c
+++ b/hw/block/nvme.c
@@ -125,16 +125,16 @@ static void nvme_irq_assert(NvmeCtrl *n, NvmeCQueue *cq)
{
if (cq->irq_enabled) {
if (msix_enabled(&(n->parent_obj))) {
- trace_nvme_irq_msix(cq->vector);
+ trace_pci_nvme_irq_msix(cq->vector);
msix_notify(&(n->parent_obj), cq->vector);
} else {
- trace_nvme_irq_pin();
+ trace_pci_nvme_irq_pin();
assert(cq->cqid < 64);
n->irq_status |= 1 << cq->cqid;
nvme_irq_check(n);
}
} else {
- trace_nvme_irq_masked();
+ trace_pci_nvme_irq_masked();
}
}
@@ -159,7 +159,7 @@ static uint16_t nvme_map_prp(QEMUSGList *qsg, QEMUIOVector *iov, uint64_t prp1,
int num_prps = (len >> n->page_bits) + 1;
if (unlikely(!prp1)) {
- trace_nvme_err_invalid_prp();
+ trace_pci_nvme_err_invalid_prp();
return NVME_INVALID_FIELD | NVME_DNR;
} else if (n->cmbsz && prp1 >= n->ctrl_mem.addr &&
prp1 < n->ctrl_mem.addr + int128_get64(n->ctrl_mem.size)) {
@@ -173,7 +173,7 @@ static uint16_t nvme_map_prp(QEMUSGList *qsg, QEMUIOVector *iov, uint64_t prp1,
len -= trans_len;
if (len) {
if (unlikely(!prp2)) {
- trace_nvme_err_invalid_prp2_missing();
+ trace_pci_nvme_err_invalid_prp2_missing();
goto unmap;
}
if (len > n->page_size) {
@@ -189,7 +189,7 @@ static uint16_t nvme_map_prp(QEMUSGList *qsg, QEMUIOVector *iov, uint64_t prp1,
if (i == n->max_prp_ents - 1 && len > n->page_size) {
if (unlikely(!prp_ent || prp_ent & (n->page_size - 1))) {
- trace_nvme_err_invalid_prplist_ent(prp_ent);
+ trace_pci_nvme_err_invalid_prplist_ent(prp_ent);
goto unmap;
}
@@ -202,7 +202,7 @@ static uint16_t nvme_map_prp(QEMUSGList *qsg, QEMUIOVector *iov, uint64_t prp1,
}
if (unlikely(!prp_ent || prp_ent & (n->page_size - 1))) {
- trace_nvme_err_invalid_prplist_ent(prp_ent);
+ trace_pci_nvme_err_invalid_prplist_ent(prp_ent);
goto unmap;
}
@@ -217,7 +217,7 @@ static uint16_t nvme_map_prp(QEMUSGList *qsg, QEMUIOVector *iov, uint64_t prp1,
}
} else {
if (unlikely(prp2 & (n->page_size - 1))) {
- trace_nvme_err_invalid_prp2_align(prp2);
+ trace_pci_nvme_err_invalid_prp2_align(prp2);
goto unmap;
}
if (qsg->nsg) {
@@ -265,20 +265,20 @@ static uint16_t nvme_dma_read_prp(NvmeCtrl *n, uint8_t *ptr, uint32_t len,
QEMUIOVector iov;
uint16_t status = NVME_SUCCESS;
- trace_nvme_dma_read(prp1, prp2);
+ trace_pci_nvme_dma_read(prp1, prp2);
if (nvme_map_prp(&qsg, &iov, prp1, prp2, len, n)) {
return NVME_INVALID_FIELD | NVME_DNR;
}
if (qsg.nsg > 0) {
if (unlikely(dma_buf_read(ptr, len, &qsg))) {
- trace_nvme_err_invalid_dma();
+ trace_pci_nvme_err_invalid_dma();
status = NVME_INVALID_FIELD | NVME_DNR;
}
qemu_sglist_destroy(&qsg);
} else {
if (unlikely(qemu_iovec_from_buf(&iov, 0, ptr, len) != len)) {
- trace_nvme_err_invalid_dma();
+ trace_pci_nvme_err_invalid_dma();
status = NVME_INVALID_FIELD | NVME_DNR;
}
qemu_iovec_destroy(&iov);
@@ -367,7 +367,7 @@ static uint16_t nvme_write_zeros(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
uint32_t count = nlb << data_shift;
if (unlikely(slba + nlb > ns->id_ns.nsze)) {
- trace_nvme_err_invalid_lba_range(slba, nlb, ns->id_ns.nsze);
+ trace_pci_nvme_err_invalid_lba_range(slba, nlb, ns->id_ns.nsze);
return NVME_LBA_RANGE | NVME_DNR;
}
@@ -395,11 +395,11 @@ static uint16_t nvme_rw(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
int is_write = rw->opcode == NVME_CMD_WRITE ? 1 : 0;
enum BlockAcctType acct = is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ;
- trace_nvme_rw(is_write ? "write" : "read", nlb, data_size, slba);
+ trace_pci_nvme_rw(is_write ? "write" : "read", nlb, data_size, slba);
if (unlikely((slba + nlb) > ns->id_ns.nsze)) {
block_acct_invalid(blk_get_stats(n->conf.blk), acct);
- trace_nvme_err_invalid_lba_range(slba, nlb, ns->id_ns.nsze);
+ trace_pci_nvme_err_invalid_lba_range(slba, nlb, ns->id_ns.nsze);
return NVME_LBA_RANGE | NVME_DNR;
}
@@ -434,7 +434,7 @@ static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
uint32_t nsid = le32_to_cpu(cmd->nsid);
if (unlikely(nsid == 0 || nsid > n->num_namespaces)) {
- trace_nvme_err_invalid_ns(nsid, n->num_namespaces);
+ trace_pci_nvme_err_invalid_ns(nsid, n->num_namespaces);
return NVME_INVALID_NSID | NVME_DNR;
}
@@ -448,7 +448,7 @@ static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
case NVME_CMD_READ:
return nvme_rw(n, ns, cmd, req);
default:
- trace_nvme_err_invalid_opc(cmd->opcode);
+ trace_pci_nvme_err_invalid_opc(cmd->opcode);
return NVME_INVALID_OPCODE | NVME_DNR;
}
}
@@ -473,11 +473,11 @@ static uint16_t nvme_del_sq(NvmeCtrl *n, NvmeCmd *cmd)
uint16_t qid = le16_to_cpu(c->qid);
if (unlikely(!qid || nvme_check_sqid(n, qid))) {
- trace_nvme_err_invalid_del_sq(qid);
+ trace_pci_nvme_err_invalid_del_sq(qid);
return NVME_INVALID_QID | NVME_DNR;
}
- trace_nvme_del_sq(qid);
+ trace_pci_nvme_del_sq(qid);
sq = n->sq[qid];
while (!QTAILQ_EMPTY(&sq->out_req_list)) {
@@ -541,26 +541,26 @@ static uint16_t nvme_create_sq(NvmeCtrl *n, NvmeCmd *cmd)
uint16_t qflags = le16_to_cpu(c->sq_flags);
uint64_t prp1 = le64_to_cpu(c->prp1);
- trace_nvme_create_sq(prp1, sqid, cqid, qsize, qflags);
+ trace_pci_nvme_create_sq(prp1, sqid, cqid, qsize, qflags);
if (unlikely(!cqid || nvme_check_cqid(n, cqid))) {
- trace_nvme_err_invalid_create_sq_cqid(cqid);
+ trace_pci_nvme_err_invalid_create_sq_cqid(cqid);
return NVME_INVALID_CQID | NVME_DNR;
}
if (unlikely(!sqid || !nvme_check_sqid(n, sqid))) {
- trace_nvme_err_invalid_create_sq_sqid(sqid);
+ trace_pci_nvme_err_invalid_create_sq_sqid(sqid);
return NVME_INVALID_QID | NVME_DNR;
}
if (unlikely(!qsize || qsize > NVME_CAP_MQES(n->bar.cap))) {
- trace_nvme_err_invalid_create_sq_size(qsize);
+ trace_pci_nvme_err_invalid_create_sq_size(qsize);
return NVME_MAX_QSIZE_EXCEEDED | NVME_DNR;
}
if (unlikely(!prp1 || prp1 & (n->page_size - 1))) {
- trace_nvme_err_invalid_create_sq_addr(prp1);
+ trace_pci_nvme_err_invalid_create_sq_addr(prp1);
return NVME_INVALID_FIELD | NVME_DNR;
}
if (unlikely(!(NVME_SQ_FLAGS_PC(qflags)))) {
- trace_nvme_err_invalid_create_sq_qflags(NVME_SQ_FLAGS_PC(qflags));
+ trace_pci_nvme_err_invalid_create_sq_qflags(NVME_SQ_FLAGS_PC(qflags));
return NVME_INVALID_FIELD | NVME_DNR;
}
sq = g_malloc0(sizeof(*sq));
@@ -586,17 +586,17 @@ static uint16_t nvme_del_cq(NvmeCtrl *n, NvmeCmd *cmd)
uint16_t qid = le16_to_cpu(c->qid);
if (unlikely(!qid || nvme_check_cqid(n, qid))) {
- trace_nvme_err_invalid_del_cq_cqid(qid);
+ trace_pci_nvme_err_invalid_del_cq_cqid(qid);
return NVME_INVALID_CQID | NVME_DNR;
}
cq = n->cq[qid];
if (unlikely(!QTAILQ_EMPTY(&cq->sq_list))) {
- trace_nvme_err_invalid_del_cq_notempty(qid);
+ trace_pci_nvme_err_invalid_del_cq_notempty(qid);
return NVME_INVALID_QUEUE_DEL;
}
nvme_irq_deassert(n, cq);
- trace_nvme_del_cq(qid);
+ trace_pci_nvme_del_cq(qid);
nvme_free_cq(cq, n);
return NVME_SUCCESS;
}
@@ -629,27 +629,27 @@ static uint16_t nvme_create_cq(NvmeCtrl *n, NvmeCmd *cmd)
uint16_t qflags = le16_to_cpu(c->cq_flags);
uint64_t prp1 = le64_to_cpu(c->prp1);
- trace_nvme_create_cq(prp1, cqid, vector, qsize, qflags,
- NVME_CQ_FLAGS_IEN(qflags) != 0);
+ trace_pci_nvme_create_cq(prp1, cqid, vector, qsize, qflags,
+ NVME_CQ_FLAGS_IEN(qflags) != 0);
if (unlikely(!cqid || !nvme_check_cqid(n, cqid))) {
- trace_nvme_err_invalid_create_cq_cqid(cqid);
+ trace_pci_nvme_err_invalid_create_cq_cqid(cqid);
return NVME_INVALID_CQID | NVME_DNR;
}
if (unlikely(!qsize || qsize > NVME_CAP_MQES(n->bar.cap))) {
- trace_nvme_err_invalid_create_cq_size(qsize);
+ trace_pci_nvme_err_invalid_create_cq_size(qsize);
return NVME_MAX_QSIZE_EXCEEDED | NVME_DNR;
}
if (unlikely(!prp1)) {
- trace_nvme_err_invalid_create_cq_addr(prp1);
+ trace_pci_nvme_err_invalid_create_cq_addr(prp1);
return NVME_INVALID_FIELD | NVME_DNR;
}
if (unlikely(vector > n->num_queues)) {
- trace_nvme_err_invalid_create_cq_vector(vector);
+ trace_pci_nvme_err_invalid_create_cq_vector(vector);
return NVME_INVALID_IRQ_VECTOR | NVME_DNR;
}
if (unlikely(!(NVME_CQ_FLAGS_PC(qflags)))) {
- trace_nvme_err_invalid_create_cq_qflags(NVME_CQ_FLAGS_PC(qflags));
+ trace_pci_nvme_err_invalid_create_cq_qflags(NVME_CQ_FLAGS_PC(qflags));
return NVME_INVALID_FIELD | NVME_DNR;
}
@@ -664,7 +664,7 @@ static uint16_t nvme_identify_ctrl(NvmeCtrl *n, NvmeIdentify *c)
uint64_t prp1 = le64_to_cpu(c->prp1);
uint64_t prp2 = le64_to_cpu(c->prp2);
- trace_nvme_identify_ctrl();
+ trace_pci_nvme_identify_ctrl();
return nvme_dma_read_prp(n, (uint8_t *)&n->id_ctrl, sizeof(n->id_ctrl),
prp1, prp2);
@@ -677,10 +677,10 @@ static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeIdentify *c)
uint64_t prp1 = le64_to_cpu(c->prp1);
uint64_t prp2 = le64_to_cpu(c->prp2);
- trace_nvme_identify_ns(nsid);
+ trace_pci_nvme_identify_ns(nsid);
if (unlikely(nsid == 0 || nsid > n->num_namespaces)) {
- trace_nvme_err_invalid_ns(nsid, n->num_namespaces);
+ trace_pci_nvme_err_invalid_ns(nsid, n->num_namespaces);
return NVME_INVALID_NSID | NVME_DNR;
}
@@ -700,7 +700,7 @@ static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeIdentify *c)
uint16_t ret;
int i, j = 0;
- trace_nvme_identify_nslist(min_nsid);
+ trace_pci_nvme_identify_nslist(min_nsid);
list = g_malloc0(data_len);
for (i = 0; i < n->num_namespaces; i++) {
@@ -729,14 +729,14 @@ static uint16_t nvme_identify(NvmeCtrl *n, NvmeCmd *cmd)
case 0x02:
return nvme_identify_nslist(n, c);
default:
- trace_nvme_err_invalid_identify_cns(le32_to_cpu(c->cns));
+ trace_pci_nvme_err_invalid_identify_cns(le32_to_cpu(c->cns));
return NVME_INVALID_FIELD | NVME_DNR;
}
}
static inline void nvme_set_timestamp(NvmeCtrl *n, uint64_t ts)
{
- trace_nvme_setfeat_timestamp(ts);
+ trace_pci_nvme_setfeat_timestamp(ts);
n->host_timestamp = le64_to_cpu(ts);
n->timestamp_set_qemu_clock_ms = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
@@ -769,7 +769,7 @@ static inline uint64_t nvme_get_timestamp(const NvmeCtrl *n)
/* If the host timestamp is non-zero, set the timestamp origin */
ts.origin = n->host_timestamp ? 0x01 : 0x00;
- trace_nvme_getfeat_timestamp(ts.all);
+ trace_pci_nvme_getfeat_timestamp(ts.all);
return cpu_to_le64(ts.all);
}
@@ -793,17 +793,17 @@ static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
switch (dw10) {
case NVME_VOLATILE_WRITE_CACHE:
result = blk_enable_write_cache(n->conf.blk);
- trace_nvme_getfeat_vwcache(result ? "enabled" : "disabled");
+ trace_pci_nvme_getfeat_vwcache(result ? "enabled" : "disabled");
break;
case NVME_NUMBER_OF_QUEUES:
result = cpu_to_le32((n->num_queues - 2) | ((n->num_queues - 2) << 16));
- trace_nvme_getfeat_numq(result);
+ trace_pci_nvme_getfeat_numq(result);
break;
case NVME_TIMESTAMP:
return nvme_get_feature_timestamp(n, cmd);
break;
default:
- trace_nvme_err_invalid_getfeat(dw10);
+ trace_pci_nvme_err_invalid_getfeat(dw10);
return NVME_INVALID_FIELD | NVME_DNR;
}
@@ -839,9 +839,9 @@ static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
blk_set_enable_write_cache(n->conf.blk, dw11 & 1);
break;
case NVME_NUMBER_OF_QUEUES:
- trace_nvme_setfeat_numq((dw11 & 0xFFFF) + 1,
- ((dw11 >> 16) & 0xFFFF) + 1,
- n->num_queues - 1, n->num_queues - 1);
+ trace_pci_nvme_setfeat_numq((dw11 & 0xFFFF) + 1,
+ ((dw11 >> 16) & 0xFFFF) + 1,
+ n->num_queues - 1, n->num_queues - 1);
req->cqe.result =
cpu_to_le32((n->num_queues - 2) | ((n->num_queues - 2) << 16));
break;
@@ -851,7 +851,7 @@ static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
break;
default:
- trace_nvme_err_invalid_setfeat(dw10);
+ trace_pci_nvme_err_invalid_setfeat(dw10);
return NVME_INVALID_FIELD | NVME_DNR;
}
return NVME_SUCCESS;
@@ -875,7 +875,7 @@ static uint16_t nvme_admin_cmd(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
case NVME_ADM_CMD_GET_FEATURES:
return nvme_get_feature(n, cmd, req);
default:
- trace_nvme_err_invalid_admin_opc(cmd->opcode);
+ trace_pci_nvme_err_invalid_admin_opc(cmd->opcode);
return NVME_INVALID_OPCODE | NVME_DNR;
}
}
@@ -938,77 +938,77 @@ static int nvme_start_ctrl(NvmeCtrl *n)
uint32_t page_size = 1 << page_bits;
if (unlikely(n->cq[0])) {
- trace_nvme_err_startfail_cq();
+ trace_pci_nvme_err_startfail_cq();
return -1;
}
if (unlikely(n->sq[0])) {
- trace_nvme_err_startfail_sq();
+ trace_pci_nvme_err_startfail_sq();
return -1;
}
if (unlikely(!n->bar.asq)) {
- trace_nvme_err_startfail_nbarasq();
+ trace_pci_nvme_err_startfail_nbarasq();
return -1;
}
if (unlikely(!n->bar.acq)) {
- trace_nvme_err_startfail_nbaracq();
+ trace_pci_nvme_err_startfail_nbaracq();
return -1;
}
if (unlikely(n->bar.asq & (page_size - 1))) {
- trace_nvme_err_startfail_asq_misaligned(n->bar.asq);
+ trace_pci_nvme_err_startfail_asq_misaligned(n->bar.asq);
return -1;
}
if (unlikely(n->bar.acq & (page_size - 1))) {
- trace_nvme_err_startfail_acq_misaligned(n->bar.acq);
+ trace_pci_nvme_err_startfail_acq_misaligned(n->bar.acq);
return -1;
}
if (unlikely(NVME_CC_MPS(n->bar.cc) <
NVME_CAP_MPSMIN(n->bar.cap))) {
- trace_nvme_err_startfail_page_too_small(
+ trace_pci_nvme_err_startfail_page_too_small(
NVME_CC_MPS(n->bar.cc),
NVME_CAP_MPSMIN(n->bar.cap));
return -1;
}
if (unlikely(NVME_CC_MPS(n->bar.cc) >
NVME_CAP_MPSMAX(n->bar.cap))) {
- trace_nvme_err_startfail_page_too_large(
+ trace_pci_nvme_err_startfail_page_too_large(
NVME_CC_MPS(n->bar.cc),
NVME_CAP_MPSMAX(n->bar.cap));
return -1;
}
if (unlikely(NVME_CC_IOCQES(n->bar.cc) <
NVME_CTRL_CQES_MIN(n->id_ctrl.cqes))) {
- trace_nvme_err_startfail_cqent_too_small(
+ trace_pci_nvme_err_startfail_cqent_too_small(
NVME_CC_IOCQES(n->bar.cc),
NVME_CTRL_CQES_MIN(n->bar.cap));
return -1;
}
if (unlikely(NVME_CC_IOCQES(n->bar.cc) >
NVME_CTRL_CQES_MAX(n->id_ctrl.cqes))) {
- trace_nvme_err_startfail_cqent_too_large(
+ trace_pci_nvme_err_startfail_cqent_too_large(
NVME_CC_IOCQES(n->bar.cc),
NVME_CTRL_CQES_MAX(n->bar.cap));
return -1;
}
if (unlikely(NVME_CC_IOSQES(n->bar.cc) <
NVME_CTRL_SQES_MIN(n->id_ctrl.sqes))) {
- trace_nvme_err_startfail_sqent_too_small(
+ trace_pci_nvme_err_startfail_sqent_too_small(
NVME_CC_IOSQES(n->bar.cc),
NVME_CTRL_SQES_MIN(n->bar.cap));
return -1;
}
if (unlikely(NVME_CC_IOSQES(n->bar.cc) >
NVME_CTRL_SQES_MAX(n->id_ctrl.sqes))) {
- trace_nvme_err_startfail_sqent_too_large(
+ trace_pci_nvme_err_startfail_sqent_too_large(
NVME_CC_IOSQES(n->bar.cc),
NVME_CTRL_SQES_MAX(n->bar.cap));
return -1;
}
if (unlikely(!NVME_AQA_ASQS(n->bar.aqa))) {
- trace_nvme_err_startfail_asqent_sz_zero();
+ trace_pci_nvme_err_startfail_asqent_sz_zero();
return -1;
}
if (unlikely(!NVME_AQA_ACQS(n->bar.aqa))) {
- trace_nvme_err_startfail_acqent_sz_zero();
+ trace_pci_nvme_err_startfail_acqent_sz_zero();
return -1;
}
@@ -1031,14 +1031,14 @@ static void nvme_write_bar(NvmeCtrl *n, hwaddr offset, uint64_t data,
unsigned size)
{
if (unlikely(offset & (sizeof(uint32_t) - 1))) {
- NVME_GUEST_ERR(nvme_ub_mmiowr_misaligned32,
+ NVME_GUEST_ERR(pci_nvme_ub_mmiowr_misaligned32,
"MMIO write not 32-bit aligned,"
" offset=0x%"PRIx64"", offset);
/* should be ignored, fall through for now */
}
if (unlikely(size < sizeof(uint32_t))) {
- NVME_GUEST_ERR(nvme_ub_mmiowr_toosmall,
+ NVME_GUEST_ERR(pci_nvme_ub_mmiowr_toosmall,
"MMIO write smaller than 32-bits,"
" offset=0x%"PRIx64", size=%u",
offset, size);
@@ -1048,32 +1048,30 @@ static void nvme_write_bar(NvmeCtrl *n, hwaddr offset, uint64_t data,
switch (offset) {
case 0xc: /* INTMS */
if (unlikely(msix_enabled(&(n->parent_obj)))) {
- NVME_GUEST_ERR(nvme_ub_mmiowr_intmask_with_msix,
+ NVME_GUEST_ERR(pci_nvme_ub_mmiowr_intmask_with_msix,
"undefined access to interrupt mask set"
" when MSI-X is enabled");
/* should be ignored, fall through for now */
}
n->bar.intms |= data & 0xffffffff;
n->bar.intmc = n->bar.intms;
- trace_nvme_mmio_intm_set(data & 0xffffffff,
- n->bar.intmc);
+ trace_pci_nvme_mmio_intm_set(data & 0xffffffff, n->bar.intmc);
nvme_irq_check(n);
break;
case 0x10: /* INTMC */
if (unlikely(msix_enabled(&(n->parent_obj)))) {
- NVME_GUEST_ERR(nvme_ub_mmiowr_intmask_with_msix,
+ NVME_GUEST_ERR(pci_nvme_ub_mmiowr_intmask_with_msix,
"undefined access to interrupt mask clr"
" when MSI-X is enabled");
/* should be ignored, fall through for now */
}
n->bar.intms &= ~(data & 0xffffffff);
n->bar.intmc = n->bar.intms;
- trace_nvme_mmio_intm_clr(data & 0xffffffff,
- n->bar.intmc);
+ trace_pci_nvme_mmio_intm_clr(data & 0xffffffff, n->bar.intmc);
nvme_irq_check(n);
break;
case 0x14: /* CC */
- trace_nvme_mmio_cfg(data & 0xffffffff);
+ trace_pci_nvme_mmio_cfg(data & 0xffffffff);
/* Windows first sends data, then sends enable bit */
if (!NVME_CC_EN(data) && !NVME_CC_EN(n->bar.cc) &&
!NVME_CC_SHN(data) && !NVME_CC_SHN(n->bar.cc))
@@ -1084,42 +1082,42 @@ static void nvme_write_bar(NvmeCtrl *n, hwaddr offset, uint64_t data,
if (NVME_CC_EN(data) && !NVME_CC_EN(n->bar.cc)) {
n->bar.cc = data;
if (unlikely(nvme_start_ctrl(n))) {
- trace_nvme_err_startfail();
+ trace_pci_nvme_err_startfail();
n->bar.csts = NVME_CSTS_FAILED;
} else {
- trace_nvme_mmio_start_success();
+ trace_pci_nvme_mmio_start_success();
n->bar.csts = NVME_CSTS_READY;
}
} else if (!NVME_CC_EN(data) && NVME_CC_EN(n->bar.cc)) {
- trace_nvme_mmio_stopped();
+ trace_pci_nvme_mmio_stopped();
nvme_clear_ctrl(n);
n->bar.csts &= ~NVME_CSTS_READY;
}
if (NVME_CC_SHN(data) && !(NVME_CC_SHN(n->bar.cc))) {
- trace_nvme_mmio_shutdown_set();
+ trace_pci_nvme_mmio_shutdown_set();
nvme_clear_ctrl(n);
n->bar.cc = data;
n->bar.csts |= NVME_CSTS_SHST_COMPLETE;
} else if (!NVME_CC_SHN(data) && NVME_CC_SHN(n->bar.cc)) {
- trace_nvme_mmio_shutdown_cleared();
+ trace_pci_nvme_mmio_shutdown_cleared();
n->bar.csts &= ~NVME_CSTS_SHST_COMPLETE;
n->bar.cc = data;
}
break;
case 0x1C: /* CSTS */
if (data & (1 << 4)) {
- NVME_GUEST_ERR(nvme_ub_mmiowr_ssreset_w1c_unsupported,
+ NVME_GUEST_ERR(pci_nvme_ub_mmiowr_ssreset_w1c_unsupported,
"attempted to W1C CSTS.NSSRO"
" but CAP.NSSRS is zero (not supported)");
} else if (data != 0) {
- NVME_GUEST_ERR(nvme_ub_mmiowr_ro_csts,
+ NVME_GUEST_ERR(pci_nvme_ub_mmiowr_ro_csts,
"attempted to set a read only bit"
" of controller status");
}
break;
case 0x20: /* NSSR */
if (data == 0x4E564D65) {
- trace_nvme_ub_mmiowr_ssreset_unsupported();
+ trace_pci_nvme_ub_mmiowr_ssreset_unsupported();
} else {
/* The spec says that writes of other values have no effect */
return;
@@ -1127,55 +1125,55 @@ static void nvme_write_bar(NvmeCtrl *n, hwaddr offset, uint64_t data,
break;
case 0x24: /* AQA */
n->bar.aqa = data & 0xffffffff;
- trace_nvme_mmio_aqattr(data & 0xffffffff);
+ trace_pci_nvme_mmio_aqattr(data & 0xffffffff);
break;
case 0x28: /* ASQ */
n->bar.asq = data;
- trace_nvme_mmio_asqaddr(data);
+ trace_pci_nvme_mmio_asqaddr(data);
break;
case 0x2c: /* ASQ hi */
n->bar.asq |= data << 32;
- trace_nvme_mmio_asqaddr_hi(data, n->bar.asq);
+ trace_pci_nvme_mmio_asqaddr_hi(data, n->bar.asq);
break;
case 0x30: /* ACQ */
- trace_nvme_mmio_acqaddr(data);
+ trace_pci_nvme_mmio_acqaddr(data);
n->bar.acq = data;
break;
case 0x34: /* ACQ hi */
n->bar.acq |= data << 32;
- trace_nvme_mmio_acqaddr_hi(data, n->bar.acq);
+ trace_pci_nvme_mmio_acqaddr_hi(data, n->bar.acq);
break;
case 0x38: /* CMBLOC */
- NVME_GUEST_ERR(nvme_ub_mmiowr_cmbloc_reserved,
+ NVME_GUEST_ERR(pci_nvme_ub_mmiowr_cmbloc_reserved,
"invalid write to reserved CMBLOC"
" when CMBSZ is zero, ignored");
return;
case 0x3C: /* CMBSZ */
- NVME_GUEST_ERR(nvme_ub_mmiowr_cmbsz_readonly,
+ NVME_GUEST_ERR(pci_nvme_ub_mmiowr_cmbsz_readonly,
"invalid write to read only CMBSZ, ignored");
return;
case 0xE00: /* PMRCAP */
- NVME_GUEST_ERR(nvme_ub_mmiowr_pmrcap_readonly,
+ NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrcap_readonly,
"invalid write to PMRCAP register, ignored");
return;
case 0xE04: /* TODO PMRCTL */
break;
case 0xE08: /* PMRSTS */
- NVME_GUEST_ERR(nvme_ub_mmiowr_pmrsts_readonly,
+ NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrsts_readonly,
"invalid write to PMRSTS register, ignored");
return;
case 0xE0C: /* PMREBS */
- NVME_GUEST_ERR(nvme_ub_mmiowr_pmrebs_readonly,
+ NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrebs_readonly,
"invalid write to PMREBS register, ignored");
return;
case 0xE10: /* PMRSWTP */
- NVME_GUEST_ERR(nvme_ub_mmiowr_pmrswtp_readonly,
+ NVME_GUEST_ERR(pci_nvme_ub_mmiowr_pmrswtp_readonly,
"invalid write to PMRSWTP register, ignored");
return;
case 0xE14: /* TODO PMRMSC */
break;
default:
- NVME_GUEST_ERR(nvme_ub_mmiowr_invalid,
+ NVME_GUEST_ERR(pci_nvme_ub_mmiowr_invalid,
"invalid MMIO write,"
" offset=0x%"PRIx64", data=%"PRIx64"",
offset, data);
@@ -1190,12 +1188,12 @@ static uint64_t nvme_mmio_read(void *opaque, hwaddr addr, unsigned size)
uint64_t val = 0;
if (unlikely(addr & (sizeof(uint32_t) - 1))) {
- NVME_GUEST_ERR(nvme_ub_mmiord_misaligned32,
+ NVME_GUEST_ERR(pci_nvme_ub_mmiord_misaligned32,
"MMIO read not 32-bit aligned,"
" offset=0x%"PRIx64"", addr);
/* should RAZ, fall through for now */
} else if (unlikely(size < sizeof(uint32_t))) {
- NVME_GUEST_ERR(nvme_ub_mmiord_toosmall,
+ NVME_GUEST_ERR(pci_nvme_ub_mmiord_toosmall,
"MMIO read smaller than 32-bits,"
" offset=0x%"PRIx64"", addr);
/* should RAZ, fall through for now */
@@ -1213,7 +1211,7 @@ static uint64_t nvme_mmio_read(void *opaque, hwaddr addr, unsigned size)
}
memcpy(&val, ptr + addr, size);
} else {
- NVME_GUEST_ERR(nvme_ub_mmiord_invalid_ofs,
+ NVME_GUEST_ERR(pci_nvme_ub_mmiord_invalid_ofs,
"MMIO read beyond last register,"
" offset=0x%"PRIx64", returning 0", addr);
}
@@ -1226,7 +1224,7 @@ static void nvme_process_db(NvmeCtrl *n, hwaddr addr, int val)
uint32_t qid;
if (unlikely(addr & ((1 << 2) - 1))) {
- NVME_GUEST_ERR(nvme_ub_db_wr_misaligned,
+ NVME_GUEST_ERR(pci_nvme_ub_db_wr_misaligned,
"doorbell write not 32-bit aligned,"
" offset=0x%"PRIx64", ignoring", addr);
return;
@@ -1241,7 +1239,7 @@ static void nvme_process_db(NvmeCtrl *n, hwaddr addr, int val)
qid = (addr - (0x1000 + (1 << 2))) >> 3;
if (unlikely(nvme_check_cqid(n, qid))) {
- NVME_GUEST_ERR(nvme_ub_db_wr_invalid_cq,
+ NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_cq,
"completion queue doorbell write"
" for nonexistent queue,"
" sqid=%"PRIu32", ignoring", qid);
@@ -1250,7 +1248,7 @@ static void nvme_process_db(NvmeCtrl *n, hwaddr addr, int val)
cq = n->cq[qid];
if (unlikely(new_head >= cq->size)) {
- NVME_GUEST_ERR(nvme_ub_db_wr_invalid_cqhead,
+ NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_cqhead,
"completion queue doorbell write value"
" beyond queue size, sqid=%"PRIu32","
" new_head=%"PRIu16", ignoring",
@@ -1279,7 +1277,7 @@ static void nvme_process_db(NvmeCtrl *n, hwaddr addr, int val)
qid = (addr - 0x1000) >> 3;
if (unlikely(nvme_check_sqid(n, qid))) {
- NVME_GUEST_ERR(nvme_ub_db_wr_invalid_sq,
+ NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_sq,
"submission queue doorbell write"
" for nonexistent queue,"
" sqid=%"PRIu32", ignoring", qid);
@@ -1288,7 +1286,7 @@ static void nvme_process_db(NvmeCtrl *n, hwaddr addr, int val)
sq = n->sq[qid];
if (unlikely(new_tail >= sq->size)) {
- NVME_GUEST_ERR(nvme_ub_db_wr_invalid_sqtail,
+ NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_sqtail,
"submission queue doorbell write value"
" beyond queue size, sqid=%"PRIu32","
" new_tail=%"PRIu16", ignoring",
diff --git a/hw/block/trace-events b/hw/block/trace-events
index aca54bda1423..958fcc5508d1 100644
--- a/hw/block/trace-events
+++ b/hw/block/trace-events
@@ -29,100 +29,100 @@ hd_geometry_guess(void *blk, uint32_t cyls, uint32_t heads, uint32_t secs, int t
# nvme.c
# nvme traces for successful events
-nvme_irq_msix(uint32_t vector) "raising MSI-X IRQ vector %u"
-nvme_irq_pin(void) "pulsing IRQ pin"
-nvme_irq_masked(void) "IRQ is masked"
-nvme_dma_read(uint64_t prp1, uint64_t prp2) "DMA read, prp1=0x%"PRIx64" prp2=0x%"PRIx64""
-nvme_rw(const char *verb, uint32_t blk_count, uint64_t byte_count, uint64_t lba) "%s %"PRIu32" blocks (%"PRIu64" bytes) from LBA %"PRIu64""
-nvme_create_sq(uint64_t addr, uint16_t sqid, uint16_t cqid, uint16_t qsize, uint16_t qflags) "create submission queue, addr=0x%"PRIx64", sqid=%"PRIu16", cqid=%"PRIu16", qsize=%"PRIu16", qflags=%"PRIu16""
-nvme_create_cq(uint64_t addr, uint16_t cqid, uint16_t vector, uint16_t size, uint16_t qflags, int ien) "create completion queue, addr=0x%"PRIx64", cqid=%"PRIu16", vector=%"PRIu16", qsize=%"PRIu16", qflags=%"PRIu16", ien=%d"
-nvme_del_sq(uint16_t qid) "deleting submission queue sqid=%"PRIu16""
-nvme_del_cq(uint16_t cqid) "deleted completion queue, cqid=%"PRIu16""
-nvme_identify_ctrl(void) "identify controller"
-nvme_identify_ns(uint16_t ns) "identify namespace, nsid=%"PRIu16""
-nvme_identify_nslist(uint16_t ns) "identify namespace list, nsid=%"PRIu16""
-nvme_getfeat_vwcache(const char* result) "get feature volatile write cache, result=%s"
-nvme_getfeat_numq(int result) "get feature number of queues, result=%d"
-nvme_setfeat_numq(int reqcq, int reqsq, int gotcq, int gotsq) "requested cq_count=%d sq_count=%d, responding with cq_count=%d sq_count=%d"
-nvme_setfeat_timestamp(uint64_t ts) "set feature timestamp = 0x%"PRIx64""
-nvme_getfeat_timestamp(uint64_t ts) "get feature timestamp = 0x%"PRIx64""
-nvme_mmio_intm_set(uint64_t data, uint64_t new_mask) "wrote MMIO, interrupt mask set, data=0x%"PRIx64", new_mask=0x%"PRIx64""
-nvme_mmio_intm_clr(uint64_t data, uint64_t new_mask) "wrote MMIO, interrupt mask clr, data=0x%"PRIx64", new_mask=0x%"PRIx64""
-nvme_mmio_cfg(uint64_t data) "wrote MMIO, config controller config=0x%"PRIx64""
-nvme_mmio_aqattr(uint64_t data) "wrote MMIO, admin queue attributes=0x%"PRIx64""
-nvme_mmio_asqaddr(uint64_t data) "wrote MMIO, admin submission queue address=0x%"PRIx64""
-nvme_mmio_acqaddr(uint64_t data) "wrote MMIO, admin completion queue address=0x%"PRIx64""
-nvme_mmio_asqaddr_hi(uint64_t data, uint64_t new_addr) "wrote MMIO, admin submission queue high half=0x%"PRIx64", new_address=0x%"PRIx64""
-nvme_mmio_acqaddr_hi(uint64_t data, uint64_t new_addr) "wrote MMIO, admin completion queue high half=0x%"PRIx64", new_address=0x%"PRIx64""
-nvme_mmio_start_success(void) "setting controller enable bit succeeded"
-nvme_mmio_stopped(void) "cleared controller enable bit"
-nvme_mmio_shutdown_set(void) "shutdown bit set"
-nvme_mmio_shutdown_cleared(void) "shutdown bit cleared"
+pci_nvme_irq_msix(uint32_t vector) "raising MSI-X IRQ vector %u"
+pci_nvme_irq_pin(void) "pulsing IRQ pin"
+pci_nvme_irq_masked(void) "IRQ is masked"
+pci_nvme_dma_read(uint64_t prp1, uint64_t prp2) "DMA read, prp1=0x%"PRIx64" prp2=0x%"PRIx64""
+pci_nvme_rw(const char *verb, uint32_t blk_count, uint64_t byte_count, uint64_t lba) "%s %"PRIu32" blocks (%"PRIu64" bytes) from LBA %"PRIu64""
+pci_nvme_create_sq(uint64_t addr, uint16_t sqid, uint16_t cqid, uint16_t qsize, uint16_t qflags) "create submission queue, addr=0x%"PRIx64", sqid=%"PRIu16", cqid=%"PRIu16", qsize=%"PRIu16", qflags=%"PRIu16""
+pci_nvme_create_cq(uint64_t addr, uint16_t cqid, uint16_t vector, uint16_t size, uint16_t qflags, int ien) "create completion queue, addr=0x%"PRIx64", cqid=%"PRIu16", vector=%"PRIu16", qsize=%"PRIu16", qflags=%"PRIu16", ien=%d"
+pci_nvme_del_sq(uint16_t qid) "deleting submission queue sqid=%"PRIu16""
+pci_nvme_del_cq(uint16_t cqid) "deleted completion queue, cqid=%"PRIu16""
+pci_nvme_identify_ctrl(void) "identify controller"
+pci_nvme_identify_ns(uint16_t ns) "identify namespace, nsid=%"PRIu16""
+pci_nvme_identify_nslist(uint16_t ns) "identify namespace list, nsid=%"PRIu16""
+pci_nvme_getfeat_vwcache(const char* result) "get feature volatile write cache, result=%s"
+pci_nvme_getfeat_numq(int result) "get feature number of queues, result=%d"
+pci_nvme_setfeat_numq(int reqcq, int reqsq, int gotcq, int gotsq) "requested cq_count=%d sq_count=%d, responding with cq_count=%d sq_count=%d"
+pci_nvme_setfeat_timestamp(uint64_t ts) "set feature timestamp = 0x%"PRIx64""
+pci_nvme_getfeat_timestamp(uint64_t ts) "get feature timestamp = 0x%"PRIx64""
+pci_nvme_mmio_intm_set(uint64_t data, uint64_t new_mask) "wrote MMIO, interrupt mask set, data=0x%"PRIx64", new_mask=0x%"PRIx64""
+pci_nvme_mmio_intm_clr(uint64_t data, uint64_t new_mask) "wrote MMIO, interrupt mask clr, data=0x%"PRIx64", new_mask=0x%"PRIx64""
+pci_nvme_mmio_cfg(uint64_t data) "wrote MMIO, config controller config=0x%"PRIx64""
+pci_nvme_mmio_aqattr(uint64_t data) "wrote MMIO, admin queue attributes=0x%"PRIx64""
+pci_nvme_mmio_asqaddr(uint64_t data) "wrote MMIO, admin submission queue address=0x%"PRIx64""
+pci_nvme_mmio_acqaddr(uint64_t data) "wrote MMIO, admin completion queue address=0x%"PRIx64""
+pci_nvme_mmio_asqaddr_hi(uint64_t data, uint64_t new_addr) "wrote MMIO, admin submission queue high half=0x%"PRIx64", new_address=0x%"PRIx64""
+pci_nvme_mmio_acqaddr_hi(uint64_t data, uint64_t new_addr) "wrote MMIO, admin completion queue high half=0x%"PRIx64", new_address=0x%"PRIx64""
+pci_nvme_mmio_start_success(void) "setting controller enable bit succeeded"
+pci_nvme_mmio_stopped(void) "cleared controller enable bit"
+pci_nvme_mmio_shutdown_set(void) "shutdown bit set"
+pci_nvme_mmio_shutdown_cleared(void) "shutdown bit cleared"
# nvme traces for error conditions
-nvme_err_invalid_dma(void) "PRP/SGL is too small for transfer size"
-nvme_err_invalid_prplist_ent(uint64_t prplist) "PRP list entry is null or not page aligned: 0x%"PRIx64""
-nvme_err_invalid_prp2_align(uint64_t prp2) "PRP2 is not page aligned: 0x%"PRIx64""
-nvme_err_invalid_prp2_missing(void) "PRP2 is null and more data to be transferred"
-nvme_err_invalid_prp(void) "invalid PRP"
-nvme_err_invalid_ns(uint32_t ns, uint32_t limit) "invalid namespace %u not within 1-%u"
-nvme_err_invalid_opc(uint8_t opc) "invalid opcode 0x%"PRIx8""
-nvme_err_invalid_admin_opc(uint8_t opc) "invalid admin opcode 0x%"PRIx8""
-nvme_err_invalid_lba_range(uint64_t start, uint64_t len, uint64_t limit) "Invalid LBA start=%"PRIu64" len=%"PRIu64" limit=%"PRIu64""
-nvme_err_invalid_del_sq(uint16_t qid) "invalid submission queue deletion, sid=%"PRIu16""
-nvme_err_invalid_create_sq_cqid(uint16_t cqid) "failed creating submission queue, invalid cqid=%"PRIu16""
-nvme_err_invalid_create_sq_sqid(uint16_t sqid) "failed creating submission queue, invalid sqid=%"PRIu16""
-nvme_err_invalid_create_sq_size(uint16_t qsize) "failed creating submission queue, invalid qsize=%"PRIu16""
-nvme_err_invalid_create_sq_addr(uint64_t addr) "failed creating submission queue, addr=0x%"PRIx64""
-nvme_err_invalid_create_sq_qflags(uint16_t qflags) "failed creating submission queue, qflags=%"PRIu16""
-nvme_err_invalid_del_cq_cqid(uint16_t cqid) "failed deleting completion queue, cqid=%"PRIu16""
-nvme_err_invalid_del_cq_notempty(uint16_t cqid) "failed deleting completion queue, it is not empty, cqid=%"PRIu16""
-nvme_err_invalid_create_cq_cqid(uint16_t cqid) "failed creating completion queue, cqid=%"PRIu16""
-nvme_err_invalid_create_cq_size(uint16_t size) "failed creating completion queue, size=%"PRIu16""
-nvme_err_invalid_create_cq_addr(uint64_t addr) "failed creating completion queue, addr=0x%"PRIx64""
-nvme_err_invalid_create_cq_vector(uint16_t vector) "failed creating completion queue, vector=%"PRIu16""
-nvme_err_invalid_create_cq_qflags(uint16_t qflags) "failed creating completion queue, qflags=%"PRIu16""
-nvme_err_invalid_identify_cns(uint16_t cns) "identify, invalid cns=0x%"PRIx16""
-nvme_err_invalid_getfeat(int dw10) "invalid get features, dw10=0x%"PRIx32""
-nvme_err_invalid_setfeat(uint32_t dw10) "invalid set features, dw10=0x%"PRIx32""
-nvme_err_startfail_cq(void) "nvme_start_ctrl failed because there are non-admin completion queues"
-nvme_err_startfail_sq(void) "nvme_start_ctrl failed because there are non-admin submission queues"
-nvme_err_startfail_nbarasq(void) "nvme_start_ctrl failed because the admin submission queue address is null"
-nvme_err_startfail_nbaracq(void) "nvme_start_ctrl failed because the admin completion queue address is null"
-nvme_err_startfail_asq_misaligned(uint64_t addr) "nvme_start_ctrl failed because the admin submission queue address is misaligned: 0x%"PRIx64""
-nvme_err_startfail_acq_misaligned(uint64_t addr) "nvme_start_ctrl failed because the admin completion queue address is misaligned: 0x%"PRIx64""
-nvme_err_startfail_page_too_small(uint8_t log2ps, uint8_t maxlog2ps) "nvme_start_ctrl failed because the page size is too small: log2size=%u, min=%u"
-nvme_err_startfail_page_too_large(uint8_t log2ps, uint8_t maxlog2ps) "nvme_start_ctrl failed because the page size is too large: log2size=%u, max=%u"
-nvme_err_startfail_cqent_too_small(uint8_t log2ps, uint8_t maxlog2ps) "nvme_start_ctrl failed because the completion queue entry size is too small: log2size=%u, min=%u"
-nvme_err_startfail_cqent_too_large(uint8_t log2ps, uint8_t maxlog2ps) "nvme_start_ctrl failed because the completion queue entry size is too large: log2size=%u, max=%u"
-nvme_err_startfail_sqent_too_small(uint8_t log2ps, uint8_t maxlog2ps) "nvme_start_ctrl failed because the submission queue entry size is too small: log2size=%u, min=%u"
-nvme_err_startfail_sqent_too_large(uint8_t log2ps, uint8_t maxlog2ps) "nvme_start_ctrl failed because the submission queue entry size is too large: log2size=%u, max=%u"
-nvme_err_startfail_asqent_sz_zero(void) "nvme_start_ctrl failed because the admin submission queue size is zero"
-nvme_err_startfail_acqent_sz_zero(void) "nvme_start_ctrl failed because the admin completion queue size is zero"
-nvme_err_startfail(void) "setting controller enable bit failed"
+pci_nvme_err_invalid_dma(void) "PRP/SGL is too small for transfer size"
+pci_nvme_err_invalid_prplist_ent(uint64_t prplist) "PRP list entry is null or not page aligned: 0x%"PRIx64""
+pci_nvme_err_invalid_prp2_align(uint64_t prp2) "PRP2 is not page aligned: 0x%"PRIx64""
+pci_nvme_err_invalid_prp2_missing(void) "PRP2 is null and more data to be transferred"
+pci_nvme_err_invalid_prp(void) "invalid PRP"
+pci_nvme_err_invalid_ns(uint32_t ns, uint32_t limit) "invalid namespace %u not within 1-%u"
+pci_nvme_err_invalid_opc(uint8_t opc) "invalid opcode 0x%"PRIx8""
+pci_nvme_err_invalid_admin_opc(uint8_t opc) "invalid admin opcode 0x%"PRIx8""
+pci_nvme_err_invalid_lba_range(uint64_t start, uint64_t len, uint64_t limit) "Invalid LBA start=%"PRIu64" len=%"PRIu64" limit=%"PRIu64""
+pci_nvme_err_invalid_del_sq(uint16_t qid) "invalid submission queue deletion, sid=%"PRIu16""
+pci_nvme_err_invalid_create_sq_cqid(uint16_t cqid) "failed creating submission queue, invalid cqid=%"PRIu16""
+pci_nvme_err_invalid_create_sq_sqid(uint16_t sqid) "failed creating submission queue, invalid sqid=%"PRIu16""
+pci_nvme_err_invalid_create_sq_size(uint16_t qsize) "failed creating submission queue, invalid qsize=%"PRIu16""
+pci_nvme_err_invalid_create_sq_addr(uint64_t addr) "failed creating submission queue, addr=0x%"PRIx64""
+pci_nvme_err_invalid_create_sq_qflags(uint16_t qflags) "failed creating submission queue, qflags=%"PRIu16""
+pci_nvme_err_invalid_del_cq_cqid(uint16_t cqid) "failed deleting completion queue, cqid=%"PRIu16""
+pci_nvme_err_invalid_del_cq_notempty(uint16_t cqid) "failed deleting completion queue, it is not empty, cqid=%"PRIu16""
+pci_nvme_err_invalid_create_cq_cqid(uint16_t cqid) "failed creating completion queue, cqid=%"PRIu16""
+pci_nvme_err_invalid_create_cq_size(uint16_t size) "failed creating completion queue, size=%"PRIu16""
+pci_nvme_err_invalid_create_cq_addr(uint64_t addr) "failed creating completion queue, addr=0x%"PRIx64""
+pci_nvme_err_invalid_create_cq_vector(uint16_t vector) "failed creating completion queue, vector=%"PRIu16""
+pci_nvme_err_invalid_create_cq_qflags(uint16_t qflags) "failed creating completion queue, qflags=%"PRIu16""
+pci_nvme_err_invalid_identify_cns(uint16_t cns) "identify, invalid cns=0x%"PRIx16""
+pci_nvme_err_invalid_getfeat(int dw10) "invalid get features, dw10=0x%"PRIx32""
+pci_nvme_err_invalid_setfeat(uint32_t dw10) "invalid set features, dw10=0x%"PRIx32""
+pci_nvme_err_startfail_cq(void) "nvme_start_ctrl failed because there are non-admin completion queues"
+pci_nvme_err_startfail_sq(void) "nvme_start_ctrl failed because there are non-admin submission queues"
+pci_nvme_err_startfail_nbarasq(void) "nvme_start_ctrl failed because the admin submission queue address is null"
+pci_nvme_err_startfail_nbaracq(void) "nvme_start_ctrl failed because the admin completion queue address is null"
+pci_nvme_err_startfail_asq_misaligned(uint64_t addr) "nvme_start_ctrl failed because the admin submission queue address is misaligned: 0x%"PRIx64""
+pci_nvme_err_startfail_acq_misaligned(uint64_t addr) "nvme_start_ctrl failed because the admin completion queue address is misaligned: 0x%"PRIx64""
+pci_nvme_err_startfail_page_too_small(uint8_t log2ps, uint8_t maxlog2ps) "nvme_start_ctrl failed because the page size is too small: log2size=%u, min=%u"
+pci_nvme_err_startfail_page_too_large(uint8_t log2ps, uint8_t maxlog2ps) "nvme_start_ctrl failed because the page size is too large: log2size=%u, max=%u"
+pci_nvme_err_startfail_cqent_too_small(uint8_t log2ps, uint8_t maxlog2ps) "nvme_start_ctrl failed because the completion queue entry size is too small: log2size=%u, min=%u"
+pci_nvme_err_startfail_cqent_too_large(uint8_t log2ps, uint8_t maxlog2ps) "nvme_start_ctrl failed because the completion queue entry size is too large: log2size=%u, max=%u"
+pci_nvme_err_startfail_sqent_too_small(uint8_t log2ps, uint8_t maxlog2ps) "nvme_start_ctrl failed because the submission queue entry size is too small: log2size=%u, min=%u"
+pci_nvme_err_startfail_sqent_too_large(uint8_t log2ps, uint8_t maxlog2ps) "nvme_start_ctrl failed because the submission queue entry size is too large: log2size=%u, max=%u"
+pci_nvme_err_startfail_asqent_sz_zero(void) "nvme_start_ctrl failed because the admin submission queue size is zero"
+pci_nvme_err_startfail_acqent_sz_zero(void) "nvme_start_ctrl failed because the admin completion queue size is zero"
+pci_nvme_err_startfail(void) "setting controller enable bit failed"
# Traces for undefined behavior
-nvme_ub_mmiowr_misaligned32(uint64_t offset) "MMIO write not 32-bit aligned, offset=0x%"PRIx64""
-nvme_ub_mmiowr_toosmall(uint64_t offset, unsigned size) "MMIO write smaller than 32 bits, offset=0x%"PRIx64", size=%u"
-nvme_ub_mmiowr_intmask_with_msix(void) "undefined access to interrupt mask set when MSI-X is enabled"
-nvme_ub_mmiowr_ro_csts(void) "attempted to set a read only bit of controller status"
-nvme_ub_mmiowr_ssreset_w1c_unsupported(void) "attempted to W1C CSTS.NSSRO but CAP.NSSRS is zero (not supported)"
-nvme_ub_mmiowr_ssreset_unsupported(void) "attempted NVM subsystem reset but CAP.NSSRS is zero (not supported)"
-nvme_ub_mmiowr_cmbloc_reserved(void) "invalid write to reserved CMBLOC when CMBSZ is zero, ignored"
-nvme_ub_mmiowr_cmbsz_readonly(void) "invalid write to read only CMBSZ, ignored"
-nvme_ub_mmiowr_pmrcap_readonly(void) "invalid write to read only PMRCAP, ignored"
-nvme_ub_mmiowr_pmrsts_readonly(void) "invalid write to read only PMRSTS, ignored"
-nvme_ub_mmiowr_pmrebs_readonly(void) "invalid write to read only PMREBS, ignored"
-nvme_ub_mmiowr_pmrswtp_readonly(void) "invalid write to read only PMRSWTP, ignored"
-nvme_ub_mmiowr_invalid(uint64_t offset, uint64_t data) "invalid MMIO write, offset=0x%"PRIx64", data=0x%"PRIx64""
-nvme_ub_mmiord_misaligned32(uint64_t offset) "MMIO read not 32-bit aligned, offset=0x%"PRIx64""
-nvme_ub_mmiord_toosmall(uint64_t offset) "MMIO read smaller than 32-bits, offset=0x%"PRIx64""
-nvme_ub_mmiord_invalid_ofs(uint64_t offset) "MMIO read beyond last register, offset=0x%"PRIx64", returning 0"
-nvme_ub_db_wr_misaligned(uint64_t offset) "doorbell write not 32-bit aligned, offset=0x%"PRIx64", ignoring"
-nvme_ub_db_wr_invalid_cq(uint32_t qid) "completion queue doorbell write for nonexistent queue, cqid=%"PRIu32", ignoring"
-nvme_ub_db_wr_invalid_cqhead(uint32_t qid, uint16_t new_head) "completion queue doorbell write value beyond queue size, cqid=%"PRIu32", new_head=%"PRIu16", ignoring"
-nvme_ub_db_wr_invalid_sq(uint32_t qid) "submission queue doorbell write for nonexistent queue, sqid=%"PRIu32", ignoring"
-nvme_ub_db_wr_invalid_sqtail(uint32_t qid, uint16_t new_tail) "submission queue doorbell write value beyond queue size, sqid=%"PRIu32", new_head=%"PRIu16", ignoring"
+pci_nvme_ub_mmiowr_misaligned32(uint64_t offset) "MMIO write not 32-bit aligned, offset=0x%"PRIx64""
+pci_nvme_ub_mmiowr_toosmall(uint64_t offset, unsigned size) "MMIO write smaller than 32 bits, offset=0x%"PRIx64", size=%u"
+pci_nvme_ub_mmiowr_intmask_with_msix(void) "undefined access to interrupt mask set when MSI-X is enabled"
+pci_nvme_ub_mmiowr_ro_csts(void) "attempted to set a read only bit of controller status"
+pci_nvme_ub_mmiowr_ssreset_w1c_unsupported(void) "attempted to W1C CSTS.NSSRO but CAP.NSSRS is zero (not supported)"
+pci_nvme_ub_mmiowr_ssreset_unsupported(void) "attempted NVM subsystem reset but CAP.NSSRS is zero (not supported)"
+pci_nvme_ub_mmiowr_cmbloc_reserved(void) "invalid write to reserved CMBLOC when CMBSZ is zero, ignored"
+pci_nvme_ub_mmiowr_cmbsz_readonly(void) "invalid write to read only CMBSZ, ignored"
+pci_nvme_ub_mmiowr_pmrcap_readonly(void) "invalid write to read only PMRCAP, ignored"
+pci_nvme_ub_mmiowr_pmrsts_readonly(void) "invalid write to read only PMRSTS, ignored"
+pci_nvme_ub_mmiowr_pmrebs_readonly(void) "invalid write to read only PMREBS, ignored"
+pci_nvme_ub_mmiowr_pmrswtp_readonly(void) "invalid write to read only PMRSWTP, ignored"
+pci_nvme_ub_mmiowr_invalid(uint64_t offset, uint64_t data) "invalid MMIO write, offset=0x%"PRIx64", data=0x%"PRIx64""
+pci_nvme_ub_mmiord_misaligned32(uint64_t offset) "MMIO read not 32-bit aligned, offset=0x%"PRIx64""
+pci_nvme_ub_mmiord_toosmall(uint64_t offset) "MMIO read smaller than 32-bits, offset=0x%"PRIx64""
+pci_nvme_ub_mmiord_invalid_ofs(uint64_t offset) "MMIO read beyond last register, offset=0x%"PRIx64", returning 0"
+pci_nvme_ub_db_wr_misaligned(uint64_t offset) "doorbell write not 32-bit aligned, offset=0x%"PRIx64", ignoring"
+pci_nvme_ub_db_wr_invalid_cq(uint32_t qid) "completion queue doorbell write for nonexistent queue, cqid=%"PRIu32", ignoring"
+pci_nvme_ub_db_wr_invalid_cqhead(uint32_t qid, uint16_t new_head) "completion queue doorbell write value beyond queue size, cqid=%"PRIu32", new_head=%"PRIu16", ignoring"
+pci_nvme_ub_db_wr_invalid_sq(uint32_t qid) "submission queue doorbell write for nonexistent queue, sqid=%"PRIu32", ignoring"
+pci_nvme_ub_db_wr_invalid_sqtail(uint32_t qid, uint16_t new_tail) "submission queue doorbell write value beyond queue size, sqid=%"PRIu32", new_head=%"PRIu16", ignoring"
# xen-block.c
xen_block_realize(const char *type, uint32_t disk, uint32_t partition) "%s d%up%u"
--
2.27.0
^ permalink raw reply related [flat|nested] 24+ messages in thread
* [PATCH v7 03/22] hw/block/nvme: remove superfluous breaks
2020-06-09 19:03 [PATCH v7 00/22] nvme: small fixes, refactoring and cleanups Klaus Jensen
2020-06-09 19:03 ` [PATCH v7 01/22] hw/block/nvme: fix pci doorbell size calculation Klaus Jensen
2020-06-09 19:03 ` [PATCH v7 02/22] hw/block/nvme: rename trace events to pci_nvme Klaus Jensen
@ 2020-06-09 19:03 ` Klaus Jensen
2020-06-09 19:03 ` [PATCH v7 04/22] hw/block/nvme: move device parameters to separate struct Klaus Jensen
` (19 subsequent siblings)
22 siblings, 0 replies; 24+ messages in thread
From: Klaus Jensen @ 2020-06-09 19:03 UTC (permalink / raw)
To: qemu-block
Cc: Kevin Wolf, Klaus Jensen, qemu-devel, Max Reitz, Klaus Jensen,
Keith Busch, Javier Gonzalez, Maxim Levitsky,
Philippe Mathieu-Daudé
From: Klaus Jensen <k.jensen@samsung.com>
These break statements were left over when commit 3036a626e9ef ("nvme:
add Get/Set Feature Timestamp support") was merged.
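For illustration (a minimal standalone example, not taken from the
driver), a break placed directly after a return in a switch arm is
unreachable, which is all this patch removes:

    static int get_feature(int fid)
    {
        switch (fid) {
        case 0x0e: /* e.g. a timestamp-style feature */
            return 0;
            break; /* unreachable -- the superfluous break removed below */
        default:
            return -1;
        }
    }

    int main(void)
    {
        return get_feature(0x0e);
    }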
Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Keith Busch <kbusch@kernel.org>
Message-Id: <20200514044611.734782-4-its@irrelevant.dk>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
hw/block/nvme.c | 4 ----
1 file changed, 4 deletions(-)
diff --git a/hw/block/nvme.c b/hw/block/nvme.c
index e8f5c5ab829b..0d3f8f345f9a 100644
--- a/hw/block/nvme.c
+++ b/hw/block/nvme.c
@@ -801,7 +801,6 @@ static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
break;
case NVME_TIMESTAMP:
return nvme_get_feature_timestamp(n, cmd);
- break;
default:
trace_pci_nvme_err_invalid_getfeat(dw10);
return NVME_INVALID_FIELD | NVME_DNR;
@@ -845,11 +844,8 @@ static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
req->cqe.result =
cpu_to_le32((n->num_queues - 2) | ((n->num_queues - 2) << 16));
break;
-
case NVME_TIMESTAMP:
return nvme_set_feature_timestamp(n, cmd);
- break;
-
default:
trace_pci_nvme_err_invalid_setfeat(dw10);
return NVME_INVALID_FIELD | NVME_DNR;
--
2.27.0
^ permalink raw reply related [flat|nested] 24+ messages in thread
* [PATCH v7 04/22] hw/block/nvme: move device parameters to separate struct
2020-06-09 19:03 [PATCH v7 00/22] nvme: small fixes, refactoring and cleanups Klaus Jensen
` (2 preceding siblings ...)
2020-06-09 19:03 ` [PATCH v7 03/22] hw/block/nvme: remove superfluous breaks Klaus Jensen
@ 2020-06-09 19:03 ` Klaus Jensen
2020-06-09 19:03 ` [PATCH v7 05/22] hw/block/nvme: use constants in identify Klaus Jensen
` (18 subsequent siblings)
22 siblings, 0 replies; 24+ messages in thread
From: Klaus Jensen @ 2020-06-09 19:03 UTC (permalink / raw)
To: qemu-block
Cc: Kevin Wolf, Klaus Jensen, qemu-devel, Max Reitz, Klaus Jensen,
Keith Busch, Javier Gonzalez, Maxim Levitsky,
Philippe Mathieu-Daudé
From: Klaus Jensen <k.jensen@samsung.com>
Move the device configuration parameters to a separate struct to make
it explicit what is configurable and what is set internally.
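A minimal sketch of the resulting split (field names taken from the
diff below; the real NvmeCtrl carries many more internally derived
fields, which is what makes the separation useful):

    #include <stdint.h>

    typedef struct NvmeParams {
        char *serial;         /* user-configurable */
        uint32_t num_queues;  /* user-configurable, default 64 */
        uint32_t cmb_size_mb; /* user-configurable, default 0  */
    } NvmeParams;

    /* Sketch of the controller state: params holds everything the user can
     * set via -device nvme,..., the remaining fields are derived internally. */
    typedef struct NvmeCtrlSketch {
        NvmeParams params;
        uint32_t reg_size;       /* derived from params.num_queues */
        uint32_t num_namespaces; /* set internally */
    } NvmeCtrlSketch;

    int main(void)
    {
        NvmeCtrlSketch n = { .params = { .num_queues = 64 } };
        return n.params.num_queues == 64 ? 0 : 1;
    }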
Signed-off-by: Klaus Jensen <klaus.jensen@cnexlabs.com>
Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
Message-Id: <20200514044611.734782-5-its@irrelevant.dk>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
hw/block/nvme.c | 49 ++++++++++++++++++++++++++-----------------------
hw/block/nvme.h | 11 ++++++++---
2 files changed, 34 insertions(+), 26 deletions(-)
diff --git a/hw/block/nvme.c b/hw/block/nvme.c
index 0d3f8f345f9a..bc2d9d2091d6 100644
--- a/hw/block/nvme.c
+++ b/hw/block/nvme.c
@@ -77,12 +77,12 @@ static void nvme_addr_read(NvmeCtrl *n, hwaddr addr, void *buf, int size)
static int nvme_check_sqid(NvmeCtrl *n, uint16_t sqid)
{
- return sqid < n->num_queues && n->sq[sqid] != NULL ? 0 : -1;
+ return sqid < n->params.num_queues && n->sq[sqid] != NULL ? 0 : -1;
}
static int nvme_check_cqid(NvmeCtrl *n, uint16_t cqid)
{
- return cqid < n->num_queues && n->cq[cqid] != NULL ? 0 : -1;
+ return cqid < n->params.num_queues && n->cq[cqid] != NULL ? 0 : -1;
}
static void nvme_inc_cq_tail(NvmeCQueue *cq)
@@ -644,7 +644,7 @@ static uint16_t nvme_create_cq(NvmeCtrl *n, NvmeCmd *cmd)
trace_pci_nvme_err_invalid_create_cq_addr(prp1);
return NVME_INVALID_FIELD | NVME_DNR;
}
- if (unlikely(vector > n->num_queues)) {
+ if (unlikely(vector > n->params.num_queues)) {
trace_pci_nvme_err_invalid_create_cq_vector(vector);
return NVME_INVALID_IRQ_VECTOR | NVME_DNR;
}
@@ -796,7 +796,8 @@ static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
trace_pci_nvme_getfeat_vwcache(result ? "enabled" : "disabled");
break;
case NVME_NUMBER_OF_QUEUES:
- result = cpu_to_le32((n->num_queues - 2) | ((n->num_queues - 2) << 16));
+ result = cpu_to_le32((n->params.num_queues - 2) |
+ ((n->params.num_queues - 2) << 16));
trace_pci_nvme_getfeat_numq(result);
break;
case NVME_TIMESTAMP:
@@ -840,9 +841,10 @@ static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
case NVME_NUMBER_OF_QUEUES:
trace_pci_nvme_setfeat_numq((dw11 & 0xFFFF) + 1,
((dw11 >> 16) & 0xFFFF) + 1,
- n->num_queues - 1, n->num_queues - 1);
- req->cqe.result =
- cpu_to_le32((n->num_queues - 2) | ((n->num_queues - 2) << 16));
+ n->params.num_queues - 1,
+ n->params.num_queues - 1);
+ req->cqe.result = cpu_to_le32((n->params.num_queues - 2) |
+ ((n->params.num_queues - 2) << 16));
break;
case NVME_TIMESTAMP:
return nvme_set_feature_timestamp(n, cmd);
@@ -913,12 +915,12 @@ static void nvme_clear_ctrl(NvmeCtrl *n)
blk_drain(n->conf.blk);
- for (i = 0; i < n->num_queues; i++) {
+ for (i = 0; i < n->params.num_queues; i++) {
if (n->sq[i] != NULL) {
nvme_free_sq(n->sq[i], n);
}
}
- for (i = 0; i < n->num_queues; i++) {
+ for (i = 0; i < n->params.num_queues; i++) {
if (n->cq[i] != NULL) {
nvme_free_cq(n->cq[i], n);
}
@@ -1348,7 +1350,7 @@ static void nvme_realize(PCIDevice *pci_dev, Error **errp)
int64_t bs_size;
uint8_t *pci_conf;
- if (!n->num_queues) {
+ if (!n->params.num_queues) {
error_setg(errp, "num_queues can't be zero");
return;
}
@@ -1364,12 +1366,12 @@ static void nvme_realize(PCIDevice *pci_dev, Error **errp)
return;
}
- if (!n->serial) {
+ if (!n->params.serial) {
error_setg(errp, "serial property not set");
return;
}
- if (!n->cmb_size_mb && n->pmrdev) {
+ if (!n->params.cmb_size_mb && n->pmrdev) {
if (host_memory_backend_is_mapped(n->pmrdev)) {
char *path = object_get_canonical_path_component(OBJECT(n->pmrdev));
error_setg(errp, "can't use already busy memdev: %s", path);
@@ -1400,25 +1402,26 @@ static void nvme_realize(PCIDevice *pci_dev, Error **errp)
n->num_namespaces = 1;
/* num_queues is really number of pairs, so each has two doorbells */
- n->reg_size = pow2ceil(NVME_REG_SIZE + 2 * n->num_queues * NVME_DB_SIZE);
+ n->reg_size = pow2ceil(NVME_REG_SIZE +
+ 2 * n->params.num_queues * NVME_DB_SIZE);
n->ns_size = bs_size / (uint64_t)n->num_namespaces;
n->namespaces = g_new0(NvmeNamespace, n->num_namespaces);
- n->sq = g_new0(NvmeSQueue *, n->num_queues);
- n->cq = g_new0(NvmeCQueue *, n->num_queues);
+ n->sq = g_new0(NvmeSQueue *, n->params.num_queues);
+ n->cq = g_new0(NvmeCQueue *, n->params.num_queues);
memory_region_init_io(&n->iomem, OBJECT(n), &nvme_mmio_ops, n,
"nvme", n->reg_size);
pci_register_bar(pci_dev, 0,
PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_64,
&n->iomem);
- msix_init_exclusive_bar(pci_dev, n->num_queues, 4, NULL);
+ msix_init_exclusive_bar(pci_dev, n->params.num_queues, 4, NULL);
id->vid = cpu_to_le16(pci_get_word(pci_conf + PCI_VENDOR_ID));
id->ssvid = cpu_to_le16(pci_get_word(pci_conf + PCI_SUBSYSTEM_VENDOR_ID));
strpadcpy((char *)id->mn, sizeof(id->mn), "QEMU NVMe Ctrl", ' ');
strpadcpy((char *)id->fr, sizeof(id->fr), "1.0", ' ');
- strpadcpy((char *)id->sn, sizeof(id->sn), n->serial, ' ');
+ strpadcpy((char *)id->sn, sizeof(id->sn), n->params.serial, ' ');
id->rab = 6;
id->ieee[0] = 0x00;
id->ieee[1] = 0x02;
@@ -1447,7 +1450,7 @@ static void nvme_realize(PCIDevice *pci_dev, Error **errp)
n->bar.vs = 0x00010200;
n->bar.intmc = n->bar.intms = 0;
- if (n->cmb_size_mb) {
+ if (n->params.cmb_size_mb) {
NVME_CMBLOC_SET_BIR(n->bar.cmbloc, 2);
NVME_CMBLOC_SET_OFST(n->bar.cmbloc, 0);
@@ -1458,7 +1461,7 @@ static void nvme_realize(PCIDevice *pci_dev, Error **errp)
NVME_CMBSZ_SET_RDS(n->bar.cmbsz, 1);
NVME_CMBSZ_SET_WDS(n->bar.cmbsz, 1);
NVME_CMBSZ_SET_SZU(n->bar.cmbsz, 2); /* MBs */
- NVME_CMBSZ_SET_SZ(n->bar.cmbsz, n->cmb_size_mb);
+ NVME_CMBSZ_SET_SZ(n->bar.cmbsz, n->params.cmb_size_mb);
n->cmbloc = n->bar.cmbloc;
n->cmbsz = n->bar.cmbsz;
@@ -1542,7 +1545,7 @@ static void nvme_exit(PCIDevice *pci_dev)
g_free(n->cq);
g_free(n->sq);
- if (n->cmb_size_mb) {
+ if (n->params.cmb_size_mb) {
g_free(n->cmbuf);
}
@@ -1556,9 +1559,9 @@ static Property nvme_props[] = {
DEFINE_BLOCK_PROPERTIES(NvmeCtrl, conf),
DEFINE_PROP_LINK("pmrdev", NvmeCtrl, pmrdev, TYPE_MEMORY_BACKEND,
HostMemoryBackend *),
- DEFINE_PROP_STRING("serial", NvmeCtrl, serial),
- DEFINE_PROP_UINT32("cmb_size_mb", NvmeCtrl, cmb_size_mb, 0),
- DEFINE_PROP_UINT32("num_queues", NvmeCtrl, num_queues, 64),
+ DEFINE_PROP_STRING("serial", NvmeCtrl, params.serial),
+ DEFINE_PROP_UINT32("cmb_size_mb", NvmeCtrl, params.cmb_size_mb, 0),
+ DEFINE_PROP_UINT32("num_queues", NvmeCtrl, params.num_queues, 64),
DEFINE_PROP_END_OF_LIST(),
};
diff --git a/hw/block/nvme.h b/hw/block/nvme.h
index 6520a9f0bead..9df244c93c02 100644
--- a/hw/block/nvme.h
+++ b/hw/block/nvme.h
@@ -1,7 +1,14 @@
#ifndef HW_NVME_H
#define HW_NVME_H
+
#include "block/nvme.h"
+typedef struct NvmeParams {
+ char *serial;
+ uint32_t num_queues;
+ uint32_t cmb_size_mb;
+} NvmeParams;
+
typedef struct NvmeAsyncEvent {
QSIMPLEQ_ENTRY(NvmeAsyncEvent) entry;
NvmeAerResult result;
@@ -63,6 +70,7 @@ typedef struct NvmeCtrl {
MemoryRegion ctrl_mem;
NvmeBar bar;
BlockConf conf;
+ NvmeParams params;
uint32_t page_size;
uint16_t page_bits;
@@ -71,10 +79,8 @@ typedef struct NvmeCtrl {
uint16_t sqe_size;
uint32_t reg_size;
uint32_t num_namespaces;
- uint32_t num_queues;
uint32_t max_q_ents;
uint64_t ns_size;
- uint32_t cmb_size_mb;
uint32_t cmbsz;
uint32_t cmbloc;
uint8_t *cmbuf;
@@ -82,7 +88,6 @@ typedef struct NvmeCtrl {
uint64_t host_timestamp; /* Timestamp sent by the host */
uint64_t timestamp_set_qemu_clock_ms; /* QEMU clock time */
- char *serial;
HostMemoryBackend *pmrdev;
NvmeNamespace *namespaces;
--
2.27.0
^ permalink raw reply related [flat|nested] 24+ messages in thread
* [PATCH v7 05/22] hw/block/nvme: use constants in identify
2020-06-09 19:03 [PATCH v7 00/22] nvme: small fixes, refactoring and cleanups Klaus Jensen
` (3 preceding siblings ...)
2020-06-09 19:03 ` [PATCH v7 04/22] hw/block/nvme: move device parameters to separate struct Klaus Jensen
@ 2020-06-09 19:03 ` Klaus Jensen
2020-06-09 19:03 ` [PATCH v7 06/22] hw/block/nvme: refactor nvme_addr_read Klaus Jensen
` (17 subsequent siblings)
22 siblings, 0 replies; 24+ messages in thread
From: Klaus Jensen @ 2020-06-09 19:03 UTC (permalink / raw)
To: qemu-block
Cc: Kevin Wolf, Klaus Jensen, qemu-devel, Max Reitz, Klaus Jensen,
Keith Busch, Javier Gonzalez, Maxim Levitsky,
Philippe Mathieu-Daudé
From: Klaus Jensen <k.jensen@samsung.com>
Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Keith Busch <kbusch@kernel.org>
Message-Id: <20200514044611.734782-6-its@irrelevant.dk>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
hw/block/nvme.c | 8 ++++----
include/block/nvme.h | 8 ++++++++
2 files changed, 12 insertions(+), 4 deletions(-)
diff --git a/hw/block/nvme.c b/hw/block/nvme.c
index bc2d9d2091d6..2a26b8859a27 100644
--- a/hw/block/nvme.c
+++ b/hw/block/nvme.c
@@ -692,7 +692,7 @@ static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeIdentify *c)
static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeIdentify *c)
{
- static const int data_len = 4 * KiB;
+ static const int data_len = NVME_IDENTIFY_DATA_SIZE;
uint32_t min_nsid = le32_to_cpu(c->nsid);
uint64_t prp1 = le64_to_cpu(c->prp1);
uint64_t prp2 = le64_to_cpu(c->prp2);
@@ -722,11 +722,11 @@ static uint16_t nvme_identify(NvmeCtrl *n, NvmeCmd *cmd)
NvmeIdentify *c = (NvmeIdentify *)cmd;
switch (le32_to_cpu(c->cns)) {
- case 0x00:
+ case NVME_ID_CNS_NS:
return nvme_identify_ns(n, c);
- case 0x01:
+ case NVME_ID_CNS_CTRL:
return nvme_identify_ctrl(n, c);
- case 0x02:
+ case NVME_ID_CNS_NS_ACTIVE_LIST:
return nvme_identify_nslist(n, c);
default:
trace_pci_nvme_err_invalid_identify_cns(le32_to_cpu(c->cns));
diff --git a/include/block/nvme.h b/include/block/nvme.h
index 5525c8e34308..1720ee1d5158 100644
--- a/include/block/nvme.h
+++ b/include/block/nvme.h
@@ -705,6 +705,14 @@ typedef struct NvmePSD {
uint8_t resv[16];
} NvmePSD;
+#define NVME_IDENTIFY_DATA_SIZE 4096
+
+enum {
+ NVME_ID_CNS_NS = 0x0,
+ NVME_ID_CNS_CTRL = 0x1,
+ NVME_ID_CNS_NS_ACTIVE_LIST = 0x2,
+};
+
typedef struct NvmeIdCtrl {
uint16_t vid;
uint16_t ssvid;
--
2.27.0
^ permalink raw reply related [flat|nested] 24+ messages in thread
* [PATCH v7 06/22] hw/block/nvme: refactor nvme_addr_read
2020-06-09 19:03 [PATCH v7 00/22] nvme: small fixes, refactoring and cleanups Klaus Jensen
` (4 preceding siblings ...)
2020-06-09 19:03 ` [PATCH v7 05/22] hw/block/nvme: use constants in identify Klaus Jensen
@ 2020-06-09 19:03 ` Klaus Jensen
2020-06-09 19:03 ` [PATCH v7 07/22] hw/block/nvme: fix pin-based interrupt behavior Klaus Jensen
` (16 subsequent siblings)
22 siblings, 0 replies; 24+ messages in thread
From: Klaus Jensen @ 2020-06-09 19:03 UTC (permalink / raw)
To: qemu-block
Cc: Kevin Wolf, Klaus Jensen, qemu-devel, Max Reitz, Klaus Jensen,
Keith Busch, Javier Gonzalez, Maxim Levitsky,
Philippe Mathieu-Daudé
From: Klaus Jensen <k.jensen@samsung.com>
Pull the controller memory buffer check into its own function. The
check will be used on its own in later patches.
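As a rough sketch of how the helper might be reused later (the
nvme_addr_write function below is hypothetical and not part of this
series), a write-side counterpart could call the same check instead of
open-coding the bounds test:
    /* Hypothetical sketch only; mirrors nvme_addr_read using the new helper. */
    static void nvme_addr_write(NvmeCtrl *n, hwaddr addr, void *buf, int size)
    {
        if (n->cmbsz && nvme_addr_is_cmb(n, addr)) {
            /* target falls within the controller memory buffer */
            memcpy(&n->cmbuf[addr - n->ctrl_mem.addr], buf, size);
            return;
        }
        pci_dma_write(&n->parent_obj, addr, buf, size);
    }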
Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
Reviewed-by: Keith Busch <kbusch@kernel.org>
Message-Id: <20200514044611.734782-7-its@irrelevant.dk>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
hw/block/nvme.c | 16 ++++++++++++----
1 file changed, 12 insertions(+), 4 deletions(-)
diff --git a/hw/block/nvme.c b/hw/block/nvme.c
index 2a26b8859a27..d6fcf078a4c9 100644
--- a/hw/block/nvme.c
+++ b/hw/block/nvme.c
@@ -65,14 +65,22 @@
static void nvme_process_sq(void *opaque);
+static bool nvme_addr_is_cmb(NvmeCtrl *n, hwaddr addr)
+{
+ hwaddr low = n->ctrl_mem.addr;
+ hwaddr hi = n->ctrl_mem.addr + int128_get64(n->ctrl_mem.size);
+
+ return addr >= low && addr < hi;
+}
+
static void nvme_addr_read(NvmeCtrl *n, hwaddr addr, void *buf, int size)
{
- if (n->cmbsz && addr >= n->ctrl_mem.addr &&
- addr < (n->ctrl_mem.addr + int128_get64(n->ctrl_mem.size))) {
+ if (n->cmbsz && nvme_addr_is_cmb(n, addr)) {
memcpy(buf, (void *)&n->cmbuf[addr - n->ctrl_mem.addr], size);
- } else {
- pci_dma_read(&n->parent_obj, addr, buf, size);
+ return;
}
+
+ pci_dma_read(&n->parent_obj, addr, buf, size);
}
static int nvme_check_sqid(NvmeCtrl *n, uint16_t sqid)
--
2.27.0
^ permalink raw reply related [flat|nested] 24+ messages in thread
* [PATCH v7 07/22] hw/block/nvme: fix pin-based interrupt behavior
2020-06-09 19:03 [PATCH v7 00/22] nvme: small fixes, refactoring and cleanups Klaus Jensen
` (5 preceding siblings ...)
2020-06-09 19:03 ` [PATCH v7 06/22] hw/block/nvme: refactor nvme_addr_read Klaus Jensen
@ 2020-06-09 19:03 ` Klaus Jensen
2020-06-09 19:03 ` [PATCH v7 08/22] hw/block/nvme: add max_ioqpairs device parameter Klaus Jensen
` (15 subsequent siblings)
22 siblings, 0 replies; 24+ messages in thread
From: Klaus Jensen @ 2020-06-09 19:03 UTC (permalink / raw)
To: qemu-block
Cc: Kevin Wolf, Michael S. Tsirkin, Klaus Jensen, qemu-devel,
Max Reitz, Klaus Jensen, Keith Busch, Javier Gonzalez,
Maxim Levitsky, Philippe Mathieu-Daudé
From: Klaus Jensen <k.jensen@samsung.com>
First, since the device only supports MSI-X or pin-based interrupts, it
should not accept interrupt vectors other than 0 when creating
completion queues while MSI-X is disabled.
Second, the irq_status NvmeCtrl member is meant to be compared against
the INTMS register, so it should only be 32 bits wide. It is really
only useful with multi-message MSI.
Third, since we do not force a 1-to-1 correspondence between cqid and
interrupt vector, the irq_status register should not have bits set
according to cqid, but according to the associated interrupt vector.
Fix these issues, but keep irq_status available so we can easily support
multi-message MSI down the line.
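For context, the pin-based path ultimately compares irq_status against
the 32-bit INTMS mask; a minimal sketch of that check (the body of
nvme_irq_check() is assumed here, it is not part of this hunk) looks
like:
    /* Assumed sketch: raise the pin while any unmasked vector is
     * pending, lower it otherwise. */
    static void nvme_irq_check(NvmeCtrl *n)
    {
        if (msix_enabled(&n->parent_obj)) {
            return;
        }
        if (~n->bar.intms & n->irq_status) {
            pci_irq_assert(&n->parent_obj);
        } else {
            pci_irq_deassert(&n->parent_obj);
        }
    }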
Fixes: 5e9aa92eb1a5 ("hw/block: Fix pin-based interrupt behaviour of NVMe")
Cc: "Michael S. Tsirkin" <mst@redhat.com>
Cc: Marcel Apfelbaum <marcel.apfelbaum@gmail.com>
Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
Message-Id: <20200514044611.734782-8-its@irrelevant.dk>
Reviewed-by: Keith Busch <kbusch@kernel.org>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
hw/block/nvme.c | 12 ++++++++----
hw/block/nvme.h | 2 +-
2 files changed, 9 insertions(+), 5 deletions(-)
diff --git a/hw/block/nvme.c b/hw/block/nvme.c
index d6fcf078a4c9..ee514625ee85 100644
--- a/hw/block/nvme.c
+++ b/hw/block/nvme.c
@@ -137,8 +137,8 @@ static void nvme_irq_assert(NvmeCtrl *n, NvmeCQueue *cq)
msix_notify(&(n->parent_obj), cq->vector);
} else {
trace_pci_nvme_irq_pin();
- assert(cq->cqid < 64);
- n->irq_status |= 1 << cq->cqid;
+ assert(cq->vector < 32);
+ n->irq_status |= 1 << cq->vector;
nvme_irq_check(n);
}
} else {
@@ -152,8 +152,8 @@ static void nvme_irq_deassert(NvmeCtrl *n, NvmeCQueue *cq)
if (msix_enabled(&(n->parent_obj))) {
return;
} else {
- assert(cq->cqid < 64);
- n->irq_status &= ~(1 << cq->cqid);
+ assert(cq->vector < 32);
+ n->irq_status &= ~(1 << cq->vector);
nvme_irq_check(n);
}
}
@@ -652,6 +652,10 @@ static uint16_t nvme_create_cq(NvmeCtrl *n, NvmeCmd *cmd)
trace_pci_nvme_err_invalid_create_cq_addr(prp1);
return NVME_INVALID_FIELD | NVME_DNR;
}
+ if (unlikely(!msix_enabled(&n->parent_obj) && vector)) {
+ trace_pci_nvme_err_invalid_create_cq_vector(vector);
+ return NVME_INVALID_IRQ_VECTOR | NVME_DNR;
+ }
if (unlikely(vector > n->params.num_queues)) {
trace_pci_nvme_err_invalid_create_cq_vector(vector);
return NVME_INVALID_IRQ_VECTOR | NVME_DNR;
diff --git a/hw/block/nvme.h b/hw/block/nvme.h
index 9df244c93c02..91f16c812582 100644
--- a/hw/block/nvme.h
+++ b/hw/block/nvme.h
@@ -84,7 +84,7 @@ typedef struct NvmeCtrl {
uint32_t cmbsz;
uint32_t cmbloc;
uint8_t *cmbuf;
- uint64_t irq_status;
+ uint32_t irq_status;
uint64_t host_timestamp; /* Timestamp sent by the host */
uint64_t timestamp_set_qemu_clock_ms; /* QEMU clock time */
--
2.27.0
^ permalink raw reply related [flat|nested] 24+ messages in thread
* [PATCH v7 08/22] hw/block/nvme: add max_ioqpairs device parameter
2020-06-09 19:03 [PATCH v7 00/22] nvme: small fixes, refactoring and cleanups Klaus Jensen
` (6 preceding siblings ...)
2020-06-09 19:03 ` [PATCH v7 07/22] hw/block/nvme: fix pin-based interrupt behavior Klaus Jensen
@ 2020-06-09 19:03 ` Klaus Jensen
2020-06-09 19:03 ` [PATCH v7 09/22] hw/block/nvme: remove redundant cmbloc/cmbsz members Klaus Jensen
` (14 subsequent siblings)
22 siblings, 0 replies; 24+ messages in thread
From: Klaus Jensen @ 2020-06-09 19:03 UTC (permalink / raw)
To: qemu-block
Cc: Kevin Wolf, Klaus Jensen, qemu-devel, Max Reitz, Klaus Jensen,
Keith Busch, Javier Gonzalez, Maxim Levitsky,
Philippe Mathieu-Daudé
From: Klaus Jensen <k.jensen@samsung.com>
The num_queues device parameter has a slightly confusing meaning: first,
it accounts for the admin queue pair, which is not really optional;
second, it is really a maximum on the number of queues allowed.
Add a new max_ioqpairs parameter that only accounts for I/O queue pairs,
but keep num_queues for compatibility.
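As a quick sanity check of the new accounting (example values only, and
the helper name below is made up for illustration):
    #include <stdint.h>
    /* Example only: how the deprecated parameter maps onto the new one. */
    static uint32_t example_numq_feature(void)
    {
        uint32_t num_queues   = 65;             /* legacy: admin + I/O pairs */
        uint32_t max_ioqpairs = num_queues - 1; /* new: I/O pairs only -> 64 */
        /* The Number of Queues feature is zero-based, so 64 I/O queue
         * pairs are reported as 63 in both halves of the result dword. */
        return (max_ioqpairs - 1) | ((max_ioqpairs - 1) << 16);
    }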
Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Keith Busch <kbusch@kernel.org>
Message-Id: <20200514044611.734782-10-its@irrelevant.dk>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
hw/block/nvme.c | 51 ++++++++++++++++++++++++++++++-------------------
hw/block/nvme.h | 3 ++-
2 files changed, 33 insertions(+), 21 deletions(-)
diff --git a/hw/block/nvme.c b/hw/block/nvme.c
index ee514625ee85..1c1d2f8b7768 100644
--- a/hw/block/nvme.c
+++ b/hw/block/nvme.c
@@ -20,7 +20,7 @@
* -device nvme,drive=<drive_id>,serial=<serial>,id=<id[optional]>, \
* cmb_size_mb=<cmb_size_mb[optional]>, \
* [pmrdev=<mem_backend_file_id>,] \
- * num_queues=<N[optional]>
+ * max_ioqpairs=<N[optional]>
*
* Note cmb_size_mb denotes size of CMB in MB. CMB is assumed to be at
* offset 0 in BAR2 and supports only WDS, RDS and SQS for now.
@@ -36,6 +36,7 @@
#include "qemu/osdep.h"
#include "qemu/units.h"
+#include "qemu/error-report.h"
#include "hw/block/block.h"
#include "hw/pci/msix.h"
#include "hw/pci/pci.h"
@@ -85,12 +86,12 @@ static void nvme_addr_read(NvmeCtrl *n, hwaddr addr, void *buf, int size)
static int nvme_check_sqid(NvmeCtrl *n, uint16_t sqid)
{
- return sqid < n->params.num_queues && n->sq[sqid] != NULL ? 0 : -1;
+ return sqid < n->params.max_ioqpairs + 1 && n->sq[sqid] != NULL ? 0 : -1;
}
static int nvme_check_cqid(NvmeCtrl *n, uint16_t cqid)
{
- return cqid < n->params.num_queues && n->cq[cqid] != NULL ? 0 : -1;
+ return cqid < n->params.max_ioqpairs + 1 && n->cq[cqid] != NULL ? 0 : -1;
}
static void nvme_inc_cq_tail(NvmeCQueue *cq)
@@ -656,7 +657,7 @@ static uint16_t nvme_create_cq(NvmeCtrl *n, NvmeCmd *cmd)
trace_pci_nvme_err_invalid_create_cq_vector(vector);
return NVME_INVALID_IRQ_VECTOR | NVME_DNR;
}
- if (unlikely(vector > n->params.num_queues)) {
+ if (unlikely(vector > n->params.max_ioqpairs)) {
trace_pci_nvme_err_invalid_create_cq_vector(vector);
return NVME_INVALID_IRQ_VECTOR | NVME_DNR;
}
@@ -808,8 +809,8 @@ static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
trace_pci_nvme_getfeat_vwcache(result ? "enabled" : "disabled");
break;
case NVME_NUMBER_OF_QUEUES:
- result = cpu_to_le32((n->params.num_queues - 2) |
- ((n->params.num_queues - 2) << 16));
+ result = cpu_to_le32((n->params.max_ioqpairs - 1) |
+ ((n->params.max_ioqpairs - 1) << 16));
trace_pci_nvme_getfeat_numq(result);
break;
case NVME_TIMESTAMP:
@@ -853,10 +854,10 @@ static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
case NVME_NUMBER_OF_QUEUES:
trace_pci_nvme_setfeat_numq((dw11 & 0xFFFF) + 1,
((dw11 >> 16) & 0xFFFF) + 1,
- n->params.num_queues - 1,
- n->params.num_queues - 1);
- req->cqe.result = cpu_to_le32((n->params.num_queues - 2) |
- ((n->params.num_queues - 2) << 16));
+ n->params.max_ioqpairs,
+ n->params.max_ioqpairs);
+ req->cqe.result = cpu_to_le32((n->params.max_ioqpairs - 1) |
+ ((n->params.max_ioqpairs - 1) << 16));
break;
case NVME_TIMESTAMP:
return nvme_set_feature_timestamp(n, cmd);
@@ -927,12 +928,12 @@ static void nvme_clear_ctrl(NvmeCtrl *n)
blk_drain(n->conf.blk);
- for (i = 0; i < n->params.num_queues; i++) {
+ for (i = 0; i < n->params.max_ioqpairs + 1; i++) {
if (n->sq[i] != NULL) {
nvme_free_sq(n->sq[i], n);
}
}
- for (i = 0; i < n->params.num_queues; i++) {
+ for (i = 0; i < n->params.max_ioqpairs + 1; i++) {
if (n->cq[i] != NULL) {
nvme_free_cq(n->cq[i], n);
}
@@ -1362,8 +1363,17 @@ static void nvme_realize(PCIDevice *pci_dev, Error **errp)
int64_t bs_size;
uint8_t *pci_conf;
- if (!n->params.num_queues) {
- error_setg(errp, "num_queues can't be zero");
+ if (n->params.num_queues) {
+ warn_report("num_queues is deprecated; please use max_ioqpairs "
+ "instead");
+
+ n->params.max_ioqpairs = n->params.num_queues - 1;
+ }
+
+ if (n->params.max_ioqpairs < 1 ||
+ n->params.max_ioqpairs > PCI_MSIX_FLAGS_QSIZE) {
+ error_setg(errp, "max_ioqpairs must be between 1 and %d",
+ PCI_MSIX_FLAGS_QSIZE);
return;
}
@@ -1413,21 +1423,21 @@ static void nvme_realize(PCIDevice *pci_dev, Error **errp)
n->num_namespaces = 1;
- /* num_queues is really number of pairs, so each has two doorbells */
+ /* add one to max_ioqpairs to account for the admin queue pair */
n->reg_size = pow2ceil(NVME_REG_SIZE +
- 2 * n->params.num_queues * NVME_DB_SIZE);
+ 2 * (n->params.max_ioqpairs + 1) * NVME_DB_SIZE);
n->ns_size = bs_size / (uint64_t)n->num_namespaces;
n->namespaces = g_new0(NvmeNamespace, n->num_namespaces);
- n->sq = g_new0(NvmeSQueue *, n->params.num_queues);
- n->cq = g_new0(NvmeCQueue *, n->params.num_queues);
+ n->sq = g_new0(NvmeSQueue *, n->params.max_ioqpairs + 1);
+ n->cq = g_new0(NvmeCQueue *, n->params.max_ioqpairs + 1);
memory_region_init_io(&n->iomem, OBJECT(n), &nvme_mmio_ops, n,
"nvme", n->reg_size);
pci_register_bar(pci_dev, 0,
PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_64,
&n->iomem);
- msix_init_exclusive_bar(pci_dev, n->params.num_queues, 4, NULL);
+ msix_init_exclusive_bar(pci_dev, n->params.max_ioqpairs + 1, 4, NULL);
id->vid = cpu_to_le16(pci_get_word(pci_conf + PCI_VENDOR_ID));
id->ssvid = cpu_to_le16(pci_get_word(pci_conf + PCI_SUBSYSTEM_VENDOR_ID));
@@ -1573,7 +1583,8 @@ static Property nvme_props[] = {
HostMemoryBackend *),
DEFINE_PROP_STRING("serial", NvmeCtrl, params.serial),
DEFINE_PROP_UINT32("cmb_size_mb", NvmeCtrl, params.cmb_size_mb, 0),
- DEFINE_PROP_UINT32("num_queues", NvmeCtrl, params.num_queues, 64),
+ DEFINE_PROP_UINT32("num_queues", NvmeCtrl, params.num_queues, 0),
+ DEFINE_PROP_UINT32("max_ioqpairs", NvmeCtrl, params.max_ioqpairs, 64),
DEFINE_PROP_END_OF_LIST(),
};
diff --git a/hw/block/nvme.h b/hw/block/nvme.h
index 91f16c812582..26c38bd913be 100644
--- a/hw/block/nvme.h
+++ b/hw/block/nvme.h
@@ -5,7 +5,8 @@
typedef struct NvmeParams {
char *serial;
- uint32_t num_queues;
+ uint32_t num_queues; /* deprecated since 5.1 */
+ uint32_t max_ioqpairs;
uint32_t cmb_size_mb;
} NvmeParams;
--
2.27.0
^ permalink raw reply related [flat|nested] 24+ messages in thread
* [PATCH v7 09/22] hw/block/nvme: remove redundant cmbloc/cmbsz members
2020-06-09 19:03 [PATCH v7 00/22] nvme: small fixes, refactoring and cleanups Klaus Jensen
` (7 preceding siblings ...)
2020-06-09 19:03 ` [PATCH v7 08/22] hw/block/nvme: add max_ioqpairs device parameter Klaus Jensen
@ 2020-06-09 19:03 ` Klaus Jensen
2020-06-09 19:03 ` [PATCH v7 10/22] hw/block/nvme: factor out property/constraint checks Klaus Jensen
` (13 subsequent siblings)
22 siblings, 0 replies; 24+ messages in thread
From: Klaus Jensen @ 2020-06-09 19:03 UTC (permalink / raw)
To: qemu-block
Cc: Kevin Wolf, Klaus Jensen, qemu-devel, Max Reitz, Klaus Jensen,
Keith Busch, Javier Gonzalez, Maxim Levitsky,
Philippe Mathieu-Daudé
From: Klaus Jensen <k.jensen@samsung.com>
Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
Reviewed-by: Keith Busch <kbusch@kernel.org>
Message-Id: <20200514044611.734782-11-its@irrelevant.dk>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
hw/block/nvme.c | 7 ++-----
hw/block/nvme.h | 2 --
2 files changed, 2 insertions(+), 7 deletions(-)
diff --git a/hw/block/nvme.c b/hw/block/nvme.c
index 1c1d2f8b7768..61447220a873 100644
--- a/hw/block/nvme.c
+++ b/hw/block/nvme.c
@@ -76,7 +76,7 @@ static bool nvme_addr_is_cmb(NvmeCtrl *n, hwaddr addr)
static void nvme_addr_read(NvmeCtrl *n, hwaddr addr, void *buf, int size)
{
- if (n->cmbsz && nvme_addr_is_cmb(n, addr)) {
+ if (n->bar.cmbsz && nvme_addr_is_cmb(n, addr)) {
memcpy(buf, (void *)&n->cmbuf[addr - n->ctrl_mem.addr], size);
return;
}
@@ -170,7 +170,7 @@ static uint16_t nvme_map_prp(QEMUSGList *qsg, QEMUIOVector *iov, uint64_t prp1,
if (unlikely(!prp1)) {
trace_pci_nvme_err_invalid_prp();
return NVME_INVALID_FIELD | NVME_DNR;
- } else if (n->cmbsz && prp1 >= n->ctrl_mem.addr &&
+ } else if (n->bar.cmbsz && prp1 >= n->ctrl_mem.addr &&
prp1 < n->ctrl_mem.addr + int128_get64(n->ctrl_mem.size)) {
qsg->nsg = 0;
qemu_iovec_init(iov, num_prps);
@@ -1485,9 +1485,6 @@ static void nvme_realize(PCIDevice *pci_dev, Error **errp)
NVME_CMBSZ_SET_SZU(n->bar.cmbsz, 2); /* MBs */
NVME_CMBSZ_SET_SZ(n->bar.cmbsz, n->params.cmb_size_mb);
- n->cmbloc = n->bar.cmbloc;
- n->cmbsz = n->bar.cmbsz;
-
n->cmbuf = g_malloc0(NVME_CMBSZ_GETSIZE(n->bar.cmbsz));
memory_region_init_io(&n->ctrl_mem, OBJECT(n), &nvme_cmb_ops, n,
"nvme-cmb", NVME_CMBSZ_GETSIZE(n->bar.cmbsz));
diff --git a/hw/block/nvme.h b/hw/block/nvme.h
index 26c38bd913be..cedc8022dbb3 100644
--- a/hw/block/nvme.h
+++ b/hw/block/nvme.h
@@ -82,8 +82,6 @@ typedef struct NvmeCtrl {
uint32_t num_namespaces;
uint32_t max_q_ents;
uint64_t ns_size;
- uint32_t cmbsz;
- uint32_t cmbloc;
uint8_t *cmbuf;
uint32_t irq_status;
uint64_t host_timestamp; /* Timestamp sent by the host */
--
2.27.0
^ permalink raw reply related [flat|nested] 24+ messages in thread
* [PATCH v7 10/22] hw/block/nvme: factor out property/constraint checks
2020-06-09 19:03 [PATCH v7 00/22] nvme: small fixes, refactoring and cleanups Klaus Jensen
` (8 preceding siblings ...)
2020-06-09 19:03 ` [PATCH v7 09/22] hw/block/nvme: remove redundant cmbloc/cmbsz members Klaus Jensen
@ 2020-06-09 19:03 ` Klaus Jensen
2020-06-09 19:03 ` [PATCH v7 11/22] hw/block/nvme: factor out device state setup Klaus Jensen
` (12 subsequent siblings)
22 siblings, 0 replies; 24+ messages in thread
From: Klaus Jensen @ 2020-06-09 19:03 UTC (permalink / raw)
To: qemu-block
Cc: Kevin Wolf, Klaus Jensen, qemu-devel, Max Reitz, Klaus Jensen,
Keith Busch, Javier Gonzalez, Maxim Levitsky,
Philippe Mathieu-Daudé
From: Klaus Jensen <k.jensen@samsung.com>
Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
Reviewed-by: Keith Busch <kbusch@kernel.org>
Message-Id: <20200514044611.734782-12-its@irrelevant.dk>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
hw/block/nvme.c | 48 ++++++++++++++++++++++++++++++------------------
1 file changed, 30 insertions(+), 18 deletions(-)
diff --git a/hw/block/nvme.c b/hw/block/nvme.c
index 61447220a873..ee669ee8dc2d 100644
--- a/hw/block/nvme.c
+++ b/hw/block/nvme.c
@@ -1354,24 +1354,19 @@ static const MemoryRegionOps nvme_cmb_ops = {
},
};
-static void nvme_realize(PCIDevice *pci_dev, Error **errp)
+static void nvme_check_constraints(NvmeCtrl *n, Error **errp)
{
- NvmeCtrl *n = NVME(pci_dev);
- NvmeIdCtrl *id = &n->id_ctrl;
+ NvmeParams *params = &n->params;
- int i;
- int64_t bs_size;
- uint8_t *pci_conf;
-
- if (n->params.num_queues) {
+ if (params->num_queues) {
warn_report("num_queues is deprecated; please use max_ioqpairs "
"instead");
- n->params.max_ioqpairs = n->params.num_queues - 1;
+ params->max_ioqpairs = params->num_queues - 1;
}
- if (n->params.max_ioqpairs < 1 ||
- n->params.max_ioqpairs > PCI_MSIX_FLAGS_QSIZE) {
+ if (params->max_ioqpairs < 1 ||
+ params->max_ioqpairs > PCI_MSIX_FLAGS_QSIZE) {
error_setg(errp, "max_ioqpairs must be between 1 and %d",
PCI_MSIX_FLAGS_QSIZE);
return;
@@ -1382,13 +1377,7 @@ static void nvme_realize(PCIDevice *pci_dev, Error **errp)
return;
}
- bs_size = blk_getlength(n->conf.blk);
- if (bs_size < 0) {
- error_setg(errp, "could not get backing file size");
- return;
- }
-
- if (!n->params.serial) {
+ if (!params->serial) {
error_setg(errp, "serial property not set");
return;
}
@@ -1408,6 +1397,29 @@ static void nvme_realize(PCIDevice *pci_dev, Error **errp)
host_memory_backend_set_mapped(n->pmrdev, true);
}
+}
+
+static void nvme_realize(PCIDevice *pci_dev, Error **errp)
+{
+ NvmeCtrl *n = NVME(pci_dev);
+ NvmeIdCtrl *id = &n->id_ctrl;
+ Error *local_err = NULL;
+
+ int i;
+ int64_t bs_size;
+ uint8_t *pci_conf;
+
+ nvme_check_constraints(n, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ bs_size = blk_getlength(n->conf.blk);
+ if (bs_size < 0) {
+ error_setg(errp, "could not get backing file size");
+ return;
+ }
blkconf_blocksizes(&n->conf);
if (!blkconf_apply_backend_options(&n->conf, blk_is_read_only(n->conf.blk),
--
2.27.0
^ permalink raw reply related [flat|nested] 24+ messages in thread
* [PATCH v7 11/22] hw/block/nvme: factor out device state setup
2020-06-09 19:03 [PATCH v7 00/22] nvme: small fixes, refactoring and cleanups Klaus Jensen
` (9 preceding siblings ...)
2020-06-09 19:03 ` [PATCH v7 10/22] hw/block/nvme: factor out property/constraint checks Klaus Jensen
@ 2020-06-09 19:03 ` Klaus Jensen
2020-06-09 19:03 ` [PATCH v7 12/22] hw/block/nvme: factor out block backend setup Klaus Jensen
` (11 subsequent siblings)
22 siblings, 0 replies; 24+ messages in thread
From: Klaus Jensen @ 2020-06-09 19:03 UTC (permalink / raw)
To: qemu-block
Cc: Kevin Wolf, Klaus Jensen, qemu-devel, Max Reitz, Klaus Jensen,
Keith Busch, Javier Gonzalez, Maxim Levitsky,
Philippe Mathieu-Daudé
From: Klaus Jensen <k.jensen@samsung.com>
Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
Reviewed-by: Keith Busch <kbusch@kernel.org>
Message-Id: <20200514044611.734782-13-its@irrelevant.dk>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
hw/block/nvme.c | 22 +++++++++++++---------
1 file changed, 13 insertions(+), 9 deletions(-)
diff --git a/hw/block/nvme.c b/hw/block/nvme.c
index ee669ee8dc2d..b721cab9b0fd 100644
--- a/hw/block/nvme.c
+++ b/hw/block/nvme.c
@@ -1399,6 +1399,17 @@ static void nvme_check_constraints(NvmeCtrl *n, Error **errp)
}
}
+static void nvme_init_state(NvmeCtrl *n)
+{
+ n->num_namespaces = 1;
+ /* add one to max_ioqpairs to account for the admin queue pair */
+ n->reg_size = pow2ceil(NVME_REG_SIZE +
+ 2 * (n->params.max_ioqpairs + 1) * NVME_DB_SIZE);
+ n->namespaces = g_new0(NvmeNamespace, n->num_namespaces);
+ n->sq = g_new0(NvmeSQueue *, n->params.max_ioqpairs + 1);
+ n->cq = g_new0(NvmeCQueue *, n->params.max_ioqpairs + 1);
+}
+
static void nvme_realize(PCIDevice *pci_dev, Error **errp)
{
NvmeCtrl *n = NVME(pci_dev);
@@ -1415,6 +1426,8 @@ static void nvme_realize(PCIDevice *pci_dev, Error **errp)
return;
}
+ nvme_init_state(n);
+
bs_size = blk_getlength(n->conf.blk);
if (bs_size < 0) {
error_setg(errp, "could not get backing file size");
@@ -1433,17 +1446,8 @@ static void nvme_realize(PCIDevice *pci_dev, Error **errp)
pci_config_set_class(pci_dev->config, PCI_CLASS_STORAGE_EXPRESS);
pcie_endpoint_cap_init(pci_dev, 0x80);
- n->num_namespaces = 1;
-
- /* add one to max_ioqpairs to account for the admin queue pair */
- n->reg_size = pow2ceil(NVME_REG_SIZE +
- 2 * (n->params.max_ioqpairs + 1) * NVME_DB_SIZE);
n->ns_size = bs_size / (uint64_t)n->num_namespaces;
- n->namespaces = g_new0(NvmeNamespace, n->num_namespaces);
- n->sq = g_new0(NvmeSQueue *, n->params.max_ioqpairs + 1);
- n->cq = g_new0(NvmeCQueue *, n->params.max_ioqpairs + 1);
-
memory_region_init_io(&n->iomem, OBJECT(n), &nvme_mmio_ops, n,
"nvme", n->reg_size);
pci_register_bar(pci_dev, 0,
--
2.27.0
^ permalink raw reply related [flat|nested] 24+ messages in thread
* [PATCH v7 12/22] hw/block/nvme: factor out block backend setup
2020-06-09 19:03 [PATCH v7 00/22] nvme: small fixes, refactoring and cleanups Klaus Jensen
` (10 preceding siblings ...)
2020-06-09 19:03 ` [PATCH v7 11/22] hw/block/nvme: factor out device state setup Klaus Jensen
@ 2020-06-09 19:03 ` Klaus Jensen
2020-06-09 19:03 ` [PATCH v7 13/22] hw/block/nvme: add namespace helpers Klaus Jensen
` (10 subsequent siblings)
22 siblings, 0 replies; 24+ messages in thread
From: Klaus Jensen @ 2020-06-09 19:03 UTC (permalink / raw)
To: qemu-block
Cc: Kevin Wolf, Klaus Jensen, qemu-devel, Max Reitz, Klaus Jensen,
Keith Busch, Javier Gonzalez, Maxim Levitsky,
Philippe Mathieu-Daudé
From: Klaus Jensen <k.jensen@samsung.com>
Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
Reviewed-by: Keith Busch <kbusch@kernel.org>
Message-Id: <20200514044611.734782-14-its@irrelevant.dk>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
hw/block/nvme.c | 13 ++++++++++---
1 file changed, 10 insertions(+), 3 deletions(-)
diff --git a/hw/block/nvme.c b/hw/block/nvme.c
index b721cab9b0fd..87f1f0d0d1a1 100644
--- a/hw/block/nvme.c
+++ b/hw/block/nvme.c
@@ -1410,6 +1410,13 @@ static void nvme_init_state(NvmeCtrl *n)
n->cq = g_new0(NvmeCQueue *, n->params.max_ioqpairs + 1);
}
+static void nvme_init_blk(NvmeCtrl *n, Error **errp)
+{
+ blkconf_blocksizes(&n->conf);
+ blkconf_apply_backend_options(&n->conf, blk_is_read_only(n->conf.blk),
+ false, errp);
+}
+
static void nvme_realize(PCIDevice *pci_dev, Error **errp)
{
NvmeCtrl *n = NVME(pci_dev);
@@ -1434,9 +1441,9 @@ static void nvme_realize(PCIDevice *pci_dev, Error **errp)
return;
}
- blkconf_blocksizes(&n->conf);
- if (!blkconf_apply_backend_options(&n->conf, blk_is_read_only(n->conf.blk),
- false, errp)) {
+ nvme_init_blk(n, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
return;
}
--
2.27.0
^ permalink raw reply related [flat|nested] 24+ messages in thread
* [PATCH v7 13/22] hw/block/nvme: add namespace helpers
2020-06-09 19:03 [PATCH v7 00/22] nvme: small fixes, refactoring and cleanups Klaus Jensen
` (11 preceding siblings ...)
2020-06-09 19:03 ` [PATCH v7 12/22] hw/block/nvme: factor out block backend setup Klaus Jensen
@ 2020-06-09 19:03 ` Klaus Jensen
2020-06-09 19:03 ` [PATCH v7 14/22] hw/block/nvme: factor out namespace setup Klaus Jensen
` (9 subsequent siblings)
22 siblings, 0 replies; 24+ messages in thread
From: Klaus Jensen @ 2020-06-09 19:03 UTC (permalink / raw)
To: qemu-block
Cc: Kevin Wolf, Klaus Jensen, qemu-devel, Max Reitz, Klaus Jensen,
Keith Busch, Javier Gonzalez, Maxim Levitsky,
Philippe Mathieu-Daudé
From: Klaus Jensen <k.jensen@samsung.com>
Introduce some small helpers to make the next patches easier on the eye.
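For example (purely illustrative numbers), with a 1 GiB backing image
and 512 byte LBAs (lbaf[0].ds = BDRV_SECTOR_BITS = 9) the helpers
reduce to a simple shift:
    #include <stdint.h>
    /* Illustrative arithmetic only; not taken from the patch. */
    static uint64_t example_nlbas(void)
    {
        uint64_t ns_size = 1ULL << 30;       /* 1 GiB backing image        */
        uint8_t  lbads   = 9;                /* 512 byte LBAs (2^9)        */
        return ns_size >> lbads;             /* nvme_ns_nlbas() -> 2097152 */
    }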
Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
Reviewed-by: Keith Busch <kbusch@kernel.org>
Message-Id: <20200514044611.734782-15-its@irrelevant.dk>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
hw/block/nvme.c | 3 +--
hw/block/nvme.h | 17 +++++++++++++++++
2 files changed, 18 insertions(+), 2 deletions(-)
diff --git a/hw/block/nvme.c b/hw/block/nvme.c
index 87f1f0d0d1a1..3f3db17231b3 100644
--- a/hw/block/nvme.c
+++ b/hw/block/nvme.c
@@ -1573,8 +1573,7 @@ static void nvme_realize(PCIDevice *pci_dev, Error **errp)
id_ns->dps = 0;
id_ns->lbaf[0].ds = BDRV_SECTOR_BITS;
id_ns->ncap = id_ns->nuse = id_ns->nsze =
- cpu_to_le64(n->ns_size >>
- id_ns->lbaf[NVME_ID_NS_FLBAS_INDEX(ns->id_ns.flbas)].ds);
+ cpu_to_le64(nvme_ns_nlbas(n, ns));
}
}
diff --git a/hw/block/nvme.h b/hw/block/nvme.h
index cedc8022dbb3..61dd9b23b81d 100644
--- a/hw/block/nvme.h
+++ b/hw/block/nvme.h
@@ -61,6 +61,17 @@ typedef struct NvmeNamespace {
NvmeIdNs id_ns;
} NvmeNamespace;
+static inline NvmeLBAF *nvme_ns_lbaf(NvmeNamespace *ns)
+{
+ NvmeIdNs *id_ns = &ns->id_ns;
+ return &id_ns->lbaf[NVME_ID_NS_FLBAS_INDEX(id_ns->flbas)];
+}
+
+static inline uint8_t nvme_ns_lbads(NvmeNamespace *ns)
+{
+ return nvme_ns_lbaf(ns)->ds;
+}
+
#define TYPE_NVME "nvme"
#define NVME(obj) \
OBJECT_CHECK(NvmeCtrl, (obj), TYPE_NVME)
@@ -97,4 +108,10 @@ typedef struct NvmeCtrl {
NvmeIdCtrl id_ctrl;
} NvmeCtrl;
+/* calculate the number of LBAs that the namespace can accommodate */
+static inline uint64_t nvme_ns_nlbas(NvmeCtrl *n, NvmeNamespace *ns)
+{
+ return n->ns_size >> nvme_ns_lbads(ns);
+}
+
#endif /* HW_NVME_H */
--
2.27.0
^ permalink raw reply related [flat|nested] 24+ messages in thread
* [PATCH v7 14/22] hw/block/nvme: factor out namespace setup
2020-06-09 19:03 [PATCH v7 00/22] nvme: small fixes, refactoring and cleanups Klaus Jensen
` (12 preceding siblings ...)
2020-06-09 19:03 ` [PATCH v7 13/22] hw/block/nvme: add namespace helpers Klaus Jensen
@ 2020-06-09 19:03 ` Klaus Jensen
2020-06-09 19:03 ` [PATCH v7 15/22] hw/block/nvme: factor out pci setup Klaus Jensen
` (8 subsequent siblings)
22 siblings, 0 replies; 24+ messages in thread
From: Klaus Jensen @ 2020-06-09 19:03 UTC (permalink / raw)
To: qemu-block
Cc: Kevin Wolf, Klaus Jensen, qemu-devel, Max Reitz, Klaus Jensen,
Keith Busch, Javier Gonzalez, Maxim Levitsky,
Philippe Mathieu-Daudé
From: Klaus Jensen <k.jensen@samsung.com>
Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
Reviewed-by: Keith Busch <kbusch@kernel.org>
Message-Id: <20200514044611.734782-16-its@irrelevant.dk>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
hw/block/nvme.c | 46 ++++++++++++++++++++++++++--------------------
1 file changed, 26 insertions(+), 20 deletions(-)
diff --git a/hw/block/nvme.c b/hw/block/nvme.c
index 3f3db17231b3..c98af03f4449 100644
--- a/hw/block/nvme.c
+++ b/hw/block/nvme.c
@@ -1417,6 +1417,27 @@ static void nvme_init_blk(NvmeCtrl *n, Error **errp)
false, errp);
}
+static void nvme_init_namespace(NvmeCtrl *n, NvmeNamespace *ns, Error **errp)
+{
+ int64_t bs_size;
+ NvmeIdNs *id_ns = &ns->id_ns;
+
+ bs_size = blk_getlength(n->conf.blk);
+ if (bs_size < 0) {
+ error_setg_errno(errp, -bs_size, "could not get backing file size");
+ return;
+ }
+
+ n->ns_size = bs_size;
+
+ id_ns->lbaf[0].ds = BDRV_SECTOR_BITS;
+ id_ns->nsze = cpu_to_le64(nvme_ns_nlbas(n, ns));
+
+ /* no thin provisioning */
+ id_ns->ncap = id_ns->nsze;
+ id_ns->nuse = id_ns->ncap;
+}
+
static void nvme_realize(PCIDevice *pci_dev, Error **errp)
{
NvmeCtrl *n = NVME(pci_dev);
@@ -1424,7 +1445,6 @@ static void nvme_realize(PCIDevice *pci_dev, Error **errp)
Error *local_err = NULL;
int i;
- int64_t bs_size;
uint8_t *pci_conf;
nvme_check_constraints(n, &local_err);
@@ -1435,12 +1455,6 @@ static void nvme_realize(PCIDevice *pci_dev, Error **errp)
nvme_init_state(n);
- bs_size = blk_getlength(n->conf.blk);
- if (bs_size < 0) {
- error_setg(errp, "could not get backing file size");
- return;
- }
-
nvme_init_blk(n, &local_err);
if (local_err) {
error_propagate(errp, local_err);
@@ -1453,8 +1467,6 @@ static void nvme_realize(PCIDevice *pci_dev, Error **errp)
pci_config_set_class(pci_dev->config, PCI_CLASS_STORAGE_EXPRESS);
pcie_endpoint_cap_init(pci_dev, 0x80);
- n->ns_size = bs_size / (uint64_t)n->num_namespaces;
-
memory_region_init_io(&n->iomem, OBJECT(n), &nvme_mmio_ops, n,
"nvme", n->reg_size);
pci_register_bar(pci_dev, 0,
@@ -1563,17 +1575,11 @@ static void nvme_realize(PCIDevice *pci_dev, Error **errp)
}
for (i = 0; i < n->num_namespaces; i++) {
- NvmeNamespace *ns = &n->namespaces[i];
- NvmeIdNs *id_ns = &ns->id_ns;
- id_ns->nsfeat = 0;
- id_ns->nlbaf = 0;
- id_ns->flbas = 0;
- id_ns->mc = 0;
- id_ns->dpc = 0;
- id_ns->dps = 0;
- id_ns->lbaf[0].ds = BDRV_SECTOR_BITS;
- id_ns->ncap = id_ns->nuse = id_ns->nsze =
- cpu_to_le64(nvme_ns_nlbas(n, ns));
+ nvme_init_namespace(n, &n->namespaces[i], &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ return;
+ }
}
}
--
2.27.0
^ permalink raw reply related [flat|nested] 24+ messages in thread
* [PATCH v7 15/22] hw/block/nvme: factor out pci setup
2020-06-09 19:03 [PATCH v7 00/22] nvme: small fixes, refactoring and cleanups Klaus Jensen
` (13 preceding siblings ...)
2020-06-09 19:03 ` [PATCH v7 14/22] hw/block/nvme: factor out namespace setup Klaus Jensen
@ 2020-06-09 19:03 ` Klaus Jensen
2020-06-09 19:03 ` [PATCH v7 16/22] hw/block/nvme: factor out cmb setup Klaus Jensen
` (7 subsequent siblings)
22 siblings, 0 replies; 24+ messages in thread
From: Klaus Jensen @ 2020-06-09 19:03 UTC (permalink / raw)
To: qemu-block
Cc: Kevin Wolf, Klaus Jensen, qemu-devel, Max Reitz, Klaus Jensen,
Keith Busch, Javier Gonzalez, Maxim Levitsky,
Philippe Mathieu-Daudé
From: Klaus Jensen <k.jensen@samsung.com>
Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
Reviewed-by: Keith Busch <kbusch@kernel.org>
Message-Id: <20200514044611.734782-17-its@irrelevant.dk>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
hw/block/nvme.c | 30 ++++++++++++++++++------------
1 file changed, 18 insertions(+), 12 deletions(-)
diff --git a/hw/block/nvme.c b/hw/block/nvme.c
index c98af03f4449..a4022b029166 100644
--- a/hw/block/nvme.c
+++ b/hw/block/nvme.c
@@ -1438,6 +1438,22 @@ static void nvme_init_namespace(NvmeCtrl *n, NvmeNamespace *ns, Error **errp)
id_ns->nuse = id_ns->ncap;
}
+static void nvme_init_pci(NvmeCtrl *n, PCIDevice *pci_dev)
+{
+ uint8_t *pci_conf = pci_dev->config;
+
+ pci_conf[PCI_INTERRUPT_PIN] = 1;
+ pci_config_set_prog_interface(pci_conf, 0x2);
+ pci_config_set_class(pci_conf, PCI_CLASS_STORAGE_EXPRESS);
+ pcie_endpoint_cap_init(pci_dev, 0x80);
+
+ memory_region_init_io(&n->iomem, OBJECT(n), &nvme_mmio_ops, n, "nvme",
+ n->reg_size);
+ pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY |
+ PCI_BASE_ADDRESS_MEM_TYPE_64, &n->iomem);
+ msix_init_exclusive_bar(pci_dev, n->params.max_ioqpairs + 1, 4, NULL);
+}
+
static void nvme_realize(PCIDevice *pci_dev, Error **errp)
{
NvmeCtrl *n = NVME(pci_dev);
@@ -1461,19 +1477,9 @@ static void nvme_realize(PCIDevice *pci_dev, Error **errp)
return;
}
+ nvme_init_pci(n, pci_dev);
+
pci_conf = pci_dev->config;
- pci_conf[PCI_INTERRUPT_PIN] = 1;
- pci_config_set_prog_interface(pci_dev->config, 0x2);
- pci_config_set_class(pci_dev->config, PCI_CLASS_STORAGE_EXPRESS);
- pcie_endpoint_cap_init(pci_dev, 0x80);
-
- memory_region_init_io(&n->iomem, OBJECT(n), &nvme_mmio_ops, n,
- "nvme", n->reg_size);
- pci_register_bar(pci_dev, 0,
- PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_64,
- &n->iomem);
- msix_init_exclusive_bar(pci_dev, n->params.max_ioqpairs + 1, 4, NULL);
-
id->vid = cpu_to_le16(pci_get_word(pci_conf + PCI_VENDOR_ID));
id->ssvid = cpu_to_le16(pci_get_word(pci_conf + PCI_SUBSYSTEM_VENDOR_ID));
strpadcpy((char *)id->mn, sizeof(id->mn), "QEMU NVMe Ctrl", ' ');
--
2.27.0
^ permalink raw reply related [flat|nested] 24+ messages in thread
* [PATCH v7 16/22] hw/block/nvme: factor out cmb setup
2020-06-09 19:03 [PATCH v7 00/22] nvme: small fixes, refactoring and cleanups Klaus Jensen
` (14 preceding siblings ...)
2020-06-09 19:03 ` [PATCH v7 15/22] hw/block/nvme: factor out pci setup Klaus Jensen
@ 2020-06-09 19:03 ` Klaus Jensen
2020-06-09 19:03 ` [PATCH v7 17/22] hw/block/nvme: factor out pmr setup Klaus Jensen
` (6 subsequent siblings)
22 siblings, 0 replies; 24+ messages in thread
From: Klaus Jensen @ 2020-06-09 19:03 UTC (permalink / raw)
To: qemu-block
Cc: Kevin Wolf, Klaus Jensen, qemu-devel, Max Reitz, Klaus Jensen,
Keith Busch, Javier Gonzalez, Maxim Levitsky,
Philippe Mathieu-Daudé
From: Klaus Jensen <k.jensen@samsung.com>
Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
Reviewed-by: Keith Busch <kbusch@kernel.org>
Message-Id: <20200514044611.734782-18-its@irrelevant.dk>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
hw/block/nvme.c | 43 ++++++++++++++++++++++++-------------------
1 file changed, 24 insertions(+), 19 deletions(-)
diff --git a/hw/block/nvme.c b/hw/block/nvme.c
index a4022b029166..8aabb4c3c39f 100644
--- a/hw/block/nvme.c
+++ b/hw/block/nvme.c
@@ -56,6 +56,7 @@
#define NVME_REG_SIZE 0x1000
#define NVME_DB_SIZE 4
+#define NVME_CMB_BIR 2
#define NVME_GUEST_ERR(trace, fmt, ...) \
do { \
@@ -1438,6 +1439,28 @@ static void nvme_init_namespace(NvmeCtrl *n, NvmeNamespace *ns, Error **errp)
id_ns->nuse = id_ns->ncap;
}
+static void nvme_init_cmb(NvmeCtrl *n, PCIDevice *pci_dev)
+{
+ NVME_CMBLOC_SET_BIR(n->bar.cmbloc, NVME_CMB_BIR);
+ NVME_CMBLOC_SET_OFST(n->bar.cmbloc, 0);
+
+ NVME_CMBSZ_SET_SQS(n->bar.cmbsz, 1);
+ NVME_CMBSZ_SET_CQS(n->bar.cmbsz, 0);
+ NVME_CMBSZ_SET_LISTS(n->bar.cmbsz, 0);
+ NVME_CMBSZ_SET_RDS(n->bar.cmbsz, 1);
+ NVME_CMBSZ_SET_WDS(n->bar.cmbsz, 1);
+ NVME_CMBSZ_SET_SZU(n->bar.cmbsz, 2); /* MBs */
+ NVME_CMBSZ_SET_SZ(n->bar.cmbsz, n->params.cmb_size_mb);
+
+ n->cmbuf = g_malloc0(NVME_CMBSZ_GETSIZE(n->bar.cmbsz));
+ memory_region_init_io(&n->ctrl_mem, OBJECT(n), &nvme_cmb_ops, n,
+ "nvme-cmb", NVME_CMBSZ_GETSIZE(n->bar.cmbsz));
+ pci_register_bar(pci_dev, NVME_CMBLOC_BIR(n->bar.cmbloc),
+ PCI_BASE_ADDRESS_SPACE_MEMORY |
+ PCI_BASE_ADDRESS_MEM_TYPE_64 |
+ PCI_BASE_ADDRESS_MEM_PREFETCH, &n->ctrl_mem);
+}
+
static void nvme_init_pci(NvmeCtrl *n, PCIDevice *pci_dev)
{
uint8_t *pci_conf = pci_dev->config;
@@ -1514,25 +1537,7 @@ static void nvme_realize(PCIDevice *pci_dev, Error **errp)
n->bar.intmc = n->bar.intms = 0;
if (n->params.cmb_size_mb) {
-
- NVME_CMBLOC_SET_BIR(n->bar.cmbloc, 2);
- NVME_CMBLOC_SET_OFST(n->bar.cmbloc, 0);
-
- NVME_CMBSZ_SET_SQS(n->bar.cmbsz, 1);
- NVME_CMBSZ_SET_CQS(n->bar.cmbsz, 0);
- NVME_CMBSZ_SET_LISTS(n->bar.cmbsz, 0);
- NVME_CMBSZ_SET_RDS(n->bar.cmbsz, 1);
- NVME_CMBSZ_SET_WDS(n->bar.cmbsz, 1);
- NVME_CMBSZ_SET_SZU(n->bar.cmbsz, 2); /* MBs */
- NVME_CMBSZ_SET_SZ(n->bar.cmbsz, n->params.cmb_size_mb);
-
- n->cmbuf = g_malloc0(NVME_CMBSZ_GETSIZE(n->bar.cmbsz));
- memory_region_init_io(&n->ctrl_mem, OBJECT(n), &nvme_cmb_ops, n,
- "nvme-cmb", NVME_CMBSZ_GETSIZE(n->bar.cmbsz));
- pci_register_bar(pci_dev, NVME_CMBLOC_BIR(n->bar.cmbloc),
- PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_64 |
- PCI_BASE_ADDRESS_MEM_PREFETCH, &n->ctrl_mem);
-
+ nvme_init_cmb(n, pci_dev);
} else if (n->pmrdev) {
/* Controller Capabilities register */
NVME_CAP_SET_PMRS(n->bar.cap, 1);
--
2.27.0
^ permalink raw reply related [flat|nested] 24+ messages in thread
* [PATCH v7 17/22] hw/block/nvme: factor out pmr setup
2020-06-09 19:03 [PATCH v7 00/22] nvme: small fixes, refactoring and cleanups Klaus Jensen
` (15 preceding siblings ...)
2020-06-09 19:03 ` [PATCH v7 16/22] hw/block/nvme: factor out cmb setup Klaus Jensen
@ 2020-06-09 19:03 ` Klaus Jensen
2020-06-09 19:03 ` [PATCH v7 18/22] hw/block/nvme: do cmb/pmr init as part of pci init Klaus Jensen
` (5 subsequent siblings)
22 siblings, 0 replies; 24+ messages in thread
From: Klaus Jensen @ 2020-06-09 19:03 UTC (permalink / raw)
To: qemu-block
Cc: Kevin Wolf, Klaus Jensen, qemu-devel, Max Reitz, Klaus Jensen,
Keith Busch, Javier Gonzalez, Maxim Levitsky,
Philippe Mathieu-Daudé
From: Klaus Jensen <k.jensen@samsung.com>
Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
Message-Id: <20200514044611.734782-19-its@irrelevant.dk>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
hw/block/nvme.c | 95 ++++++++++++++++++++++++++-----------------------
1 file changed, 51 insertions(+), 44 deletions(-)
diff --git a/hw/block/nvme.c b/hw/block/nvme.c
index 8aabb4c3c39f..b954e7b7b2fe 100644
--- a/hw/block/nvme.c
+++ b/hw/block/nvme.c
@@ -57,6 +57,7 @@
#define NVME_REG_SIZE 0x1000
#define NVME_DB_SIZE 4
#define NVME_CMB_BIR 2
+#define NVME_PMR_BIR 2
#define NVME_GUEST_ERR(trace, fmt, ...) \
do { \
@@ -1461,6 +1462,55 @@ static void nvme_init_cmb(NvmeCtrl *n, PCIDevice *pci_dev)
PCI_BASE_ADDRESS_MEM_PREFETCH, &n->ctrl_mem);
}
+static void nvme_init_pmr(NvmeCtrl *n, PCIDevice *pci_dev)
+{
+ /* Controller Capabilities register */
+ NVME_CAP_SET_PMRS(n->bar.cap, 1);
+
+ /* PMR Capabilities register */
+ n->bar.pmrcap = 0;
+ NVME_PMRCAP_SET_RDS(n->bar.pmrcap, 0);
+ NVME_PMRCAP_SET_WDS(n->bar.pmrcap, 0);
+ NVME_PMRCAP_SET_BIR(n->bar.pmrcap, NVME_PMR_BIR);
+ NVME_PMRCAP_SET_PMRTU(n->bar.pmrcap, 0);
+ /* Turn on bit 1 support */
+ NVME_PMRCAP_SET_PMRWBM(n->bar.pmrcap, 0x02);
+ NVME_PMRCAP_SET_PMRTO(n->bar.pmrcap, 0);
+ NVME_PMRCAP_SET_CMSS(n->bar.pmrcap, 0);
+
+ /* PMR Control register */
+ n->bar.pmrctl = 0;
+ NVME_PMRCTL_SET_EN(n->bar.pmrctl, 0);
+
+ /* PMR Status register */
+ n->bar.pmrsts = 0;
+ NVME_PMRSTS_SET_ERR(n->bar.pmrsts, 0);
+ NVME_PMRSTS_SET_NRDY(n->bar.pmrsts, 0);
+ NVME_PMRSTS_SET_HSTS(n->bar.pmrsts, 0);
+ NVME_PMRSTS_SET_CBAI(n->bar.pmrsts, 0);
+
+ /* PMR Elasticity Buffer Size register */
+ n->bar.pmrebs = 0;
+ NVME_PMREBS_SET_PMRSZU(n->bar.pmrebs, 0);
+ NVME_PMREBS_SET_RBB(n->bar.pmrebs, 0);
+ NVME_PMREBS_SET_PMRWBZ(n->bar.pmrebs, 0);
+
+ /* PMR Sustained Write Throughput register */
+ n->bar.pmrswtp = 0;
+ NVME_PMRSWTP_SET_PMRSWTU(n->bar.pmrswtp, 0);
+ NVME_PMRSWTP_SET_PMRSWTV(n->bar.pmrswtp, 0);
+
+ /* PMR Memory Space Control register */
+ n->bar.pmrmsc = 0;
+ NVME_PMRMSC_SET_CMSE(n->bar.pmrmsc, 0);
+ NVME_PMRMSC_SET_CBA(n->bar.pmrmsc, 0);
+
+ pci_register_bar(pci_dev, NVME_PMRCAP_BIR(n->bar.pmrcap),
+ PCI_BASE_ADDRESS_SPACE_MEMORY |
+ PCI_BASE_ADDRESS_MEM_TYPE_64 |
+ PCI_BASE_ADDRESS_MEM_PREFETCH, &n->pmrdev->mr);
+}
+
static void nvme_init_pci(NvmeCtrl *n, PCIDevice *pci_dev)
{
uint8_t *pci_conf = pci_dev->config;
@@ -1539,50 +1589,7 @@ static void nvme_realize(PCIDevice *pci_dev, Error **errp)
if (n->params.cmb_size_mb) {
nvme_init_cmb(n, pci_dev);
} else if (n->pmrdev) {
- /* Controller Capabilities register */
- NVME_CAP_SET_PMRS(n->bar.cap, 1);
-
- /* PMR Capabities register */
- n->bar.pmrcap = 0;
- NVME_PMRCAP_SET_RDS(n->bar.pmrcap, 0);
- NVME_PMRCAP_SET_WDS(n->bar.pmrcap, 0);
- NVME_PMRCAP_SET_BIR(n->bar.pmrcap, 2);
- NVME_PMRCAP_SET_PMRTU(n->bar.pmrcap, 0);
- /* Turn on bit 1 support */
- NVME_PMRCAP_SET_PMRWBM(n->bar.pmrcap, 0x02);
- NVME_PMRCAP_SET_PMRTO(n->bar.pmrcap, 0);
- NVME_PMRCAP_SET_CMSS(n->bar.pmrcap, 0);
-
- /* PMR Control register */
- n->bar.pmrctl = 0;
- NVME_PMRCTL_SET_EN(n->bar.pmrctl, 0);
-
- /* PMR Status register */
- n->bar.pmrsts = 0;
- NVME_PMRSTS_SET_ERR(n->bar.pmrsts, 0);
- NVME_PMRSTS_SET_NRDY(n->bar.pmrsts, 0);
- NVME_PMRSTS_SET_HSTS(n->bar.pmrsts, 0);
- NVME_PMRSTS_SET_CBAI(n->bar.pmrsts, 0);
-
- /* PMR Elasticity Buffer Size register */
- n->bar.pmrebs = 0;
- NVME_PMREBS_SET_PMRSZU(n->bar.pmrebs, 0);
- NVME_PMREBS_SET_RBB(n->bar.pmrebs, 0);
- NVME_PMREBS_SET_PMRWBZ(n->bar.pmrebs, 0);
-
- /* PMR Sustained Write Throughput register */
- n->bar.pmrswtp = 0;
- NVME_PMRSWTP_SET_PMRSWTU(n->bar.pmrswtp, 0);
- NVME_PMRSWTP_SET_PMRSWTV(n->bar.pmrswtp, 0);
-
- /* PMR Memory Space Control register */
- n->bar.pmrmsc = 0;
- NVME_PMRMSC_SET_CMSE(n->bar.pmrmsc, 0);
- NVME_PMRMSC_SET_CBA(n->bar.pmrmsc, 0);
-
- pci_register_bar(pci_dev, NVME_PMRCAP_BIR(n->bar.pmrcap),
- PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_64 |
- PCI_BASE_ADDRESS_MEM_PREFETCH, &n->pmrdev->mr);
+ nvme_init_pmr(n, pci_dev);
}
for (i = 0; i < n->num_namespaces; i++) {
--
2.27.0
^ permalink raw reply related [flat|nested] 24+ messages in thread
* [PATCH v7 18/22] hw/block/nvme: do cmb/pmr init as part of pci init
2020-06-09 19:03 [PATCH v7 00/22] nvme: small fixes, refactoring and cleanups Klaus Jensen
` (16 preceding siblings ...)
2020-06-09 19:03 ` [PATCH v7 17/22] hw/block/nvme: factor out pmr setup Klaus Jensen
@ 2020-06-09 19:03 ` Klaus Jensen
2020-06-09 19:03 ` [PATCH v7 19/22] hw/block/nvme: factor out controller identify setup Klaus Jensen
` (4 subsequent siblings)
22 siblings, 0 replies; 24+ messages in thread
From: Klaus Jensen @ 2020-06-09 19:03 UTC (permalink / raw)
To: qemu-block
Cc: Kevin Wolf, Klaus Jensen, qemu-devel, Max Reitz, Klaus Jensen,
Keith Busch, Javier Gonzalez, Maxim Levitsky,
Philippe Mathieu-Daudé
From: Klaus Jensen <k.jensen@samsung.com>
Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
Message-Id: <20200514044611.734782-20-its@irrelevant.dk>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
hw/block/nvme.c | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/hw/block/nvme.c b/hw/block/nvme.c
index b954e7b7b2fe..02a6a97df925 100644
--- a/hw/block/nvme.c
+++ b/hw/block/nvme.c
@@ -1525,6 +1525,12 @@ static void nvme_init_pci(NvmeCtrl *n, PCIDevice *pci_dev)
pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY |
PCI_BASE_ADDRESS_MEM_TYPE_64, &n->iomem);
msix_init_exclusive_bar(pci_dev, n->params.max_ioqpairs + 1, 4, NULL);
+
+ if (n->params.cmb_size_mb) {
+ nvme_init_cmb(n, pci_dev);
+ } else if (n->pmrdev) {
+ nvme_init_pmr(n, pci_dev);
+ }
}
static void nvme_realize(PCIDevice *pci_dev, Error **errp)
@@ -1586,12 +1592,6 @@ static void nvme_realize(PCIDevice *pci_dev, Error **errp)
n->bar.vs = 0x00010200;
n->bar.intmc = n->bar.intms = 0;
- if (n->params.cmb_size_mb) {
- nvme_init_cmb(n, pci_dev);
- } else if (n->pmrdev) {
- nvme_init_pmr(n, pci_dev);
- }
-
for (i = 0; i < n->num_namespaces; i++) {
nvme_init_namespace(n, &n->namespaces[i], &local_err);
if (local_err) {
--
2.27.0
^ permalink raw reply related [flat|nested] 24+ messages in thread
* [PATCH v7 19/22] hw/block/nvme: factor out controller identify setup
2020-06-09 19:03 [PATCH v7 00/22] nvme: small fixes, refactoring and cleanups Klaus Jensen
` (17 preceding siblings ...)
2020-06-09 19:03 ` [PATCH v7 18/22] hw/block/nvme: do cmb/pmr init as part of pci init Klaus Jensen
@ 2020-06-09 19:03 ` Klaus Jensen
2020-06-09 19:03 ` [PATCH v7 20/22] hw/block/nvme: Verify msix_vector_use() returned value Klaus Jensen
` (3 subsequent siblings)
22 siblings, 0 replies; 24+ messages in thread
From: Klaus Jensen @ 2020-06-09 19:03 UTC (permalink / raw)
To: qemu-block
Cc: Kevin Wolf, Klaus Jensen, qemu-devel, Max Reitz, Klaus Jensen,
Keith Busch, Javier Gonzalez, Maxim Levitsky,
Philippe Mathieu-Daudé
From: Klaus Jensen <k.jensen@samsung.com>
Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
Reviewed-by: Keith Busch <kbusch@kernel.org>
Message-Id: <20200514044611.734782-21-its@irrelevant.dk>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
hw/block/nvme.c | 49 ++++++++++++++++++++++++++-----------------------
1 file changed, 26 insertions(+), 23 deletions(-)
diff --git a/hw/block/nvme.c b/hw/block/nvme.c
index 02a6a97df925..e10fc774fc34 100644
--- a/hw/block/nvme.c
+++ b/hw/block/nvme.c
@@ -1533,32 +1533,11 @@ static void nvme_init_pci(NvmeCtrl *n, PCIDevice *pci_dev)
}
}
-static void nvme_realize(PCIDevice *pci_dev, Error **errp)
+static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev)
{
- NvmeCtrl *n = NVME(pci_dev);
NvmeIdCtrl *id = &n->id_ctrl;
- Error *local_err = NULL;
+ uint8_t *pci_conf = pci_dev->config;
- int i;
- uint8_t *pci_conf;
-
- nvme_check_constraints(n, &local_err);
- if (local_err) {
- error_propagate(errp, local_err);
- return;
- }
-
- nvme_init_state(n);
-
- nvme_init_blk(n, &local_err);
- if (local_err) {
- error_propagate(errp, local_err);
- return;
- }
-
- nvme_init_pci(n, pci_dev);
-
- pci_conf = pci_dev->config;
id->vid = cpu_to_le16(pci_get_word(pci_conf + PCI_VENDOR_ID));
id->ssvid = cpu_to_le16(pci_get_word(pci_conf + PCI_SUBSYSTEM_VENDOR_ID));
strpadcpy((char *)id->mn, sizeof(id->mn), "QEMU NVMe Ctrl", ' ');
@@ -1591,6 +1570,30 @@ static void nvme_realize(PCIDevice *pci_dev, Error **errp)
n->bar.vs = 0x00010200;
n->bar.intmc = n->bar.intms = 0;
+}
+
+static void nvme_realize(PCIDevice *pci_dev, Error **errp)
+{
+ NvmeCtrl *n = NVME(pci_dev);
+ Error *local_err = NULL;
+
+ int i;
+
+ nvme_check_constraints(n, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ nvme_init_state(n);
+ nvme_init_blk(n, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ nvme_init_pci(n, pci_dev);
+ nvme_init_ctrl(n, pci_dev);
for (i = 0; i < n->num_namespaces; i++) {
nvme_init_namespace(n, &n->namespaces[i], &local_err);
--
2.27.0
^ permalink raw reply related [flat|nested] 24+ messages in thread
* [PATCH v7 20/22] hw/block/nvme: Verify msix_vector_use() returned value
2020-06-09 19:03 [PATCH v7 00/22] nvme: small fixes, refactoring and cleanups Klaus Jensen
` (18 preceding siblings ...)
2020-06-09 19:03 ` [PATCH v7 19/22] hw/block/nvme: factor out controller identify setup Klaus Jensen
@ 2020-06-09 19:03 ` Klaus Jensen
2020-06-09 19:03 ` [PATCH v7 21/22] hw/block/nvme: add msix_qsize parameter Klaus Jensen
` (2 subsequent siblings)
22 siblings, 0 replies; 24+ messages in thread
From: Klaus Jensen @ 2020-06-09 19:03 UTC (permalink / raw)
To: qemu-block
Cc: Kevin Wolf, Klaus Jensen, qemu-devel, Max Reitz, Klaus Jensen,
Keith Busch, Javier Gonzalez, Maxim Levitsky,
Philippe Mathieu-Daudé
From: Philippe Mathieu-Daudé <philmd@redhat.com>
msix_vector_use() returns -EINVAL on error. Assert it won't.
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-Id: <20200602155709.9776-1-philmd@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
hw/block/nvme.c | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/hw/block/nvme.c b/hw/block/nvme.c
index e10fc774fc34..fe17aa5d7041 100644
--- a/hw/block/nvme.c
+++ b/hw/block/nvme.c
@@ -615,6 +615,10 @@ static uint16_t nvme_del_cq(NvmeCtrl *n, NvmeCmd *cmd)
static void nvme_init_cq(NvmeCQueue *cq, NvmeCtrl *n, uint64_t dma_addr,
uint16_t cqid, uint16_t vector, uint16_t size, uint16_t irq_enabled)
{
+ int ret;
+
+ ret = msix_vector_use(&n->parent_obj, vector);
+ assert(ret == 0);
cq->ctrl = n;
cq->cqid = cqid;
cq->size = size;
@@ -625,7 +629,6 @@ static void nvme_init_cq(NvmeCQueue *cq, NvmeCtrl *n, uint64_t dma_addr,
cq->head = cq->tail = 0;
QTAILQ_INIT(&cq->req_list);
QTAILQ_INIT(&cq->sq_list);
- msix_vector_use(&n->parent_obj, cq->vector);
n->cq[cqid] = cq;
cq->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, nvme_post_cqes, cq);
}
--
2.27.0
^ permalink raw reply related [flat|nested] 24+ messages in thread
* [PATCH v7 21/22] hw/block/nvme: add msix_qsize parameter
2020-06-09 19:03 [PATCH v7 00/22] nvme: small fixes, refactoring and cleanups Klaus Jensen
` (19 preceding siblings ...)
2020-06-09 19:03 ` [PATCH v7 20/22] hw/block/nvme: Verify msix_vector_use() returned value Klaus Jensen
@ 2020-06-09 19:03 ` Klaus Jensen
2020-06-09 19:03 ` [PATCH v7 22/22] hw/block/nvme: verify msix_init_exclusive_bar() return value Klaus Jensen
2020-06-15 16:21 ` [PATCH v7 00/22] nvme: small fixes, refactoring and cleanups Kevin Wolf
22 siblings, 0 replies; 24+ messages in thread
From: Klaus Jensen @ 2020-06-09 19:03 UTC (permalink / raw)
To: qemu-block
Cc: Kevin Wolf, Klaus Jensen, qemu-devel, Max Reitz, Klaus Jensen,
Keith Busch, Javier Gonzalez, Maxim Levitsky,
Philippe Mathieu-Daudé
From: Klaus Jensen <k.jensen@samsung.com>
Decouple the requested maximum number of I/O queue pairs (the
max_ioqpairs parameter) from the number of MSI-X interrupt vectors by
introducing a new msix_qsize parameter and initializing MSI-X with it.
This allows emulating a device that has fewer vectors than I/O queue
pairs and also allows more than 2048 queue pairs. To keep the device
behaving as before, msix_qsize defaults to 65 (the default max_ioqpairs
plus one).
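For instance, a hypothetical invocation such as
    -device nvme,drive=nvm,serial=deadbeef,max_ioqpairs=8,msix_qsize=4
(values chosen only for illustration) emulates a controller with eight
I/O queue pairs but only four MSI-X vectors, so several completion
queues have to share an interrupt vector.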
This decoupling was actually suggested by Maxim some time ago in a
slightly different context, so adding a Suggested-by.
Suggested-by: Maxim Levitsky <mlevitsk@redhat.com>
Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
---
hw/block/nvme.c | 17 +++++++++++++----
hw/block/nvme.h | 1 +
2 files changed, 14 insertions(+), 4 deletions(-)
diff --git a/hw/block/nvme.c b/hw/block/nvme.c
index fe17aa5d7041..acc6dbc900e2 100644
--- a/hw/block/nvme.c
+++ b/hw/block/nvme.c
@@ -54,6 +54,7 @@
#include "trace.h"
#include "nvme.h"
+#define NVME_MAX_IOQPAIRS 0xffff
#define NVME_REG_SIZE 0x1000
#define NVME_DB_SIZE 4
#define NVME_CMB_BIR 2
@@ -662,7 +663,7 @@ static uint16_t nvme_create_cq(NvmeCtrl *n, NvmeCmd *cmd)
trace_pci_nvme_err_invalid_create_cq_vector(vector);
return NVME_INVALID_IRQ_VECTOR | NVME_DNR;
}
- if (unlikely(vector > n->params.max_ioqpairs)) {
+ if (unlikely(vector >= n->params.msix_qsize)) {
trace_pci_nvme_err_invalid_create_cq_vector(vector);
return NVME_INVALID_IRQ_VECTOR | NVME_DNR;
}
@@ -1371,9 +1372,16 @@ static void nvme_check_constraints(NvmeCtrl *n, Error **errp)
}
if (params->max_ioqpairs < 1 ||
- params->max_ioqpairs > PCI_MSIX_FLAGS_QSIZE) {
+ params->max_ioqpairs > NVME_MAX_IOQPAIRS) {
error_setg(errp, "max_ioqpairs must be between 1 and %d",
- PCI_MSIX_FLAGS_QSIZE);
+ NVME_MAX_IOQPAIRS);
+ return;
+ }
+
+ if (params->msix_qsize < 1 ||
+ params->msix_qsize > PCI_MSIX_FLAGS_QSIZE + 1) {
+ error_setg(errp, "msix_qsize must be between 1 and %d",
+ PCI_MSIX_FLAGS_QSIZE + 1);
return;
}
@@ -1527,7 +1535,7 @@ static void nvme_init_pci(NvmeCtrl *n, PCIDevice *pci_dev)
n->reg_size);
pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY |
PCI_BASE_ADDRESS_MEM_TYPE_64, &n->iomem);
- msix_init_exclusive_bar(pci_dev, n->params.max_ioqpairs + 1, 4, NULL);
+ msix_init_exclusive_bar(pci_dev, n->params.msix_qsize, 4, NULL);
if (n->params.cmb_size_mb) {
nvme_init_cmb(n, pci_dev);
@@ -1634,6 +1642,7 @@ static Property nvme_props[] = {
DEFINE_PROP_UINT32("cmb_size_mb", NvmeCtrl, params.cmb_size_mb, 0),
DEFINE_PROP_UINT32("num_queues", NvmeCtrl, params.num_queues, 0),
DEFINE_PROP_UINT32("max_ioqpairs", NvmeCtrl, params.max_ioqpairs, 64),
+ DEFINE_PROP_UINT16("msix_qsize", NvmeCtrl, params.msix_qsize, 65),
DEFINE_PROP_END_OF_LIST(),
};
diff --git a/hw/block/nvme.h b/hw/block/nvme.h
index 61dd9b23b81d..1d30c0bca283 100644
--- a/hw/block/nvme.h
+++ b/hw/block/nvme.h
@@ -7,6 +7,7 @@ typedef struct NvmeParams {
char *serial;
uint32_t num_queues; /* deprecated since 5.1 */
uint32_t max_ioqpairs;
+ uint16_t msix_qsize;
uint32_t cmb_size_mb;
} NvmeParams;
--
2.27.0
^ permalink raw reply related [flat|nested] 24+ messages in thread
* [PATCH v7 22/22] hw/block/nvme: verify msix_init_exclusive_bar() return value
2020-06-09 19:03 [PATCH v7 00/22] nvme: small fixes, refactoring and cleanups Klaus Jensen
` (20 preceding siblings ...)
2020-06-09 19:03 ` [PATCH v7 21/22] hw/block/nvme: add msix_qsize parameter Klaus Jensen
@ 2020-06-09 19:03 ` Klaus Jensen
2020-06-15 16:21 ` [PATCH v7 00/22] nvme: small fixes, refactoring and cleanups Kevin Wolf
22 siblings, 0 replies; 24+ messages in thread
From: Klaus Jensen @ 2020-06-09 19:03 UTC (permalink / raw)
To: qemu-block
Cc: Kevin Wolf, Klaus Jensen, qemu-devel, Max Reitz, Klaus Jensen,
Keith Busch, Javier Gonzalez, Maxim Levitsky,
Philippe Mathieu-Daudé
From: Klaus Jensen <k.jensen@samsung.com>
Pass an Error to msix_init_exclusive_bar() and check it.
Signed-off-by: Klaus Jensen <k.jensen@samsung.com>
---
hw/block/nvme.c | 13 ++++++++++---
1 file changed, 10 insertions(+), 3 deletions(-)
diff --git a/hw/block/nvme.c b/hw/block/nvme.c
index acc6dbc900e2..2a2e43f681f9 100644
--- a/hw/block/nvme.c
+++ b/hw/block/nvme.c
@@ -1522,7 +1522,7 @@ static void nvme_init_pmr(NvmeCtrl *n, PCIDevice *pci_dev)
PCI_BASE_ADDRESS_MEM_PREFETCH, &n->pmrdev->mr);
}
-static void nvme_init_pci(NvmeCtrl *n, PCIDevice *pci_dev)
+static void nvme_init_pci(NvmeCtrl *n, PCIDevice *pci_dev, Error **errp)
{
uint8_t *pci_conf = pci_dev->config;
@@ -1535,7 +1535,9 @@ static void nvme_init_pci(NvmeCtrl *n, PCIDevice *pci_dev)
n->reg_size);
pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY |
PCI_BASE_ADDRESS_MEM_TYPE_64, &n->iomem);
- msix_init_exclusive_bar(pci_dev, n->params.msix_qsize, 4, NULL);
+ if (msix_init_exclusive_bar(pci_dev, n->params.msix_qsize, 4, errp)) {
+ return;
+ }
if (n->params.cmb_size_mb) {
nvme_init_cmb(n, pci_dev);
@@ -1603,7 +1605,12 @@ static void nvme_realize(PCIDevice *pci_dev, Error **errp)
return;
}
- nvme_init_pci(n, pci_dev);
+ nvme_init_pci(n, pci_dev, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
nvme_init_ctrl(n, pci_dev);
for (i = 0; i < n->num_namespaces; i++) {
--
2.27.0
^ permalink raw reply related [flat|nested] 24+ messages in thread
* Re: [PATCH v7 00/22] nvme: small fixes, refactoring and cleanups
2020-06-09 19:03 [PATCH v7 00/22] nvme: small fixes, refactoring and cleanups Klaus Jensen
` (21 preceding siblings ...)
2020-06-09 19:03 ` [PATCH v7 22/22] hw/block/nvme: verify msix_init_exclusive_bar() return value Klaus Jensen
@ 2020-06-15 16:21 ` Kevin Wolf
22 siblings, 0 replies; 24+ messages in thread
From: Kevin Wolf @ 2020-06-15 16:21 UTC (permalink / raw)
To: Klaus Jensen
Cc: qemu-block, Klaus Jensen, qemu-devel, Max Reitz, Keith Busch,
Javier Gonzalez, Maxim Levitsky, Philippe Mathieu-Daudé
On 09.06.2020 at 21:03, Klaus Jensen wrote:
> From: Klaus Jensen <k.jensen@samsung.com>
>
> Hi all,
>
> As per our discussion about how to amend the bug I introduced in
> "hw/block/nvme: allow use of any valid msix vector", this is a respin
> without that patch.
>
> Kevin, it applies cleanly on top of your block tree with all current
> hw/block/bnvme patches removed.
Thanks, applied to the block branch.
Kevin
^ permalink raw reply [flat|nested] 24+ messages in thread