From: Lukasz Maniak <lukasz.maniak@linux.intel.com>
To: qemu-devel@nongnu.org
Cc: "Keith Busch" <kbusch@kernel.org>,
"Łukasz Gieryk" <lukasz.gieryk@linux.intel.com>,
"Klaus Jensen" <its@irrelevant.dk>,
"Lukasz Maniak" <lukasz.maniak@linux.intel.com>,
qemu-block@nongnu.org
Subject: [PATCH 10/15] hw/nvme: Make max_ioqpairs and msix_qsize configurable at runtime
Date: Thu, 7 Oct 2021 18:24:01 +0200
Message-ID: <20211007162406.1920374-11-lukasz.maniak@linux.intel.com>
In-Reply-To: <20211007162406.1920374-1-lukasz.maniak@linux.intel.com>
From: Łukasz Gieryk <lukasz.gieryk@linux.intel.com>
The NVMe device defines two properties: max_ioqpairs and msix_qsize.
Having them as constants is problematic for SR-IOV support.

The SR-IOV feature introduces virtual resources (queues, interrupts)
that can be assigned to the PF and its dependent VFs. Each device,
following a reset, should work with its configured number of queues,
so a single constant is no longer sufficient to hold the whole state.
This patch addresses the problem by introducing additional variables
in NvmeCtrl's state. The variables for, e.g., managing queues are
organized as follows (a short sketch of the intended flow appears
after the list):

 - n->params.max_ioqpairs - no changes; a constant set by the user.
 - n->max_ioqpairs        - (new) value derived from n->params.* in
                            realize(); constant through the device's
                            lifetime.
 - n->(mutable_state)     - (not part of this patch) user-configurable;
                            specifies the number of queues available
                            _after_ reset.
 - n->conf_ioqpairs       - (new) used everywhere in place of the old
                            n->params.max_ioqpairs; initialized in
                            realize() and updated during reset() to
                            reflect the user's changes to the mutable
                            state.
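
A minimal sketch of the intended flow (MIN() is QEMU's standard macro;
mutable_ioqpairs is a hypothetical stand-in for the mutable state,
which is not part of this patch):

    /* realize(): derive the hard limit once; constant afterwards */
    n->max_ioqpairs  = n->params.max_ioqpairs;
    n->conf_ioqpairs = n->max_ioqpairs;

    /* reset(): re-apply the user's configuration, clamped to the limit */
    n->conf_ioqpairs = MIN(n->mutable_ioqpairs, n->max_ioqpairs);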

Since the number of available I/O queues and interrupts can change at
runtime, the SQ/CQ pointer buffers and the MSI-X-related structures
are allocated large enough to accommodate the maximum limits, which
avoids complicated reallocation. A helper function,
nvme_update_msixcap_ts(), updates the Table Size field of the MSI-X
capability to signal configuration changes.
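
For context, MSI-X encodes the Table Size as N-1 in the low 11 bits of
the Message Control word, which is why the helper writes table_size - 1.
A guest reading the capability back would effectively compute
(illustrative sketch only):

    uint16_t msgctl = pci_get_word(pci_dev->config + pci_dev->msix_cap +
                                   PCI_MSIX_FLAGS);
    uint16_t vectors = (msgctl & PCI_MSIX_FLAGS_QSIZE) + 1;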
Signed-off-by: Łukasz Gieryk <lukasz.gieryk@linux.intel.com>
---
hw/nvme/ctrl.c | 62 +++++++++++++++++++++++++++++++++-----------------
hw/nvme/nvme.h | 4 ++++
2 files changed, 45 insertions(+), 21 deletions(-)
diff --git a/hw/nvme/ctrl.c b/hw/nvme/ctrl.c
index b04cf5eae9..5d9166d66f 100644
--- a/hw/nvme/ctrl.c
+++ b/hw/nvme/ctrl.c
@@ -416,12 +416,12 @@ static bool nvme_nsid_valid(NvmeCtrl *n, uint32_t nsid)
static int nvme_check_sqid(NvmeCtrl *n, uint16_t sqid)
{
- return sqid < n->params.max_ioqpairs + 1 && n->sq[sqid] != NULL ? 0 : -1;
+ return sqid < n->conf_ioqpairs + 1 && n->sq[sqid] != NULL ? 0 : -1;
}
static int nvme_check_cqid(NvmeCtrl *n, uint16_t cqid)
{
- return cqid < n->params.max_ioqpairs + 1 && n->cq[cqid] != NULL ? 0 : -1;
+ return cqid < n->conf_ioqpairs + 1 && n->cq[cqid] != NULL ? 0 : -1;
}
static void nvme_inc_cq_tail(NvmeCQueue *cq)
@@ -4034,8 +4034,7 @@ static uint16_t nvme_create_sq(NvmeCtrl *n, NvmeRequest *req)
trace_pci_nvme_err_invalid_create_sq_cqid(cqid);
return NVME_INVALID_CQID | NVME_DNR;
}
- if (unlikely(!sqid || sqid > n->params.max_ioqpairs ||
- n->sq[sqid] != NULL)) {
+ if (unlikely(!sqid || sqid > n->conf_ioqpairs || n->sq[sqid] != NULL)) {
trace_pci_nvme_err_invalid_create_sq_sqid(sqid);
return NVME_INVALID_QID | NVME_DNR;
}
@@ -4382,8 +4381,7 @@ static uint16_t nvme_create_cq(NvmeCtrl *n, NvmeRequest *req)
trace_pci_nvme_create_cq(prp1, cqid, vector, qsize, qflags,
NVME_CQ_FLAGS_IEN(qflags) != 0);
- if (unlikely(!cqid || cqid > n->params.max_ioqpairs ||
- n->cq[cqid] != NULL)) {
+ if (unlikely(!cqid || cqid > n->conf_ioqpairs || n->cq[cqid] != NULL)) {
trace_pci_nvme_err_invalid_create_cq_cqid(cqid);
return NVME_INVALID_QID | NVME_DNR;
}
@@ -4399,7 +4397,7 @@ static uint16_t nvme_create_cq(NvmeCtrl *n, NvmeRequest *req)
trace_pci_nvme_err_invalid_create_cq_vector(vector);
return NVME_INVALID_IRQ_VECTOR | NVME_DNR;
}
- if (unlikely(vector >= n->params.msix_qsize)) {
+ if (unlikely(vector >= n->conf_msix_qsize)) {
trace_pci_nvme_err_invalid_create_cq_vector(vector);
return NVME_INVALID_IRQ_VECTOR | NVME_DNR;
}
@@ -4980,13 +4978,12 @@ defaults:
break;
case NVME_NUMBER_OF_QUEUES:
- result = (n->params.max_ioqpairs - 1) |
- ((n->params.max_ioqpairs - 1) << 16);
+ result = (n->conf_ioqpairs - 1) | ((n->conf_ioqpairs - 1) << 16);
trace_pci_nvme_getfeat_numq(result);
break;
case NVME_INTERRUPT_VECTOR_CONF:
iv = dw11 & 0xffff;
- if (iv >= n->params.max_ioqpairs + 1) {
+ if (iv >= n->conf_ioqpairs + 1) {
return NVME_INVALID_FIELD | NVME_DNR;
}
@@ -5141,10 +5138,10 @@ static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeRequest *req)
trace_pci_nvme_setfeat_numq((dw11 & 0xffff) + 1,
((dw11 >> 16) & 0xffff) + 1,
- n->params.max_ioqpairs,
- n->params.max_ioqpairs);
- req->cqe.result = cpu_to_le32((n->params.max_ioqpairs - 1) |
- ((n->params.max_ioqpairs - 1) << 16));
+ n->conf_ioqpairs,
+ n->conf_ioqpairs);
+ req->cqe.result = cpu_to_le32((n->conf_ioqpairs - 1) |
+ ((n->conf_ioqpairs - 1) << 16));
break;
case NVME_ASYNCHRONOUS_EVENT_CONF:
n->features.async_config = dw11;
@@ -5582,8 +5579,21 @@ static void nvme_process_sq(void *opaque)
}
}
+static void nvme_update_msixcap_ts(PCIDevice *pci_dev, uint32_t table_size)
+{
+ uint8_t *config;
+
+ assert(pci_dev->msix_cap);
+ assert(table_size <= pci_dev->msix_entries_nr);
+
+ config = pci_dev->config + pci_dev->msix_cap;
+ pci_set_word_by_mask(config + PCI_MSIX_FLAGS, PCI_MSIX_FLAGS_QSIZE,
+ table_size - 1);
+}
+
static void nvme_ctrl_reset(NvmeCtrl *n, NvmeResetType rst)
{
+ PCIDevice *pci_dev = &n->parent_obj;
NvmeNamespace *ns;
int i;
@@ -5596,12 +5606,12 @@ static void nvme_ctrl_reset(NvmeCtrl *n, NvmeResetType rst)
nvme_ns_drain(ns);
}
- for (i = 0; i < n->params.max_ioqpairs + 1; i++) {
+ for (i = 0; i < n->max_ioqpairs + 1; i++) {
if (n->sq[i] != NULL) {
nvme_free_sq(n->sq[i], n);
}
}
- for (i = 0; i < n->params.max_ioqpairs + 1; i++) {
+ for (i = 0; i < n->max_ioqpairs + 1; i++) {
if (n->cq[i] != NULL) {
nvme_free_cq(n->cq[i], n);
}
@@ -5613,15 +5623,17 @@ static void nvme_ctrl_reset(NvmeCtrl *n, NvmeResetType rst)
g_free(event);
}
- if (!pci_is_vf(&n->parent_obj) && n->params.sriov_max_vfs) {
+ if (!pci_is_vf(pci_dev) && n->params.sriov_max_vfs) {
if (rst != NVME_RESET_CONTROLLER) {
- pcie_sriov_pf_disable_vfs(&n->parent_obj);
+ pcie_sriov_pf_disable_vfs(pci_dev);
}
}
n->aer_queued = 0;
n->outstanding_aers = 0;
n->qs_created = false;
+
+ nvme_update_msixcap_ts(pci_dev, n->conf_msix_qsize);
}
static void nvme_ctrl_shutdown(NvmeCtrl *n)
@@ -6322,11 +6334,17 @@ static void nvme_init_state(NvmeCtrl *n)
NvmeSecCtrlEntry *sctrl;
int i;
+ n->max_ioqpairs = n->params.max_ioqpairs;
+ n->conf_ioqpairs = n->max_ioqpairs;
+
+ n->max_msix_qsize = n->params.msix_qsize;
+ n->conf_msix_qsize = n->max_msix_qsize;
+
/* add one to max_ioqpairs to account for the admin queue pair */
n->reg_size = pow2ceil(sizeof(NvmeBar) +
2 * (n->params.max_ioqpairs + 1) * NVME_DB_SIZE);
- n->sq = g_new0(NvmeSQueue *, n->params.max_ioqpairs + 1);
- n->cq = g_new0(NvmeCQueue *, n->params.max_ioqpairs + 1);
+ n->sq = g_new0(NvmeSQueue *, n->max_ioqpairs + 1);
+ n->cq = g_new0(NvmeCQueue *, n->max_ioqpairs + 1);
n->temperature = NVME_TEMPERATURE;
n->features.temp_thresh_hi = NVME_TEMPERATURE_WARNING;
n->starttime_ms = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
@@ -6491,7 +6509,7 @@ static int nvme_init_pci(NvmeCtrl *n, PCIDevice *pci_dev, Error **errp)
pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY |
PCI_BASE_ADDRESS_MEM_TYPE_64, &n->bar0);
}
- ret = msix_init(pci_dev, n->params.msix_qsize,
+ ret = msix_init(pci_dev, n->max_msix_qsize,
&n->bar0, 0, msix_table_offset,
&n->bar0, 0, msix_pba_offset, 0, &err);
if (ret < 0) {
@@ -6503,6 +6521,8 @@ static int nvme_init_pci(NvmeCtrl *n, PCIDevice *pci_dev, Error **errp)
}
}
+ nvme_update_msixcap_ts(pci_dev, n->conf_msix_qsize);
+
if (n->params.cmb_size_mb) {
nvme_init_cmb(n, pci_dev);
}
diff --git a/hw/nvme/nvme.h b/hw/nvme/nvme.h
index 9fbb0a70b5..65383e495c 100644
--- a/hw/nvme/nvme.h
+++ b/hw/nvme/nvme.h
@@ -420,6 +420,10 @@ typedef struct NvmeCtrl {
uint64_t starttime_ms;
uint16_t temperature;
uint8_t smart_critical_warning;
+ uint32_t max_msix_qsize; /* Derived from params.msix.qsize */
+ uint32_t conf_msix_qsize; /* Configured limit */
+ uint32_t max_ioqpairs; /* Derived from params.max_ioqpairs */
+ uint32_t conf_ioqpairs; /* Configured limit */
struct {
MemoryRegion mem;
--
2.25.1