From: Klaus Jensen <its@irrelevant.dk>
To: Jinhao Fan <fanjinhao21s@ict.ac.cn>
Cc: qemu-devel@nongnu.org, kbusch@kernel.org,
"open list:nvme" <qemu-block@nongnu.org>
Subject: Re: [PATCH] hw/nvme: Add helper functions for qid-db conversion
Date: Tue, 2 Aug 2022 08:02:47 +0200
Message-ID: <Yui+B7yEikNGACgq@apples>
In-Reply-To: <20220728080710.372027-1-fanjinhao21s@ict.ac.cn>
On Jul 28 16:07, Jinhao Fan wrote:
> With the introduction of shadow doorbell and ioeventfd, we need to do
> frequent conversion between qid and its doorbell offset. The original
> hard-coded calculation is confusing and error-prone. Add several helper
> functions to do this task.
>
> Signed-off-by: Jinhao Fan <fanjinhao21s@ict.ac.cn>
> ---
> hw/nvme/ctrl.c | 61 ++++++++++++++++++++++++++++++++------------------
> 1 file changed, 39 insertions(+), 22 deletions(-)
>
> diff --git a/hw/nvme/ctrl.c b/hw/nvme/ctrl.c
> index 533ad14e7a..6116c0e660 100644
> --- a/hw/nvme/ctrl.c
> +++ b/hw/nvme/ctrl.c
> @@ -487,6 +487,29 @@ static int nvme_check_cqid(NvmeCtrl *n, uint16_t cqid)
> {
> return cqid < n->conf_ioqpairs + 1 && n->cq[cqid] != NULL ? 0 : -1;
> }
> +static inline bool nvme_db_offset_is_cq(NvmeCtrl *n, hwaddr offset)
> +{
> + hwaddr stride = 4 << NVME_CAP_DSTRD(ldq_le_p(&n->bar.cap));
> + return (offset / stride) & 1;
> +}
This can be morphed into `(offset >> (2 + dstrd)) & 1` if I am not
mistaken - the stride is `4 << dstrd` bytes, i.e. a power of two, so
the division reduces to a shift by `2 + dstrd`.
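That is, something like this (untested sketch; I pulled the `dstrd`
value out into a local for clarity):

    static inline bool nvme_db_offset_is_cq(NvmeCtrl *n, hwaddr offset)
    {
        uint32_t dstrd = NVME_CAP_DSTRD(ldq_le_p(&n->bar.cap));

        /* the stride is 4 << dstrd bytes, so dividing by it is a shift */
        return (offset >> (2 + dstrd)) & 1;
    }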
> +
> +static inline uint16_t nvme_db_offset_to_qid(NvmeCtrl *n, hwaddr offset)
> +{
> + hwaddr stride = 4 << NVME_CAP_DSTRD(ldq_le_p(&n->bar.cap));
> + return offset / (2 * stride);
> +}
Same, should be able to do `offset >> (2 + dstrd + 1)`, no? Each queue
owns a doorbell pair spanning `2 * (4 << dstrd)` bytes, so the division
becomes a shift by `2 + dstrd + 1`.
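Again, roughly (untested):

    static inline uint16_t nvme_db_offset_to_qid(NvmeCtrl *n, hwaddr offset)
    {
        uint32_t dstrd = NVME_CAP_DSTRD(ldq_le_p(&n->bar.cap));

        /* a queue's doorbell pair spans 2 * (4 << dstrd) bytes */
        return offset >> (2 + dstrd + 1);
    }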
> +
> +static inline hwaddr nvme_cqid_to_db_offset(NvmeCtrl *n, uint16_t cqid)
> +{
> + hwaddr stride = 4 << NVME_CAP_DSTRD(ldq_le_p(&n->bar.cap));
> + return stride * (cqid * 2 + 1);
> +}
> +
> +static inline hwaddr nvme_sqid_to_db_offset(NvmeCtrl *n, uint16_t sqid)
> +{
> + hwaddr stride = 4 << NVME_CAP_DSTRD(ldq_le_p(&n->bar.cap));
> + return stride * sqid * 2;
> +}
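For what it's worth, with `DSTRD = 0` these helpers give the usual
layout relative to the 0x1000 doorbell base: sq 0 at 0x0, cq 0 at 0x4,
sq 1 at 0x8, cq 1 at 0xc, and so on.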
>
> static void nvme_inc_cq_tail(NvmeCQueue *cq)
> {
> @@ -4256,7 +4279,7 @@ static void nvme_cq_notifier(EventNotifier *e)
> static int nvme_init_cq_ioeventfd(NvmeCQueue *cq)
> {
> NvmeCtrl *n = cq->ctrl;
> - uint16_t offset = (cq->cqid << 3) + (1 << 2);
> + uint16_t offset = nvme_cqid_to_db_offset(n, cq->cqid);
> int ret;
>
> ret = event_notifier_init(&cq->notifier, 0);
> @@ -4283,7 +4306,7 @@ static void nvme_sq_notifier(EventNotifier *e)
> static int nvme_init_sq_ioeventfd(NvmeSQueue *sq)
> {
> NvmeCtrl *n = sq->ctrl;
> - uint16_t offset = sq->sqid << 3;
> + uint16_t offset = nvme_sqid_to_db_offset(n, sq->sqid);
> int ret;
>
> ret = event_notifier_init(&sq->notifier, 0);
> @@ -4300,7 +4323,7 @@ static int nvme_init_sq_ioeventfd(NvmeSQueue *sq)
>
> static void nvme_free_sq(NvmeSQueue *sq, NvmeCtrl *n)
> {
> - uint16_t offset = sq->sqid << 3;
> + uint16_t offset = nvme_sqid_to_db_offset(n, sq->sqid);
>
> n->sq[sq->sqid] = NULL;
> timer_free(sq->timer);
> @@ -4379,8 +4402,8 @@ static void nvme_init_sq(NvmeSQueue *sq, NvmeCtrl *n, uint64_t dma_addr,
> sq->timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, nvme_process_sq, sq);
>
> if (n->dbbuf_enabled) {
> - sq->db_addr = n->dbbuf_dbs + (sqid << 3);
> - sq->ei_addr = n->dbbuf_eis + (sqid << 3);
> + sq->db_addr = n->dbbuf_dbs + nvme_sqid_to_db_offset(n, sqid);
> + sq->ei_addr = n->dbbuf_eis + nvme_sqid_to_db_offset(n, sqid);
>
> if (n->params.ioeventfd && sq->sqid != 0) {
> if (!nvme_init_sq_ioeventfd(sq)) {
> @@ -4690,8 +4713,8 @@ static uint16_t nvme_get_log(NvmeCtrl *n, NvmeRequest *req)
>
> static void nvme_free_cq(NvmeCQueue *cq, NvmeCtrl *n)
> {
> - uint16_t offset = (cq->cqid << 3) + (1 << 2);
> -
> + uint16_t offset = nvme_cqid_to_db_offset(n, cq->cqid);
> +
> n->cq[cq->cqid] = NULL;
> timer_free(cq->timer);
> if (cq->ioeventfd_enabled) {
> @@ -4755,8 +4778,8 @@ static void nvme_init_cq(NvmeCQueue *cq, NvmeCtrl *n, uint64_t dma_addr,
> QTAILQ_INIT(&cq->req_list);
> QTAILQ_INIT(&cq->sq_list);
> if (n->dbbuf_enabled) {
> - cq->db_addr = n->dbbuf_dbs + (cqid << 3) + (1 << 2);
> - cq->ei_addr = n->dbbuf_eis + (cqid << 3) + (1 << 2);
> + cq->db_addr = n->dbbuf_dbs + nvme_cqid_to_db_offset(n, cqid);
> + cq->ei_addr = n->dbbuf_eis + nvme_cqid_to_db_offset(n, cqid);
>
> if (n->params.ioeventfd && cqid != 0) {
> if (!nvme_init_cq_ioeventfd(cq)) {
> @@ -6128,13 +6151,8 @@ static uint16_t nvme_dbbuf_config(NvmeCtrl *n, const NvmeRequest *req)
> NvmeCQueue *cq = n->cq[i];
>
> if (sq) {
> - /*
> - * CAP.DSTRD is 0, so offset of ith sq db_addr is (i<<3)
> - * nvme_process_db() uses this hard-coded way to calculate
> - * doorbell offsets. Be consistent with that here.
> - */
> - sq->db_addr = dbs_addr + (i << 3);
> - sq->ei_addr = eis_addr + (i << 3);
> + sq->db_addr = dbs_addr + nvme_sqid_to_db_offset(n, i);
> + sq->ei_addr = eis_addr + nvme_sqid_to_db_offset(n, i);
> pci_dma_write(&n->parent_obj, sq->db_addr, &sq->tail,
> sizeof(sq->tail));
>
> @@ -6146,9 +6164,8 @@ static uint16_t nvme_dbbuf_config(NvmeCtrl *n, const NvmeRequest *req)
> }
>
> if (cq) {
> - /* CAP.DSTRD is 0, so offset of ith cq db_addr is (i<<3)+(1<<2) */
> - cq->db_addr = dbs_addr + (i << 3) + (1 << 2);
> - cq->ei_addr = eis_addr + (i << 3) + (1 << 2);
> + cq->db_addr = dbs_addr + nvme_cqid_to_db_offset(n, i);
> + cq->ei_addr = eis_addr + nvme_cqid_to_db_offset(n, i);
> pci_dma_write(&n->parent_obj, cq->db_addr, &cq->head,
> sizeof(cq->head));
>
> @@ -6843,14 +6860,14 @@ static void nvme_process_db(NvmeCtrl *n, hwaddr addr, int val)
> return;
> }
>
> - if (((addr - 0x1000) >> 2) & 1) {
> + if (nvme_db_offset_is_cq(n, addr - 0x1000)) {
> /* Completion queue doorbell write */
>
> uint16_t new_head = val & 0xffff;
> int start_sqs;
> NvmeCQueue *cq;
>
> - qid = (addr - (0x1000 + (1 << 2))) >> 3;
> + qid = nvme_db_offset_to_qid(n, addr - 0x1000);
> if (unlikely(nvme_check_cqid(n, qid))) {
> NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_cq,
> "completion queue doorbell write"
> @@ -6925,7 +6942,7 @@ static void nvme_process_db(NvmeCtrl *n, hwaddr addr, int val)
> uint16_t new_tail = val & 0xffff;
> NvmeSQueue *sq;
>
> - qid = (addr - 0x1000) >> 3;
> + qid = nvme_db_offset_to_qid(n, addr - 0x1000);
> if (unlikely(nvme_check_sqid(n, qid))) {
> NVME_GUEST_ERR(pci_nvme_ub_db_wr_invalid_sq,
> "submission queue doorbell write"
> --
> 2.25.1
>
--
One of us - No more doubt, silence or taboo about mental illness.