public inbox for linux-block@vger.kernel.org
 help / color / mirror / Atom feed
From: Dongli Zhang <dongli.zhang@oracle.com>
To: Prasun Ratn <prasun.ratn@gmail.com>,
	ming.lei@redhat.com, jianchao.w.wang@oracle.com
Cc: keith.busch@intel.com, hch@lst.de, sagi@grimberg.me,
	linux-nvme@lists.infradead.org, axboe@fb.com,
	linux-block@vger.kernel.org, tglx@linutronix.de
Subject: Re: nvme-pci: number of queues off by one
Date: Mon, 8 Oct 2018 14:58:21 +0800	[thread overview]
Message-ID: <847ccf73-9fb8-0507-b68e-99960530468a@oracle.com> (raw)
In-Reply-To: <9c851b95-1c41-2171-40ec-bd47032401fc@oracle.com>

I got the same result when emulating nvme with qemu: the VM has 12 cpus, while
the num_queues of the nvme device is 8.

# uname -r
4.14.1
# ll /sys/block/nvme*n1/mq/*/cpu_list
-r--r--r-- 1 root root 4096 Oct  8 14:30 /sys/block/nvme0n1/mq/0/cpu_list
-r--r--r-- 1 root root 4096 Oct  8 14:30 /sys/block/nvme0n1/mq/1/cpu_list
-r--r--r-- 1 root root 4096 Oct  8 14:30 /sys/block/nvme0n1/mq/2/cpu_list
-r--r--r-- 1 root root 4096 Oct  8 14:30 /sys/block/nvme0n1/mq/3/cpu_list
-r--r--r-- 1 root root 4096 Oct  8 14:30 /sys/block/nvme0n1/mq/4/cpu_list
-r--r--r-- 1 root root 4096 Oct  8 14:30 /sys/block/nvme0n1/mq/5/cpu_list
-r--r--r-- 1 root root 4096 Oct  8 14:30 /sys/block/nvme0n1/mq/6/cpu_list


# uname -r
4.18.10
# ll /sys/block/nvme*n1/mq/*/cpu_list
-r--r--r-- 1 root root 4096 Oct  8 14:34 /sys/block/nvme0n1/mq/0/cpu_list
-r--r--r-- 1 root root 4096 Oct  8 14:34 /sys/block/nvme0n1/mq/1/cpu_list
-r--r--r-- 1 root root 4096 Oct  8 14:34 /sys/block/nvme0n1/mq/2/cpu_list
-r--r--r-- 1 root root 4096 Oct  8 14:34 /sys/block/nvme0n1/mq/3/cpu_list
-r--r--r-- 1 root root 4096 Oct  8 14:34 /sys/block/nvme0n1/mq/4/cpu_list
-r--r--r-- 1 root root 4096 Oct  8 14:34 /sys/block/nvme0n1/mq/5/cpu_list
-r--r--r-- 1 root root 4096 Oct  8 14:34 /sys/block/nvme0n1/mq/6/cpu_list

From the below qemu source code, when n->num_queues is 8, the handler of
NVME_FEAT_NUM_QUEUES returns 0x60006.

 719 static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req)
 720 {
 721     uint32_t dw10 = le32_to_cpu(cmd->cdw10);
 722     uint32_t dw11 = le32_to_cpu(cmd->cdw11);
 723
 724     switch (dw10) {
 725     case NVME_VOLATILE_WRITE_CACHE:
 726         blk_set_enable_write_cache(n->conf.blk, dw11 & 1);
 727         break;
 728     case NVME_NUMBER_OF_QUEUES:
 729         trace_nvme_setfeat_numq((dw11 & 0xFFFF) + 1,
 730                                 ((dw11 >> 16) & 0xFFFF) + 1,
 731                                 n->num_queues - 1, n->num_queues - 1);
 732         req->cqe.result =
 733             cpu_to_le32((n->num_queues - 2) | ((n->num_queues - 2) << 16));
----> returns 0x60006 when num_queues is 8.


Finally, nr_io_queues is set to 6+1=7 in nvme_set_queue_count() in VM kernel.

I do not know how to describe this behavior in nvme terminology.

Dongli Zhang

On 10/08/2018 01:59 PM, Dongli Zhang wrote:
> I can reproduce with qemu:
> 
> # ls /sys/block/nvme*n1/mq/*/cpu_list
> /sys/block/nvme0n1/mq/0/cpu_list
> /sys/block/nvme0n1/mq/1/cpu_list
> /sys/block/nvme0n1/mq/2/cpu_list
> /sys/block/nvme0n1/mq/3/cpu_list
> /sys/block/nvme0n1/mq/4/cpu_list
> /sys/block/nvme0n1/mq/5/cpu_list
> /sys/block/nvme0n1/mq/6/cpu_list
> 
> Here is the qemu cmdline emulating an 8-queue nvme device while the VM has 12 cpus:
> 
> # qemu-system-x86_64 -m 4096 -smp 12 \
> 	-kernel /path-to-kernel/linux-4.18.10/arch/x86_64/boot/bzImage \
> 	-hda /path-to-img/ubuntu1804.qcow2  \
> 	-append "root=/dev/sda1 init=/sbin/init text" -enable-kvm \
> 	-net nic -net user,hostfwd=tcp::5022-:22 \
> 	-device nvme,drive=nvme1,serial=deadbeaf1,num_queues=8 \
> 	-drive file=/path-to-img/nvme.disk,if=none,id=nvme1
> 
> Dongli Zhang
> 
> 
> On 10/08/2018 01:05 PM, Prasun Ratn wrote:
>> Hi
>>
>> I have an NVMe SSD that has 8 hw queues and on older kernels I see all
>> 8 show up. However on a recent kernel (I tried 4.18), I only see 7. Is
>> this a known issue?
>>
>> $ uname -r
>> 4.14.1-1.el7.elrepo.x86_64
>>
>> $ ls /sys/block/nvme*n1/mq/*/cpu_list
>> /sys/block/nvme0n1/mq/0/cpu_list
>> /sys/block/nvme0n1/mq/1/cpu_list
>> /sys/block/nvme0n1/mq/2/cpu_list
>> /sys/block/nvme0n1/mq/3/cpu_list
>> /sys/block/nvme0n1/mq/4/cpu_list
>> /sys/block/nvme0n1/mq/5/cpu_list
>> /sys/block/nvme0n1/mq/6/cpu_list
>> /sys/block/nvme0n1/mq/7/cpu_list
>> /sys/block/nvme1n1/mq/0/cpu_list
>> /sys/block/nvme1n1/mq/1/cpu_list
>> /sys/block/nvme1n1/mq/2/cpu_list
>> /sys/block/nvme1n1/mq/3/cpu_list
>> /sys/block/nvme1n1/mq/4/cpu_list
>> /sys/block/nvme1n1/mq/5/cpu_list
>> /sys/block/nvme1n1/mq/6/cpu_list
>> /sys/block/nvme1n1/mq/7/cpu_list
>> /sys/block/nvme2n1/mq/0/cpu_list
>> /sys/block/nvme2n1/mq/1/cpu_list
>> /sys/block/nvme2n1/mq/2/cpu_list
>> /sys/block/nvme2n1/mq/3/cpu_list
>> /sys/block/nvme2n1/mq/4/cpu_list
>> /sys/block/nvme2n1/mq/5/cpu_list
>> /sys/block/nvme2n1/mq/6/cpu_list
>> /sys/block/nvme2n1/mq/7/cpu_list
>> /sys/block/nvme3n1/mq/0/cpu_list
>> /sys/block/nvme3n1/mq/1/cpu_list
>> /sys/block/nvme3n1/mq/2/cpu_list
>> /sys/block/nvme3n1/mq/3/cpu_list
>> /sys/block/nvme3n1/mq/4/cpu_list
>> /sys/block/nvme3n1/mq/5/cpu_list
>> /sys/block/nvme3n1/mq/6/cpu_list
>> /sys/block/nvme3n1/mq/7/cpu_list
>>
>>
>> $ uname -r
>> 4.18.10-1.el7.elrepo.x86_64
>>
>> $ ls /sys/block/nvme*n1/mq/*/cpu_list
>> /sys/block/nvme0n1/mq/0/cpu_list
>> /sys/block/nvme0n1/mq/1/cpu_list
>> /sys/block/nvme0n1/mq/2/cpu_list
>> /sys/block/nvme0n1/mq/3/cpu_list
>> /sys/block/nvme0n1/mq/4/cpu_list
>> /sys/block/nvme0n1/mq/5/cpu_list
>> /sys/block/nvme0n1/mq/6/cpu_list
>> /sys/block/nvme1n1/mq/0/cpu_list
>> /sys/block/nvme1n1/mq/1/cpu_list
>> /sys/block/nvme1n1/mq/2/cpu_list
>> /sys/block/nvme1n1/mq/3/cpu_list
>> /sys/block/nvme1n1/mq/4/cpu_list
>> /sys/block/nvme1n1/mq/5/cpu_list
>> /sys/block/nvme1n1/mq/6/cpu_list
>> /sys/block/nvme2n1/mq/0/cpu_list
>> /sys/block/nvme2n1/mq/1/cpu_list
>> /sys/block/nvme2n1/mq/2/cpu_list
>> /sys/block/nvme2n1/mq/3/cpu_list
>> /sys/block/nvme2n1/mq/4/cpu_list
>> /sys/block/nvme2n1/mq/5/cpu_list
>> /sys/block/nvme2n1/mq/6/cpu_list
>> /sys/block/nvme3n1/mq/0/cpu_list
>> /sys/block/nvme3n1/mq/1/cpu_list
>> /sys/block/nvme3n1/mq/2/cpu_list
>> /sys/block/nvme3n1/mq/3/cpu_list
>> /sys/block/nvme3n1/mq/4/cpu_list
>> /sys/block/nvme3n1/mq/5/cpu_list
>> /sys/block/nvme3n1/mq/6/cpu_list
>>

  reply	other threads:[~2018-10-08 14:07 UTC|newest]

Thread overview: 6+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
     [not found] <1519832921-13915-1-git-send-email-jianchao.w.wang@oracle.com>
     [not found] ` <20180228164726.GB16536@lst.de>
     [not found]   ` <20180301150329.GB6795@ming.t460p>
     [not found]     ` <20180301161042.GA14799@localhost.localdomain>
     [not found]       ` <20180308074220.GC15748@lst.de>
     [not found]         ` <20180309172445.GC14765@localhost.localdomain>
2018-03-12  9:09           ` [PATCH V2] nvme-pci: assign separate irq vectors for adminq and ioq0 Ming Lei
2018-10-08  5:05             ` nvme-pci: number of queues off by one Prasun Ratn
2018-10-08  5:59               ` Dongli Zhang
2018-10-08  6:58                 ` Dongli Zhang [this message]
2018-10-08 14:54                   ` Keith Busch
2018-10-08 10:19                 ` Ming Lei

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=847ccf73-9fb8-0507-b68e-99960530468a@oracle.com \
    --to=dongli.zhang@oracle.com \
    --cc=axboe@fb.com \
    --cc=hch@lst.de \
    --cc=jianchao.w.wang@oracle.com \
    --cc=keith.busch@intel.com \
    --cc=linux-block@vger.kernel.org \
    --cc=linux-nvme@lists.infradead.org \
    --cc=ming.lei@redhat.com \
    --cc=prasun.ratn@gmail.com \
    --cc=sagi@grimberg.me \
    --cc=tglx@linutronix.de \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox