From: Daniel Wagner <wagi@kernel.org>
To: Jens Axboe <axboe@kernel.dk>, Keith Busch <kbusch@kernel.org>,
	 Christoph Hellwig <hch@lst.de>, Sagi Grimberg <sagi@grimberg.me>,
	 "Michael S. Tsirkin" <mst@redhat.com>
Cc: "Martin K. Petersen" <martin.petersen@oracle.com>,
	 Thomas Gleixner <tglx@linutronix.de>,
	 Costa Shulyupin <costa.shul@redhat.com>,
	Juri Lelli <juri.lelli@redhat.com>,
	 Valentin Schneider <vschneid@redhat.com>,
	Waiman Long <llong@redhat.com>,  Ming Lei <ming.lei@redhat.com>,
	Frederic Weisbecker <frederic@kernel.org>,
	 Mel Gorman <mgorman@suse.de>, Hannes Reinecke <hare@suse.de>,
	 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>,
	 linux-kernel@vger.kernel.org, linux-block@vger.kernel.org,
	 linux-nvme@lists.infradead.org, megaraidlinux.pdl@broadcom.com,
	 linux-scsi@vger.kernel.org, storagedev@microchip.com,
	 virtualization@lists.linux.dev,
	GR-QLogic-Storage-Upstream@marvell.com,
	 Daniel Wagner <wagi@kernel.org>
Subject: [PATCH v6 4/9] scsi: use block layer helpers to calculate num of queues
Date: Thu, 24 Apr 2025 20:19:43 +0200	[thread overview]
Message-ID: <20250424-isolcpus-io-queues-v6-4-9a53a870ca1f@kernel.org> (raw)
In-Reply-To: <20250424-isolcpus-io-queues-v6-0-9a53a870ca1f@kernel.org>

Multiqueue devices should only allocate queues for the housekeeping CPUs
when isolcpus=managed_irq is set. This prevents the isolated CPUs from
being disturbed by OS workload.

Use the block layer helpers which calculate the correct number of queues
to use when isolcpus is enabled.

Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Daniel Wagner <wagi@kernel.org>
---
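For reference, a minimal sketch of the semantics this patch relies on.
The real helper, blk_mq_num_online_queues(), is added in patch 2/9; the
function name, the HK_TYPE_MANAGED_IRQ choice and the exact cpumask calls
below are illustrative assumptions, not the final implementation. The idea
is to cap a driver-supplied queue limit by the number of online
housekeeping CPUs, where a limit of 0 means "no driver-side limit":

  #include <linux/cpumask.h>
  #include <linux/sched/isolation.h>

  /* Illustrative sketch only, not the actual block layer helper. */
  static inline unsigned int example_num_online_queues(unsigned int max_queues)
  {
  	/* CPUs allowed to handle managed IRQs, i.e. housekeeping CPUs. */
  	const struct cpumask *hk = housekeeping_cpumask(HK_TYPE_MANAGED_IRQ);
  	/* Count only the housekeeping CPUs that are currently online. */
  	unsigned int num = cpumask_weight_and(hk, cpu_online_mask);

  	/* max_queues == 0 means the caller imposes no limit of its own. */
  	if (!max_queues)
  		return num;
  	return min(num, max_queues);
  }

With isolcpus=managed_irq this evaluates to the number of online
housekeeping CPUs rather than num_online_cpus(), which is the
substitution made in the hunks below.
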
 drivers/scsi/megaraid/megaraid_sas_base.c | 15 +++++++++------
 drivers/scsi/qla2xxx/qla_isr.c            | 10 +++++-----
 drivers/scsi/smartpqi/smartpqi_init.c     |  5 ++---
 3 files changed, 16 insertions(+), 14 deletions(-)

diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 28c75865967af36c6390c5ee5767577ec1bcf779..a5f1117f3ddb20da04e0b29fd9d52d47ed1af3d8 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -5962,7 +5962,8 @@ megasas_alloc_irq_vectors(struct megasas_instance *instance)
 		else
 			instance->iopoll_q_count = 0;
 
-		num_msix_req = num_online_cpus() + instance->low_latency_index_start;
+		num_msix_req = blk_mq_num_online_queues(0) +
+			instance->low_latency_index_start;
 		instance->msix_vectors = min(num_msix_req,
 				instance->msix_vectors);
 
@@ -5978,7 +5979,8 @@ megasas_alloc_irq_vectors(struct megasas_instance *instance)
 		/* Disable Balanced IOPS mode and try realloc vectors */
 		instance->perf_mode = MR_LATENCY_PERF_MODE;
 		instance->low_latency_index_start = 1;
-		num_msix_req = num_online_cpus() + instance->low_latency_index_start;
+		num_msix_req = blk_mq_num_online_queues(0) +
+			instance->low_latency_index_start;
 
 		instance->msix_vectors = min(num_msix_req,
 				instance->msix_vectors);
@@ -6234,7 +6236,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
 		intr_coalescing = (scratch_pad_1 & MR_INTR_COALESCING_SUPPORT_OFFSET) ?
 								true : false;
 		if (intr_coalescing &&
-			(num_online_cpus() >= MR_HIGH_IOPS_QUEUE_COUNT) &&
+			(blk_mq_num_online_queues(0) >= MR_HIGH_IOPS_QUEUE_COUNT) &&
 			(instance->msix_vectors == MEGASAS_MAX_MSIX_QUEUES))
 			instance->perf_mode = MR_BALANCED_PERF_MODE;
 		else
@@ -6278,7 +6280,8 @@ static int megasas_init_fw(struct megasas_instance *instance)
 		else
 			instance->low_latency_index_start = 1;
 
-		num_msix_req = num_online_cpus() + instance->low_latency_index_start;
+		num_msix_req = blk_mq_num_online_queues(0) +
+			instance->low_latency_index_start;
 
 		instance->msix_vectors = min(num_msix_req,
 				instance->msix_vectors);
@@ -6310,8 +6313,8 @@ static int megasas_init_fw(struct megasas_instance *instance)
 	megasas_setup_reply_map(instance);
 
 	dev_info(&instance->pdev->dev,
-		"current msix/online cpus\t: (%d/%d)\n",
-		instance->msix_vectors, (unsigned int)num_online_cpus());
+		"current msix/max num queues\t: (%d/%u)\n",
+		instance->msix_vectors, blk_mq_num_online_queues(0));
 	dev_info(&instance->pdev->dev,
 		"RDPQ mode\t: (%s)\n", instance->is_rdpq ? "enabled" : "disabled");
 
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index fe98c76e9be32ff03a1960f366f0d700d1168383..c4c6b5c6658c0734f7ff68bcc31b33dde87296dd 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -4533,13 +4533,13 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
 	if (USER_CTRL_IRQ(ha) || !ha->mqiobase) {
 		/* user wants to control IRQ setting for target mode */
 		ret = pci_alloc_irq_vectors(ha->pdev, min_vecs,
-		    min((u16)ha->msix_count, (u16)(num_online_cpus() + min_vecs)),
-		    PCI_IRQ_MSIX);
+			blk_mq_num_online_queues(ha->msix_count) + min_vecs,
+			PCI_IRQ_MSIX);
 	} else
 		ret = pci_alloc_irq_vectors_affinity(ha->pdev, min_vecs,
-		    min((u16)ha->msix_count, (u16)(num_online_cpus() + min_vecs)),
-		    PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
-		    &desc);
+			blk_mq_num_online_queues(ha->msix_count) + min_vecs,
+			PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
+			&desc);
 
 	if (ret < 0) {
 		ql_log(ql_log_fatal, vha, 0x00c7,
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index 0da7be40c925807519f5bff8d428a29e5ce454a5..7212cb96d0f9a337578fa2b982afa3ee6d17f4be 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -5278,15 +5278,14 @@ static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
 	if (reset_devices) {
 		num_queue_groups = 1;
 	} else {
-		int num_cpus;
 		int max_queue_groups;
 
 		max_queue_groups = min(ctrl_info->max_inbound_queues / 2,
 			ctrl_info->max_outbound_queues - 1);
 		max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS);
 
-		num_cpus = num_online_cpus();
-		num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors);
+		num_queue_groups =
+			blk_mq_num_online_queues(ctrl_info->max_msix_vectors);
 		num_queue_groups = min(num_queue_groups, max_queue_groups);
 	}
 

-- 
2.49.0


Thread overview: 28+ messages
2025-04-24 18:19 [PATCH v6 0/9] blk: honor isolcpus configuration Daniel Wagner
2025-04-24 18:19 ` [PATCH v6 1/9] lib/group_cpus: let group_cpu_evenly return number initialized masks Daniel Wagner
2025-04-28 12:37   ` Thomas Gleixner
2025-05-09  1:29   ` Ming Lei
2025-04-24 18:19 ` [PATCH v6 2/9] blk-mq: add number of queue calc helper Daniel Wagner
2025-05-09  1:43   ` Ming Lei
2025-04-24 18:19 ` [PATCH v6 3/9] nvme-pci: use block layer helpers to calculate num of queues Daniel Wagner
2025-05-09  1:47   ` Ming Lei
2025-05-14 16:12     ` Daniel Wagner
2025-04-24 18:19 ` Daniel Wagner [this message]
2025-05-09  1:49   ` [PATCH v6 4/9] scsi: " Ming Lei
2025-04-24 18:19 ` [PATCH v6 5/9] virtio: blk/scsi: " Daniel Wagner
2025-05-09  1:52   ` Ming Lei
2025-04-24 18:19 ` [PATCH v6 6/9] isolation: introduce io_queue isolcpus type Daniel Wagner
2025-04-25  6:26   ` Hannes Reinecke
2025-04-25  7:32     ` Daniel Wagner
2025-05-09  2:04       ` Ming Lei
2025-05-14 16:08         ` Daniel Wagner
2025-04-24 18:19 ` [PATCH v6 7/9] lib/group_cpus: honor housekeeping config when grouping CPUs Daniel Wagner
2025-05-09  2:22   ` Ming Lei
     [not found]   ` <cd1576ee-82a3-4899-b218-2e5c5334af6e@redhat.com>
2025-05-14 17:49     ` Daniel Wagner
2025-04-24 18:19 ` [PATCH v6 8/9] blk-mq: use hk cpus only when isolcpus=io_queue is enabled Daniel Wagner
2025-05-09  2:38   ` Ming Lei
2025-05-15  8:36     ` Daniel Wagner
2025-04-24 18:19 ` [PATCH v6 9/9] blk-mq: prevent offlining hk CPU with associated online isolated CPUs Daniel Wagner
2025-04-25  6:28   ` Hannes Reinecke
2025-05-09  2:54   ` Ming Lei
2025-05-06  3:17 ` [PATCH v6 0/9] blk: honor isolcpus configuration Ming Lei
