* [PATCH 1/6] blk-mq: introduce blk_mq_hctx_map_queues
2024-09-13 7:41 [PATCH 0/6] EDITME: blk: refactor queue affinity helpers Daniel Wagner
@ 2024-09-13 7:41 ` Daniel Wagner
2024-09-13 16:26 ` Bjorn Helgaas
2024-09-13 7:42 ` [PATCH 2/6] scsi: replace blk_mq_pci_map_queues with blk_mq_hctx_map_queues Daniel Wagner
` (4 subsequent siblings)
5 siblings, 1 reply; 11+ messages in thread
From: Daniel Wagner @ 2024-09-13 7:41 UTC (permalink / raw)
To: Jens Axboe, Bjorn Helgaas, Michael S. Tsirkin, Jason Wang,
Martin K. Petersen, Keith Busch, Christoph Hellwig, Sagi Grimberg
Cc: linux-block, linux-kernel, linux-pci, virtualization, linux-scsi,
megaraidlinux.pdl, mpi3mr-linuxdrv.pdl, MPT-FusionLinux.pdl,
storagedev, linux-nvme, Daniel Wagner,
20240912-do-not-overwrite-pci-mapping-v1-1-85724b6cec49, Ming Lei
From: Ming Lei <ming.lei@redhat.com>
blk_mq_pci_map_queues and blk_mq_virtio_map_queues will create a CPU to
hardware queue mapping based on affinity information. These two
functions share code which only differs in how the affinity information
is retrieved. Also there is hisi_sas which open codes the same loop.
Thus introduce a new helper function for creating these mappings which
takes a callback function for fetching the affinity mask. Also
introduce common helper functions for PCI and virtio devices to retrieve
affinity masks.
Signed-off-by: Ming Lei <ming.lei@redhat.com>
[dwagner: - removed fallback mapping
- added affinity helpers (moved to pci/virtio)
- updated commit message]
Signed-off-by: Daniel Wagner <dwagner@suse.de>
---
block/blk-mq-cpumap.c | 35 +++++++++++++++++++++++++++++++++++
drivers/pci/pci.c | 20 ++++++++++++++++++++
drivers/virtio/virtio.c | 31 +++++++++++++++++++++++++++++++
include/linux/blk-mq.h | 5 +++++
include/linux/pci.h | 11 +++++++++++
include/linux/virtio.h | 13 +++++++++++++
6 files changed, 115 insertions(+)
diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c
index 9638b25fd521..c4993c0f822e 100644
--- a/block/blk-mq-cpumap.c
+++ b/block/blk-mq-cpumap.c
@@ -54,3 +54,38 @@ int blk_mq_hw_queue_to_node(struct blk_mq_queue_map *qmap, unsigned int index)
return NUMA_NO_NODE;
}
+
+/**
+ * blk_mq_hctx_map_queues - Create CPU to hardware queue mapping
+ * @qmap: CPU to hardware queue map.
+ * @dev_off: Offset to use for the device.
+ * @dev_data: Device data passed to get_queue_affinity().
+ * @get_queue_affinity: Callback to retrieve queue affinity.
+ *
+ * Create a CPU to hardware queue mapping in @qmap. For each queue
+ * @get_queue_affinity will be called to retrieve the affinity for given
+ * queue.
+ */
+void blk_mq_hctx_map_queues(struct blk_mq_queue_map *qmap,
+ void *dev_data, int dev_off,
+ get_queue_affinty_fn *get_queue_affinity)
+{
+ const struct cpumask *mask;
+ unsigned int queue, cpu;
+
+ for (queue = 0; queue < qmap->nr_queues; queue++) {
+ mask = get_queue_affinity(dev_data, dev_off, queue);
+ if (!mask)
+ goto fallback;
+
+ for_each_cpu(cpu, mask)
+ qmap->mq_map[cpu] = qmap->queue_offset + queue;
+ }
+
+ return;
+
+fallback:
+ WARN_ON_ONCE(qmap->nr_queues > 1);
+ blk_mq_clear_mq_map(qmap);
+}
+EXPORT_SYMBOL_GPL(blk_mq_hctx_map_queues);
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index e3a49f66982d..84f9c16b813b 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -6370,6 +6370,26 @@ int pci_set_vga_state(struct pci_dev *dev, bool decode,
return 0;
}
+#ifdef CONFIG_BLK_MQ_PCI
+/**
+ * pci_get_blk_mq_affinity - get affinity mask queue mapping for PCI device
+ * @dev_data: Pointer to struct pci_dev.
+ * @offset: Offset to use for the pci irq vector
+ * @queue: Queue index
+ *
+ * This function returns for a queue the affinity mask for a PCI device.
+ * It is usually used as callback for blk_mq_hctx_map_queues().
+ */
+const struct cpumask *pci_get_blk_mq_affinity(void *dev_data, int offset,
+ int queue)
+{
+ struct pci_dev *pdev = dev_data;
+
+ return pci_irq_get_affinity(pdev, offset + queue);
+}
+EXPORT_SYMBOL_GPL(pci_get_blk_mq_affinity);
+#endif
+
#ifdef CONFIG_ACPI
bool pci_pr3_present(struct pci_dev *pdev)
{
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index a9b93e99c23a..21667309ca9a 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -592,6 +592,37 @@ int virtio_device_restore(struct virtio_device *dev)
EXPORT_SYMBOL_GPL(virtio_device_restore);
#endif
+const struct cpumask *virtio_get_vq_affinity(struct virtio_device *dev,
+ int index)
+{
+ if (!dev->config->get_vq_affinity)
+ return NULL;
+
+ return dev->config->get_vq_affinity(dev, index);
+}
+EXPORT_SYMBOL_GPL(virtio_get_vq_affinity);
+
+#ifdef CONFIG_BLK_MQ_VIRTIO
+/**
+ * virtio_get_blk_mq_affinity - get affinity mask queue mapping for
+ * virtio device
+ * @dev_data: Pointer to struct virtio_device.
+ * @offset: Offset to use for the virtio irq vector
+ * @queue: Queue index
+ *
+ * This function returns for a queue the affinity mask for a virtio device.
+ * It is usually used as callback for blk_mq_hctx_map_queues().
+ */
+const struct cpumask *virtio_get_blk_mq_affinity(void *dev_data,
+ int offset, int queue)
+{
+ struct virtio_device *vdev = dev_data;
+
+ return virtio_get_vq_affinity(vdev, offset + queue);
+}
+EXPORT_SYMBOL_GPL(virtio_get_blk_mq_affinity);
+#endif
+
static int virtio_init(void)
{
if (bus_register(&virtio_bus) != 0)
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 8d304b1d16b1..b9881a8794af 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -929,7 +929,12 @@ void blk_mq_freeze_queue_wait(struct request_queue *q);
int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
unsigned long timeout);
+typedef const struct cpumask *(get_queue_affinty_fn)(void *dev_data,
+ int dev_off, int queue_idx);
void blk_mq_map_queues(struct blk_mq_queue_map *qmap);
+void blk_mq_hctx_map_queues(struct blk_mq_queue_map *qmap,
+ void *dev_data, int dev_off,
+ get_queue_affinty_fn *get_queue_affinity);
void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);
void blk_mq_quiesce_queue_nowait(struct request_queue *q);
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 4cf89a4b4cbc..97f4797b5060 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1633,6 +1633,17 @@ resource_size_t pcibios_window_alignment(struct pci_bus *bus,
int pci_set_vga_state(struct pci_dev *pdev, bool decode,
unsigned int command_bits, u32 flags);
+#ifdef CONFIG_BLK_MQ_PCI
+const struct cpumask *pci_get_blk_mq_affinity(void *dev_data,
+ int offset, int queue);
+#else
+static inline const struct cpumask *pci_get_blk_mq_affinity(void *dev_data,
+ int offset, int queue)
+{
+ return cpu_possible_mask;
+}
+#endif
+
/*
* Virtual interrupts allow for more interrupts to be allocated
* than the device has interrupts for. These are not programmed
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index ecc5cb7b8c91..49d4f7353e5c 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -170,6 +170,19 @@ int virtio_device_restore(struct virtio_device *dev);
void virtio_reset_device(struct virtio_device *dev);
size_t virtio_max_dma_size(const struct virtio_device *vdev);
+const struct cpumask *virtio_get_vq_affinity(struct virtio_device *dev,
+ int index);
+
+#ifdef CONFIG_BLK_MQ_VIRTIO
+const struct cpumask *virtio_get_blk_mq_affinity(void *dev_data,
+ int offset, int queue);
+#else
+static inline const struct cpumask *virtio_get_blk_mq_affinity(void *dev_data,
+ int offset, int queue)
+{
+ return cpu_possible_mask;
+}
+#endif
#define virtio_device_for_each_vq(vdev, vq) \
list_for_each_entry(vq, &vdev->vqs, list)
--
2.46.0
^ permalink raw reply related [flat|nested] 11+ messages in thread* Re: [PATCH 1/6] blk-mq: introduce blk_mq_hctx_map_queues
2024-09-13 7:41 ` [PATCH 1/6] blk-mq: introduce blk_mq_hctx_map_queues Daniel Wagner
@ 2024-09-13 16:26 ` Bjorn Helgaas
2024-09-15 20:32 ` Jens Axboe
2024-09-16 6:48 ` Christoph Hellwig
0 siblings, 2 replies; 11+ messages in thread
From: Bjorn Helgaas @ 2024-09-13 16:26 UTC (permalink / raw)
To: Daniel Wagner
Cc: Jens Axboe, Bjorn Helgaas, Michael S. Tsirkin, Jason Wang,
Martin K. Petersen, Keith Busch, Christoph Hellwig, Sagi Grimberg,
linux-block, linux-kernel, linux-pci, virtualization, linux-scsi,
megaraidlinux.pdl, mpi3mr-linuxdrv.pdl, MPT-FusionLinux.pdl,
storagedev, linux-nvme, Daniel Wagner,
20240912-do-not-overwrite-pci-mapping-v1-1-85724b6cec49, Ming Lei
On Fri, Sep 13, 2024 at 09:41:59AM +0200, Daniel Wagner wrote:
> From: Ming Lei <ming.lei@redhat.com>
>
> blk_mq_pci_map_queues and blk_mq_virtio_map_queues will create a CPU to
> hardware queue mapping based on affinity information. These two
> function share code which only differs on how the affinity information
> is retrieved. Also there is the hisi_sas which open codes the same loop.
>
> Thus introduce a new helper function for creating these mappings which
> takes an callback function for fetching the affinity mask. Also
> introduce common helper function for PCI and virtio devices to retrieve
> affinity masks.
> diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
> index e3a49f66982d..84f9c16b813b 100644
> --- a/drivers/pci/pci.c
> +++ b/drivers/pci/pci.c
> @@ -6370,6 +6370,26 @@ int pci_set_vga_state(struct pci_dev *dev, bool decode,
> return 0;
> }
>
> +#ifdef CONFIG_BLK_MQ_PCI
> +/**
> + * pci_get_blk_mq_affinity - get affinity mask queue mapping for PCI device
> + * @dev_data: Pointer to struct pci_dev.
> + * @offset: Offset to use for the pci irq vector
> + * @queue: Queue index
> + *
> + * This function returns for a queue the affinity mask for a PCI device.
> + * It is usually used as callback for blk_mq_hctx_map_queues().
> + */
> +const struct cpumask *pci_get_blk_mq_affinity(void *dev_data, int offset,
> + int queue)
> +{
> + struct pci_dev *pdev = dev_data;
> +
> + return pci_irq_get_affinity(pdev, offset + queue);
> +}
> +EXPORT_SYMBOL_GPL(pci_get_blk_mq_affinity);
> +#endif
IMO this doesn't really fit well in drivers/pci since it doesn't add
any PCI-specific knowledge or require any PCI core internals, and the
parameters are blk-specific. I don't object to the code, but it seems
like it could go somewhere in block/?
Bjorn
^ permalink raw reply [flat|nested] 11+ messages in thread* Re: [PATCH 1/6] blk-mq: introduce blk_mq_hctx_map_queues
2024-09-13 16:26 ` Bjorn Helgaas
@ 2024-09-15 20:32 ` Jens Axboe
2024-09-16 6:26 ` Daniel Wagner
2024-09-16 6:48 ` Christoph Hellwig
1 sibling, 1 reply; 11+ messages in thread
From: Jens Axboe @ 2024-09-15 20:32 UTC (permalink / raw)
To: Bjorn Helgaas, Daniel Wagner
Cc: Bjorn Helgaas, Michael S. Tsirkin, Jason Wang, Martin K. Petersen,
Keith Busch, Christoph Hellwig, Sagi Grimberg, linux-block,
linux-kernel, linux-pci, virtualization, linux-scsi,
megaraidlinux.pdl, mpi3mr-linuxdrv.pdl, MPT-FusionLinux.pdl,
storagedev, linux-nvme, Daniel Wagner,
20240912-do-not-overwrite-pci-mapping-v1-1-85724b6cec49, Ming Lei
On 9/13/24 10:26 AM, Bjorn Helgaas wrote:
> On Fri, Sep 13, 2024 at 09:41:59AM +0200, Daniel Wagner wrote:
>> From: Ming Lei <ming.lei@redhat.com>
>>
>> blk_mq_pci_map_queues and blk_mq_virtio_map_queues will create a CPU to
>> hardware queue mapping based on affinity information. These two
>> function share code which only differs on how the affinity information
>> is retrieved. Also there is the hisi_sas which open codes the same loop.
>>
>> Thus introduce a new helper function for creating these mappings which
>> takes an callback function for fetching the affinity mask. Also
>> introduce common helper function for PCI and virtio devices to retrieve
>> affinity masks.
>
>> diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
>> index e3a49f66982d..84f9c16b813b 100644
>> --- a/drivers/pci/pci.c
>> +++ b/drivers/pci/pci.c
>> @@ -6370,6 +6370,26 @@ int pci_set_vga_state(struct pci_dev *dev, bool decode,
>> return 0;
>> }
>>
>> +#ifdef CONFIG_BLK_MQ_PCI
>> +/**
>> + * pci_get_blk_mq_affinity - get affinity mask queue mapping for PCI device
>> + * @dev_data: Pointer to struct pci_dev.
>> + * @offset: Offset to use for the pci irq vector
>> + * @queue: Queue index
>> + *
>> + * This function returns for a queue the affinity mask for a PCI device.
>> + * It is usually used as callback for blk_mq_hctx_map_queues().
>> + */
>> +const struct cpumask *pci_get_blk_mq_affinity(void *dev_data, int offset,
>> + int queue)
>> +{
>> + struct pci_dev *pdev = dev_data;
>> +
>> + return pci_irq_get_affinity(pdev, offset + queue);
>> +}
>> +EXPORT_SYMBOL_GPL(pci_get_blk_mq_affinity);
>> +#endif
>
> IMO this doesn't really fit well in drivers/pci since it doesn't add
> any PCI-specific knowledge or require any PCI core internals, and the
> parameters are blk-specific. I don't object to the code, but it seems
> like it could go somewhere in block/?
Probably not a bad idea.
Unrelated to that topic, but Daniel, all your email gets marked as spam.
I didn't see your series before this reply. This has been common
recently for people that haven't kept up with kernel.org changes, please
check for smtp changes there.
--
Jens Axboe
^ permalink raw reply [flat|nested] 11+ messages in thread* Re: [PATCH 1/6] blk-mq: introduce blk_mq_hctx_map_queues
2024-09-15 20:32 ` Jens Axboe
@ 2024-09-16 6:26 ` Daniel Wagner
0 siblings, 0 replies; 11+ messages in thread
From: Daniel Wagner @ 2024-09-16 6:26 UTC (permalink / raw)
To: Jens Axboe
Cc: Bjorn Helgaas, Daniel Wagner, Bjorn Helgaas, Michael S. Tsirkin,
Jason Wang, Martin K. Petersen, Keith Busch, Christoph Hellwig,
Sagi Grimberg, linux-block, linux-kernel, linux-pci,
virtualization, linux-scsi, megaraidlinux.pdl,
mpi3mr-linuxdrv.pdl, MPT-FusionLinux.pdl, storagedev, linux-nvme,
Ming Lei
On Sun, Sep 15, 2024 at 02:32:30PM GMT, Jens Axboe wrote:
> > IMO this doesn't really fit well in drivers/pci since it doesn't add
> > any PCI-specific knowledge or require any PCI core internals, and the
> > parameters are blk-specific. I don't object to the code, but it seems
> > like it could go somewhere in block/?
>
> Probably not a bad idea.
Christoph suggested moving these functions to the matching subsystem. I am
fine either way.
> Unrelated to that topic, but Daniel, all your email gets marked as spam.
> I didn't see your series before this reply. This has been common
> recently for people that haven't kept up with kernel.org changes, please
> check for smtp changes there.
Thanks for letting me know. FWIW, I switched over to using the kernel.org smtp
server and I must have missed some important config option.
^ permalink raw reply [flat|nested] 11+ messages in thread
* Re: [PATCH 1/6] blk-mq: introduce blk_mq_hctx_map_queues
2024-09-13 16:26 ` Bjorn Helgaas
2024-09-15 20:32 ` Jens Axboe
@ 2024-09-16 6:48 ` Christoph Hellwig
1 sibling, 0 replies; 11+ messages in thread
From: Christoph Hellwig @ 2024-09-16 6:48 UTC (permalink / raw)
To: Bjorn Helgaas
Cc: Daniel Wagner, Jens Axboe, Bjorn Helgaas, Michael S. Tsirkin,
Jason Wang, Martin K. Petersen, Keith Busch, Christoph Hellwig,
Sagi Grimberg, linux-block, linux-kernel, linux-pci,
virtualization, linux-scsi, megaraidlinux.pdl,
mpi3mr-linuxdrv.pdl, MPT-FusionLinux.pdl, storagedev, linux-nvme,
Daniel Wagner,
20240912-do-not-overwrite-pci-mapping-v1-1-85724b6cec49, Ming Lei,
Greg Kroah-Hartman, Rafael J. Wysocki, Thomas Gleixner
On Fri, Sep 13, 2024 at 11:26:54AM -0500, Bjorn Helgaas wrote:
> > +const struct cpumask *pci_get_blk_mq_affinity(void *dev_data, int offset,
> > + int queue)
> > +{
> > + struct pci_dev *pdev = dev_data;
> > +
> > + return pci_irq_get_affinity(pdev, offset + queue);
> > +}
> > +EXPORT_SYMBOL_GPL(pci_get_blk_mq_affinity);
> > +#endif
>
> IMO this doesn't really fit well in drivers/pci since it doesn't add
> any PCI-specific knowledge or require any PCI core internals, and the
> parameters are blk-specific. I don't object to the code, but it seems
> like it could go somewhere in block/?
That's where it, or rather the current equivalent, lives, which is a bit
silly. That being said, I suspect the nicest thing would be to offer a
real irq_get_affinity interface at the bus level.
e.g. add something like:
const struct cpumask *(*irq_get_affinity)(struct device *dev,
unsigned int irq_vec);
to struct bus_type so that any layer can just query the irq affinity
for buses that support it without extra glue code.
^ permalink raw reply [flat|nested] 11+ messages in thread
* [PATCH 2/6] scsi: replace blk_mq_pci_map_queues with blk_mq_hctx_map_queues
2024-09-13 7:41 [PATCH 0/6] EDITME: blk: refactor queue affinity helpers Daniel Wagner
2024-09-13 7:41 ` [PATCH 1/6] blk-mq: introduce blk_mq_hctx_map_queues Daniel Wagner
@ 2024-09-13 7:42 ` Daniel Wagner
2024-09-13 7:42 ` [PATCH 3/6] scsi: hisi_sas: " Daniel Wagner
` (3 subsequent siblings)
5 siblings, 0 replies; 11+ messages in thread
From: Daniel Wagner @ 2024-09-13 7:42 UTC (permalink / raw)
To: Jens Axboe, Bjorn Helgaas, Michael S. Tsirkin, Jason Wang,
Martin K. Petersen, Keith Busch, Christoph Hellwig, Sagi Grimberg
Cc: linux-block, linux-kernel, linux-pci, virtualization, linux-scsi,
megaraidlinux.pdl, mpi3mr-linuxdrv.pdl, MPT-FusionLinux.pdl,
storagedev, linux-nvme, Daniel Wagner,
20240912-do-not-overwrite-pci-mapping-v1-1-85724b6cec49
From: Daniel Wagner <dwagner@suse.de>
Replace all users of blk_mq_pci_map_queues with the more generic
blk_mq_hctx_map_queues. This is in preparation for retiring
blk_mq_pci_map_queues.
Signed-off-by: Daniel Wagner <dwagner@suse.de>
---
drivers/scsi/fnic/fnic_main.c | 4 ++--
drivers/scsi/megaraid/megaraid_sas_base.c | 4 ++--
drivers/scsi/mpi3mr/mpi3mr.h | 1 -
drivers/scsi/mpi3mr/mpi3mr_os.c | 3 ++-
drivers/scsi/mpt3sas/mpt3sas_scsih.c | 4 ++--
drivers/scsi/pm8001/pm8001_init.c | 3 ++-
drivers/scsi/pm8001/pm8001_sas.h | 1 -
drivers/scsi/qla2xxx/qla_nvme.c | 4 ++--
drivers/scsi/qla2xxx/qla_os.c | 4 ++--
drivers/scsi/smartpqi/smartpqi_init.c | 8 ++++----
10 files changed, 18 insertions(+), 18 deletions(-)
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index 29eead383eb9..77ad1971351e 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -16,7 +16,6 @@
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/if_ether.h>
-#include <linux/blk-mq-pci.h>
#include <scsi/fc/fc_fip.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
@@ -601,7 +600,8 @@ void fnic_mq_map_queues_cpus(struct Scsi_Host *host)
return;
}
- blk_mq_pci_map_queues(qmap, l_pdev, FNIC_PCI_OFFSET);
+ blk_mq_hctx_map_queues(qmap, l_pdev, FNIC_PCI_OFFSET,
+ pci_get_blk_mq_affinity);
}
static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 6c79c350a4d5..597bf8476bbc 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -37,7 +37,6 @@
#include <linux/poll.h>
#include <linux/vmalloc.h>
#include <linux/irq_poll.h>
-#include <linux/blk-mq-pci.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
@@ -3193,7 +3192,8 @@ static void megasas_map_queues(struct Scsi_Host *shost)
map = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
map->nr_queues = instance->msix_vectors - offset;
map->queue_offset = 0;
- blk_mq_pci_map_queues(map, instance->pdev, offset);
+ blk_mq_hctx_map_queues(map, instance->pdev, offset,
+ pci_get_blk_mq_affinity);
qoff += map->nr_queues;
offset += map->nr_queues;
diff --git a/drivers/scsi/mpi3mr/mpi3mr.h b/drivers/scsi/mpi3mr/mpi3mr.h
index dc2cdd5f0311..8e502e34e18b 100644
--- a/drivers/scsi/mpi3mr/mpi3mr.h
+++ b/drivers/scsi/mpi3mr/mpi3mr.h
@@ -12,7 +12,6 @@
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
-#include <linux/blk-mq-pci.h>
#include <linux/delay.h>
#include <linux/dmapool.h>
#include <linux/errno.h>
diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c
index 69b14918de59..1002c19aa2d1 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_os.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_os.c
@@ -4031,7 +4031,8 @@ static void mpi3mr_map_queues(struct Scsi_Host *shost)
*/
map->queue_offset = qoff;
if (i != HCTX_TYPE_POLL)
- blk_mq_pci_map_queues(map, mrioc->pdev, offset);
+ blk_mq_hctx_map_queues(map, mrioc->pdev, offset,
+ pci_get_blk_mq_affinity);
else
blk_mq_map_queues(map);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 97c2472cd434..162d3da5d8d0 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -53,7 +53,6 @@
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/raid_class.h>
-#include <linux/blk-mq-pci.h>
#include <asm/unaligned.h>
#include "mpt3sas_base.h"
@@ -11890,7 +11889,8 @@ static void scsih_map_queues(struct Scsi_Host *shost)
*/
map->queue_offset = qoff;
if (i != HCTX_TYPE_POLL)
- blk_mq_pci_map_queues(map, ioc->pdev, offset);
+ blk_mq_hctx_map_queues(map, ioc->pdev, offset,
+ pci_get_blk_mq_affinity);
else
blk_mq_map_queues(map);
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 33e1eba62ca1..47a0917fb2a0 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -101,7 +101,8 @@ static void pm8001_map_queues(struct Scsi_Host *shost)
struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
if (pm8001_ha->number_of_intr > 1) {
- blk_mq_pci_map_queues(qmap, pm8001_ha->pdev, 1);
+ blk_mq_hctx_map_queues(qmap, pm8001_ha->pdev, 1,
+ pci_get_blk_mq_affinity);
return;
}
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index ced6721380a8..c46470e0cf63 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -56,7 +56,6 @@
#include <scsi/sas_ata.h>
#include <linux/atomic.h>
#include <linux/blk-mq.h>
-#include <linux/blk-mq-pci.h>
#include "pm8001_defs.h"
#define DRV_NAME "pm80xx"
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index 8f4cc136a9c9..ea1ef2fae7dd 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -8,7 +8,6 @@
#include <linux/delay.h>
#include <linux/nvme.h>
#include <linux/nvme-fc.h>
-#include <linux/blk-mq-pci.h>
#include <linux/blk-mq.h>
static struct nvme_fc_port_template qla_nvme_fc_transport;
@@ -841,7 +840,8 @@ static void qla_nvme_map_queues(struct nvme_fc_local_port *lport,
{
struct scsi_qla_host *vha = lport->private;
- blk_mq_pci_map_queues(map, vha->hw->pdev, vha->irq_offset);
+ blk_mq_hctx_map_queues(map, vha->hw->pdev, vha->irq_offset,
+ pci_get_blk_mq_affinity);
}
static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index bc3b2aea3f8b..521ae591898d 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -13,7 +13,6 @@
#include <linux/mutex.h>
#include <linux/kobject.h>
#include <linux/slab.h>
-#include <linux/blk-mq-pci.h>
#include <linux/refcount.h>
#include <linux/crash_dump.h>
#include <linux/trace_events.h>
@@ -8068,7 +8067,8 @@ static void qla2xxx_map_queues(struct Scsi_Host *shost)
if (USER_CTRL_IRQ(vha->hw) || !vha->hw->mqiobase)
blk_mq_map_queues(qmap);
else
- blk_mq_pci_map_queues(qmap, vha->hw->pdev, vha->irq_offset);
+ blk_mq_hctx_map_queues(qmap, vha->hw->pdev, vha->irq_offset,
+ pci_get_blk_mq_affinity);
}
struct scsi_host_template qla2xxx_driver_template = {
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index 24c7cb285dca..9f8c2e16d55b 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -19,7 +19,6 @@
#include <linux/bcd.h>
#include <linux/reboot.h>
#include <linux/cciss_ioctl.h>
-#include <linux/blk-mq-pci.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
@@ -6533,10 +6532,11 @@ static void pqi_map_queues(struct Scsi_Host *shost)
struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
if (!ctrl_info->disable_managed_interrupts)
- return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
- ctrl_info->pci_dev, 0);
+ blk_mq_hctx_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
+ ctrl_info->pci_dev, 0,
+ pci_get_blk_mq_affinity);
else
- return blk_mq_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT]);
+ blk_mq_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT]);
}
static inline bool pqi_is_tape_changer_device(struct pqi_scsi_dev *device)
--
2.46.0
^ permalink raw reply related [flat|nested] 11+ messages in thread* [PATCH 3/6] scsi: hisi_sas: replace blk_mq_pci_map_queues with blk_mq_hctx_map_queues
2024-09-13 7:41 [PATCH 0/6] EDITME: blk: refactor queue affinity helpers Daniel Wagner
2024-09-13 7:41 ` [PATCH 1/6] blk-mq: introduce blk_mq_hctx_map_queues Daniel Wagner
2024-09-13 7:42 ` [PATCH 2/6] scsi: replace blk_mq_pci_map_queues with blk_mq_hctx_map_queues Daniel Wagner
@ 2024-09-13 7:42 ` Daniel Wagner
2024-09-13 7:42 ` [PATCH 4/6] nvme: " Daniel Wagner
` (2 subsequent siblings)
5 siblings, 0 replies; 11+ messages in thread
From: Daniel Wagner @ 2024-09-13 7:42 UTC (permalink / raw)
To: Jens Axboe, Bjorn Helgaas, Michael S. Tsirkin, Jason Wang,
Martin K. Petersen, Keith Busch, Christoph Hellwig, Sagi Grimberg
Cc: linux-block, linux-kernel, linux-pci, virtualization, linux-scsi,
megaraidlinux.pdl, mpi3mr-linuxdrv.pdl, MPT-FusionLinux.pdl,
storagedev, linux-nvme, Daniel Wagner,
20240912-do-not-overwrite-pci-mapping-v1-1-85724b6cec49
From: Daniel Wagner <dwagner@suse.de>
Replace all users of blk_mq_pci_map_queues with the more generic
blk_mq_hctx_map_queues. This is in preparation for retiring
blk_mq_pci_map_queues.
For hisi_sas_v2_hw.c we have to provide its own callback for retrieving
the affinity because pci_get_blk_mq_affinity is using
pci_irq_get_affinity and not irq_data_get_affinity_mask.
But at least we can replace the open code loop with
blk_mq_hctx_map_queues.
Signed-off-by: Daniel Wagner <dwagner@suse.de>
---
drivers/scsi/hisi_sas/hisi_sas.h | 1 -
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c | 20 ++++++++++----------
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | 5 +++--
3 files changed, 13 insertions(+), 13 deletions(-)
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index d223f482488f..010479a354ee 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -9,7 +9,6 @@
#include <linux/acpi.h>
#include <linux/blk-mq.h>
-#include <linux/blk-mq-pci.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/dmapool.h>
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index 342d75f12051..31be34f23164 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -3549,21 +3549,21 @@ static const struct attribute_group *sdev_groups_v2_hw[] = {
NULL
};
+static const struct cpumask *hisi_hba_get_queue_affinity(void *dev_data,
+ int offset, int queue)
+{
+ struct hisi_hba *hba = dev_data;
+
+ return irq_get_affinity_mask(hba->irq_map[offset + queue]);
+}
+
static void map_queues_v2_hw(struct Scsi_Host *shost)
{
struct hisi_hba *hisi_hba = shost_priv(shost);
struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
- const struct cpumask *mask;
- unsigned int queue, cpu;
- for (queue = 0; queue < qmap->nr_queues; queue++) {
- mask = irq_get_affinity_mask(hisi_hba->irq_map[96 + queue]);
- if (!mask)
- continue;
-
- for_each_cpu(cpu, mask)
- qmap->mq_map[cpu] = qmap->queue_offset + queue;
- }
+ blk_mq_hctx_map_queues(qmap, hisi_hba, CQ0_IRQ_INDEX,
+ hisi_hba_get_queue_affinity);
}
static const struct scsi_host_template sht_v2_hw = {
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index feda9b54b443..1576eee943ba 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -3322,8 +3322,9 @@ static void hisi_sas_map_queues(struct Scsi_Host *shost)
if (i == HCTX_TYPE_POLL)
blk_mq_map_queues(qmap);
else
- blk_mq_pci_map_queues(qmap, hisi_hba->pci_dev,
- BASE_VECTORS_V3_HW);
+ blk_mq_hctx_map_queues(qmap, hisi_hba->pci_dev,
+ BASE_VECTORS_V3_HW,
+ pci_get_blk_mq_affinity);
qoff += qmap->nr_queues;
}
}
--
2.46.0
^ permalink raw reply related [flat|nested] 11+ messages in thread* [PATCH 4/6] nvme: replace blk_mq_pci_map_queues with blk_mq_hctx_map_queues
2024-09-13 7:41 [PATCH 0/6] EDITME: blk: refactor queue affinity helpers Daniel Wagner
` (2 preceding siblings ...)
2024-09-13 7:42 ` [PATCH 3/6] scsi: hisi_sas: " Daniel Wagner
@ 2024-09-13 7:42 ` Daniel Wagner
2024-09-13 7:42 ` [PATCH 5/6] virtio: blk/scsi: replace blk_mq_virtio_map_queues " Daniel Wagner
2024-09-13 7:42 ` [PATCH 6/6] blk-mq: remove unused queue mapping helpers Daniel Wagner
5 siblings, 0 replies; 11+ messages in thread
From: Daniel Wagner @ 2024-09-13 7:42 UTC (permalink / raw)
To: Jens Axboe, Bjorn Helgaas, Michael S. Tsirkin, Jason Wang,
Martin K. Petersen, Keith Busch, Christoph Hellwig, Sagi Grimberg
Cc: linux-block, linux-kernel, linux-pci, virtualization, linux-scsi,
megaraidlinux.pdl, mpi3mr-linuxdrv.pdl, MPT-FusionLinux.pdl,
storagedev, linux-nvme, Daniel Wagner,
20240912-do-not-overwrite-pci-mapping-v1-1-85724b6cec49
From: Daniel Wagner <dwagner@suse.de>
Replace all users of blk_mq_pci_map_queues with the more generic
blk_mq_hctx_map_queues. This is in preparation for retiring
blk_mq_pci_map_queues.
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Daniel Wagner <dwagner@suse.de>
---
drivers/nvme/host/fc.c | 1 -
drivers/nvme/host/pci.c | 4 ++--
2 files changed, 2 insertions(+), 3 deletions(-)
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index b81af7919e94..094be164ffdc 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -16,7 +16,6 @@
#include <linux/nvme-fc.h>
#include "fc.h"
#include <scsi/scsi_transport_fc.h>
-#include <linux/blk-mq-pci.h>
/* *************************** Data Structures/Defines ****************** */
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 6cd9395ba9ec..4af6931af19f 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -8,7 +8,6 @@
#include <linux/async.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
-#include <linux/blk-mq-pci.h>
#include <linux/blk-integrity.h>
#include <linux/dmi.h>
#include <linux/init.h>
@@ -457,7 +456,8 @@ static void nvme_pci_map_queues(struct blk_mq_tag_set *set)
*/
map->queue_offset = qoff;
if (i != HCTX_TYPE_POLL && offset)
- blk_mq_pci_map_queues(map, to_pci_dev(dev->dev), offset);
+ blk_mq_hctx_map_queues(map, to_pci_dev(dev->dev), offset,
+ pci_get_blk_mq_affinity);
else
blk_mq_map_queues(map);
qoff += map->nr_queues;
--
2.46.0
^ permalink raw reply related [flat|nested] 11+ messages in thread* [PATCH 5/6] virtio: blk/scsi: replace blk_mq_virtio_map_queues with blk_mq_hctx_map_queues
2024-09-13 7:41 [PATCH 0/6] EDITME: blk: refactor queue affinity helpers Daniel Wagner
` (3 preceding siblings ...)
2024-09-13 7:42 ` [PATCH 4/6] nvme: " Daniel Wagner
@ 2024-09-13 7:42 ` Daniel Wagner
2024-09-13 7:42 ` [PATCH 6/6] blk-mq: remove unused queue mapping helpers Daniel Wagner
5 siblings, 0 replies; 11+ messages in thread
From: Daniel Wagner @ 2024-09-13 7:42 UTC (permalink / raw)
To: Jens Axboe, Bjorn Helgaas, Michael S. Tsirkin, Jason Wang,
Martin K. Petersen, Keith Busch, Christoph Hellwig, Sagi Grimberg
Cc: linux-block, linux-kernel, linux-pci, virtualization, linux-scsi,
megaraidlinux.pdl, mpi3mr-linuxdrv.pdl, MPT-FusionLinux.pdl,
storagedev, linux-nvme, Daniel Wagner,
20240912-do-not-overwrite-pci-mapping-v1-1-85724b6cec49
From: Daniel Wagner <dwagner@suse.de>
Replace all users of blk_mq_virtio_map_queues with the more generic
blk_mq_hctx_map_queues. This is in preparation for retiring
blk_mq_virtio_map_queues.
Signed-off-by: Daniel Wagner <dwagner@suse.de>
---
drivers/block/virtio_blk.c | 4 ++--
drivers/scsi/virtio_scsi.c | 4 ++--
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 194417abc105..c3f4d1bdc0ef 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -13,7 +13,6 @@
#include <linux/string_helpers.h>
#include <linux/idr.h>
#include <linux/blk-mq.h>
-#include <linux/blk-mq-virtio.h>
#include <linux/numa.h>
#include <linux/vmalloc.h>
#include <uapi/linux/virtio_ring.h>
@@ -1186,7 +1185,8 @@ static void virtblk_map_queues(struct blk_mq_tag_set *set)
if (i == HCTX_TYPE_POLL)
blk_mq_map_queues(&set->map[i]);
else
- blk_mq_virtio_map_queues(&set->map[i], vblk->vdev, 0);
+ blk_mq_hctx_map_queues(&set->map[i], vblk->vdev, 0,
+ virtio_get_blk_mq_affinity);
}
}
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 8471f38b730e..4104db7a6dff 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -29,7 +29,6 @@
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_devinfo.h>
#include <linux/seqlock.h>
-#include <linux/blk-mq-virtio.h>
#include "sd.h"
@@ -746,7 +745,8 @@ static void virtscsi_map_queues(struct Scsi_Host *shost)
if (i == HCTX_TYPE_POLL)
blk_mq_map_queues(map);
else
- blk_mq_virtio_map_queues(map, vscsi->vdev, 2);
+ blk_mq_hctx_map_queues(map, vscsi->vdev, 2,
+ virtio_get_blk_mq_affinity);
}
}
--
2.46.0
^ permalink raw reply related [flat|nested] 11+ messages in thread* [PATCH 6/6] blk-mq: remove unused queue mapping helpers
2024-09-13 7:41 [PATCH 0/6] blk: refactor queue affinity helpers Daniel Wagner
` (4 preceding siblings ...)
2024-09-13 7:42 ` [PATCH 5/6] virtio: blk/scsi: replace blk_mq_virtio_map_queues " Daniel Wagner
@ 2024-09-13 7:42 ` Daniel Wagner
5 siblings, 0 replies; 11+ messages in thread
From: Daniel Wagner @ 2024-09-13 7:42 UTC (permalink / raw)
To: Jens Axboe, Bjorn Helgaas, Michael S. Tsirkin, Jason Wang,
Martin K. Petersen, Keith Busch, Christoph Hellwig, Sagi Grimberg
Cc: linux-block, linux-kernel, linux-pci, virtualization, linux-scsi,
megaraidlinux.pdl, mpi3mr-linuxdrv.pdl, MPT-FusionLinux.pdl,
storagedev, linux-nvme, Daniel Wagner,
20240912-do-not-overwrite-pci-mapping-v1-1-85724b6cec49
From: Daniel Wagner <dwagner@suse.de>
There are no users left of the PCI and virtio queue mapping helpers.
Thus remove them.
Signed-off-by: Daniel Wagner <dwagner@suse.de>
---
block/Makefile | 2 --
block/blk-mq-pci.c | 46 -------------------------------------------
block/blk-mq-virtio.c | 46 -------------------------------------------
include/linux/blk-mq-pci.h | 11 -----------
include/linux/blk-mq-virtio.h | 11 -----------
5 files changed, 116 deletions(-)
diff --git a/block/Makefile b/block/Makefile
index ddfd21c1a9ff..33748123710b 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -27,8 +27,6 @@ bfq-y := bfq-iosched.o bfq-wf2q.o bfq-cgroup.o
obj-$(CONFIG_IOSCHED_BFQ) += bfq.o
obj-$(CONFIG_BLK_DEV_INTEGRITY) += bio-integrity.o blk-integrity.o t10-pi.o
-obj-$(CONFIG_BLK_MQ_PCI) += blk-mq-pci.o
-obj-$(CONFIG_BLK_MQ_VIRTIO) += blk-mq-virtio.o
obj-$(CONFIG_BLK_DEV_ZONED) += blk-zoned.o
obj-$(CONFIG_BLK_WBT) += blk-wbt.o
obj-$(CONFIG_BLK_DEBUG_FS) += blk-mq-debugfs.o
diff --git a/block/blk-mq-pci.c b/block/blk-mq-pci.c
deleted file mode 100644
index d47b5c73c9eb..000000000000
--- a/block/blk-mq-pci.c
+++ /dev/null
@@ -1,46 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (c) 2016 Christoph Hellwig.
- */
-#include <linux/kobject.h>
-#include <linux/blkdev.h>
-#include <linux/blk-mq-pci.h>
-#include <linux/pci.h>
-#include <linux/module.h>
-
-#include "blk-mq.h"
-
-/**
- * blk_mq_pci_map_queues - provide a default queue mapping for PCI device
- * @qmap: CPU to hardware queue map.
- * @pdev: PCI device associated with @set.
- * @offset: Offset to use for the pci irq vector
- *
- * This function assumes the PCI device @pdev has at least as many available
- * interrupt vectors as @set has queues. It will then query the vector
- * corresponding to each queue for it's affinity mask and built queue mapping
- * that maps a queue to the CPUs that have irq affinity for the corresponding
- * vector.
- */
-void blk_mq_pci_map_queues(struct blk_mq_queue_map *qmap, struct pci_dev *pdev,
- int offset)
-{
- const struct cpumask *mask;
- unsigned int queue, cpu;
-
- for (queue = 0; queue < qmap->nr_queues; queue++) {
- mask = pci_irq_get_affinity(pdev, queue + offset);
- if (!mask)
- goto fallback;
-
- for_each_cpu(cpu, mask)
- qmap->mq_map[cpu] = qmap->queue_offset + queue;
- }
-
- return;
-
-fallback:
- WARN_ON_ONCE(qmap->nr_queues > 1);
- blk_mq_clear_mq_map(qmap);
-}
-EXPORT_SYMBOL_GPL(blk_mq_pci_map_queues);
diff --git a/block/blk-mq-virtio.c b/block/blk-mq-virtio.c
deleted file mode 100644
index 68d0945c0b08..000000000000
--- a/block/blk-mq-virtio.c
+++ /dev/null
@@ -1,46 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (c) 2016 Christoph Hellwig.
- */
-#include <linux/device.h>
-#include <linux/blk-mq-virtio.h>
-#include <linux/virtio_config.h>
-#include <linux/module.h>
-#include "blk-mq.h"
-
-/**
- * blk_mq_virtio_map_queues - provide a default queue mapping for virtio device
- * @qmap: CPU to hardware queue map.
- * @vdev: virtio device to provide a mapping for.
- * @first_vec: first interrupt vectors to use for queues (usually 0)
- *
- * This function assumes the virtio device @vdev has at least as many available
- * interrupt vectors as @set has queues. It will then query the vector
- * corresponding to each queue for it's affinity mask and built queue mapping
- * that maps a queue to the CPUs that have irq affinity for the corresponding
- * vector.
- */
-void blk_mq_virtio_map_queues(struct blk_mq_queue_map *qmap,
- struct virtio_device *vdev, int first_vec)
-{
- const struct cpumask *mask;
- unsigned int queue, cpu;
-
- if (!vdev->config->get_vq_affinity)
- goto fallback;
-
- for (queue = 0; queue < qmap->nr_queues; queue++) {
- mask = vdev->config->get_vq_affinity(vdev, first_vec + queue);
- if (!mask)
- goto fallback;
-
- for_each_cpu(cpu, mask)
- qmap->mq_map[cpu] = qmap->queue_offset + queue;
- }
-
- return;
-
-fallback:
- blk_mq_map_queues(qmap);
-}
-EXPORT_SYMBOL_GPL(blk_mq_virtio_map_queues);
diff --git a/include/linux/blk-mq-pci.h b/include/linux/blk-mq-pci.h
deleted file mode 100644
index ca544e1d3508..000000000000
--- a/include/linux/blk-mq-pci.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_BLK_MQ_PCI_H
-#define _LINUX_BLK_MQ_PCI_H
-
-struct blk_mq_queue_map;
-struct pci_dev;
-
-void blk_mq_pci_map_queues(struct blk_mq_queue_map *qmap, struct pci_dev *pdev,
- int offset);
-
-#endif /* _LINUX_BLK_MQ_PCI_H */
diff --git a/include/linux/blk-mq-virtio.h b/include/linux/blk-mq-virtio.h
deleted file mode 100644
index 13226e9b22dd..000000000000
--- a/include/linux/blk-mq-virtio.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_BLK_MQ_VIRTIO_H
-#define _LINUX_BLK_MQ_VIRTIO_H
-
-struct blk_mq_queue_map;
-struct virtio_device;
-
-void blk_mq_virtio_map_queues(struct blk_mq_queue_map *qmap,
- struct virtio_device *vdev, int first_vec);
-
-#endif /* _LINUX_BLK_MQ_VIRTIO_H */
--
2.46.0
^ permalink raw reply related [flat|nested] 11+ messages in thread