From: Ming Lei <ming.lei@redhat.com>
To: Daniel Wagner <wagi@kernel.org>
Cc: Jens Axboe <axboe@kernel.dk>, Keith Busch <kbusch@kernel.org>,
	Christoph Hellwig <hch@lst.de>, Sagi Grimberg <sagi@grimberg.me>,
	"Michael S. Tsirkin" <mst@redhat.com>,
	"Martin K. Petersen" <martin.petersen@oracle.com>,
	Thomas Gleixner <tglx@linutronix.de>,
	Costa Shulyupin <costa.shul@redhat.com>,
	Juri Lelli <juri.lelli@redhat.com>,
	Valentin Schneider <vschneid@redhat.com>,
	Waiman Long <llong@redhat.com>,
	Frederic Weisbecker <frederic@kernel.org>,
	Mel Gorman <mgorman@suse.de>, Hannes Reinecke <hare@suse.de>,
	Mathieu Desnoyers <mathieu.desnoyers@efficios.com>,
	linux-kernel@vger.kernel.org, linux-block@vger.kernel.org,
	linux-nvme@lists.infradead.org, megaraidlinux.pdl@broadcom.com,
	linux-scsi@vger.kernel.org, storagedev@microchip.com,
	virtualization@lists.linux.dev,
	GR-QLogic-Storage-Upstream@marvell.com
Subject: Re: [PATCH v6 1/9] lib/group_cpus: let group_cpu_evenly return number of initialized masks
Date: Fri, 9 May 2025 09:29:48 +0800	[thread overview]
Message-ID: <aB1ajDUwJy6PVGEY@fedora> (raw)
In-Reply-To: <20250424-isolcpus-io-queues-v6-1-9a53a870ca1f@kernel.org>

On Thu, Apr 24, 2025 at 08:19:40PM +0200, Daniel Wagner wrote:
> group_cpus_evenly() might allocate fewer groups than requested:
> 
> group_cpus_evenly
>   __group_cpus_evenly
>     alloc_nodes_groups
>       # the number of allocated groups may be less than numgrps when
>       # the number of active CPUs is less than numgrps
> 
> In this case, the caller can perform an out-of-bounds access because it
> assumes the returned masks array has numgrps usable entries.
> 
> Return the number of groups created so the caller can limit the access
> range accordingly.
> 
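
A rough sketch of the problematic pattern described above (use_mask() is
a hypothetical stand-in for the per-group work, not from the patch):

	struct cpumask *masks = group_cpus_evenly(numgrps);
	unsigned int i;

	if (!masks)
		return -ENOMEM;
	for (i = 0; i < numgrps; i++)	/* may run past the populated masks */
		use_mask(&masks[i]);
	kfree(masks);

With the new out-parameter the callers can bound or wrap such loops, as
the hunks below do.
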
> Reviewed-by: Hannes Reinecke <hare@suse.de>
> Signed-off-by: Daniel Wagner <wagi@kernel.org>
> ---
>  block/blk-mq-cpumap.c        |  6 +++---
>  drivers/virtio/virtio_vdpa.c |  9 +++++----
>  fs/fuse/virtio_fs.c          |  6 +++---
>  include/linux/group_cpus.h   |  3 ++-
>  kernel/irq/affinity.c        |  9 +++++----
>  lib/group_cpus.c             | 12 +++++++++---
>  6 files changed, 27 insertions(+), 18 deletions(-)
> 
> diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c
> index 444798c5374f48088b661b519f2638bda8556cf2..269161252add756897fce1b65cae5b2e6aebd647 100644
> --- a/block/blk-mq-cpumap.c
> +++ b/block/blk-mq-cpumap.c
> @@ -19,9 +19,9 @@
>  void blk_mq_map_queues(struct blk_mq_queue_map *qmap)
>  {
>  	const struct cpumask *masks;
> -	unsigned int queue, cpu;
> +	unsigned int queue, cpu, nr_masks;
>  
> -	masks = group_cpus_evenly(qmap->nr_queues);
> +	masks = group_cpus_evenly(qmap->nr_queues, &nr_masks);
>  	if (!masks) {
>  		for_each_possible_cpu(cpu)
>  			qmap->mq_map[cpu] = qmap->queue_offset;
> @@ -29,7 +29,7 @@ void blk_mq_map_queues(struct blk_mq_queue_map *qmap)
>  	}
>  
>  	for (queue = 0; queue < qmap->nr_queues; queue++) {
> -		for_each_cpu(cpu, &masks[queue])
> +		for_each_cpu(cpu, &masks[queue % nr_masks])
>  			qmap->mq_map[cpu] = qmap->queue_offset + queue;
>  	}
>  	kfree(masks);
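
The queue % nr_masks wrap above means that when fewer masks than queues
are produced, the extra queues reuse the populated masks round-robin
instead of touching unpopulated entries. A standalone sketch of the
resulting mapping (illustrative values only):

	#include <stdio.h>

	int main(void)
	{
		unsigned int nr_queues = 4, nr_masks = 2, queue;

		/* queues 2 and 3 wrap back onto masks[0] and masks[1] */
		for (queue = 0; queue < nr_queues; queue++)
			printf("queue %u -> masks[%u]\n",
			       queue, queue % nr_masks);
		return 0;
	}
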
> diff --git a/drivers/virtio/virtio_vdpa.c b/drivers/virtio/virtio_vdpa.c
> index 1f60c9d5cb1810a6f208c24bb2ac640d537391a0..a7b297dae4890c9d6002744b90fc133bbedb7b44 100644
> --- a/drivers/virtio/virtio_vdpa.c
> +++ b/drivers/virtio/virtio_vdpa.c
> @@ -329,20 +329,21 @@ create_affinity_masks(unsigned int nvecs, struct irq_affinity *affd)
>  
>  	for (i = 0, usedvecs = 0; i < affd->nr_sets; i++) {
>  		unsigned int this_vecs = affd->set_size[i];
> +		unsigned int nr_masks;
>  		int j;
> -		struct cpumask *result = group_cpus_evenly(this_vecs);
> +		struct cpumask *result = group_cpus_evenly(this_vecs, &nr_masks);
>  
>  		if (!result) {
>  			kfree(masks);
>  			return NULL;
>  		}
>  
> -		for (j = 0; j < this_vecs; j++)
> +		for (j = 0; j < nr_masks; j++)
>  			cpumask_copy(&masks[curvec + j], &result[j]);
>  		kfree(result);
>  
> -		curvec += this_vecs;
> -		usedvecs += this_vecs;
> +		curvec += nr_masks;
> +		usedvecs += nr_masks;
>  	}
>  
>  	/* Fill out vectors at the end that don't need affinity */
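
Advancing curvec and usedvecs by nr_masks instead of this_vecs keeps the
destination index in step with what was actually copied: only the
populated entries of result[] move into masks[], and any leftover
vectors are then filled with the default affinity, as the quoted comment
notes. A worked example with hypothetical per-set numbers:

	/* set 0: this_vecs = 4, nr_masks = 3  ->  curvec = usedvecs = 3 */
	/* set 1: this_vecs = 4, nr_masks = 4  ->  curvec = usedvecs = 7 */
	/* remaining vectors 7..nvecs-1 then get the default spread */
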
> diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c
> index 2c7b24cb67adb2cb329ed545f56f04700aca8b81..7ed43b9ea4f3f8b108f1e0d7050c27267b9941c9 100644
> --- a/fs/fuse/virtio_fs.c
> +++ b/fs/fuse/virtio_fs.c
> @@ -862,7 +862,7 @@ static void virtio_fs_requests_done_work(struct work_struct *work)
>  static void virtio_fs_map_queues(struct virtio_device *vdev, struct virtio_fs *fs)
>  {
>  	const struct cpumask *mask, *masks;
> -	unsigned int q, cpu;
> +	unsigned int q, cpu, nr_masks;
>  
>  	/* First attempt to map using existing transport layer affinities
>  	 * e.g. PCIe MSI-X
> @@ -882,7 +882,7 @@ static void virtio_fs_map_queues(struct virtio_device *vdev, struct virtio_fs *f
>  	return;
>  fallback:
>  	/* Attempt to map evenly in groups over the CPUs */
> -	masks = group_cpus_evenly(fs->num_request_queues);
> +	masks = group_cpus_evenly(fs->num_request_queues, &nr_masks);
>  	/* If even this fails we default to all CPUs use first request queue */
>  	if (!masks) {
>  		for_each_possible_cpu(cpu)
> @@ -891,7 +891,7 @@ static void virtio_fs_map_queues(struct virtio_device *vdev, struct virtio_fs *f
>  	}
>  
>  	for (q = 0; q < fs->num_request_queues; q++) {
> -		for_each_cpu(cpu, &masks[q])
> +		for_each_cpu(cpu, &masks[q % nr_masks])
>  			fs->mq_map[cpu] = q + VQ_REQUEST;
>  	}
>  	kfree(masks);
> diff --git a/include/linux/group_cpus.h b/include/linux/group_cpus.h
> index e42807ec61f6e8cf3787af7daa0d8686edfef0a3..bd5dada6e8606fa6cf8f7babf939e39fd7475c8d 100644
> --- a/include/linux/group_cpus.h
> +++ b/include/linux/group_cpus.h
> @@ -9,6 +9,7 @@
>  #include <linux/kernel.h>
>  #include <linux/cpu.h>
>  
> -struct cpumask *group_cpus_evenly(unsigned int numgrps);
> +struct cpumask *group_cpus_evenly(unsigned int numgrps,
> +				  unsigned int *nummasks);
>  
>  #endif
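
With the updated prototype, a caller can bound its accesses by the
returned count. A minimal hypothetical caller (consume_mask() is
illustrative only, not part of the patch):

	unsigned int nr_masks, i;
	struct cpumask *masks = group_cpus_evenly(numgrps, &nr_masks);

	if (!masks)
		return -ENOMEM;
	for (i = 0; i < nr_masks; i++)	/* bounded by what was populated */
		consume_mask(&masks[i]);
	kfree(masks);
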
> diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c
> index 44a4eba80315cc098ecfa366ca1d88483641b12a..d2aefab5eb2b929877ced43f48b6268098484bd7 100644
> --- a/kernel/irq/affinity.c
> +++ b/kernel/irq/affinity.c
> @@ -70,20 +70,21 @@ irq_create_affinity_masks(unsigned int nvecs, struct irq_affinity *affd)
>  	 */
>  	for (i = 0, usedvecs = 0; i < affd->nr_sets; i++) {
>  		unsigned int this_vecs = affd->set_size[i];
> +		unsigned int nr_masks;
>  		int j;
> -		struct cpumask *result = group_cpus_evenly(this_vecs);
> +		struct cpumask *result = group_cpus_evenly(this_vecs, &nr_masks);
>  
>  		if (!result) {
>  			kfree(masks);
>  			return NULL;
>  		}
>  
> -		for (j = 0; j < this_vecs; j++)
> +		for (j = 0; j < nr_masks; j++)
>  			cpumask_copy(&masks[curvec + j].mask, &result[j]);
>  		kfree(result);
>  
> -		curvec += this_vecs;
> -		usedvecs += this_vecs;
> +		curvec += nr_masks;
> +		usedvecs += nr_masks;
>  	}
>  
>  	/* Fill out vectors at the end that don't need affinity */
> diff --git a/lib/group_cpus.c b/lib/group_cpus.c
> index ee272c4cefcc13907ce9f211f479615d2e3c9154..016c6578a07616959470b47121459a16a1bc99e5 100644
> --- a/lib/group_cpus.c
> +++ b/lib/group_cpus.c
> @@ -332,9 +332,11 @@ static int __group_cpus_evenly(unsigned int startgrp, unsigned int numgrps,
>  /**
>   * group_cpus_evenly - Group all CPUs evenly per NUMA/CPU locality
>   * @numgrps: number of groups
> + * @nummasks: number of initialized cpumasks
>   *
>   * Return: cpumask array if successful, NULL otherwise. And each element
> - * includes CPUs assigned to this group
> + * includes CPUs assigned to this group. nummasks contains the number
> + * of initialized masks which can be less than numgrps.
>   *
>   * Try to put close CPUs from viewpoint of CPU and NUMA locality into
>   * same group, and run two-stage grouping:
> @@ -344,7 +346,8 @@ static int __group_cpus_evenly(unsigned int startgrp, unsigned int numgrps,
>   * We guarantee in the resulted grouping that all CPUs are covered, and
>   * no same CPU is assigned to multiple groups
>   */
> -struct cpumask *group_cpus_evenly(unsigned int numgrps)
> +struct cpumask *group_cpus_evenly(unsigned int numgrps,
> +				  unsigned int *nummasks)
>  {
>  	unsigned int curgrp = 0, nr_present = 0, nr_others = 0;
>  	cpumask_var_t *node_to_cpumask;
> @@ -421,10 +424,12 @@ struct cpumask *group_cpus_evenly(unsigned int numgrps)
>  		kfree(masks);
>  		return NULL;
>  	}
> +	*nummasks = nr_present + nr_others;

The WARN_ON(nr_present + nr_others < numgrps) in lib/group_cpus.c can be
removed now: the caller learns the actual count via *nummasks, so the
shortfall no longer needs to trigger a warning.

Other than that and with Thomas's comment addressed:

Reviewed-by: Ming Lei <ming.lei@redhat.com>


Thanks,
Ming


Thread overview: 28+ messages
2025-04-24 18:19 [PATCH v6 0/9] blk: honor isolcpus configuration Daniel Wagner
2025-04-24 18:19 ` [PATCH v6 1/9] lib/group_cpus: let group_cpu_evenly return number of initialized masks Daniel Wagner
2025-04-28 12:37   ` Thomas Gleixner
2025-05-09  1:29   ` Ming Lei [this message]
2025-04-24 18:19 ` [PATCH v6 2/9] blk-mq: add number of queue calc helper Daniel Wagner
2025-05-09  1:43   ` Ming Lei
2025-04-24 18:19 ` [PATCH v6 3/9] nvme-pci: use block layer helpers to calculate num of queues Daniel Wagner
2025-05-09  1:47   ` Ming Lei
2025-05-14 16:12     ` Daniel Wagner
2025-04-24 18:19 ` [PATCH v6 4/9] scsi: " Daniel Wagner
2025-05-09  1:49   ` Ming Lei
2025-04-24 18:19 ` [PATCH v6 5/9] virtio: blk/scsi: " Daniel Wagner
2025-05-09  1:52   ` Ming Lei
2025-04-24 18:19 ` [PATCH v6 6/9] isolation: introduce io_queue isolcpus type Daniel Wagner
2025-04-25  6:26   ` Hannes Reinecke
2025-04-25  7:32     ` Daniel Wagner
2025-05-09  2:04       ` Ming Lei
2025-05-14 16:08         ` Daniel Wagner
2025-04-24 18:19 ` [PATCH v6 7/9] lib/group_cpus: honor housekeeping config when grouping CPUs Daniel Wagner
2025-05-09  2:22   ` Ming Lei
     [not found]   ` <cd1576ee-82a3-4899-b218-2e5c5334af6e@redhat.com>
2025-05-14 17:49     ` Daniel Wagner
2025-04-24 18:19 ` [PATCH v6 8/9] blk-mq: use hk cpus only when isolcpus=io_queue is enabled Daniel Wagner
2025-05-09  2:38   ` Ming Lei
2025-05-15  8:36     ` Daniel Wagner
2025-04-24 18:19 ` [PATCH v6 9/9] blk-mq: prevent offlining hk CPU with associated online isolated CPUs Daniel Wagner
2025-04-25  6:28   ` Hannes Reinecke
2025-05-09  2:54   ` Ming Lei
2025-05-06  3:17 ` [PATCH v6 0/9] blk: honor isolcpus configuration Ming Lei
