From: Christoph Hellwig <hch@lst.de>
To: Jens Axboe <axboe@kernel.dk>
Cc: Richard Weinberger <richard@nod.at>,
Anton Ivanov <anton.ivanov@cambridgegreys.com>,
Johannes Berg <johannes@sipsolutions.net>,
Justin Sanders <justin@coraid.com>,
Denis Efremov <efremov@linux.com>,
Josef Bacik <josef@toxicpanda.com>,
Geoff Levand <geoff@infradead.org>,
Ilya Dryomov <idryomov@gmail.com>,
"Md. Haris Iqbal" <haris.iqbal@ionos.com>,
Jack Wang <jinpu.wang@ionos.com>, Ming Lei <ming.lei@redhat.com>,
Maxim Levitsky <maximlevitsky@gmail.com>,
Alex Dubov <oakad@yahoo.com>,
Ulf Hansson <ulf.hansson@linaro.org>,
Miquel Raynal <miquel.raynal@bootlin.com>,
Vignesh Raghavendra <vigneshr@ti.com>,
Vineeth Vijayan <vneethv@linux.ibm.com>,
linux-block@vger.kernel.org, nbd@other.debian.org,
ceph-devel@vger.kernel.org, linux-mmc@vger.kernel.org,
linux-mtd@lists.infradead.org, linux-s390@vger.kernel.org
Subject: [PATCH 17/17] mmc: pass queue_limits to blk_mq_alloc_disk
Date: Thu, 15 Feb 2024 08:03:00 +0100 [thread overview]
Message-ID: <20240215070300.2200308-18-hch@lst.de> (raw)
In-Reply-To: <20240215070300.2200308-1-hch@lst.de>
Pass the queue limit set at initialization time directly to
blk_mq_alloc_disk instead of updating it right after the allocation.
This requires refactoring the code a bit so that what was mmc_setup_queue
before also allocates the gendisk now and actually sets all limits.
Signed-off-by: Christoph Hellwig <hch@lst.de>
---
drivers/mmc/core/queue.c | 97 +++++++++++++++++++++-------------------
1 file changed, 52 insertions(+), 45 deletions(-)
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index 67ad186d132a69..2ae60d208cdf1e 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -174,8 +174,8 @@ static struct scatterlist *mmc_alloc_sg(unsigned short sg_len, gfp_t gfp)
return sg;
}
-static void mmc_queue_setup_discard(struct request_queue *q,
- struct mmc_card *card)
+static void mmc_queue_setup_discard(struct mmc_card *card,
+ struct queue_limits *lim)
{
unsigned max_discard;
@@ -183,15 +183,17 @@ static void mmc_queue_setup_discard(struct request_queue *q,
if (!max_discard)
return;
- blk_queue_max_discard_sectors(q, max_discard);
- q->limits.discard_granularity = card->pref_erase << 9;
- /* granularity must not be greater than max. discard */
- if (card->pref_erase > max_discard)
- q->limits.discard_granularity = SECTOR_SIZE;
+ lim->max_hw_discard_sectors = max_discard;
if (mmc_can_secure_erase_trim(card))
- blk_queue_max_secure_erase_sectors(q, max_discard);
+ lim->max_secure_erase_sectors = max_discard;
if (mmc_can_trim(card) && card->erased_byte == 0)
- blk_queue_max_write_zeroes_sectors(q, max_discard);
+ lim->max_write_zeroes_sectors = max_discard;
+
+ /* granularity must not be greater than max. discard */
+ if (card->pref_erase > max_discard)
+ lim->discard_granularity = SECTOR_SIZE;
+ else
+ lim->discard_granularity = card->pref_erase << 9;
}
static unsigned short mmc_get_max_segments(struct mmc_host *host)
@@ -341,40 +343,53 @@ static const struct blk_mq_ops mmc_mq_ops = {
.timeout = mmc_mq_timed_out,
};
-static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
+static struct gendisk *mmc_alloc_disk(struct mmc_queue *mq,
+ struct mmc_card *card)
{
struct mmc_host *host = card->host;
- unsigned block_size = 512;
+ struct queue_limits lim = { };
+ struct gendisk *disk;
- blk_queue_flag_set(QUEUE_FLAG_NONROT, mq->queue);
- blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, mq->queue);
if (mmc_can_erase(card))
- mmc_queue_setup_discard(mq->queue, card);
+ mmc_queue_setup_discard(card, &lim);
if (!mmc_dev(host)->dma_mask || !*mmc_dev(host)->dma_mask)
- blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_HIGH);
- blk_queue_max_hw_sectors(mq->queue,
- min(host->max_blk_count, host->max_req_size / 512));
- if (host->can_dma_map_merge)
- WARN(!blk_queue_can_use_dma_map_merging(mq->queue,
- mmc_dev(host)),
- "merging was advertised but not possible");
- blk_queue_max_segments(mq->queue, mmc_get_max_segments(host));
-
- if (mmc_card_mmc(card) && card->ext_csd.data_sector_size) {
- block_size = card->ext_csd.data_sector_size;
- WARN_ON(block_size != 512 && block_size != 4096);
- }
+ lim.bounce = BLK_BOUNCE_HIGH;
+
+ lim.max_hw_sectors = min(host->max_blk_count, host->max_req_size / 512);
+
+ if (mmc_card_mmc(card) && card->ext_csd.data_sector_size)
+ lim.logical_block_size = card->ext_csd.data_sector_size;
+ else
+ lim.logical_block_size = 512;
+
+ WARN_ON_ONCE(lim.logical_block_size != 512 &&
+ lim.logical_block_size != 4096);
- blk_queue_logical_block_size(mq->queue, block_size);
/*
- * After blk_queue_can_use_dma_map_merging() was called with succeed,
- * since it calls blk_queue_virt_boundary(), the mmc should not call
- * both blk_queue_max_segment_size().
+ * Setting a virt_boundary implicitly sets a max_segment_size, so try
+ * to set the hardware one here.
*/
- if (!host->can_dma_map_merge)
- blk_queue_max_segment_size(mq->queue,
- round_down(host->max_seg_size, block_size));
+ if (host->can_dma_map_merge) {
+ lim.virt_boundary_mask = dma_get_merge_boundary(mmc_dev(host));
+ lim.max_segments = MMC_DMA_MAP_MERGE_SEGMENTS;
+ } else {
+ lim.max_segment_size =
+ round_down(host->max_seg_size, lim.logical_block_size);
+ lim.max_segments = host->max_segs;
+ }
+
+ disk = blk_mq_alloc_disk(&mq->tag_set, &lim, mq);
+ if (IS_ERR(disk))
+ return disk;
+ mq->queue = disk->queue;
+
+ if (mmc_host_is_spi(host) && host->use_spi_crc)
+ blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, mq->queue);
+ blk_queue_rq_timeout(mq->queue, 60 * HZ);
+
+ blk_queue_flag_set(QUEUE_FLAG_NONROT, mq->queue);
+ blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, mq->queue);
dma_set_max_seg_size(mmc_dev(host), queue_max_segment_size(mq->queue));
@@ -386,6 +401,7 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
init_waitqueue_head(&mq->wait);
mmc_crypto_setup_queue(mq->queue, host);
+ return disk;
}
static inline bool mmc_merge_capable(struct mmc_host *host)
@@ -447,18 +463,9 @@ struct gendisk *mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card)
return ERR_PTR(ret);
- disk = blk_mq_alloc_disk(&mq->tag_set, NULL, mq);
- if (IS_ERR(disk)) {
+ disk = mmc_alloc_disk(mq, card);
+ if (IS_ERR(disk))
blk_mq_free_tag_set(&mq->tag_set);
- return disk;
- }
- mq->queue = disk->queue;
-
- if (mmc_host_is_spi(host) && host->use_spi_crc)
- blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, mq->queue);
- blk_queue_rq_timeout(mq->queue, 60 * HZ);
-
- mmc_setup_queue(mq, card);
return disk;
}
--
2.39.2
______________________________________________________
Linux MTD discussion mailing list
http://lists.infradead.org/mailman/listinfo/linux-mtd/
next prev parent reply other threads:[~2024-02-15 8:16 UTC|newest]
Thread overview: 32+ messages / expand[flat|nested] mbox.gz Atom feed top
2024-02-15 7:02 pass queue_limits to blk_mq_alloc_disk for simple drivers Christoph Hellwig
2024-02-15 7:02 ` [PATCH 01/17] ubd: pass queue_limits to blk_mq_alloc_disk Christoph Hellwig
2024-02-15 7:02 ` [PATCH 02/17] aoe: " Christoph Hellwig
2024-02-15 7:02 ` [PATCH 03/17] floppy: " Christoph Hellwig
2024-02-15 7:02 ` [PATCH 04/17] mtip: " Christoph Hellwig
2024-02-15 7:02 ` [PATCH 05/17] nbd: " Christoph Hellwig
2024-02-15 7:02 ` [PATCH 06/17] ps3disk: " Christoph Hellwig
2024-02-15 7:02 ` [PATCH 07/17] rbd: " Christoph Hellwig
2024-02-20 11:18 ` Ilya Dryomov
2024-02-15 7:02 ` [PATCH 08/17] rnbd-clt: " Christoph Hellwig
2024-02-16 7:49 ` Jinpu Wang
2024-02-15 7:02 ` [PATCH 09/17] sunvdc: " Christoph Hellwig
2024-02-15 7:02 ` [PATCH 10/17] gdrom: " Christoph Hellwig
2024-02-15 7:02 ` [PATCH 11/17] ms_block: " Christoph Hellwig
2024-02-15 7:02 ` [PATCH 12/17] mspro_block: " Christoph Hellwig
2024-02-15 7:02 ` [PATCH 13/17] mtd_blkdevs: " Christoph Hellwig
2024-02-15 7:02 ` [PATCH 14/17] ubiblock: " Christoph Hellwig
2024-02-18 2:33 ` Zhihao Cheng
2024-02-15 7:02 ` [PATCH 15/17] scm_blk: " Christoph Hellwig
2024-02-15 7:02 ` [PATCH 16/17] ublk: " Christoph Hellwig
2024-02-15 7:03 ` Christoph Hellwig [this message]
2024-02-15 16:40 ` [PATCH 17/17] mmc: " Ulf Hansson
2024-02-15 16:49 ` Christoph Hellwig
2024-02-15 16:53 ` Ulf Hansson
2024-02-20 22:01 ` Geert Uytterhoeven
2024-02-20 22:14 ` Geert Uytterhoeven
2024-02-21 5:44 ` Christoph Hellwig
2024-02-21 9:37 ` Geert Uytterhoeven
2024-06-27 9:43 ` Jon Hunter
2024-06-27 9:49 ` Christoph Hellwig
2024-06-27 9:58 ` Jon Hunter
2024-02-20 13:22 ` pass queue_limits to blk_mq_alloc_disk for simple drivers Jens Axboe
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20240215070300.2200308-18-hch@lst.de \
--to=hch@lst.de \
--cc=anton.ivanov@cambridgegreys.com \
--cc=axboe@kernel.dk \
--cc=ceph-devel@vger.kernel.org \
--cc=efremov@linux.com \
--cc=geoff@infradead.org \
--cc=haris.iqbal@ionos.com \
--cc=idryomov@gmail.com \
--cc=jinpu.wang@ionos.com \
--cc=johannes@sipsolutions.net \
--cc=josef@toxicpanda.com \
--cc=justin@coraid.com \
--cc=linux-block@vger.kernel.org \
--cc=linux-mmc@vger.kernel.org \
--cc=linux-mtd@lists.infradead.org \
--cc=linux-s390@vger.kernel.org \
--cc=maximlevitsky@gmail.com \
--cc=ming.lei@redhat.com \
--cc=miquel.raynal@bootlin.com \
--cc=nbd@other.debian.org \
--cc=oakad@yahoo.com \
--cc=richard@nod.at \
--cc=ulf.hansson@linaro.org \
--cc=vigneshr@ti.com \
--cc=vneethv@linux.ibm.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox