From: Christoph Hellwig <hch@lst.de>
To: Jens Axboe <axboe@kernel.dk>, Eric Biggers <ebiggers@kernel.org>
Cc: linux-block@vger.kernel.org, linux-fsdevel@vger.kernel.org,
	linux-fscrypt@vger.kernel.org
Subject: [PATCH 7/9] blk-crypto: use mempool_alloc_bulk for encrypted bio page allocation
Date: Wed, 17 Dec 2025 07:06:50 +0100
Message-ID: <20251217060740.923397-8-hch@lst.de>
In-Reply-To: <20251217060740.923397-1-hch@lst.de>

Calling mempool_alloc in a loop is not safe unless the maximum allocation
size times the maximum number of threads using it is less than the
minimum pool size.  Use the new mempool_alloc_bulk helper to allocate
all missing elements in one pass to remove this deadlock risk.  This
also means that non-pool allocations now use alloc_pages_bulk, which can
be significantly faster than a loop over individual page allocations.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
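A minimal sketch of the allocation pattern this patch switches to, kept
separate from the bio-specific details.  mempool_alloc_bulk and
mempool_free_bulk are the helpers added earlier in this series, used here
with the same semantics as in the diff below; example_pool and the two
function names are purely illustrative:

	#include <linux/gfp.h>
	#include <linux/mempool.h>
	#include <linux/mm.h>
	#include <linux/sched/mm.h>
	#include <linux/string.h>

	/*
	 * Allocate nr pages, preferring the page allocator's bulk interface
	 * and taking whatever is still missing from the mempool in a single
	 * call (error handling elided for brevity).
	 */
	static void example_alloc_pages(mempool_t *example_pool,
			struct page **pages, unsigned int nr)
	{
		unsigned int memflags = memalloc_noio_save();
		unsigned int nr_allocated;

		/* alloc_pages_bulk only fills NULL slots, so start clean */
		memset(pages, 0, sizeof(*pages) * nr);
		nr_allocated = alloc_pages_bulk(GFP_KERNEL, nr, pages);

		/*
		 * Take all still-missing pages from the pool in one pass
		 * instead of calling mempool_alloc once per page, which is
		 * the pattern that can deadlock.
		 */
		if (nr_allocated < nr)
			mempool_alloc_bulk(example_pool, (void **)pages, nr,
					nr_allocated);
		memalloc_noio_restore(memflags);
	}

	/* Give the pages back: refill the pool first, then the allocator. */
	static void example_free_pages(mempool_t *example_pool,
			struct page **pages, unsigned int nr)
	{
		unsigned int freed;

		/*
		 * mempool_free_bulk consumes entries from the front of the
		 * array and returns how many it kept for the pool; the rest
		 * are released back to the page allocator.
		 */
		freed = mempool_free_bulk(example_pool, (void **)pages, nr);
		if (freed < nr)
			release_pages(pages + freed, nr - freed);
	}

The memalloc_noio scope stands in for the GFP_NOIO flag of the per-page
mempool_alloc calls that the diff removes.
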
 block/blk-crypto-fallback.c | 70 ++++++++++++++++++++++++++++---------
 1 file changed, 53 insertions(+), 17 deletions(-)
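
The diff also parks the temporary page-pointer array in the tail of the
bi_io_vec memory so that no separate allocation is needed.  Below is a
stand-alone user-space sketch of why filling the bvecs from index 0 never
clobbers a page pointer before it has been read; struct fake_bvec,
struct fake_page and NR are stand-ins for the real kernel types and sizes,
and the type punning relies on -fno-strict-aliasing just as the kernel
build does:

	#include <assert.h>
	#include <stdio.h>

	struct fake_page { int id; };

	/* stand-in for struct bio_vec: one page pointer plus len/offset */
	struct fake_bvec {
		struct fake_page *bv_page;
		unsigned int bv_len;
		unsigned int bv_offset;
	};

	#define PAGE_PTRS_PER_BVEC \
		(sizeof(struct fake_bvec) / sizeof(struct fake_page *))

	int main(void)
	{
		enum { NR = 8 };
		struct fake_bvec vecs[NR];
		struct fake_page backing[NR];
		struct fake_page **pages = (struct fake_page **)vecs;
		unsigned int i;

		/* shift the page array up by NR * (PAGE_PTRS_PER_BVEC - 1) slots */
		pages += NR * (PAGE_PTRS_PER_BVEC - 1);
		for (i = 0; i < NR; i++)
			pages[i] = &backing[i];

		/* fill the bvecs from the front, reading each pointer first */
		for (i = 0; i < NR; i++) {
			struct fake_page *p = pages[i];

			vecs[i].bv_page = p;
			vecs[i].bv_len = 4096;
			vecs[i].bv_offset = 0;
		}

		/* every bvec ends up with the page that was meant for it */
		for (i = 0; i < NR; i++)
			assert(vecs[i].bv_page == &backing[i]);
		printf("no page pointer was overwritten before it was read\n");
		return 0;
	}

This mirrors the static_assert(PAGE_PTRS_PER_BVEC > 1) and the pointer
arithmetic in blk_crypto_alloc_enc_bio in the diff below.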

diff --git a/block/blk-crypto-fallback.c b/block/blk-crypto-fallback.c
index 58b35c5d6949..1db4aa4d812a 100644
--- a/block/blk-crypto-fallback.c
+++ b/block/blk-crypto-fallback.c
@@ -22,7 +22,7 @@
 #include "blk-cgroup.h"
 #include "blk-crypto-internal.h"
 
-static unsigned int num_prealloc_bounce_pg = 32;
+static unsigned int num_prealloc_bounce_pg = BIO_MAX_VECS;
 module_param(num_prealloc_bounce_pg, uint, 0);
 MODULE_PARM_DESC(num_prealloc_bounce_pg,
 		 "Number of preallocated bounce pages for the blk-crypto crypto API fallback");
@@ -144,11 +144,21 @@ static const struct blk_crypto_ll_ops blk_crypto_fallback_ll_ops = {
 static void blk_crypto_fallback_encrypt_endio(struct bio *enc_bio)
 {
 	struct bio *src_bio = enc_bio->bi_private;
-	int i;
+	struct page **pages = (struct page **)enc_bio->bi_io_vec;
+	struct bio_vec *bv;
+	unsigned int i;
 
-	for (i = 0; i < enc_bio->bi_vcnt; i++)
-		mempool_free(enc_bio->bi_io_vec[i].bv_page,
-			     blk_crypto_bounce_page_pool);
+	/*
+	 * Use the same trick as the alloc side to avoid the need for an extra
+	 * pages array.
+	 */
+	bio_for_each_bvec_all(bv, enc_bio, i)
+		pages[i] = bv->bv_page;
+
+	i = mempool_free_bulk(blk_crypto_bounce_page_pool, (void **)pages,
+			enc_bio->bi_vcnt);
+	if (i < enc_bio->bi_vcnt)
+		release_pages(pages + i, enc_bio->bi_vcnt - i);
 
 	if (enc_bio->bi_status)
 		cmpxchg(&src_bio->bi_status, 0, enc_bio->bi_status);
@@ -157,9 +167,14 @@ static void blk_crypto_fallback_encrypt_endio(struct bio *enc_bio)
 	bio_endio(src_bio);
 }
 
+#define PAGE_PTRS_PER_BVEC     (sizeof(struct bio_vec) / sizeof(struct page *))
+
 static struct bio *blk_crypto_alloc_enc_bio(struct bio *bio_src,
-		unsigned int nr_segs)
+		unsigned int nr_segs, struct page ***pages_ret)
 {
+	unsigned int memflags = memalloc_noio_save();
+	unsigned int nr_allocated;
+	struct page **pages;
 	struct bio *bio;
 
 	nr_segs = min(nr_segs, BIO_MAX_VECS);
@@ -174,6 +189,30 @@ static struct bio *blk_crypto_alloc_enc_bio(struct bio *bio_src,
 	bio->bi_write_stream	= bio_src->bi_write_stream;
 	bio->bi_iter.bi_sector	= bio_src->bi_iter.bi_sector;
 	bio_clone_blkg_association(bio, bio_src);
+
+	/*
+	 * Move the page array as far up in the memory allocated for the bio
+	 * vecs as possible, so that the biovecs can be filled from the start
+	 * without overwriting the temporary page array.
+	 */
+	static_assert(PAGE_PTRS_PER_BVEC > 1);
+	pages = (struct page **)bio->bi_io_vec;
+	pages += nr_segs * (PAGE_PTRS_PER_BVEC - 1);
+
+	/*
+	 * Try a bulk allocation first.  This can leave arbitrary entries in
+	 * the array unallocated; mempool_alloc_bulk below then fills those in.
+	 *
+	 * Note: alloc_pages_bulk needs the array to be zeroed, as it assumes
+	 * any non-zero slot already contains a valid allocation.
+	 */
+	memset(pages, 0, sizeof(struct page *) * nr_segs);
+	nr_allocated = alloc_pages_bulk(GFP_KERNEL, nr_segs, pages);
+	if (nr_allocated < nr_segs)
+		mempool_alloc_bulk(blk_crypto_bounce_page_pool, (void **)pages,
+				nr_segs, nr_allocated);
+	memalloc_noio_restore(memflags);
+	*pages_ret = pages;
 	return bio;
 }
 
@@ -210,6 +249,7 @@ static void __blk_crypto_fallback_encrypt_bio(struct bio *src_bio,
 	u64 curr_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
 	struct scatterlist src, dst;
 	union blk_crypto_iv iv;
+	struct page **enc_pages;
 	unsigned int enc_idx;
 	struct bio *enc_bio;
 	unsigned int j;
@@ -227,15 +267,13 @@ static void __blk_crypto_fallback_encrypt_bio(struct bio *src_bio,
 
 	/* Encrypt each page in the source bio */
 new_bio:
-	enc_bio = blk_crypto_alloc_enc_bio(src_bio, nr_segs);
+	enc_bio = blk_crypto_alloc_enc_bio(src_bio, nr_segs, &enc_pages);
 	enc_idx = 0;
 	for (;;) {
 		struct bio_vec src_bv =
 			bio_iter_iovec(src_bio, src_bio->bi_iter);
-		struct page *enc_page;
+		struct page *enc_page = enc_pages[enc_idx];
 
-		enc_page = mempool_alloc(blk_crypto_bounce_page_pool,
-				GFP_NOIO);
 		__bio_add_page(enc_bio, enc_page, src_bv.bv_len,
 				src_bv.bv_offset);
 
@@ -246,10 +284,8 @@ static void __blk_crypto_fallback_encrypt_bio(struct bio *src_bio,
 		/* Encrypt each data unit in this page */
 		for (j = 0; j < src_bv.bv_len; j += data_unit_size) {
 			blk_crypto_dun_to_iv(curr_dun, &iv);
-			if (crypto_skcipher_encrypt(ciph_req)) {
-				enc_idx++;
-				goto out_free_bounce_pages;
-			}
+			if (crypto_skcipher_encrypt(ciph_req))
+				goto out_free_enc_bio;
 			bio_crypt_dun_increment(curr_dun, 1);
 			src.offset += data_unit_size;
 			dst.offset += data_unit_size;
@@ -278,9 +314,9 @@ static void __blk_crypto_fallback_encrypt_bio(struct bio *src_bio,
 	submit_bio(enc_bio);
 	return;
 
-out_free_bounce_pages:
-	while (enc_idx > 0)
-		mempool_free(enc_bio->bi_io_vec[--enc_idx].bv_page,
+out_free_enc_bio:
+	for (enc_idx = 0; enc_idx < enc_bio->bi_max_vecs; enc_idx++)
+		mempool_free(enc_bio->bi_io_vec[enc_idx].bv_page,
 			     blk_crypto_bounce_page_pool);
 	bio_put(enc_bio);
 	cmpxchg(&src_bio->bi_status, 0, BLK_STS_IOERR);
-- 
2.47.3

