From: Christoph Hellwig <hch@lst.de>
To: Jens Axboe <axboe@kernel.dk>, Eric Biggers <ebiggers@kernel.org>,
Vlastimil Babka <vbabka@suse.cz>,
Andrew Morton <akpm@linux-foundation.org>
Cc: Christoph Lameter <cl@gentwo.org>,
David Rientjes <rientjes@google.com>,
Roman Gushchin <roman.gushchin@linux.dev>,
Harry Yoo <harry.yoo@oracle.com>,
linux-block@vger.kernel.org, linux-fsdevel@vger.kernel.org,
linux-fscrypt@vger.kernel.org, linux-mm@kvack.org
Subject: [PATCH 6/9] blk-crypto: optimize bio splitting in blk_crypto_fallback_encrypt_bio
Date: Fri, 31 Oct 2025 10:34:36 +0100
Message-ID: <20251031093517.1603379-7-hch@lst.de>
In-Reply-To: <20251031093517.1603379-1-hch@lst.de>
The current code in blk_crypto_fallback_encrypt_bio is inefficient and
prone to deadlocks under memory pressure: it first walks the passed-in
plaintext bio to see how much of it can fit into a single encrypted bio
using up to BIO_MAX_VECS PAGE_SIZE segments, then allocates a plaintext
clone of that size, only to allocate yet another bio for the ciphertext
later.  While the plaintext clone is allocated from a bioset and is thus
safe against allocation deadlocks, the ciphertext one uses bio_kmalloc,
which is a no-go in the file system I/O path.
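
For comparison, the allocation side of the problem looks roughly like
this (a minimal sketch; bdev, opf and nr_segs stand in for the real
arguments):

	/*
	 * Old: kmalloc-backed bio.  The allocation can fail under
	 * memory pressure and has no mempool to fall back to.
	 */
	bio = bio_kmalloc(nr_segs, GFP_NOIO);
	if (!bio)
		return NULL;
	bio_init_inline(bio, bdev, nr_segs, opf);

	/*
	 * New: bioset-backed bio.  With __GFP_DIRECT_RECLAIM set (as
	 * it is in GFP_NOIO) the allocation waits on the mempool and
	 * is guaranteed to make forward progress.
	 */
	bio = bio_alloc_bioset(bdev, nr_segs, opf, GFP_NOIO,
			       &crypto_bio_split);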
Switch blk_crypto_fallback_encrypt_bio to walk the source plaintext bio
while consuming its bi_iter without cloning it, and instead allocate a
ciphertext bio at the beginning and whenever we fill up the previous
one.  The existing bio_set for the plaintext clones is reused for the
ciphertext bios to remove the deadlock risk.
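
Condensed from the patch below, the shape of the new loop is (a sketch
only; the bounce-page allocation, scatterlist setup and the actual
encryption are elided):

	nr_segs = bio_segments(src_bio);
	for (;;) {
		struct bio_vec bv =
			bio_iter_iovec(src_bio, src_bio->bi_iter);

		if (!enc_bio)
			enc_bio = blk_crypto_alloc_enc_bio(src_bio,
					min(nr_segs, BIO_MAX_VECS));

		/* ... encrypt bv into a bounce page added to enc_bio ... */

		bio_advance_iter_single(src_bio, &src_bio->bi_iter,
				bv.bv_len);
		if (!src_bio->bi_iter.bi_size)
			break;

		if (++enc_idx == enc_bio->bi_max_vecs) {
			/*
			 * Each submitted bio ends up calling bio_endio
			 * on src_bio, so account for it in the
			 * remaining count.
			 */
			bio_inc_remaining(src_bio);
			submit_bio(enc_bio);
			enc_bio = NULL;
			enc_idx = 0;
		}
		nr_segs--;
	}
	/* the final enc_bio is returned to the caller for submission */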
Signed-off-by: Christoph Hellwig <hch@lst.de>
---
block/blk-crypto-fallback.c | 162 ++++++++++++++----------------------
1 file changed, 63 insertions(+), 99 deletions(-)
diff --git a/block/blk-crypto-fallback.c b/block/blk-crypto-fallback.c
index 86b27f96051a..1f58010fb437 100644
--- a/block/blk-crypto-fallback.c
+++ b/block/blk-crypto-fallback.c
@@ -152,35 +152,26 @@ static void blk_crypto_fallback_encrypt_endio(struct bio *enc_bio)
src_bio->bi_status = enc_bio->bi_status;
- bio_uninit(enc_bio);
- kfree(enc_bio);
+ bio_put(enc_bio);
bio_endio(src_bio);
}
-static struct bio *blk_crypto_fallback_clone_bio(struct bio *bio_src)
+static struct bio *blk_crypto_alloc_enc_bio(struct bio *bio_src,
+ unsigned int nr_segs)
{
- unsigned int nr_segs = bio_segments(bio_src);
- struct bvec_iter iter;
- struct bio_vec bv;
struct bio *bio;
- bio = bio_kmalloc(nr_segs, GFP_NOIO);
- if (!bio)
- return NULL;
- bio_init_inline(bio, bio_src->bi_bdev, nr_segs, bio_src->bi_opf);
+ bio = bio_alloc_bioset(bio_src->bi_bdev, nr_segs, bio_src->bi_opf,
+ GFP_NOIO, &crypto_bio_split);
if (bio_flagged(bio_src, BIO_REMAPPED))
bio_set_flag(bio, BIO_REMAPPED);
+ bio->bi_private = bio_src;
+ bio->bi_end_io = blk_crypto_fallback_encrypt_endio;
bio->bi_ioprio = bio_src->bi_ioprio;
bio->bi_write_hint = bio_src->bi_write_hint;
bio->bi_write_stream = bio_src->bi_write_stream;
bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
- bio->bi_iter.bi_size = bio_src->bi_iter.bi_size;
-
- bio_for_each_segment(bv, bio_src, iter)
- bio->bi_io_vec[bio->bi_vcnt++] = bv;
-
bio_clone_blkg_association(bio, bio_src);
-
return bio;
}
@@ -208,32 +199,6 @@ blk_crypto_fallback_alloc_cipher_req(struct blk_crypto_keyslot *slot,
return true;
}
-static bool blk_crypto_fallback_split_bio_if_needed(struct bio **bio_ptr)
-{
- struct bio *bio = *bio_ptr;
- unsigned int i = 0;
- unsigned int num_sectors = 0;
- struct bio_vec bv;
- struct bvec_iter iter;
-
- bio_for_each_segment(bv, bio, iter) {
- num_sectors += bv.bv_len >> SECTOR_SHIFT;
- if (++i == BIO_MAX_VECS)
- break;
- }
-
- if (num_sectors < bio_sectors(bio)) {
- bio = bio_submit_split_bioset(bio, num_sectors,
- &crypto_bio_split);
- if (!bio)
- return false;
-
- *bio_ptr = bio;
- }
-
- return true;
-}
-
union blk_crypto_iv {
__le64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
u8 bytes[BLK_CRYPTO_MAX_IV_SIZE];
@@ -257,34 +222,22 @@ static void blk_crypto_dun_to_iv(const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
*/
static bool blk_crypto_fallback_encrypt_bio(struct bio **bio_ptr)
{
- struct bio *src_bio, *enc_bio;
- struct bio_crypt_ctx *bc;
- struct blk_crypto_keyslot *slot;
- int data_unit_size;
+ struct bio *src_bio = *bio_ptr;
+ struct bio_crypt_ctx *bc = src_bio->bi_crypt_context;
+ int data_unit_size = bc->bc_key->crypto_cfg.data_unit_size;
struct skcipher_request *ciph_req = NULL;
+ struct blk_crypto_keyslot *slot;
DECLARE_CRYPTO_WAIT(wait);
u64 curr_dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
struct scatterlist src, dst;
union blk_crypto_iv iv;
- unsigned int i, j;
+ struct bio *enc_bio = NULL;
+ unsigned int nr_segs;
+ unsigned int enc_idx = 0;
+ unsigned int j;
bool ret = false;
blk_status_t blk_st;
- /* Split the bio if it's too big for single page bvec */
- if (!blk_crypto_fallback_split_bio_if_needed(bio_ptr))
- return false;
-
- src_bio = *bio_ptr;
- bc = src_bio->bi_crypt_context;
- data_unit_size = bc->bc_key->crypto_cfg.data_unit_size;
-
- /* Allocate bounce bio for encryption */
- enc_bio = blk_crypto_fallback_clone_bio(src_bio);
- if (!enc_bio) {
- src_bio->bi_status = BLK_STS_RESOURCE;
- return false;
- }
-
/*
* Get a blk-crypto-fallback keyslot that contains a crypto_skcipher for
* this bio's algorithm and key.
@@ -293,7 +246,7 @@ static bool blk_crypto_fallback_encrypt_bio(struct bio **bio_ptr)
bc->bc_key, &slot);
if (blk_st != BLK_STS_OK) {
src_bio->bi_status = blk_st;
- goto out_put_enc_bio;
+ return false;
}
/* and then allocate an skcipher_request for it */
@@ -309,61 +262,72 @@ static bool blk_crypto_fallback_encrypt_bio(struct bio **bio_ptr)
skcipher_request_set_crypt(ciph_req, &src, &dst, data_unit_size,
iv.bytes);
- /* Encrypt each page in the bounce bio */
- for (i = 0; i < enc_bio->bi_vcnt; i++) {
- struct bio_vec *enc_bvec = &enc_bio->bi_io_vec[i];
- struct page *plaintext_page = enc_bvec->bv_page;
- struct page *ciphertext_page =
- mempool_alloc(blk_crypto_bounce_page_pool, GFP_NOIO);
-
- enc_bvec->bv_page = ciphertext_page;
+	/* Encrypt each page in the original bio */
+ nr_segs = bio_segments(src_bio);
+ for (;;) {
+ struct bio_vec src_bv =
+ bio_iter_iovec(src_bio, src_bio->bi_iter);
+ struct page *enc_page;
- if (!ciphertext_page) {
- src_bio->bi_status = BLK_STS_RESOURCE;
- goto out_free_bounce_pages;
+ if (!enc_bio) {
+ enc_bio = blk_crypto_alloc_enc_bio(src_bio,
+ min(nr_segs, BIO_MAX_VECS));
}
- sg_set_page(&src, plaintext_page, data_unit_size,
- enc_bvec->bv_offset);
- sg_set_page(&dst, ciphertext_page, data_unit_size,
- enc_bvec->bv_offset);
+ enc_page = mempool_alloc(blk_crypto_bounce_page_pool,
+ GFP_NOIO);
+ __bio_add_page(enc_bio, enc_page, src_bv.bv_len,
+ src_bv.bv_offset);
+
+ sg_set_page(&src, src_bv.bv_page, data_unit_size,
+ src_bv.bv_offset);
+ sg_set_page(&dst, enc_page, data_unit_size, src_bv.bv_offset);
/* Encrypt each data unit in this page */
- for (j = 0; j < enc_bvec->bv_len; j += data_unit_size) {
+ for (j = 0; j < src_bv.bv_len; j += data_unit_size) {
blk_crypto_dun_to_iv(curr_dun, &iv);
if (crypto_wait_req(crypto_skcipher_encrypt(ciph_req),
- &wait)) {
- i++;
- src_bio->bi_status = BLK_STS_IOERR;
- goto out_free_bounce_pages;
- }
+ &wait))
+ goto out_ioerror;
bio_crypt_dun_increment(curr_dun, 1);
src.offset += data_unit_size;
dst.offset += data_unit_size;
}
+
+ bio_advance_iter_single(src_bio, &src_bio->bi_iter,
+ src_bv.bv_len);
+ if (!src_bio->bi_iter.bi_size)
+ break;
+
+ if (++enc_idx == enc_bio->bi_max_vecs) {
+ /*
+ * Each encrypted bio will call bio_endio in the
+ * completion handler, so ensure the remaining count
+ * matches the number of submitted bios.
+ */
+ bio_inc_remaining(src_bio);
+ submit_bio(enc_bio);
+ enc_bio = NULL;
+ enc_idx = 0;
+ }
+ nr_segs--;
}
- enc_bio->bi_private = src_bio;
- enc_bio->bi_end_io = blk_crypto_fallback_encrypt_endio;
*bio_ptr = enc_bio;
ret = true;
-
- enc_bio = NULL;
- goto out_free_ciph_req;
-
-out_free_bounce_pages:
- while (i > 0)
- mempool_free(enc_bio->bi_io_vec[--i].bv_page,
- blk_crypto_bounce_page_pool);
out_free_ciph_req:
skcipher_request_free(ciph_req);
out_release_keyslot:
blk_crypto_put_keyslot(slot);
-out_put_enc_bio:
- if (enc_bio)
- bio_uninit(enc_bio);
- kfree(enc_bio);
return ret;
+
+out_ioerror:
+	while (enc_bio->bi_vcnt > 0)
+		mempool_free(enc_bio->bi_io_vec[--enc_bio->bi_vcnt].bv_page,
+			     blk_crypto_bounce_page_pool);
+ bio_put(enc_bio);
+ src_bio->bi_status = BLK_STS_IOERR;
+ goto out_free_ciph_req;
}
/*
@@ -537,7 +501,7 @@ static int blk_crypto_fallback_init(void)
get_random_bytes(blank_key, sizeof(blank_key));
- err = bioset_init(&crypto_bio_split, 64, 0, 0);
+ err = bioset_init(&crypto_bio_split, 64, 0, BIOSET_NEED_BVECS);
if (err)
goto out;
--
2.47.3