linux-block.vger.kernel.org archive mirror
* [PATCH 0/3] Fix bio splitting in the crypto fallback code
@ 2025-06-25 23:42 Bart Van Assche
  2025-06-25 23:42 ` [PATCH 1/3] block: Split blk_crypto_fallback_split_bio_if_needed() Bart Van Assche
                   ` (2 more replies)
  0 siblings, 3 replies; 7+ messages in thread
From: Bart Van Assche @ 2025-06-25 23:42 UTC (permalink / raw)
  To: Jens Axboe
  Cc: linux-block, Eric Biggers, Keith Busch, Christoph Hellwig,
	Bart Van Assche

Hi Jens,

When using the crypto fallback code, large bios are split twice: once by
bio_split_to_limits() and once by the crypto fallback code. This double
splitting causes bios not to be submitted in LBA order and hence triggers
write errors for zoned block devices. This patch series fixes this by
splitting bios only once. Please consider this patch series for the next
merge window.
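
To illustrate the reordering: the remainder of a split is requeued via
submit_bio_noacct() and hence is processed after everything already queued
(current->bio_list is a FIFO). With two independent split points, fragments
are dispatched out of LBA order. A minimal userspace sketch of this queuing
behavior (made-up sector counts and limits, not kernel code):

  #include <stdio.h>

  struct bio { int start, len; };

  #define CRYPTO_LIMIT 4 /* split limit of the crypto fallback (sectors) */
  #define QUEUE_LIMIT  2 /* split limit of bio_split_to_limits() (sectors) */

  static struct bio fifo[16];
  static int fifo_head, fifo_tail;

  /* Models submit_bio_noacct(): defer the bio instead of recursing. */
  static void submit(struct bio b)
  {
          fifo[fifo_tail++] = b;
  }

  int main(void)
  {
          submit((struct bio){ 0, 8 });

          while (fifo_head < fifo_tail) {
                  struct bio b = fifo[fifo_head++];

                  /* First split point: the crypto fallback code. */
                  if (b.len > CRYPTO_LIMIT) {
                          submit((struct bio){ b.start + CRYPTO_LIMIT,
                                               b.len - CRYPTO_LIMIT });
                          b.len = CRYPTO_LIMIT;
                  }
                  /* Second split point: bio_split_to_limits(). */
                  if (b.len > QUEUE_LIMIT) {
                          submit((struct bio){ b.start + QUEUE_LIMIT,
                                               b.len - QUEUE_LIMIT });
                          b.len = QUEUE_LIMIT;
                  }
                  printf("dispatched sectors [%d,%d)\n",
                         b.start, b.start + b.len);
          }
          return 0;
  }

With these numbers the dispatch order is [0,2), [4,6), [2,4), [6,8): the
remainder requeued by the crypto fallback overtakes the remainder requeued
by bio_split_to_limits(), which a zoned device rejects for
sequential-write-required zones.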

Thanks,

Bart.

Bart Van Assche (3):
  block: Split blk_crypto_fallback_split_bio_if_needed()
  block: Introduce the BLK_FEAT_CALLS_BIO_SPLIT_TO_LIMITS flag
  block: Rework splitting of encrypted bios

 block/bio.c                 |  3 +++
 block/blk-core.c            | 19 ++++++++-----
 block/blk-crypto-fallback.c | 53 ++++++++++++++++++++-----------------
 block/blk-crypto-internal.h |  7 +++++
 block/blk-crypto.c          |  3 ++-
 block/blk-merge.c           | 13 +++++++--
 block/blk-mq.c              |  3 ++-
 block/blk.h                 |  9 +++++++
 include/linux/blk_types.h   |  1 +
 include/linux/blkdev.h      |  6 ++++-
 10 files changed, 81 insertions(+), 36 deletions(-)


^ permalink raw reply	[flat|nested] 7+ messages in thread

* [PATCH 1/3] block: Split blk_crypto_fallback_split_bio_if_needed()
  2025-06-25 23:42 [PATCH 0/3] Fix bio splitting in the crypto fallback code Bart Van Assche
@ 2025-06-25 23:42 ` Bart Van Assche
  2025-06-25 23:42 ` [PATCH 2/3] block: Introduce the BLK_FEAT_CALLS_BIO_SPLIT_TO_LIMITS flag Bart Van Assche
  2025-06-25 23:42 ` [PATCH 3/3] block: Rework splitting of encrypted bios Bart Van Assche
  2 siblings, 0 replies; 7+ messages in thread
From: Bart Van Assche @ 2025-06-25 23:42 UTC (permalink / raw)
  To: Jens Axboe
  Cc: linux-block, Eric Biggers, Keith Busch, Christoph Hellwig,
	Bart Van Assche

Prepare for calling blk_crypto_max_io_size() from the bio splitting code.

Signed-off-by: Bart Van Assche <bvanassche@acm.org>
---
 block/blk-crypto-fallback.c | 17 +++++++++++++++--
 1 file changed, 15 insertions(+), 2 deletions(-)

diff --git a/block/blk-crypto-fallback.c b/block/blk-crypto-fallback.c
index 005c9157ffb3..323f59b91a8f 100644
--- a/block/blk-crypto-fallback.c
+++ b/block/blk-crypto-fallback.c
@@ -209,9 +209,12 @@ blk_crypto_fallback_alloc_cipher_req(struct blk_crypto_keyslot *slot,
 	return true;
 }
 
-static bool blk_crypto_fallback_split_bio_if_needed(struct bio **bio_ptr)
+/*
+ * The encryption fallback code allocates bounce pages individually. Hence this
+ * function, which calculates an upper limit for the bio size.
+ */
+static unsigned int blk_crypto_max_io_size(struct bio *bio)
 {
-	struct bio *bio = *bio_ptr;
 	unsigned int i = 0;
 	unsigned int num_sectors = 0;
 	struct bio_vec bv;
@@ -222,6 +225,16 @@ static bool blk_crypto_fallback_split_bio_if_needed(struct bio **bio_ptr)
 		if (++i == BIO_MAX_VECS)
 			break;
 	}
+
+	return num_sectors;
+}
+
+static bool blk_crypto_fallback_split_bio_if_needed(struct bio **bio_ptr)
+{
+	struct bio *bio = *bio_ptr;
+	unsigned int num_sectors;
+
+	num_sectors = blk_crypto_max_io_size(bio);
 	if (num_sectors < bio_sectors(bio)) {
 		struct bio *split_bio;
 

^ permalink raw reply related	[flat|nested] 7+ messages in thread

* [PATCH 2/3] block: Introduce the BLK_FEAT_CALLS_BIO_SPLIT_TO_LIMITS flag
  2025-06-25 23:42 [PATCH 0/3] Fix bio splitting in the crypto fallback code Bart Van Assche
  2025-06-25 23:42 ` [PATCH 1/3] block: Split blk_crypto_fallback_split_bio_if_needed() Bart Van Assche
@ 2025-06-25 23:42 ` Bart Van Assche
  2025-06-26  5:24   ` Christoph Hellwig
  2025-06-25 23:42 ` [PATCH 3/3] block: Rework splitting of encrypted bios Bart Van Assche
  2 siblings, 1 reply; 7+ messages in thread
From: Bart Van Assche @ 2025-06-25 23:42 UTC (permalink / raw)
  To: Jens Axboe
  Cc: linux-block, Eric Biggers, Keith Busch, Christoph Hellwig,
	Bart Van Assche

Introduce a flag that indicates whether or not bio_split_to_limits() is
called while processing a bio. Set this flag from inside
blk_mq_alloc_queue() because bio_split_to_limits() is called for all
request-based block drivers. This patch prepares for modifying when
__submit_bio() calls blk_crypto_bio_prep().
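
A bio-based driver that does call bio_split_to_limits() for every bio could
opt in when creating its disk. A hypothetical sketch (driver setup code
assumed, not part of this patch):

  struct queue_limits lim = { };
  struct gendisk *disk;

  /* This driver calls bio_split_to_limits() from its ->submit_bio(). */
  lim.features |= BLK_FEAT_CALLS_BIO_SPLIT_TO_LIMITS;

  disk = blk_alloc_disk(&lim, NUMA_NO_NODE);
  if (IS_ERR(disk))
          return PTR_ERR(disk);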

Signed-off-by: Bart Van Assche <bvanassche@acm.org>
---
 block/blk-mq.c         | 3 ++-
 include/linux/blkdev.h | 6 +++++-
 2 files changed, 7 insertions(+), 2 deletions(-)

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 532acdbe9e16..ab2fbc895a98 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -4411,7 +4411,8 @@ struct request_queue *blk_mq_alloc_queue(struct blk_mq_tag_set *set,
 
 	if (!lim)
 		lim = &default_lim;
-	lim->features |= BLK_FEAT_IO_STAT | BLK_FEAT_NOWAIT;
+	lim->features |= BLK_FEAT_IO_STAT | BLK_FEAT_NOWAIT |
+		BLK_FEAT_CALLS_BIO_SPLIT_TO_LIMITS;
 	if (set->nr_maps > HCTX_TYPE_POLL)
 		lim->features |= BLK_FEAT_POLL;
 
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index a51f92b6c340..108f70aa4d15 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -340,13 +340,17 @@ typedef unsigned int __bitwise blk_features_t;
 #define BLK_FEAT_ATOMIC_WRITES \
 	((__force blk_features_t)(1u << 16))
 
+/* bio_split_to_limits() is called while processing a bio */
+#define BLK_FEAT_CALLS_BIO_SPLIT_TO_LIMITS ((__force blk_features_t)(1u << 17))
+
 /*
  * Flags automatically inherited when stacking limits.
  */
 #define BLK_FEAT_INHERIT_MASK \
 	(BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA | BLK_FEAT_ROTATIONAL | \
 	 BLK_FEAT_STABLE_WRITES | BLK_FEAT_ZONED | \
-	 BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE)
+	 BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE | \
+	 BLK_FEAT_CALLS_BIO_SPLIT_TO_LIMITS)
 
 /* internal flags in queue_limits.flags */
 typedef unsigned int __bitwise blk_flags_t;

^ permalink raw reply related	[flat|nested] 7+ messages in thread

* [PATCH 3/3] block: Rework splitting of encrypted bios
  2025-06-25 23:42 [PATCH 0/3] Fix bio splitting in the crypto fallback code Bart Van Assche
  2025-06-25 23:42 ` [PATCH 1/3] block: Split blk_crypto_fallback_split_bio_if_needed() Bart Van Assche
  2025-06-25 23:42 ` [PATCH 2/3] block: Introduce the BLK_FEAT_CALLS_BIO_SPLIT_TO_LIMITS flag Bart Van Assche
@ 2025-06-25 23:42 ` Bart Van Assche
  2025-06-26  5:28   ` Christoph Hellwig
  2 siblings, 1 reply; 7+ messages in thread
From: Bart Van Assche @ 2025-06-25 23:42 UTC (permalink / raw)
  To: Jens Axboe
  Cc: linux-block, Eric Biggers, Keith Busch, Christoph Hellwig,
	Bart Van Assche

Modify splitting of encrypted bios as follows:
- Allow bio-based drivers to report that they call bio_split_to_limits() for
  every bio by setting the BLK_FEAT_CALLS_BIO_SPLIT_TO_LIMITS flag introduced
  by the previous patch.
- For request-based block drivers and for bio-based block drivers that call
  bio_split_to_limits(), call blk_crypto_bio_prep() after bio splitting
  instead of before it.
- For bio-based block drivers of which it is not known whether they call
  bio_split_to_limits(), call blk_crypto_bio_prep() before .submit_bio() is
  called.
- In blk_crypto_fallback_encrypt_bio(), prevent infinite recursion by only
  trying to split a bio if this function is not called from inside
  bio_split_to_limits(). See the cleanup-attribute sketch below.
- Since blk_crypto_fallback_encrypt_bio() may clear *bio_ptr, make its caller
  (__blk_crypto_bio_prep()) check whether or not this pointer is NULL.
- In bio_split_rw(), restrict the bio size to the smaller of what is
  supported by the block driver and by the crypto fallback code.

The advantages of these changes are as follows:
- This patch fixes write errors on zoned storage caused by out-of-order
  submission of bios. This out-of-order submission happens if both the
  crypto fallback code and bio_split_to_limits() split a bio.
- Less code duplication. The crypto fallback code now calls
  bio_split_to_limits() instead of open-coding it.
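
The recursion guard mentioned above relies on the compiler's cleanup
attribute (the kernel's __cleanup() macro) so that BIO_HAS_BEEN_SPLIT is
cleared on every return path of __bio_split_to_limits(). A minimal userspace
sketch of that pattern (hypothetical names, illustration only):

  #include <stdio.h>

  static void clear_flag(int **flagp)
  {
          /* Runs automatically when the guarded variable leaves scope. */
          **flagp = 0;
  }

  static int process(int *flag)
  {
          __attribute__((cleanup(clear_flag))) int *guard = flag;

          *flag = 1;      /* like bio_set_flag(bio, BIO_HAS_BEEN_SPLIT) */
          if (*flag)
                  return 1;       /* clear_flag() runs on this path too */
          return 0;
  }

  int main(void)
  {
          int flag = 0;

          process(&flag);
          printf("flag after process(): %d\n", flag);     /* prints 0 */
          return 0;
  }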

Signed-off-by: Bart Van Assche <bvanassche@acm.org>
---
 block/bio.c                 |  3 ++
 block/blk-core.c            | 19 +++++++++----
 block/blk-crypto-fallback.c | 56 +++++++++++++++----------------------
 block/blk-crypto-internal.h |  7 +++++
 block/blk-crypto.c          |  3 +-
 block/blk-merge.c           | 13 +++++++--
 block/blk.h                 |  9 ++++++
 include/linux/blk_types.h   |  1 +
 8 files changed, 69 insertions(+), 42 deletions(-)

diff --git a/block/bio.c b/block/bio.c
index 92c512e876c8..2103612171c0 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1688,6 +1688,9 @@ struct bio *bio_split(struct bio *bio, int sectors,
 	if (!split)
 		return ERR_PTR(-ENOMEM);
 
+	/* Prevent bio_split_to_limits() from calling itself recursively. */
+	bio_set_flag(split, BIO_HAS_BEEN_SPLIT);
+
 	split->bi_iter.bi_size = sectors << 9;
 
 	if (bio_integrity(split))
diff --git a/block/blk-core.c b/block/blk-core.c
index fdac48aec5ef..3a720ec8e6cc 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -626,23 +626,30 @@ static void __submit_bio(struct bio *bio)
 	/* If plug is not used, add new plug here to cache nsecs time. */
 	struct blk_plug plug;
 
-	if (unlikely(!blk_crypto_bio_prep(&bio)))
-		return;
-
 	blk_start_plug(&plug);
 
 	if (!bdev_test_flag(bio->bi_bdev, BD_HAS_SUBMIT_BIO)) {
 		blk_mq_submit_bio(bio);
 	} else if (likely(bio_queue_enter(bio) == 0)) {
 		struct gendisk *disk = bio->bi_bdev->bd_disk;
-	
-		if ((bio->bi_opf & REQ_POLLED) &&
-		    !(disk->queue->limits.features & BLK_FEAT_POLL)) {
+		const blk_features_t features = disk->queue->limits.features;
+
+		/*
+		 * Only call blk_crypto_bio_prep() before .submit_bio() if
+		 * the block driver won't call bio_split_to_limits().
+		 */
+		if (unlikely(!(features & BLK_FEAT_CALLS_BIO_SPLIT_TO_LIMITS) &&
+			     !blk_crypto_bio_prep(&bio)))
+			goto exit_queue;
+
+		if ((bio->bi_opf & REQ_POLLED) && !(features & BLK_FEAT_POLL)) {
 			bio->bi_status = BLK_STS_NOTSUPP;
 			bio_endio(bio);
 		} else {
 			disk->fops->submit_bio(bio);
 		}
+
+exit_queue:
 		blk_queue_exit(disk->queue);
 	}
 
diff --git a/block/blk-crypto-fallback.c b/block/blk-crypto-fallback.c
index 323f59b91a8f..8f9aee6a837a 100644
--- a/block/blk-crypto-fallback.c
+++ b/block/blk-crypto-fallback.c
@@ -213,7 +213,7 @@ blk_crypto_fallback_alloc_cipher_req(struct blk_crypto_keyslot *slot,
  * The encryption fallback code allocates bounce pages individually. Hence this
  * function, which calculates an upper limit for the bio size.
  */
-static unsigned int blk_crypto_max_io_size(struct bio *bio)
+unsigned int blk_crypto_max_io_size(struct bio *bio)
 {
 	unsigned int i = 0;
 	unsigned int num_sectors = 0;
@@ -229,29 +229,6 @@ static unsigned int blk_crypto_max_io_size(struct bio *bio)
 	return num_sectors;
 }
 
-static bool blk_crypto_fallback_split_bio_if_needed(struct bio **bio_ptr)
-{
-	struct bio *bio = *bio_ptr;
-	unsigned int num_sectors;
-
-	num_sectors = blk_crypto_max_io_size(bio);
-	if (num_sectors < bio_sectors(bio)) {
-		struct bio *split_bio;
-
-		split_bio = bio_split(bio, num_sectors, GFP_NOIO,
-				      &crypto_bio_split);
-		if (IS_ERR(split_bio)) {
-			bio->bi_status = BLK_STS_RESOURCE;
-			return false;
-		}
-		bio_chain(split_bio, bio);
-		submit_bio_noacct(bio);
-		*bio_ptr = split_bio;
-	}
-
-	return true;
-}
-
 union blk_crypto_iv {
 	__le64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
 	u8 bytes[BLK_CRYPTO_MAX_IV_SIZE];
@@ -270,8 +247,10 @@ static void blk_crypto_dun_to_iv(const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
  * The crypto API fallback's encryption routine.
  * Allocate a bounce bio for encryption, encrypt the input bio using crypto API,
  * and replace *bio_ptr with the bounce bio. May split input bio if it's too
- * large. Returns true on success. Returns false and sets bio->bi_status on
- * error.
+ * large. Returns %true on success. On error, %false is returned and one of
+ * these two actions is taken:
+ * - Either @bio_ptr->bi_status is set and *@bio_ptr is not modified.
+ * - Or bio_endio() is called and *@bio_ptr is changed into %NULL.
  */
 static bool blk_crypto_fallback_encrypt_bio(struct bio **bio_ptr)
 {
@@ -288,11 +267,17 @@ static bool blk_crypto_fallback_encrypt_bio(struct bio **bio_ptr)
 	bool ret = false;
 	blk_status_t blk_st;
 
-	/* Split the bio if it's too big for single page bvec */
-	if (!blk_crypto_fallback_split_bio_if_needed(bio_ptr))
-		return false;
+	if (!bio_flagged(*bio_ptr, BIO_HAS_BEEN_SPLIT)) {
+		/* Split the bio if it's too big for single page bvec */
+		src_bio = bio_split_to_limits(*bio_ptr);
+		if (!src_bio) {
+			*bio_ptr = NULL;
+			return false;
+		}
+	} else {
+		src_bio = *bio_ptr;
+	}
 
-	src_bio = *bio_ptr;
 	bc = src_bio->bi_crypt_context;
 	data_unit_size = bc->bc_key->crypto_cfg.data_unit_size;
 
@@ -488,9 +473,8 @@ static void blk_crypto_fallback_decrypt_endio(struct bio *bio)
  * @bio_ptr: pointer to the bio to prepare
  *
  * If bio is doing a WRITE operation, this splits the bio into two parts if it's
- * too big (see blk_crypto_fallback_split_bio_if_needed()). It then allocates a
- * bounce bio for the first part, encrypts it, and updates bio_ptr to point to
- * the bounce bio.
+ * too big. It then allocates a bounce bio for the first part, encrypts it, and
+ * updates bio_ptr to point to the bounce bio.
  *
  * For a READ operation, we mark the bio for decryption by using bi_private and
  * bi_end_io.
@@ -508,6 +492,12 @@ bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr)
 	struct bio_crypt_ctx *bc = bio->bi_crypt_context;
 	struct bio_fallback_crypt_ctx *f_ctx;
 
+	/*
+	 * Check whether blk_crypto_fallback_bio_prep() has already been called.
+	 */
+	if (bio->bi_end_io == blk_crypto_fallback_encrypt_endio)
+		return true;
+
 	if (WARN_ON_ONCE(!tfms_inited[bc->bc_key->crypto_cfg.crypto_mode])) {
 		/* User didn't call blk_crypto_start_using_key() first */
 		bio->bi_status = BLK_STS_IOERR;
diff --git a/block/blk-crypto-internal.h b/block/blk-crypto-internal.h
index ccf6dff6ff6b..443ba1fd82e6 100644
--- a/block/blk-crypto-internal.h
+++ b/block/blk-crypto-internal.h
@@ -223,6 +223,8 @@ bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr);
 
 int blk_crypto_fallback_evict_key(const struct blk_crypto_key *key);
 
+unsigned int blk_crypto_max_io_size(struct bio *bio);
+
 #else /* CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK */
 
 static inline int
@@ -245,6 +247,11 @@ blk_crypto_fallback_evict_key(const struct blk_crypto_key *key)
 	return 0;
 }
 
+static inline unsigned int blk_crypto_max_io_size(struct bio *bio)
+{
+	return UINT_MAX;
+}
+
 #endif /* CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK */
 
 #endif /* __LINUX_BLK_CRYPTO_INTERNAL_H */
diff --git a/block/blk-crypto.c b/block/blk-crypto.c
index 4b1ad84d1b5a..76278e23193d 100644
--- a/block/blk-crypto.c
+++ b/block/blk-crypto.c
@@ -306,7 +306,8 @@ bool __blk_crypto_bio_prep(struct bio **bio_ptr)
 	if (blk_crypto_fallback_bio_prep(bio_ptr))
 		return true;
 fail:
-	bio_endio(*bio_ptr);
+	if (*bio_ptr)
+		bio_endio(*bio_ptr);
 	return false;
 }
 
diff --git a/block/blk-merge.c b/block/blk-merge.c
index e55a8ec219c9..df65231be543 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -9,6 +9,7 @@
 #include <linux/blk-integrity.h>
 #include <linux/part_stat.h>
 #include <linux/blk-cgroup.h>
+#include <linux/blk-crypto.h>
 
 #include <trace/events/block.h>
 
@@ -125,9 +126,14 @@ static struct bio *bio_submit_split(struct bio *bio, int split_sectors)
 				  bio->bi_iter.bi_sector);
 		WARN_ON_ONCE(bio_zone_write_plugging(bio));
 		submit_bio_noacct(bio);
-		return split;
+
+		bio = split;
 	}
 
+	WARN_ON_ONCE(!bio);
+	if (unlikely(!blk_crypto_bio_prep(&bio)))
+		return NULL;
+
 	return bio;
 error:
 	bio->bi_status = errno_to_blk_status(split_sectors);
@@ -356,9 +362,12 @@ EXPORT_SYMBOL_GPL(bio_split_rw_at);
 struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim,
 		unsigned *nr_segs)
 {
+	u32 max_sectors =
+		min(get_max_io_size(bio, lim), blk_crypto_max_io_size(bio));
+
 	return bio_submit_split(bio,
 		bio_split_rw_at(bio, lim, nr_segs,
-			get_max_io_size(bio, lim) << SECTOR_SHIFT));
+				(u64)max_sectors << SECTOR_SHIFT));
 }
 
 /*
diff --git a/block/blk.h b/block/blk.h
index 1141b343d0b5..5f2b43b250df 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -376,6 +376,11 @@ static inline bool bio_may_need_split(struct bio *bio,
 		lim->min_segment_size;
 }
 
+static inline void bio_clear_split_flag(struct bio **bio)
+{
+	bio_clear_flag(*bio, BIO_HAS_BEEN_SPLIT);
+}
+
 /**
  * __bio_split_to_limits - split a bio to fit the queue limits
  * @bio:     bio to be split
@@ -392,6 +397,10 @@ static inline bool bio_may_need_split(struct bio *bio,
 static inline struct bio *__bio_split_to_limits(struct bio *bio,
 		const struct queue_limits *lim, unsigned int *nr_segs)
 {
+	struct bio *clear_split_flag __cleanup(bio_clear_split_flag) = bio;
+
+	bio_set_flag(bio, BIO_HAS_BEEN_SPLIT);
+
 	switch (bio_op(bio)) {
 	case REQ_OP_READ:
 	case REQ_OP_WRITE:
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 09b99d52fd36..d235e894347a 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -308,6 +308,7 @@ enum {
 	BIO_REMAPPED,
 	BIO_ZONE_WRITE_PLUGGING, /* bio handled through zone write plugging */
 	BIO_EMULATES_ZONE_APPEND, /* bio emulates a zone append operation */
+	BIO_HAS_BEEN_SPLIT,
 	BIO_FLAG_LAST
 };
 

^ permalink raw reply related	[flat|nested] 7+ messages in thread

* Re: [PATCH 2/3] block: Introduce the BLK_FEAT_CALLS_BIO_SPLIT_TO_LIMITS flag
  2025-06-25 23:42 ` [PATCH 2/3] block: Introduce the BLK_FEAT_CALLS_BIO_SPLIT_TO_LIMITS flag Bart Van Assche
@ 2025-06-26  5:24   ` Christoph Hellwig
  0 siblings, 0 replies; 7+ messages in thread
From: Christoph Hellwig @ 2025-06-26  5:24 UTC (permalink / raw)
  To: Bart Van Assche
  Cc: Jens Axboe, linux-block, Eric Biggers, Keith Busch,
	Christoph Hellwig

On Wed, Jun 25, 2025 at 04:42:58PM -0700, Bart Van Assche wrote:
> Introduce a flag that indicates whether or not bio_split_to_limits() is
> called while processing a bio. Set this flag from inside
> blk_mq_alloc_queue() because bio_split_to_limits() is called for all
> request-based block drivers. This patch prepares for modifying when
> __submit_bio() calls blk_crypto_bio_prep().

I don't see how this is a good idea.  bio based drivers are a thin
abstraction that the block layer should not know anything about.


^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH 3/3] block: Rework splitting of encrypted bios
  2025-06-25 23:42 ` [PATCH 3/3] block: Rework splitting of encrypted bios Bart Van Assche
@ 2025-06-26  5:28   ` Christop Hellwig
  2025-07-10 18:04     ` Bart Van Assche
  0 siblings, 1 reply; 7+ messages in thread
From: Christoph Hellwig @ 2025-06-26  5:28 UTC (permalink / raw)
  To: Bart Van Assche
  Cc: Jens Axboe, linux-block, Eric Biggers, Keith Busch,
	Christoph Hellwig

This adds a lot of mess to work around the blk-crypto fallback code, and
looking at it a bit more I think the fundamental problem is that we
call into the blk-crypto fallback code from the block layer, which is
very much against how the rest of the block layer works for submit_bio
based drivers.

So instead of piling up workarounds in the block layer code I'd suggest
you change the blk-crypto code to work more like the rest of the block
layer for bio based drivers, i.e. move the call into the drivers that
you care about and stop supporting it for others.  The whole concept
of working around the lack of features in drivers in the block layer
is just going to cause more and more problems.


^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH 3/3] block: Rework splitting of encrypted bios
  2025-06-26  5:28   ` Christoph Hellwig
@ 2025-07-10 18:04     ` Bart Van Assche
  0 siblings, 0 replies; 7+ messages in thread
From: Bart Van Assche @ 2025-07-10 18:04 UTC (permalink / raw)
  To: Christoph Hellwig; +Cc: Jens Axboe, linux-block, Eric Biggers, Keith Busch

On 6/25/25 10:28 PM, Christoph Hellwig wrote:
> This adds a lot of mess to work around the blk-crypto fallback code, and
> looking at it a bit more I think the fundamental problem is that we
> call into the blk-crypto fallback code from the block layer, which is
> very much against how the rest of the block layer works for submit_bio
> based drivers.
> 
> So instead of piling up workarounds in the block layer code I'd suggest
> you change the blk-crypto code to work more like the rest of the block
> layer for bio based drivers, i.e. move the call into the drivers that
> you care about and stop supporting it for others.  The whole concept
> of working around the lack of features in drivers in the block layer
> is just going to cause more and more problems.

Hi Christoph,

Thanks for having proposed a path forward.

The blk-crypto-fallback code replaces a bio because it allocates a data
buffer for the encrypted or decrypted data. I think this data buffer
allocation is fundamental. Hence, the crypto fallback code must be
called before DMA mapping has happened. In case of the UFS driver, DMA
mapping happens by the SCSI core. Would it be acceptable to keep the
blk-crypto-fallback code in the block layer core instead of moving it
into the SCSI core? The patch below shows that is is possible without
the disadvantages of the patch series I'm replying to. In the patch
below the BIO_HAS_BEEN_SPLIT and BLK_FEAT_CALLS_BIO_SPLIT_TO_LIMITS
flags are gone. I have verified that the patch below doesn't trigger any
write errors in combination with zoned storage.
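
To illustrate the ordering constraint: the fallback encrypts into freshly
allocated bounce pages and substitutes those for the caller's pages, so
whatever performs DMA mapping afterwards must see the bounce pages. A
simplified userspace sketch (memcpy stands in for the actual encryption;
names are made up):

  #include <stdlib.h>
  #include <string.h>

  /* "Encrypt" src into a newly allocated bounce buffer and return it.
   * DMA mapping must happen against the returned bounce buffer, not
   * against src - hence the fallback must run before DMA mapping. */
  static void *encrypt_to_bounce(const void *src, size_t len)
  {
          void *bounce = malloc(len);

          if (!bounce)
                  return NULL;
          memcpy(bounce, src, len); /* stand-in for real encryption */
          return bounce;
  }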

Thanks,

Bart.


Subject: block: Rework splitting of encrypted bios

Modify splitting of encrypted bios as follows:
- Stop calling blk_crypto_bio_prep() for bio-based block drivers that do not
   call bio_split_to_limits().
- For request-based block drivers and for bio-based block drivers that call
   bio_split_to_limits(), call blk_crypto_bio_prep() after bio splitting
   instead of before it.
- In bio_split_rw(), restrict the bio size to the smaller of what is
   supported by the block driver and by the crypto fallback code.

The advantages of these changes are as follows:
- This patch fixes write errors on zoned storage caused by out-of-order
   submission of bios. This out-of-order submission happens if both the
   crypto fallback code and bio_split_to_limits() split a bio.
- Less code duplication. The crypto fallback code now calls
   bio_split_to_limits() instead of open-coding it.

Signed-off-by: Bart Van Assche <bvanassche@acm.org>

diff --git a/block/blk-core.c b/block/blk-core.c
index fdac48aec5ef..5af5f8c3cd06 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -626,9 +626,6 @@ static void __submit_bio(struct bio *bio)
  	/* If plug is not used, add new plug here to cache nsecs time. */
  	struct blk_plug plug;

-	if (unlikely(!blk_crypto_bio_prep(&bio)))
-		return;
-
  	blk_start_plug(&plug);

  	if (!bdev_test_flag(bio->bi_bdev, BD_HAS_SUBMIT_BIO)) {
diff --git a/block/blk-crypto-fallback.c b/block/blk-crypto-fallback.c
index 005c9157ffb3..3b9ae57141b0 100644
--- a/block/blk-crypto-fallback.c
+++ b/block/blk-crypto-fallback.c
@@ -209,9 +209,12 @@ blk_crypto_fallback_alloc_cipher_req(struct blk_crypto_keyslot *slot,
  	return true;
  }

-static bool blk_crypto_fallback_split_bio_if_needed(struct bio **bio_ptr)
+/*
+ * The encryption fallback code allocates bounce pages individually. Hence this
+ * function, which calculates an upper limit for the bio size.
+ */
+unsigned int blk_crypto_max_io_size(struct bio *bio)
  {
-	struct bio *bio = *bio_ptr;
  	unsigned int i = 0;
  	unsigned int num_sectors = 0;
  	struct bio_vec bv;
@@ -222,21 +225,8 @@ static bool blk_crypto_fallback_split_bio_if_needed(struct bio **bio_ptr)
  		if (++i == BIO_MAX_VECS)
  			break;
  	}
-	if (num_sectors < bio_sectors(bio)) {
-		struct bio *split_bio;
-
-		split_bio = bio_split(bio, num_sectors, GFP_NOIO,
-				      &crypto_bio_split);
-		if (IS_ERR(split_bio)) {
-			bio->bi_status = BLK_STS_RESOURCE;
-			return false;
-		}
-		bio_chain(split_bio, bio);
-		submit_bio_noacct(bio);
-		*bio_ptr = split_bio;
-	}

-	return true;
+	return num_sectors;
  }

  union blk_crypto_iv {
@@ -257,8 +247,10 @@ static void blk_crypto_dun_to_iv(const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
   * The crypto API fallback's encryption routine.
  * Allocate a bounce bio for encryption, encrypt the input bio using crypto API,
  * and replace *bio_ptr with the bounce bio. May split input bio if it's too
- * large. Returns true on success. Returns false and sets bio->bi_status on
- * error.
+ * large. Returns %true on success. On error, %false is returned and one of
+ * these two actions is taken:
+ * - Either @bio_ptr->bi_status is set and *@bio_ptr is not modified.
+ * - Or bio_endio() is called and *@bio_ptr is changed into %NULL.
   */
  static bool blk_crypto_fallback_encrypt_bio(struct bio **bio_ptr)
  {
@@ -275,9 +267,12 @@ static bool blk_crypto_fallback_encrypt_bio(struct bio **bio_ptr)
  	bool ret = false;
  	blk_status_t blk_st;

-	/* Split the bio if it's too big for single page bvec */
-	if (!blk_crypto_fallback_split_bio_if_needed(bio_ptr))
+	/* Verify that bio splitting has occurred. */
+	if (WARN_ON_ONCE(bio_sectors(*bio_ptr) >
+			 blk_crypto_max_io_size(*bio_ptr))) {
+		(*bio_ptr)->bi_status = BLK_STS_IOERR;
  		return false;
+	}

  	src_bio = *bio_ptr;
  	bc = src_bio->bi_crypt_context;
@@ -475,9 +470,8 @@ static void blk_crypto_fallback_decrypt_endio(struct bio *bio)
   * @bio_ptr: pointer to the bio to prepare
   *
  * If bio is doing a WRITE operation, this splits the bio into two parts if it's
- * too big (see blk_crypto_fallback_split_bio_if_needed()). It then allocates a
- * bounce bio for the first part, encrypts it, and updates bio_ptr to point to
- * the bounce bio.
+ * too big. It then allocates a bounce bio for the first part, encrypts it, and
+ * updates bio_ptr to point to the bounce bio.
   *
  * For a READ operation, we mark the bio for decryption by using bi_private and
   * bi_end_io.
diff --git a/block/blk-crypto-internal.h b/block/blk-crypto-internal.h
index ccf6dff6ff6b..443ba1fd82e6 100644
--- a/block/blk-crypto-internal.h
+++ b/block/blk-crypto-internal.h
@@ -223,6 +223,8 @@ bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr);

  int blk_crypto_fallback_evict_key(const struct blk_crypto_key *key);

+unsigned int blk_crypto_max_io_size(struct bio *bio);
+
  #else /* CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK */

  static inline int
@@ -245,6 +247,11 @@ blk_crypto_fallback_evict_key(const struct blk_crypto_key *key)
  	return 0;
  }

+static inline unsigned int blk_crypto_max_io_size(struct bio *bio)
+{
+	return UINT_MAX;
+}
+
  #endif /* CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK */

  #endif /* __LINUX_BLK_CRYPTO_INTERNAL_H */
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 70d704615be5..caaec70477bb 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -9,6 +9,7 @@
  #include <linux/blk-integrity.h>
  #include <linux/part_stat.h>
  #include <linux/blk-cgroup.h>
+#include <linux/blk-crypto.h>

  #include <trace/events/block.h>

@@ -124,9 +125,14 @@ static struct bio *bio_submit_split(struct bio *bio, int split_sectors)
  		trace_block_split(split, bio->bi_iter.bi_sector);
  		WARN_ON_ONCE(bio_zone_write_plugging(bio));
  		submit_bio_noacct(bio);
-		return split;
+
+		bio = split;
  	}

+	WARN_ON_ONCE(!bio);
+	if (unlikely(!blk_crypto_bio_prep(&bio)))
+		return NULL;
+
  	return bio;
  error:
  	bio->bi_status = errno_to_blk_status(split_sectors);
@@ -355,9 +361,12 @@ EXPORT_SYMBOL_GPL(bio_split_rw_at);
  struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim,
  		unsigned *nr_segs)
  {
+	u32 max_sectors =
+		min(get_max_io_size(bio, lim), blk_crypto_max_io_size(bio));
+
  	return bio_submit_split(bio,
  		bio_split_rw_at(bio, lim, nr_segs,
-			get_max_io_size(bio, lim) << SECTOR_SHIFT));
+				(u64)max_sectors << SECTOR_SHIFT));
  }

  /*


^ permalink raw reply related	[flat|nested] 7+ messages in thread

end of thread, other threads:[~2025-07-10 18:05 UTC | newest]

Thread overview: 7+ messages
2025-06-25 23:42 [PATCH 0/3] Fix bio splitting in the crypto fallback code Bart Van Assche
2025-06-25 23:42 ` [PATCH 1/3] block: Split blk_crypto_fallback_split_bio_if_needed() Bart Van Assche
2025-06-25 23:42 ` [PATCH 2/3] block: Introduce the BLK_FEAT_CALLS_BIO_SPLIT_TO_LIMITS flag Bart Van Assche
2025-06-26  5:24   ` Christoph Hellwig
2025-06-25 23:42 ` [PATCH 3/3] block: Rework splitting of encrypted bios Bart Van Assche
2025-06-26  5:28   ` Christoph Hellwig
2025-07-10 18:04     ` Bart Van Assche
