public inbox for linux-block@vger.kernel.org
 help / color / mirror / Atom feed
* [RESEND PATCHv3 1/2] dm-crypt: allow unaligned bio_vecs for direct io
@ 2026-03-16 15:02 Keith Busch
  2026-03-16 15:09 ` [RESEND PATCHv3 2/2] dm-crypt: dynamic scatterlist for many segments Keith Busch
                   ` (2 more replies)
  0 siblings, 3 replies; 16+ messages in thread
From: Keith Busch @ 2026-03-16 15:02 UTC (permalink / raw)
  To: dm-devel; +Cc: linux-block, snitzer, Keith Busch

From: Keith Busch <kbusch@kernel.org>

Many storage devices can handle DMA for data that is not aligned to the
sector block size. The block and filesystem layers have introduced
updates to allow that kind of memory alignment flexibility when
possible.

dm-crypt, however, currently constrains itself to aligned memory because
it sends a single scatterlist element for the in/out list to the encrypt
and decrypt algorithms. This forces applications that have unaligned
data to copy through a bounce buffer, increasing CPU and memory
utilization.

Use multiple scatterlist elements to relax the memory alignment
requirement. To keep this simple, this more flexible constraint is
enabled only for certain encryption and initialization vector types,
specifically the ones that don't have additional use for the request's
base scatterlist elements beyond holding decrypted data.

Signed-off-by: Keith Busch <kbusch@kernel.org>
---
 drivers/md/dm-crypt.c | 79 +++++++++++++++++++++++++++++++++----------
 drivers/md/dm-table.c |  1 +
 2 files changed, 62 insertions(+), 18 deletions(-)

diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 54823341c9fda..bbb4346d0127f 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -150,6 +150,7 @@ enum cipher_flags {
 	CRYPT_IV_LARGE_SECTORS,		/* Calculate IV from sector_size, not 512B sectors */
 	CRYPT_ENCRYPT_PREPROCESS,	/* Must preprocess data for encryption (elephant) */
 	CRYPT_KEY_MAC_SIZE_SET,		/* The integrity_key_size option was used */
+	CRYPT_DISCONTIGUOUS_SEGS,	/* Can use partial sector segments */
 };
 
 /*
@@ -215,6 +216,7 @@ struct crypt_config {
 	unsigned int key_extra_size; /* additional keys length */
 	unsigned int key_mac_size;   /* MAC key size for authenc(...) */
 
+	unsigned int io_alignment;
 	unsigned int integrity_tag_size;
 	unsigned int integrity_iv_size;
 	unsigned int used_tag_size;
@@ -1384,22 +1386,48 @@ static int crypt_convert_block_aead(struct crypt_config *cc,
 	return r;
 }
 
+static int crypt_build_sgl(struct crypt_config *cc, struct scatterlist *sg,
+			   struct bvec_iter *iter, struct bio *bio,
+			   int max_segs)
+{
+	unsigned int bytes = cc->sector_size;
+	struct bvec_iter tmp = *iter;
+	int segs, i = 0;
+
+	bio_advance_iter(bio, &tmp, bytes);
+	segs = tmp.bi_idx - iter->bi_idx + !!tmp.bi_bvec_done;
+	if (segs > max_segs)
+		return -EIO;
+
+	sg_init_table(sg, segs);
+	do {
+		struct bio_vec bv = mp_bvec_iter_bvec(bio->bi_io_vec, *iter);
+		int len = min(bytes, bv.bv_len);
+
+		/* Reject unexpected unaligned bio. */
+		if (unlikely((len | bv.bv_offset) & cc->io_alignment))
+			return -EIO;
+
+		sg_set_page(&sg[i++], bv.bv_page, len, bv.bv_offset);
+		bio_advance_iter_single(bio, iter, len);
+		bytes -= len;
+	} while (bytes);
+
+	if (WARN_ON_ONCE(i != segs))
+		return -EIO;
+	return 0;
+}
+
 static int crypt_convert_block_skcipher(struct crypt_config *cc,
 					struct convert_context *ctx,
 					struct skcipher_request *req,
 					unsigned int tag_offset)
 {
-	struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
-	struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
 	struct scatterlist *sg_in, *sg_out;
 	struct dm_crypt_request *dmreq;
 	u8 *iv, *org_iv, *tag_iv;
 	__le64 *sector;
-	int r = 0;
-
-	/* Reject unexpected unaligned bio. */
-	if (unlikely(bv_in.bv_len & (cc->sector_size - 1)))
-		return -EIO;
+	int r;
 
 	dmreq = dmreq_of_req(cc, req);
 	dmreq->iv_sector = ctx->cc_sector;
@@ -1416,15 +1444,18 @@ static int crypt_convert_block_skcipher(struct crypt_config *cc,
 	sector = org_sector_of_dmreq(cc, dmreq);
 	*sector = cpu_to_le64(ctx->cc_sector - cc->iv_offset);
 
-	/* For skcipher we use only the first sg item */
 	sg_in  = &dmreq->sg_in[0];
 	sg_out = &dmreq->sg_out[0];
 
-	sg_init_table(sg_in, 1);
-	sg_set_page(sg_in, bv_in.bv_page, cc->sector_size, bv_in.bv_offset);
+	r = crypt_build_sgl(cc, sg_in, &ctx->iter_in, ctx->bio_in,
+			    ARRAY_SIZE(dmreq->sg_in));
+	if (r < 0)
+		return r;
 
-	sg_init_table(sg_out, 1);
-	sg_set_page(sg_out, bv_out.bv_page, cc->sector_size, bv_out.bv_offset);
+	r = crypt_build_sgl(cc, sg_out, &ctx->iter_out, ctx->bio_out,
+			    ARRAY_SIZE(dmreq->sg_out));
+	if (r < 0)
+		return r;
 
 	if (cc->iv_gen_ops) {
 		/* For READs use IV stored in integrity metadata */
@@ -1455,9 +1486,6 @@ static int crypt_convert_block_skcipher(struct crypt_config *cc,
 	if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
 		r = cc->iv_gen_ops->post(cc, org_iv, dmreq);
 
-	bio_advance_iter(ctx->bio_in, &ctx->iter_in, cc->sector_size);
-	bio_advance_iter(ctx->bio_out, &ctx->iter_out, cc->sector_size);
-
 	return r;
 }
 
@@ -2788,10 +2816,12 @@ static void crypt_dtr(struct dm_target *ti)
 static int crypt_ctr_ivmode(struct dm_target *ti, const char *ivmode)
 {
 	struct crypt_config *cc = ti->private;
+	bool unaligned_allowed = true;
 
-	if (crypt_integrity_aead(cc))
+	if (crypt_integrity_aead(cc)) {
 		cc->iv_size = crypto_aead_ivsize(any_tfm_aead(cc));
-	else
+		unaligned_allowed = false;
+	} else
 		cc->iv_size = crypto_skcipher_ivsize(any_tfm(cc));
 
 	if (cc->iv_size)
@@ -2827,6 +2857,7 @@ static int crypt_ctr_ivmode(struct dm_target *ti, const char *ivmode)
 		if (cc->key_extra_size > ELEPHANT_MAX_KEY_SIZE)
 			return -EINVAL;
 		set_bit(CRYPT_ENCRYPT_PREPROCESS, &cc->cipher_flags);
+		unaligned_allowed = false;
 	} else if (strcmp(ivmode, "lmk") == 0) {
 		cc->iv_gen_ops = &crypt_iv_lmk_ops;
 		/*
@@ -2839,10 +2870,12 @@ static int crypt_ctr_ivmode(struct dm_target *ti, const char *ivmode)
 			cc->key_parts++;
 			cc->key_extra_size = cc->key_size / cc->key_parts;
 		}
+		unaligned_allowed = false;
 	} else if (strcmp(ivmode, "tcw") == 0) {
 		cc->iv_gen_ops = &crypt_iv_tcw_ops;
 		cc->key_parts += 2; /* IV + whitening */
 		cc->key_extra_size = cc->iv_size + TCW_WHITENING_SIZE;
+		unaligned_allowed = false;
 	} else if (strcmp(ivmode, "random") == 0) {
 		cc->iv_gen_ops = &crypt_iv_random_ops;
 		/* Need storage space in integrity fields. */
@@ -2852,6 +2885,12 @@ static int crypt_ctr_ivmode(struct dm_target *ti, const char *ivmode)
 		return -EINVAL;
 	}
 
+	if (!unaligned_allowed) {
+		cc->io_alignment = cc->sector_size - 1;
+	} else {
+		set_bit(CRYPT_DISCONTIGUOUS_SEGS, &cc->cipher_flags);
+		cc->io_alignment = 3;
+	}
 	return 0;
 }
 
@@ -3722,7 +3761,11 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
 	limits->physical_block_size =
 		max_t(unsigned int, limits->physical_block_size, cc->sector_size);
 	limits->io_min = max_t(unsigned int, limits->io_min, cc->sector_size);
-	limits->dma_alignment = limits->logical_block_size - 1;
+
+	if (test_bit(CRYPT_DISCONTIGUOUS_SEGS, &cc->cipher_flags))
+		limits->dma_alignment = cc->io_alignment;
+	else
+		limits->dma_alignment = limits->logical_block_size - 1;
 
 	/*
 	 * For zoned dm-crypt targets, there will be no internal splitting of
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index dc2eff6b739df..aecb19a6913db 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1767,6 +1767,7 @@ int dm_calculate_queue_limits(struct dm_table *t,
 	bool zoned = false;
 
 	dm_set_stacking_limits(limits);
+	limits->dma_alignment = 0;
 
 	t->integrity_supported = true;
 	for (unsigned int i = 0; i < t->num_targets; i++) {
-- 
2.52.0


^ permalink raw reply related	[flat|nested] 16+ messages in thread
* [RESEND PATCHv3 0/2] dm-crypt: support relaxed memory alignment
@ 2026-01-14 15:49 Keith Busch
  2026-01-14 15:49 ` [RESEND PATCHv3 2/2] dm-crypt: dynamic scatterlist for many segments Keith Busch
  0 siblings, 1 reply; 16+ messages in thread
From: Keith Busch @ 2026-01-14 15:49 UTC (permalink / raw)
  To: dm-devel, snitzer, hch, ebiggers; +Cc: linux-block, axboe, Keith Busch

From: Keith Busch <kbusch@kernel.org>

Resending as the previous send was bad timing with the merge window.

Direct-io can support any memory alignment the hardware allows. Device
mappers don't need to impose any software constraints on memory
alignment, so this series removes one of those limitations from the
dm-crypt mapper.

Changes from v2:

 * Don't change the default stacking limit to allow the relaxed memory
   alignment requirements; have the caller do it instead.

 * Fixed scatterlist memory leaks when handling the case that can't
   use the inline scatterlist.

 * Fixed segment boundary check to use the crypt_config rather than the
   lower level block device's dma_alignment, which may not be the same
   size as the cc->sector_size which was used before, or the newly
   enabled 4-byte alignment this patch set allows in certain
   circumstances. 

Keith Busch (2):
  dm-crypt: allow unaligned bio_vecs for direct io
  dm-crypt: dynamic scatterlist for many segments

 drivers/md/dm-crypt.c | 114 ++++++++++++++++++++++++++++++++++--------
 drivers/md/dm-table.c |   1 +
 2 files changed, 94 insertions(+), 21 deletions(-)

-- 
2.47.3


^ permalink raw reply	[flat|nested] 16+ messages in thread

end of thread, other threads:[~2026-03-25 18:34 UTC | newest]

Thread overview: 16+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2026-03-16 15:02 [RESEND PATCHv3 1/2] dm-crypt: allow unaligned bio_vecs for direct io Keith Busch
2026-03-16 15:09 ` [RESEND PATCHv3 2/2] dm-crypt: dynamic scatterlist for many segments Keith Busch
2026-03-18 16:34   ` Mikulas Patocka
2026-03-18 17:01     ` Keith Busch
2026-03-18 17:40       ` Mikulas Patocka
2026-03-18 17:53         ` Keith Busch
2026-03-18 18:16           ` Mikulas Patocka
2026-03-18 18:32             ` Keith Busch
2026-03-18 18:41               ` Mikulas Patocka
2026-03-16 15:09 ` [RESEND PATCHv3 0/2] dm-crypt: support relaxed memory alignment Keith Busch
2026-03-18 16:19 ` [RESEND PATCHv3 1/2] dm-crypt: allow unaligned bio_vecs for direct io Mikulas Patocka
2026-03-18 17:40   ` Keith Busch
2026-03-18 18:06     ` Mikulas Patocka
2026-03-18 18:35       ` Keith Busch
2026-03-25 18:34       ` Keith Busch
  -- strict thread matches above, loose matches on Subject: below --
2026-01-14 15:49 [RESEND PATCHv3 0/2] dm-crypt: support relaxed memory alignment Keith Busch
2026-01-14 15:49 ` [RESEND PATCHv3 2/2] dm-crypt: dynamic scatterlist for many segments Keith Busch

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox