* [PATCH] blk-crypto: dynamically allocate fallback profile
@ 2023-08-08 17:25 Sweet Tea Dorminy
2023-08-09 6:55 ` Eric Biggers
0 siblings, 1 reply; 2+ messages in thread
From: Sweet Tea Dorminy @ 2023-08-08 17:25 UTC (permalink / raw)
To: Jens Axboe, Eric Biggers, Satya Tangirala, linux-block,
kernel-team
Cc: Sweet Tea Dorminy
blk_crypto_profile_init() calls lockdep_register_key(), which asserts
that the provided memory is not a static object. Unfortunately,
blk-crypto-fallback currently has a single static blk_crypto_profile,
which means trying to use the fallback with lockdep explodes in
blk_crypto_fallback_init().
Fortunately it is simple enough to use a dynamically allocated profile
for fallback, allowing the use of lockdep.
Fixes: 488f6682c832e ("block: blk-crypto-fallback for Inline Encryption")
Signed-off-by: Sweet Tea Dorminy <sweettea-kernel@dorminy.me>
---
block/blk-crypto-fallback.c | 27 +++++++++++++++------------
1 file changed, 15 insertions(+), 12 deletions(-)
diff --git a/block/blk-crypto-fallback.c b/block/blk-crypto-fallback.c
index ad9844c5b40c..de94e9bffec6 100644
--- a/block/blk-crypto-fallback.c
+++ b/block/blk-crypto-fallback.c
@@ -78,7 +78,7 @@ static struct blk_crypto_fallback_keyslot {
struct crypto_skcipher *tfms[BLK_ENCRYPTION_MODE_MAX];
} *blk_crypto_keyslots;
-static struct blk_crypto_profile blk_crypto_fallback_profile;
+static struct blk_crypto_profile *blk_crypto_fallback_profile;
static struct workqueue_struct *blk_crypto_wq;
static mempool_t *blk_crypto_bounce_page_pool;
static struct bio_set crypto_bio_split;
@@ -292,7 +292,7 @@ static bool blk_crypto_fallback_encrypt_bio(struct bio **bio_ptr)
* Get a blk-crypto-fallback keyslot that contains a crypto_skcipher for
* this bio's algorithm and key.
*/
- blk_st = blk_crypto_get_keyslot(&blk_crypto_fallback_profile,
+ blk_st = blk_crypto_get_keyslot(blk_crypto_fallback_profile,
bc->bc_key, &slot);
if (blk_st != BLK_STS_OK) {
src_bio->bi_status = blk_st;
@@ -395,7 +395,7 @@ static void blk_crypto_fallback_decrypt_bio(struct work_struct *work)
* Get a blk-crypto-fallback keyslot that contains a crypto_skcipher for
* this bio's algorithm and key.
*/
- blk_st = blk_crypto_get_keyslot(&blk_crypto_fallback_profile,
+ blk_st = blk_crypto_get_keyslot(blk_crypto_fallback_profile,
bc->bc_key, &slot);
if (blk_st != BLK_STS_OK) {
bio->bi_status = blk_st;
@@ -499,7 +499,7 @@ bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr)
return false;
}
- if (!__blk_crypto_cfg_supported(&blk_crypto_fallback_profile,
+ if (!__blk_crypto_cfg_supported(blk_crypto_fallback_profile,
&bc->bc_key->crypto_cfg)) {
bio->bi_status = BLK_STS_NOTSUPP;
return false;
@@ -526,7 +526,7 @@ bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr)
int blk_crypto_fallback_evict_key(const struct blk_crypto_key *key)
{
- return __blk_crypto_evict_key(&blk_crypto_fallback_profile, key);
+ return __blk_crypto_evict_key(blk_crypto_fallback_profile, key);
}
static bool blk_crypto_fallback_inited;
@@ -534,29 +534,32 @@ static int blk_crypto_fallback_init(void)
{
int i;
int err;
- struct blk_crypto_profile *profile = &blk_crypto_fallback_profile;
if (blk_crypto_fallback_inited)
return 0;
get_random_bytes(blank_key, BLK_CRYPTO_MAX_KEY_SIZE);
+ blk_crypto_fallback_profile =
+ kzalloc(sizeof(*blk_crypto_fallback_profile), GFP_KERNEL);
+
err = bioset_init(&crypto_bio_split, 64, 0, 0);
if (err)
goto out;
- err = blk_crypto_profile_init(profile, blk_crypto_num_keyslots);
+ err = blk_crypto_profile_init(blk_crypto_fallback_profile,
+ blk_crypto_num_keyslots);
if (err)
goto fail_free_bioset;
err = -ENOMEM;
- profile->ll_ops = blk_crypto_fallback_ll_ops;
- profile->max_dun_bytes_supported = BLK_CRYPTO_MAX_IV_SIZE;
+ blk_crypto_fallback_profile->ll_ops = blk_crypto_fallback_ll_ops;
+ blk_crypto_fallback_profile->max_dun_bytes_supported = BLK_CRYPTO_MAX_IV_SIZE;
/* All blk-crypto modes have a crypto API fallback. */
for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++)
- profile->modes_supported[i] = 0xFFFFFFFF;
- profile->modes_supported[BLK_ENCRYPTION_MODE_INVALID] = 0;
+ blk_crypto_fallback_profile->modes_supported[i] = 0xFFFFFFFF;
+ blk_crypto_fallback_profile->modes_supported[BLK_ENCRYPTION_MODE_INVALID] = 0;
blk_crypto_wq = alloc_workqueue("blk_crypto_wq",
WQ_UNBOUND | WQ_HIGHPRI |
@@ -597,7 +600,7 @@ static int blk_crypto_fallback_init(void)
fail_free_wq:
destroy_workqueue(blk_crypto_wq);
fail_destroy_profile:
- blk_crypto_profile_destroy(profile);
+ blk_crypto_profile_destroy(blk_crypto_fallback_profile);
fail_free_bioset:
bioset_exit(&crypto_bio_split);
out:
base-commit: 54d2161835d828a9663f548f61d1d9c3d3482122
--
2.41.0
^ permalink raw reply related [flat|nested] 2+ messages in thread
* Re: [PATCH] blk-crypto: dynamically allocate fallback profile
2023-08-08 17:25 [PATCH] blk-crypto: dynamically allocate fallback profile Sweet Tea Dorminy
@ 2023-08-09 6:55 ` Eric Biggers
0 siblings, 0 replies; 2+ messages in thread
From: Eric Biggers @ 2023-08-09 6:55 UTC (permalink / raw)
To: Sweet Tea Dorminy; +Cc: Jens Axboe, Satya Tangirala, linux-block, kernel-team
On Tue, Aug 08, 2023 at 01:25:30PM -0400, Sweet Tea Dorminy wrote:
> blk_crypto_profile_init() calls lockdep_register_key(), which asserts
> that the provided memory is not a static object. Unfortunately,
> blk-crypto-fallback currently has a single static blk_crypto_profile,
> which means trying to use the fallback with lockdep explodes in
> blk_crypto_fallback_init().
>
> Fortunately it is simple enough to use a dynamically allocated profile
> for fallback, allowing the use of lockdep.
>
> Fixes: 488f6682c832e ("block: blk-crypto-fallback for Inline Encryption")
> Signed-off-by: Sweet Tea Dorminy <sweettea-kernel@dorminy.me>
> ---
> block/blk-crypto-fallback.c | 27 +++++++++++++++------------
> 1 file changed, 15 insertions(+), 12 deletions(-)
Thanks for catching this! My bad for not running xfstests with the inlinecrypt
mount option recently. Can you use the correct Fixes tag?:
Fixes: 2fb48d88e77f ("blk-crypto: use dynamic lock class for blk_crypto_profile::lock")
Cc: stable@vger.kernel.org
Also, when describing the problem please try to be more specific than
"explodes". I just get a WARN_ON. Beyond that, presumably it just makes
lockdep not track blk_crypto_fallback_profile.lock?
> @@ -534,29 +534,32 @@ static int blk_crypto_fallback_init(void)
> {
> int i;
> int err;
> - struct blk_crypto_profile *profile = &blk_crypto_fallback_profile;
>
> if (blk_crypto_fallback_inited)
> return 0;
>
> get_random_bytes(blank_key, BLK_CRYPTO_MAX_KEY_SIZE);
>
> + blk_crypto_fallback_profile =
> + kzalloc(sizeof(*blk_crypto_fallback_profile), GFP_KERNEL);
> +
Maybe add a comment:
/* Dynamic allocation is needed because of lockdep_register_key(). */
Also, kzalloc() should be checked for failure.
Can you consider folding the following into your patch?
diff --git a/block/blk-crypto-fallback.c b/block/blk-crypto-fallback.c
index de94e9bffec6d..0764668a78157 100644
--- a/block/blk-crypto-fallback.c
+++ b/block/blk-crypto-fallback.c
@@ -540,17 +540,21 @@ static int blk_crypto_fallback_init(void)
get_random_bytes(blank_key, BLK_CRYPTO_MAX_KEY_SIZE);
- blk_crypto_fallback_profile =
- kzalloc(sizeof(*blk_crypto_fallback_profile), GFP_KERNEL);
-
err = bioset_init(&crypto_bio_split, 64, 0, 0);
if (err)
goto out;
+ /* Dynamic allocation is needed because of lockdep_register_key(). */
+ blk_crypto_fallback_profile =
+ kzalloc(sizeof(*blk_crypto_fallback_profile), GFP_KERNEL);
+ if (!blk_crypto_fallback_profile) {
+ err = -ENOMEM;
+ goto fail_free_bioset;
+ }
+
err = blk_crypto_profile_init(blk_crypto_fallback_profile,
blk_crypto_num_keyslots);
if (err)
- goto fail_free_bioset;
+ goto fail_free_profile;
err = -ENOMEM;
blk_crypto_fallback_profile->ll_ops = blk_crypto_fallback_ll_ops;
@@ -601,6 +605,8 @@ static int blk_crypto_fallback_init(void)
destroy_workqueue(blk_crypto_wq);
fail_destroy_profile:
blk_crypto_profile_destroy(blk_crypto_fallback_profile);
+fail_free_profile:
+ kfree(blk_crypto_fallback_profile);
fail_free_bioset:
bioset_exit(&crypto_bio_split);
out:
^ permalink raw reply related [flat|nested] 2+ messages in thread
end of thread, other threads:[~2023-08-09 6:55 UTC | newest]
Thread overview: 2+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2023-08-08 17:25 [PATCH] blk-crypto: dynamically allocate fallback profile Sweet Tea Dorminy
2023-08-09 6:55 ` Eric Biggers
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox