linux-aspeed.lists.ozlabs.org archive mirror
* [PATCH 00/11] crypto: aspeed/hash - Convert to partial block API
@ 2025-05-13  6:03 Herbert Xu
  2025-05-13  6:03 ` [PATCH 01/11] crypto: aspeed/hash - Remove purely software hmac implementation Herbert Xu
                   ` (10 more replies)
  0 siblings, 11 replies; 12+ messages in thread
From: Herbert Xu @ 2025-05-13  6:03 UTC (permalink / raw)
  To: Linux Crypto Mailing List; +Cc: Neal Liu, linux-aspeed

This series converts the aspeed driver to the partial block API,
removing hash length limits along the way and making the driver more
robust when DMA mapping fails.

Herbert Xu (11):
  crypto: aspeed/hash - Remove purely software hmac implementation
  crypto: aspeed/hash - Reorganise struct aspeed_sham_reqctx
  crypto: aspeed/hash - Use init_tfm instead of cra_init
  crypto: aspeed/hash - Provide rctx->buffer as argument to fill padding
  crypto: aspeed/hash - Move sham_final call into sham_update
  crypto: aspeed/hash - Move final padding into dma_prepare
  crypto: aspeed/hash - Remove sha_iv
  crypto: aspeed/hash - Use API partial block handling
  crypto: aspeed/hash - Add fallback
  crypto: aspeed/hash - Iterate on large hashes in dma_prepare
  crypto: aspeed/hash - Fix potential overflow in dma_prepare_sg

 drivers/crypto/aspeed/aspeed-hace-hash.c | 802 ++++++-----------------
 drivers/crypto/aspeed/aspeed-hace.h      |  28 +-
 2 files changed, 207 insertions(+), 623 deletions(-)

-- 
2.39.5




* [PATCH 01/11] crypto: aspeed/hash - Remove purely software hmac implementation
  2025-05-13  6:03 [PATCH 00/11] crypto: aspeed/hash - Convert to partial block API Herbert Xu
@ 2025-05-13  6:03 ` Herbert Xu
  2025-05-13  6:03 ` [PATCH 02/11] crypto: aspeed/hash - Reorganise struct aspeed_sham_reqctx Herbert Xu
                   ` (9 subsequent siblings)
  10 siblings, 0 replies; 12+ messages in thread
From: Herbert Xu @ 2025-05-13  6:03 UTC (permalink / raw)
  To: Linux Crypto Mailing List; +Cc: Neal Liu, linux-aspeed

The hmac implementation in aspeed simply duplicates what the new
ahash hmac template already does, namely constructing ipad and opad
by hand and then adding them to the hash before feeding it to the
engine.

Remove this code and just use the generic ahash hmac template.
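
For context, hardware-accelerated HMAC stays available through the
template; a minimal sketch of a kernel user (only generic crypto_ahash
API calls; key/keylen are the caller's, error handling elided):

	struct crypto_ahash *tfm;
	int err;

	/* The hmac template wraps any registered hash, including the
	 * hardware "aspeed-sha256" driver, so "hmac(sha256)" still
	 * resolves; ipad/opad handling now lives in the template. */
	tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_ahash_setkey(tfm, key, keylen);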

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 drivers/crypto/aspeed/aspeed-hace-hash.c | 335 -----------------------
 drivers/crypto/aspeed/aspeed-hace.h      |  10 -
 2 files changed, 345 deletions(-)

diff --git a/drivers/crypto/aspeed/aspeed-hace-hash.c b/drivers/crypto/aspeed/aspeed-hace-hash.c
index 0b6e49c06eff..0bcec2d40ed6 100644
--- a/drivers/crypto/aspeed/aspeed-hace-hash.c
+++ b/drivers/crypto/aspeed/aspeed-hace-hash.c
@@ -5,7 +5,6 @@
 
 #include "aspeed-hace.h"
 #include <crypto/engine.h>
-#include <crypto/hmac.h>
 #include <crypto/internal/hash.h>
 #include <crypto/scatterwalk.h>
 #include <crypto/sha1.h>
@@ -338,70 +337,6 @@ static int aspeed_hace_ahash_trigger(struct aspeed_hace_dev *hace_dev,
 	return -EINPROGRESS;
 }
 
-/*
- * HMAC resume aims to do the second pass produces
- * the final HMAC code derived from the inner hash
- * result and the outer key.
- */
-static int aspeed_ahash_hmac_resume(struct aspeed_hace_dev *hace_dev)
-{
-	struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
-	struct ahash_request *req = hash_engine->req;
-	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
-	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
-	struct aspeed_sha_hmac_ctx *bctx = tctx->base;
-	int rc = 0;
-
-	AHASH_DBG(hace_dev, "\n");
-
-	dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
-			 SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
-
-	dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr,
-			 rctx->block_size * 2, DMA_TO_DEVICE);
-
-	/* o key pad + hash sum 1 */
-	memcpy(rctx->buffer, bctx->opad, rctx->block_size);
-	memcpy(rctx->buffer + rctx->block_size, rctx->digest, rctx->digsize);
-
-	rctx->bufcnt = rctx->block_size + rctx->digsize;
-	rctx->digcnt[0] = rctx->block_size + rctx->digsize;
-
-	aspeed_ahash_fill_padding(hace_dev, rctx);
-	memcpy(rctx->digest, rctx->sha_iv, rctx->ivsize);
-
-	rctx->digest_dma_addr = dma_map_single(hace_dev->dev, rctx->digest,
-					       SHA512_DIGEST_SIZE,
-					       DMA_BIDIRECTIONAL);
-	if (dma_mapping_error(hace_dev->dev, rctx->digest_dma_addr)) {
-		dev_warn(hace_dev->dev, "dma_map() rctx digest error\n");
-		rc = -ENOMEM;
-		goto end;
-	}
-
-	rctx->buffer_dma_addr = dma_map_single(hace_dev->dev, rctx->buffer,
-					       rctx->block_size * 2,
-					       DMA_TO_DEVICE);
-	if (dma_mapping_error(hace_dev->dev, rctx->buffer_dma_addr)) {
-		dev_warn(hace_dev->dev, "dma_map() rctx buffer error\n");
-		rc = -ENOMEM;
-		goto free_rctx_digest;
-	}
-
-	hash_engine->src_dma = rctx->buffer_dma_addr;
-	hash_engine->src_length = rctx->bufcnt;
-	hash_engine->digest_dma = rctx->digest_dma_addr;
-
-	return aspeed_hace_ahash_trigger(hace_dev, aspeed_ahash_transfer);
-
-free_rctx_digest:
-	dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
-			 SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
-end:
-	return rc;
-}
-
 static int aspeed_ahash_req_final(struct aspeed_hace_dev *hace_dev)
 {
 	struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
@@ -437,10 +372,6 @@ static int aspeed_ahash_req_final(struct aspeed_hace_dev *hace_dev)
 	hash_engine->src_length = rctx->bufcnt;
 	hash_engine->digest_dma = rctx->digest_dma_addr;
 
-	if (rctx->flags & SHA_FLAGS_HMAC)
-		return aspeed_hace_ahash_trigger(hace_dev,
-						 aspeed_ahash_hmac_resume);
-
 	return aspeed_hace_ahash_trigger(hace_dev, aspeed_ahash_transfer);
 
 free_rctx_digest:
@@ -609,16 +540,6 @@ static int aspeed_sham_update(struct ahash_request *req)
 	return aspeed_hace_hash_handle_queue(hace_dev, req);
 }
 
-static int aspeed_sham_shash_digest(struct crypto_shash *tfm, u32 flags,
-				    const u8 *data, unsigned int len, u8 *out)
-{
-	SHASH_DESC_ON_STACK(shash, tfm);
-
-	shash->tfm = tfm;
-
-	return crypto_shash_digest(shash, data, len, out);
-}
-
 static int aspeed_sham_final(struct ahash_request *req)
 {
 	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
@@ -664,7 +585,6 @@ static int aspeed_sham_init(struct ahash_request *req)
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 	struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
 	struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
-	struct aspeed_sha_hmac_ctx *bctx = tctx->base;
 
 	AHASH_DBG(hace_dev, "%s: digest size:%d\n",
 		  crypto_tfm_alg_name(&tfm->base),
@@ -732,14 +652,6 @@ static int aspeed_sham_init(struct ahash_request *req)
 	rctx->digcnt[0] = 0;
 	rctx->digcnt[1] = 0;
 
-	/* HMAC init */
-	if (tctx->flags & SHA_FLAGS_HMAC) {
-		rctx->digcnt[0] = rctx->block_size;
-		rctx->bufcnt = rctx->block_size;
-		memcpy(rctx->buffer, bctx->ipad, rctx->block_size);
-		rctx->flags |= SHA_FLAGS_HMAC;
-	}
-
 	return 0;
 }
 
@@ -748,43 +660,6 @@ static int aspeed_sham_digest(struct ahash_request *req)
 	return aspeed_sham_init(req) ? : aspeed_sham_finup(req);
 }
 
-static int aspeed_sham_setkey(struct crypto_ahash *tfm, const u8 *key,
-			      unsigned int keylen)
-{
-	struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
-	struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
-	struct aspeed_sha_hmac_ctx *bctx = tctx->base;
-	int ds = crypto_shash_digestsize(bctx->shash);
-	int bs = crypto_shash_blocksize(bctx->shash);
-	int err = 0;
-	int i;
-
-	AHASH_DBG(hace_dev, "%s: keylen:%d\n", crypto_tfm_alg_name(&tfm->base),
-		  keylen);
-
-	if (keylen > bs) {
-		err = aspeed_sham_shash_digest(bctx->shash,
-					       crypto_shash_get_flags(bctx->shash),
-					       key, keylen, bctx->ipad);
-		if (err)
-			return err;
-		keylen = ds;
-
-	} else {
-		memcpy(bctx->ipad, key, keylen);
-	}
-
-	memset(bctx->ipad + keylen, 0, bs - keylen);
-	memcpy(bctx->opad, bctx->ipad, bs);
-
-	for (i = 0; i < bs; i++) {
-		bctx->ipad[i] ^= HMAC_IPAD_VALUE;
-		bctx->opad[i] ^= HMAC_OPAD_VALUE;
-	}
-
-	return err;
-}
-
 static int aspeed_sham_cra_init(struct crypto_tfm *tfm)
 {
 	struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
@@ -793,43 +668,13 @@ static int aspeed_sham_cra_init(struct crypto_tfm *tfm)
 
 	ast_alg = container_of(alg, struct aspeed_hace_alg, alg.ahash.base);
 	tctx->hace_dev = ast_alg->hace_dev;
-	tctx->flags = 0;
 
 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
 				 sizeof(struct aspeed_sham_reqctx));
 
-	if (ast_alg->alg_base) {
-		/* hmac related */
-		struct aspeed_sha_hmac_ctx *bctx = tctx->base;
-
-		tctx->flags |= SHA_FLAGS_HMAC;
-		bctx->shash = crypto_alloc_shash(ast_alg->alg_base, 0,
-						 CRYPTO_ALG_NEED_FALLBACK);
-		if (IS_ERR(bctx->shash)) {
-			dev_warn(ast_alg->hace_dev->dev,
-				 "base driver '%s' could not be loaded.\n",
-				 ast_alg->alg_base);
-			return PTR_ERR(bctx->shash);
-		}
-	}
-
 	return 0;
 }
 
-static void aspeed_sham_cra_exit(struct crypto_tfm *tfm)
-{
-	struct aspeed_sham_ctx *tctx = crypto_tfm_ctx(tfm);
-	struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
-
-	AHASH_DBG(hace_dev, "%s\n", crypto_tfm_alg_name(tfm));
-
-	if (tctx->flags & SHA_FLAGS_HMAC) {
-		struct aspeed_sha_hmac_ctx *bctx = tctx->base;
-
-		crypto_free_shash(bctx->shash);
-	}
-}
-
 static int aspeed_sham_export(struct ahash_request *req, void *out)
 {
 	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
@@ -873,7 +718,6 @@ static struct aspeed_hace_alg aspeed_ahash_algs[] = {
 					.cra_alignmask		= 0,
 					.cra_module		= THIS_MODULE,
 					.cra_init		= aspeed_sham_cra_init,
-					.cra_exit		= aspeed_sham_cra_exit,
 				}
 			}
 		},
@@ -905,7 +749,6 @@ static struct aspeed_hace_alg aspeed_ahash_algs[] = {
 					.cra_alignmask		= 0,
 					.cra_module		= THIS_MODULE,
 					.cra_init		= aspeed_sham_cra_init,
-					.cra_exit		= aspeed_sham_cra_exit,
 				}
 			}
 		},
@@ -937,112 +780,6 @@ static struct aspeed_hace_alg aspeed_ahash_algs[] = {
 					.cra_alignmask		= 0,
 					.cra_module		= THIS_MODULE,
 					.cra_init		= aspeed_sham_cra_init,
-					.cra_exit		= aspeed_sham_cra_exit,
-				}
-			}
-		},
-		.alg.ahash.op = {
-			.do_one_request = aspeed_ahash_do_one,
-		},
-	},
-	{
-		.alg_base = "sha1",
-		.alg.ahash.base = {
-			.init	= aspeed_sham_init,
-			.update	= aspeed_sham_update,
-			.final	= aspeed_sham_final,
-			.finup	= aspeed_sham_finup,
-			.digest	= aspeed_sham_digest,
-			.setkey	= aspeed_sham_setkey,
-			.export	= aspeed_sham_export,
-			.import	= aspeed_sham_import,
-			.halg = {
-				.digestsize = SHA1_DIGEST_SIZE,
-				.statesize = sizeof(struct aspeed_sham_reqctx),
-				.base = {
-					.cra_name		= "hmac(sha1)",
-					.cra_driver_name	= "aspeed-hmac-sha1",
-					.cra_priority		= 300,
-					.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
-								  CRYPTO_ALG_ASYNC |
-								  CRYPTO_ALG_KERN_DRIVER_ONLY,
-					.cra_blocksize		= SHA1_BLOCK_SIZE,
-					.cra_ctxsize		= sizeof(struct aspeed_sham_ctx) +
-								sizeof(struct aspeed_sha_hmac_ctx),
-					.cra_alignmask		= 0,
-					.cra_module		= THIS_MODULE,
-					.cra_init		= aspeed_sham_cra_init,
-					.cra_exit		= aspeed_sham_cra_exit,
-				}
-			}
-		},
-		.alg.ahash.op = {
-			.do_one_request = aspeed_ahash_do_one,
-		},
-	},
-	{
-		.alg_base = "sha224",
-		.alg.ahash.base = {
-			.init	= aspeed_sham_init,
-			.update	= aspeed_sham_update,
-			.final	= aspeed_sham_final,
-			.finup	= aspeed_sham_finup,
-			.digest	= aspeed_sham_digest,
-			.setkey	= aspeed_sham_setkey,
-			.export	= aspeed_sham_export,
-			.import	= aspeed_sham_import,
-			.halg = {
-				.digestsize = SHA224_DIGEST_SIZE,
-				.statesize = sizeof(struct aspeed_sham_reqctx),
-				.base = {
-					.cra_name		= "hmac(sha224)",
-					.cra_driver_name	= "aspeed-hmac-sha224",
-					.cra_priority		= 300,
-					.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
-								  CRYPTO_ALG_ASYNC |
-								  CRYPTO_ALG_KERN_DRIVER_ONLY,
-					.cra_blocksize		= SHA224_BLOCK_SIZE,
-					.cra_ctxsize		= sizeof(struct aspeed_sham_ctx) +
-								sizeof(struct aspeed_sha_hmac_ctx),
-					.cra_alignmask		= 0,
-					.cra_module		= THIS_MODULE,
-					.cra_init		= aspeed_sham_cra_init,
-					.cra_exit		= aspeed_sham_cra_exit,
-				}
-			}
-		},
-		.alg.ahash.op = {
-			.do_one_request = aspeed_ahash_do_one,
-		},
-	},
-	{
-		.alg_base = "sha256",
-		.alg.ahash.base = {
-			.init	= aspeed_sham_init,
-			.update	= aspeed_sham_update,
-			.final	= aspeed_sham_final,
-			.finup	= aspeed_sham_finup,
-			.digest	= aspeed_sham_digest,
-			.setkey	= aspeed_sham_setkey,
-			.export	= aspeed_sham_export,
-			.import	= aspeed_sham_import,
-			.halg = {
-				.digestsize = SHA256_DIGEST_SIZE,
-				.statesize = sizeof(struct aspeed_sham_reqctx),
-				.base = {
-					.cra_name		= "hmac(sha256)",
-					.cra_driver_name	= "aspeed-hmac-sha256",
-					.cra_priority		= 300,
-					.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
-								  CRYPTO_ALG_ASYNC |
-								  CRYPTO_ALG_KERN_DRIVER_ONLY,
-					.cra_blocksize		= SHA256_BLOCK_SIZE,
-					.cra_ctxsize		= sizeof(struct aspeed_sham_ctx) +
-								sizeof(struct aspeed_sha_hmac_ctx),
-					.cra_alignmask		= 0,
-					.cra_module		= THIS_MODULE,
-					.cra_init		= aspeed_sham_cra_init,
-					.cra_exit		= aspeed_sham_cra_exit,
 				}
 			}
 		},
@@ -1077,7 +814,6 @@ static struct aspeed_hace_alg aspeed_ahash_algs_g6[] = {
 					.cra_alignmask		= 0,
 					.cra_module		= THIS_MODULE,
 					.cra_init		= aspeed_sham_cra_init,
-					.cra_exit		= aspeed_sham_cra_exit,
 				}
 			}
 		},
@@ -1109,77 +845,6 @@ static struct aspeed_hace_alg aspeed_ahash_algs_g6[] = {
 					.cra_alignmask		= 0,
 					.cra_module		= THIS_MODULE,
 					.cra_init		= aspeed_sham_cra_init,
-					.cra_exit		= aspeed_sham_cra_exit,
-				}
-			}
-		},
-		.alg.ahash.op = {
-			.do_one_request = aspeed_ahash_do_one,
-		},
-	},
-	{
-		.alg_base = "sha384",
-		.alg.ahash.base = {
-			.init	= aspeed_sham_init,
-			.update	= aspeed_sham_update,
-			.final	= aspeed_sham_final,
-			.finup	= aspeed_sham_finup,
-			.digest	= aspeed_sham_digest,
-			.setkey	= aspeed_sham_setkey,
-			.export	= aspeed_sham_export,
-			.import	= aspeed_sham_import,
-			.halg = {
-				.digestsize = SHA384_DIGEST_SIZE,
-				.statesize = sizeof(struct aspeed_sham_reqctx),
-				.base = {
-					.cra_name		= "hmac(sha384)",
-					.cra_driver_name	= "aspeed-hmac-sha384",
-					.cra_priority		= 300,
-					.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
-								  CRYPTO_ALG_ASYNC |
-								  CRYPTO_ALG_KERN_DRIVER_ONLY,
-					.cra_blocksize		= SHA384_BLOCK_SIZE,
-					.cra_ctxsize		= sizeof(struct aspeed_sham_ctx) +
-								sizeof(struct aspeed_sha_hmac_ctx),
-					.cra_alignmask		= 0,
-					.cra_module		= THIS_MODULE,
-					.cra_init		= aspeed_sham_cra_init,
-					.cra_exit		= aspeed_sham_cra_exit,
-				}
-			}
-		},
-		.alg.ahash.op = {
-			.do_one_request = aspeed_ahash_do_one,
-		},
-	},
-	{
-		.alg_base = "sha512",
-		.alg.ahash.base = {
-			.init	= aspeed_sham_init,
-			.update	= aspeed_sham_update,
-			.final	= aspeed_sham_final,
-			.finup	= aspeed_sham_finup,
-			.digest	= aspeed_sham_digest,
-			.setkey	= aspeed_sham_setkey,
-			.export	= aspeed_sham_export,
-			.import	= aspeed_sham_import,
-			.halg = {
-				.digestsize = SHA512_DIGEST_SIZE,
-				.statesize = sizeof(struct aspeed_sham_reqctx),
-				.base = {
-					.cra_name		= "hmac(sha512)",
-					.cra_driver_name	= "aspeed-hmac-sha512",
-					.cra_priority		= 300,
-					.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
-								  CRYPTO_ALG_ASYNC |
-								  CRYPTO_ALG_KERN_DRIVER_ONLY,
-					.cra_blocksize		= SHA512_BLOCK_SIZE,
-					.cra_ctxsize		= sizeof(struct aspeed_sham_ctx) +
-								sizeof(struct aspeed_sha_hmac_ctx),
-					.cra_alignmask		= 0,
-					.cra_module		= THIS_MODULE,
-					.cra_init		= aspeed_sham_cra_init,
-					.cra_exit		= aspeed_sham_cra_exit,
 				}
 			}
 		},
diff --git a/drivers/crypto/aspeed/aspeed-hace.h b/drivers/crypto/aspeed/aspeed-hace.h
index 68f70e01fccb..7ff1798bc198 100644
--- a/drivers/crypto/aspeed/aspeed-hace.h
+++ b/drivers/crypto/aspeed/aspeed-hace.h
@@ -119,7 +119,6 @@
 #define SHA_FLAGS_SHA512		BIT(4)
 #define SHA_FLAGS_SHA512_224		BIT(5)
 #define SHA_FLAGS_SHA512_256		BIT(6)
-#define SHA_FLAGS_HMAC			BIT(8)
 #define SHA_FLAGS_FINUP			BIT(9)
 #define SHA_FLAGS_MASK			(0xff)
 
@@ -161,17 +160,8 @@ struct aspeed_engine_hash {
 	aspeed_hace_fn_t		dma_prepare;
 };
 
-struct aspeed_sha_hmac_ctx {
-	struct crypto_shash *shash;
-	u8 ipad[SHA512_BLOCK_SIZE];
-	u8 opad[SHA512_BLOCK_SIZE];
-};
-
 struct aspeed_sham_ctx {
 	struct aspeed_hace_dev		*hace_dev;
-	unsigned long			flags;	/* hmac flag */
-
-	struct aspeed_sha_hmac_ctx	base[];
 };
 
 struct aspeed_sham_reqctx {
-- 
2.39.5




* [PATCH 02/11] crypto: aspeed/hash - Reorganise struct aspeed_sham_reqctx
  2025-05-13  6:03 [PATCH 00/11] crypto: aspeed/hash - Convert to partial block API Herbert Xu
  2025-05-13  6:03 ` [PATCH 01/11] crypto: aspeed/hash - Remove purely software hmac implementation Herbert Xu
@ 2025-05-13  6:03 ` Herbert Xu
  2025-05-13  6:03 ` [PATCH 03/11] crypto: aspeed/hash - Use init_tfm instead of cra_init Herbert Xu
                   ` (8 subsequent siblings)
  10 siblings, 0 replies; 12+ messages in thread
From: Herbert Xu @ 2025-05-13  6:03 UTC (permalink / raw)
  To: Linux Crypto Mailing List; +Cc: Neal Liu, linux-aspeed

Move the from-device DMA buffer to the front of the structure.

Sort the rest by size and alignment.

Keep the partial block buffer at the end.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 drivers/crypto/aspeed/aspeed-hace.h | 13 +++++++++----
 1 file changed, 9 insertions(+), 4 deletions(-)

diff --git a/drivers/crypto/aspeed/aspeed-hace.h b/drivers/crypto/aspeed/aspeed-hace.h
index 7ff1798bc198..a34677f10966 100644
--- a/drivers/crypto/aspeed/aspeed-hace.h
+++ b/drivers/crypto/aspeed/aspeed-hace.h
@@ -165,6 +165,12 @@ struct aspeed_sham_ctx {
 };
 
 struct aspeed_sham_reqctx {
+	/* DMA buffer written by hardware */
+	u8			digest[SHA512_DIGEST_SIZE] __aligned(64);
+
+	/* Software state sorted by size. */
+	u64			digcnt[2];
+
 	unsigned long		flags;		/* final update flag should no use*/
 	unsigned long		op;		/* final or update */
 	u32			cmd;		/* trigger cmd */
@@ -181,14 +187,13 @@ struct aspeed_sham_reqctx {
 	const __be32		*sha_iv;
 
 	/* remain data buffer */
-	u8			buffer[SHA512_BLOCK_SIZE * 2];
 	dma_addr_t		buffer_dma_addr;
 	size_t			bufcnt;		/* buffer counter */
 
-	/* output buffer */
-	u8			digest[SHA512_DIGEST_SIZE] __aligned(64);
 	dma_addr_t		digest_dma_addr;
-	u64			digcnt[2];
+
+	/* This is DMA too but read-only for hardware. */
+	u8			buffer[SHA512_BLOCK_SIZE * 2];
 };
 
 struct aspeed_engine_crypto {
-- 
2.39.5




* [PATCH 03/11] crypto: aspeed/hash - Use init_tfm instead of cra_init
  2025-05-13  6:03 [PATCH 00/11] crypto: aspeed/hash - Convert to partial block API Herbert Xu
  2025-05-13  6:03 ` [PATCH 01/11] crypto: aspeed/hash - Remove purely software hmac implementation Herbert Xu
  2025-05-13  6:03 ` [PATCH 02/11] crypto: aspeed/hash - Reorganise struct aspeed_sham_reqctx Herbert Xu
@ 2025-05-13  6:03 ` Herbert Xu
  2025-05-13  6:03 ` [PATCH 04/11] crypto: aspeed/hash - Provide rctx->buffer as argument to fill padding Herbert Xu
                   ` (7 subsequent siblings)
  10 siblings, 0 replies; 12+ messages in thread
From: Herbert Xu @ 2025-05-13  6:03 UTC (permalink / raw)
  To: Linux Crypto Mailing List; +Cc: Neal Liu, linux-aspeed

Use the init_tfm interface instead of cra_init.

Also get rid of the dynamic reqsize.
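
The shape of the change, as a sketch (signatures are from the ahash
API; the foo_ names are placeholders, not driver code):

	/* Before: generic hook, needs casting and a runtime reqsize. */
	static int foo_cra_init(struct crypto_tfm *tfm)
	{
		crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
					 sizeof(struct foo_reqctx));
		return 0;
	}

	/* After: typed hook; the request size is declared statically
	 * via .cra_reqsize, so init_tfm only sets up the context. */
	static int foo_init_tfm(struct crypto_ahash *tfm)
	{
		return 0;
	}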

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 drivers/crypto/aspeed/aspeed-hace-hash.c | 24 +++++++++++++-----------
 1 file changed, 13 insertions(+), 11 deletions(-)

diff --git a/drivers/crypto/aspeed/aspeed-hace-hash.c b/drivers/crypto/aspeed/aspeed-hace-hash.c
index 0bcec2d40ed6..4a479a204331 100644
--- a/drivers/crypto/aspeed/aspeed-hace-hash.c
+++ b/drivers/crypto/aspeed/aspeed-hace-hash.c
@@ -660,18 +660,15 @@ static int aspeed_sham_digest(struct ahash_request *req)
 	return aspeed_sham_init(req) ? : aspeed_sham_finup(req);
 }
 
-static int aspeed_sham_cra_init(struct crypto_tfm *tfm)
+static int aspeed_sham_cra_init(struct crypto_ahash *tfm)
 {
-	struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
-	struct aspeed_sham_ctx *tctx = crypto_tfm_ctx(tfm);
+	struct ahash_alg *alg = crypto_ahash_alg(tfm);
+	struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
 	struct aspeed_hace_alg *ast_alg;
 
 	ast_alg = container_of(alg, struct aspeed_hace_alg, alg.ahash.base);
 	tctx->hace_dev = ast_alg->hace_dev;
 
-	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
-				 sizeof(struct aspeed_sham_reqctx));
-
 	return 0;
 }
 
@@ -703,6 +700,7 @@ static struct aspeed_hace_alg aspeed_ahash_algs[] = {
 			.digest	= aspeed_sham_digest,
 			.export	= aspeed_sham_export,
 			.import	= aspeed_sham_import,
+			.init_tfm = aspeed_sham_cra_init,
 			.halg = {
 				.digestsize = SHA1_DIGEST_SIZE,
 				.statesize = sizeof(struct aspeed_sham_reqctx),
@@ -715,9 +713,9 @@ static struct aspeed_hace_alg aspeed_ahash_algs[] = {
 								  CRYPTO_ALG_KERN_DRIVER_ONLY,
 					.cra_blocksize		= SHA1_BLOCK_SIZE,
 					.cra_ctxsize		= sizeof(struct aspeed_sham_ctx),
+					.cra_reqsize		= sizeof(struct aspeed_sham_reqctx),
 					.cra_alignmask		= 0,
 					.cra_module		= THIS_MODULE,
-					.cra_init		= aspeed_sham_cra_init,
 				}
 			}
 		},
@@ -734,6 +732,7 @@ static struct aspeed_hace_alg aspeed_ahash_algs[] = {
 			.digest	= aspeed_sham_digest,
 			.export	= aspeed_sham_export,
 			.import	= aspeed_sham_import,
+			.init_tfm = aspeed_sham_cra_init,
 			.halg = {
 				.digestsize = SHA256_DIGEST_SIZE,
 				.statesize = sizeof(struct aspeed_sham_reqctx),
@@ -746,9 +745,9 @@ static struct aspeed_hace_alg aspeed_ahash_algs[] = {
 								  CRYPTO_ALG_KERN_DRIVER_ONLY,
 					.cra_blocksize		= SHA256_BLOCK_SIZE,
 					.cra_ctxsize		= sizeof(struct aspeed_sham_ctx),
+					.cra_reqsize		= sizeof(struct aspeed_sham_reqctx),
 					.cra_alignmask		= 0,
 					.cra_module		= THIS_MODULE,
-					.cra_init		= aspeed_sham_cra_init,
 				}
 			}
 		},
@@ -765,6 +764,7 @@ static struct aspeed_hace_alg aspeed_ahash_algs[] = {
 			.digest	= aspeed_sham_digest,
 			.export	= aspeed_sham_export,
 			.import	= aspeed_sham_import,
+			.init_tfm = aspeed_sham_cra_init,
 			.halg = {
 				.digestsize = SHA224_DIGEST_SIZE,
 				.statesize = sizeof(struct aspeed_sham_reqctx),
@@ -777,9 +777,9 @@ static struct aspeed_hace_alg aspeed_ahash_algs[] = {
 								  CRYPTO_ALG_KERN_DRIVER_ONLY,
 					.cra_blocksize		= SHA224_BLOCK_SIZE,
 					.cra_ctxsize		= sizeof(struct aspeed_sham_ctx),
+					.cra_reqsize		= sizeof(struct aspeed_sham_reqctx),
 					.cra_alignmask		= 0,
 					.cra_module		= THIS_MODULE,
-					.cra_init		= aspeed_sham_cra_init,
 				}
 			}
 		},
@@ -799,6 +799,7 @@ static struct aspeed_hace_alg aspeed_ahash_algs_g6[] = {
 			.digest	= aspeed_sham_digest,
 			.export	= aspeed_sham_export,
 			.import	= aspeed_sham_import,
+			.init_tfm = aspeed_sham_cra_init,
 			.halg = {
 				.digestsize = SHA384_DIGEST_SIZE,
 				.statesize = sizeof(struct aspeed_sham_reqctx),
@@ -811,9 +812,9 @@ static struct aspeed_hace_alg aspeed_ahash_algs_g6[] = {
 								  CRYPTO_ALG_KERN_DRIVER_ONLY,
 					.cra_blocksize		= SHA384_BLOCK_SIZE,
 					.cra_ctxsize		= sizeof(struct aspeed_sham_ctx),
+					.cra_reqsize		= sizeof(struct aspeed_sham_reqctx),
 					.cra_alignmask		= 0,
 					.cra_module		= THIS_MODULE,
-					.cra_init		= aspeed_sham_cra_init,
 				}
 			}
 		},
@@ -830,6 +831,7 @@ static struct aspeed_hace_alg aspeed_ahash_algs_g6[] = {
 			.digest	= aspeed_sham_digest,
 			.export	= aspeed_sham_export,
 			.import	= aspeed_sham_import,
+			.init_tfm = aspeed_sham_cra_init,
 			.halg = {
 				.digestsize = SHA512_DIGEST_SIZE,
 				.statesize = sizeof(struct aspeed_sham_reqctx),
@@ -842,9 +844,9 @@ static struct aspeed_hace_alg aspeed_ahash_algs_g6[] = {
 								  CRYPTO_ALG_KERN_DRIVER_ONLY,
 					.cra_blocksize		= SHA512_BLOCK_SIZE,
 					.cra_ctxsize		= sizeof(struct aspeed_sham_ctx),
+					.cra_reqsize		= sizeof(struct aspeed_sham_reqctx),
 					.cra_alignmask		= 0,
 					.cra_module		= THIS_MODULE,
-					.cra_init		= aspeed_sham_cra_init,
 				}
 			}
 		},
-- 
2.39.5




* [PATCH 04/11] crypto: aspeed/hash - Provide rctx->buffer as argument to fill padding
  2025-05-13  6:03 [PATCH 00/11] crypto: aspeed/hash - Convert to partial block API Herbert Xu
                   ` (2 preceding siblings ...)
  2025-05-13  6:03 ` [PATCH 03/11] crypto: aspeed/hash - Use init_tfm instead of cra_init Herbert Xu
@ 2025-05-13  6:03 ` Herbert Xu
  2025-05-13  6:03 ` [PATCH 05/11] crypto: aspeed/hash - Move sham_final call into sham_update Herbert Xu
                   ` (6 subsequent siblings)
  10 siblings, 0 replies; 12+ messages in thread
From: Herbert Xu @ 2025-05-13  6:03 UTC (permalink / raw)
  To: Linux Crypto Mailing List; +Cc: Neal Liu, linux-aspeed

Instead of always writing the padding to rctx->buffer, take the
destination buffer as an argument and return the number of padding
bytes written so the caller can do its own accounting.
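
A worked instance of the padding rule described in the comment below
(SHA-256 case, 64-byte blocks; the numbers are illustrative):

	/* 70 bytes hashed so far:
	 *   index   = 70 % 64     = 6
	 *   padlen  = 56 - 6      = 50  (0x80 then 49 zero bytes)
	 *   bitslen = 8           (big-endian bit count, 70 * 8 = 560)
	 *   total   = 70 + 50 + 8 = 128, i.e. two full 64-byte blocks.
	 */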

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 drivers/crypto/aspeed/aspeed-hace-hash.c | 27 ++++++++++++------------
 1 file changed, 13 insertions(+), 14 deletions(-)

diff --git a/drivers/crypto/aspeed/aspeed-hace-hash.c b/drivers/crypto/aspeed/aspeed-hace-hash.c
index 4a479a204331..9f776ec8f5ec 100644
--- a/drivers/crypto/aspeed/aspeed-hace-hash.c
+++ b/drivers/crypto/aspeed/aspeed-hace-hash.c
@@ -73,10 +73,10 @@ static const __be64 sha512_iv[8] = {
  *  - if message length < 112 bytes then padlen = 112 - message length
  *  - else padlen = 128 + 112 - message length
  */
-static void aspeed_ahash_fill_padding(struct aspeed_hace_dev *hace_dev,
-				      struct aspeed_sham_reqctx *rctx)
+static int aspeed_ahash_fill_padding(struct aspeed_hace_dev *hace_dev,
+				     struct aspeed_sham_reqctx *rctx, u8 *buf)
 {
-	unsigned int index, padlen;
+	unsigned int index, padlen, bitslen;
 	__be64 bits[2];
 
 	AHASH_DBG(hace_dev, "rctx flags:0x%x\n", (u32)rctx->flags);
@@ -86,25 +86,23 @@ static void aspeed_ahash_fill_padding(struct aspeed_hace_dev *hace_dev,
 	case SHA_FLAGS_SHA224:
 	case SHA_FLAGS_SHA256:
 		bits[0] = cpu_to_be64(rctx->digcnt[0] << 3);
-		index = rctx->bufcnt & 0x3f;
+		index = rctx->digcnt[0] & 0x3f;
 		padlen = (index < 56) ? (56 - index) : ((64 + 56) - index);
-		*(rctx->buffer + rctx->bufcnt) = 0x80;
-		memset(rctx->buffer + rctx->bufcnt + 1, 0, padlen - 1);
-		memcpy(rctx->buffer + rctx->bufcnt + padlen, bits, 8);
-		rctx->bufcnt += padlen + 8;
+		bitslen = 8;
 		break;
 	default:
 		bits[1] = cpu_to_be64(rctx->digcnt[0] << 3);
 		bits[0] = cpu_to_be64(rctx->digcnt[1] << 3 |
 				      rctx->digcnt[0] >> 61);
-		index = rctx->bufcnt & 0x7f;
+		index = rctx->digcnt[0] & 0x7f;
 		padlen = (index < 112) ? (112 - index) : ((128 + 112) - index);
-		*(rctx->buffer + rctx->bufcnt) = 0x80;
-		memset(rctx->buffer + rctx->bufcnt + 1, 0, padlen - 1);
-		memcpy(rctx->buffer + rctx->bufcnt + padlen, bits, 16);
-		rctx->bufcnt += padlen + 16;
+		bitslen = 16;
 		break;
 	}
+	buf[0] = 0x80;
+	memset(buf + 1, 0, padlen - 1);
+	memcpy(buf + padlen, bits, bitslen);
+	return padlen + bitslen;
 }
 
 /*
@@ -346,7 +344,8 @@ static int aspeed_ahash_req_final(struct aspeed_hace_dev *hace_dev)
 
 	AHASH_DBG(hace_dev, "\n");
 
-	aspeed_ahash_fill_padding(hace_dev, rctx);
+	rctx->bufcnt += aspeed_ahash_fill_padding(hace_dev, rctx,
+						  rctx->buffer + rctx->bufcnt);
 
 	rctx->digest_dma_addr = dma_map_single(hace_dev->dev,
 					       rctx->digest,
-- 
2.39.5




* [PATCH 05/11] crypto: aspeed/hash - Move sham_final call into sham_update
  2025-05-13  6:03 [PATCH 00/11] crypto: aspeed/hash - Convert to partial block API Herbert Xu
                   ` (3 preceding siblings ...)
  2025-05-13  6:03 ` [PATCH 04/11] crypto: aspeed/hash - Provide rctx->buffer as argument to fill padding Herbert Xu
@ 2025-05-13  6:03 ` Herbert Xu
  2025-05-13  6:03 ` [PATCH 06/11] crypto: aspeed/hash - Move final padding into dma_prepare Herbert Xu
                   ` (5 subsequent siblings)
  10 siblings, 0 replies; 12+ messages in thread
From: Herbert Xu @ 2025-05-13  6:03 UTC (permalink / raw)
  To: Linux Crypto Mailing List; +Cc: Neal Liu, linux-aspeed

The only time sham_final needs to be called from sham_finup is when
the finup request fits entirely within the partial block.  Move this
special handling into sham_update.

The old comment about releasing resources was nonsense.  The Crypto
API does not mandate the use of final, so the user could always go
away after an update and never come back.  Therefore the driver
must not hold any resources after an update call.
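
The call sequences the ahash API permits make the point concrete
(illustrative summary of the paragraph above):

	/* init -> update -> ... -> update -> final    classic use
	 * init -> update -> ... -> finup              combined finish
	 * init -> update                              caller stops here
	 *
	 * The last sequence is legal, which is why update() must not
	 * leave DMA mappings or other resources pinned. */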

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 drivers/crypto/aspeed/aspeed-hace-hash.c | 44 +++++++++---------------
 1 file changed, 17 insertions(+), 27 deletions(-)

diff --git a/drivers/crypto/aspeed/aspeed-hace-hash.c b/drivers/crypto/aspeed/aspeed-hace-hash.c
index 9f776ec8f5ec..40363159489e 100644
--- a/drivers/crypto/aspeed/aspeed-hace-hash.c
+++ b/drivers/crypto/aspeed/aspeed-hace-hash.c
@@ -508,6 +508,20 @@ static int aspeed_ahash_do_one(struct crypto_engine *engine, void *areq)
 	return aspeed_ahash_do_request(engine, areq);
 }
 
+static int aspeed_sham_final(struct ahash_request *req)
+{
+	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
+	struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
+
+	AHASH_DBG(hace_dev, "req->nbytes:%d, rctx->total:%d\n",
+		  req->nbytes, rctx->total);
+	rctx->op = SHA_OP_FINAL;
+
+	return aspeed_hace_hash_handle_queue(hace_dev, req);
+}
+
 static int aspeed_sham_update(struct ahash_request *req)
 {
 	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
@@ -533,49 +547,25 @@ static int aspeed_sham_update(struct ahash_request *req)
 					 rctx->total, 0);
 		rctx->bufcnt += rctx->total;
 
-		return 0;
+		return rctx->flags & SHA_FLAGS_FINUP ?
+		       aspeed_sham_final(req) : 0;
 	}
 
 	return aspeed_hace_hash_handle_queue(hace_dev, req);
 }
 
-static int aspeed_sham_final(struct ahash_request *req)
-{
-	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
-	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
-	struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
-
-	AHASH_DBG(hace_dev, "req->nbytes:%d, rctx->total:%d\n",
-		  req->nbytes, rctx->total);
-	rctx->op = SHA_OP_FINAL;
-
-	return aspeed_hace_hash_handle_queue(hace_dev, req);
-}
-
 static int aspeed_sham_finup(struct ahash_request *req)
 {
 	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 	struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
 	struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
-	int rc1, rc2;
 
 	AHASH_DBG(hace_dev, "req->nbytes: %d\n", req->nbytes);
 
 	rctx->flags |= SHA_FLAGS_FINUP;
 
-	rc1 = aspeed_sham_update(req);
-	if (rc1 == -EINPROGRESS || rc1 == -EBUSY)
-		return rc1;
-
-	/*
-	 * final() has to be always called to cleanup resources
-	 * even if update() failed, except EINPROGRESS
-	 */
-	rc2 = aspeed_sham_final(req);
-
-	return rc1 ? : rc2;
+	return aspeed_sham_update(req);
 }
 
 static int aspeed_sham_init(struct ahash_request *req)
-- 
2.39.5




* [PATCH 06/11] crypto: aspeed/hash - Move final padding into dma_prepare
  2025-05-13  6:03 [PATCH 00/11] crypto: aspeed/hash - Convert to partial block API Herbert Xu
                   ` (4 preceding siblings ...)
  2025-05-13  6:03 ` [PATCH 05/11] crypto: aspeed/hash - Move sham_final call into sham_update Herbert Xu
@ 2025-05-13  6:03 ` Herbert Xu
  2025-05-13  6:04 ` [PATCH 07/11] crypto: aspeed/hash - Remove sha_iv Herbert Xu
                   ` (4 subsequent siblings)
  10 siblings, 0 replies; 12+ messages in thread
From: Herbert Xu @ 2025-05-13  6:03 UTC (permalink / raw)
  To: Linux Crypto Mailing List; +Cc: Neal Liu, linux-aspeed

Rather than processing a final as two separate updates, combine
them into one for the linear dma_prepare case.

This slightly reduces the maximum length that can be hashed in one
pass, since the padding must now fit into the same DMA buffer, but
a later patch fixes that up by iterating on the hash if necessary.
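
The DMA buffer check added below uses round_up(length, bs) + bs as
the worst-case padded length; a worked bound (SHA-256, bs = 64):

	/* length = 60: index = 60, so padlen = (64 + 56) - 60 = 60,
	 * plus an 8-byte bit count:
	 *   60 + 60 + 8 = 128 = round_up(60, 64) + 64
	 * so a padded message never exceeds round_up(length, bs) + bs. */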

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 drivers/crypto/aspeed/aspeed-hace-hash.c | 29 ++++++++++++++----------
 1 file changed, 17 insertions(+), 12 deletions(-)

diff --git a/drivers/crypto/aspeed/aspeed-hace-hash.c b/drivers/crypto/aspeed/aspeed-hace-hash.c
index 40363159489e..ceea2e2f5658 100644
--- a/drivers/crypto/aspeed/aspeed-hace-hash.c
+++ b/drivers/crypto/aspeed/aspeed-hace-hash.c
@@ -114,29 +114,34 @@ static int aspeed_ahash_dma_prepare(struct aspeed_hace_dev *hace_dev)
 	struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
 	struct ahash_request *req = hash_engine->req;
 	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
-	int length, remain;
+	bool final = rctx->flags & SHA_FLAGS_FINUP;
+	unsigned int length, remain;
 
 	length = rctx->total + rctx->bufcnt;
-	remain = length % rctx->block_size;
+	remain = final ? 0 : length % rctx->block_size;
 
 	AHASH_DBG(hace_dev, "length:0x%x, remain:0x%x\n", length, remain);
 
 	if (rctx->bufcnt)
 		memcpy(hash_engine->ahash_src_addr, rctx->buffer, rctx->bufcnt);
 
-	if (rctx->total + rctx->bufcnt < ASPEED_CRYPTO_SRC_DMA_BUF_LEN) {
-		scatterwalk_map_and_copy(hash_engine->ahash_src_addr +
-					 rctx->bufcnt, rctx->src_sg,
-					 rctx->offset, rctx->total - remain, 0);
-		rctx->offset += rctx->total - remain;
-
-	} else {
+	if ((final ? round_up(length, rctx->block_size) + rctx->block_size :
+		     length) > ASPEED_CRYPTO_SRC_DMA_BUF_LEN) {
 		dev_warn(hace_dev->dev, "Hash data length is too large\n");
 		return -EINVAL;
 	}
 
-	scatterwalk_map_and_copy(rctx->buffer, rctx->src_sg,
-				 rctx->offset, remain, 0);
+	scatterwalk_map_and_copy(hash_engine->ahash_src_addr +
+				 rctx->bufcnt, rctx->src_sg,
+				 rctx->offset, rctx->total - remain, 0);
+	rctx->offset += rctx->total - remain;
+
+	if (final)
+		length += aspeed_ahash_fill_padding(
+			hace_dev, rctx, hash_engine->ahash_src_addr + length);
+	else
+		scatterwalk_map_and_copy(rctx->buffer, rctx->src_sg,
+					 rctx->offset, remain, 0);
 
 	rctx->bufcnt = remain;
 	rctx->digest_dma_addr = dma_map_single(hace_dev->dev, rctx->digest,
@@ -423,7 +428,7 @@ static int aspeed_ahash_update_resume(struct aspeed_hace_dev *hace_dev)
 			 SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
 
 	if (rctx->flags & SHA_FLAGS_FINUP)
-		return aspeed_ahash_req_final(hace_dev);
+		memcpy(req->result, rctx->digest, rctx->digsize);
 
 	return aspeed_ahash_complete(hace_dev);
 }
-- 
2.39.5




* [PATCH 07/11] crypto: aspeed/hash - Remove sha_iv
  2025-05-13  6:03 [PATCH 00/11] crypto: aspeed/hash - Convert to partial block API Herbert Xu
                   ` (5 preceding siblings ...)
  2025-05-13  6:03 ` [PATCH 06/11] crypto: aspeed/hash - Move final padding into dma_prepare Herbert Xu
@ 2025-05-13  6:04 ` Herbert Xu
  2025-05-13  6:04 ` [PATCH 08/11] crypto: aspeed/hash - Use API partial block handling Herbert Xu
                   ` (3 subsequent siblings)
  10 siblings, 0 replies; 12+ messages in thread
From: Herbert Xu @ 2025-05-13  6:04 UTC (permalink / raw)
  To: Linux Crypto Mailing List; +Cc: Neal Liu, linux-aspeed

Remove the unused sha_iv field from the request context.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 drivers/crypto/aspeed/aspeed-hace-hash.c | 5 -----
 drivers/crypto/aspeed/aspeed-hace.h      | 1 -
 2 files changed, 6 deletions(-)

diff --git a/drivers/crypto/aspeed/aspeed-hace-hash.c b/drivers/crypto/aspeed/aspeed-hace-hash.c
index ceea2e2f5658..d31d7e1c9af2 100644
--- a/drivers/crypto/aspeed/aspeed-hace-hash.c
+++ b/drivers/crypto/aspeed/aspeed-hace-hash.c
@@ -593,7 +593,6 @@ static int aspeed_sham_init(struct ahash_request *req)
 		rctx->flags |= SHA_FLAGS_SHA1;
 		rctx->digsize = SHA1_DIGEST_SIZE;
 		rctx->block_size = SHA1_BLOCK_SIZE;
-		rctx->sha_iv = sha1_iv;
 		rctx->ivsize = 32;
 		memcpy(rctx->digest, sha1_iv, rctx->ivsize);
 		break;
@@ -602,7 +601,6 @@ static int aspeed_sham_init(struct ahash_request *req)
 		rctx->flags |= SHA_FLAGS_SHA224;
 		rctx->digsize = SHA224_DIGEST_SIZE;
 		rctx->block_size = SHA224_BLOCK_SIZE;
-		rctx->sha_iv = sha224_iv;
 		rctx->ivsize = 32;
 		memcpy(rctx->digest, sha224_iv, rctx->ivsize);
 		break;
@@ -611,7 +609,6 @@ static int aspeed_sham_init(struct ahash_request *req)
 		rctx->flags |= SHA_FLAGS_SHA256;
 		rctx->digsize = SHA256_DIGEST_SIZE;
 		rctx->block_size = SHA256_BLOCK_SIZE;
-		rctx->sha_iv = sha256_iv;
 		rctx->ivsize = 32;
 		memcpy(rctx->digest, sha256_iv, rctx->ivsize);
 		break;
@@ -621,7 +618,6 @@ static int aspeed_sham_init(struct ahash_request *req)
 		rctx->flags |= SHA_FLAGS_SHA384;
 		rctx->digsize = SHA384_DIGEST_SIZE;
 		rctx->block_size = SHA384_BLOCK_SIZE;
-		rctx->sha_iv = (const __be32 *)sha384_iv;
 		rctx->ivsize = 64;
 		memcpy(rctx->digest, sha384_iv, rctx->ivsize);
 		break;
@@ -631,7 +627,6 @@ static int aspeed_sham_init(struct ahash_request *req)
 		rctx->flags |= SHA_FLAGS_SHA512;
 		rctx->digsize = SHA512_DIGEST_SIZE;
 		rctx->block_size = SHA512_BLOCK_SIZE;
-		rctx->sha_iv = (const __be32 *)sha512_iv;
 		rctx->ivsize = 64;
 		memcpy(rctx->digest, sha512_iv, rctx->ivsize);
 		break;
diff --git a/drivers/crypto/aspeed/aspeed-hace.h b/drivers/crypto/aspeed/aspeed-hace.h
index a34677f10966..ad39954251dd 100644
--- a/drivers/crypto/aspeed/aspeed-hace.h
+++ b/drivers/crypto/aspeed/aspeed-hace.h
@@ -184,7 +184,6 @@ struct aspeed_sham_reqctx {
 	size_t			digsize;
 	size_t			block_size;
 	size_t			ivsize;
-	const __be32		*sha_iv;
 
 	/* remain data buffer */
 	dma_addr_t		buffer_dma_addr;
-- 
2.39.5




* [PATCH 08/11] crypto: aspeed/hash - Use API partial block handling
  2025-05-13  6:03 [PATCH 00/11] crypto: aspeed/hash - Convert to partial block API Herbert Xu
                   ` (6 preceding siblings ...)
  2025-05-13  6:04 ` [PATCH 07/11] crypto: aspeed/hash - Remove sha_iv Herbert Xu
@ 2025-05-13  6:04 ` Herbert Xu
  2025-05-13  6:04 ` [PATCH 09/11] crypto: aspeed/hash - Add fallback Herbert Xu
                   ` (2 subsequent siblings)
  10 siblings, 0 replies; 12+ messages in thread
From: Herbert Xu @ 2025-05-13  6:04 UTC (permalink / raw)
  To: Linux Crypto Mailing List; +Cc: Neal Liu, linux-aspeed

Use the Crypto API partial block handling.

Also switch to the generic export format.

Remove the final function, which is no longer used by the Crypto API.
Move the final padding into aspeed_ahash_dma_prepare_sg.
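
The generic export layout implemented below can be pictured as a flat
struct (a sketch of the export/import code, not a kernel-defined type):

	struct export_state_sketch {
		u8  state[IVSIZE];  /* running digest, rctx->ivsize
				     * bytes: 32 for SHA-1/224/256,
				     * 64 for SHA-384/512 */
		u64 count_lo;       /* bytes hashed so far */
		/* u64 count_hi;       present only when ivsize == 64 */
	};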

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 drivers/crypto/aspeed/aspeed-hace-hash.c | 304 ++++++++---------------
 drivers/crypto/aspeed/aspeed-hace.h      |   6 +-
 2 files changed, 104 insertions(+), 206 deletions(-)

diff --git a/drivers/crypto/aspeed/aspeed-hace-hash.c b/drivers/crypto/aspeed/aspeed-hace-hash.c
index d31d7e1c9af2..d7c7f867d6e1 100644
--- a/drivers/crypto/aspeed/aspeed-hace-hash.c
+++ b/drivers/crypto/aspeed/aspeed-hace-hash.c
@@ -13,6 +13,7 @@
 #include <linux/err.h>
 #include <linux/io.h>
 #include <linux/kernel.h>
+#include <linux/scatterlist.h>
 #include <linux/string.h>
 
 #ifdef CONFIG_CRYPTO_DEV_ASPEED_DEBUG
@@ -58,6 +59,46 @@ static const __be64 sha512_iv[8] = {
 	cpu_to_be64(SHA512_H6), cpu_to_be64(SHA512_H7)
 };
 
+static int aspeed_sham_init(struct ahash_request *req);
+static int aspeed_ahash_req_update(struct aspeed_hace_dev *hace_dev);
+
+static int aspeed_sham_export(struct ahash_request *req, void *out)
+{
+	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+	union {
+		u8 *u8;
+		u64 *u64;
+	} p = { .u8 = out };
+
+	memcpy(out, rctx->digest, rctx->ivsize);
+	p.u8 += rctx->ivsize;
+	put_unaligned(rctx->digcnt[0], p.u64++);
+	if (rctx->ivsize == 64)
+		put_unaligned(rctx->digcnt[1], p.u64);
+	return 0;
+}
+
+static int aspeed_sham_import(struct ahash_request *req, const void *in)
+{
+	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+	union {
+		const u8 *u8;
+		const u64 *u64;
+	} p = { .u8 = in };
+	int err;
+
+	err = aspeed_sham_init(req);
+	if (err)
+		return err;
+
+	memcpy(rctx->digest, in, rctx->ivsize);
+	p.u8 += rctx->ivsize;
+	rctx->digcnt[0] = get_unaligned(p.u64++);
+	if (rctx->ivsize == 64)
+		rctx->digcnt[1] = get_unaligned(p.u64);
+	return 0;
+}
+
 /* The purpose of this padding is to ensure that the padded message is a
  * multiple of 512 bits (SHA1/SHA224/SHA256) or 1024 bits (SHA384/SHA512).
  * The bit "1" is appended at the end of the message followed by
@@ -117,33 +158,29 @@ static int aspeed_ahash_dma_prepare(struct aspeed_hace_dev *hace_dev)
 	bool final = rctx->flags & SHA_FLAGS_FINUP;
 	unsigned int length, remain;
 
-	length = rctx->total + rctx->bufcnt;
+	length = rctx->total;
 	remain = final ? 0 : length % rctx->block_size;
 
 	AHASH_DBG(hace_dev, "length:0x%x, remain:0x%x\n", length, remain);
 
-	if (rctx->bufcnt)
-		memcpy(hash_engine->ahash_src_addr, rctx->buffer, rctx->bufcnt);
-
 	if ((final ? round_up(length, rctx->block_size) + rctx->block_size :
 		     length) > ASPEED_CRYPTO_SRC_DMA_BUF_LEN) {
 		dev_warn(hace_dev->dev, "Hash data length is too large\n");
 		return -EINVAL;
 	}
 
-	scatterwalk_map_and_copy(hash_engine->ahash_src_addr +
-				 rctx->bufcnt, rctx->src_sg,
+	scatterwalk_map_and_copy(hash_engine->ahash_src_addr, rctx->src_sg,
 				 rctx->offset, rctx->total - remain, 0);
 	rctx->offset += rctx->total - remain;
 
+	rctx->digcnt[0] += rctx->total - remain;
+	if (rctx->digcnt[0] < rctx->total - remain)
+		rctx->digcnt[1]++;
+
 	if (final)
 		length += aspeed_ahash_fill_padding(
 			hace_dev, rctx, hash_engine->ahash_src_addr + length);
-	else
-		scatterwalk_map_and_copy(rctx->buffer, rctx->src_sg,
-					 rctx->offset, remain, 0);
 
-	rctx->bufcnt = remain;
 	rctx->digest_dma_addr = dma_map_single(hace_dev->dev, rctx->digest,
 					       SHA512_DIGEST_SIZE,
 					       DMA_BIDIRECTIONAL);
@@ -168,16 +205,17 @@ static int aspeed_ahash_dma_prepare_sg(struct aspeed_hace_dev *hace_dev)
 	struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
 	struct ahash_request *req = hash_engine->req;
 	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+	bool final = rctx->flags & SHA_FLAGS_FINUP;
 	struct aspeed_sg_list *src_list;
 	struct scatterlist *s;
 	int length, remain, sg_len, i;
 	int rc = 0;
 
-	remain = (rctx->total + rctx->bufcnt) % rctx->block_size;
-	length = rctx->total + rctx->bufcnt - remain;
+	remain = final ? 0 : rctx->total % rctx->block_size;
+	length = rctx->total - remain;
 
-	AHASH_DBG(hace_dev, "%s:0x%x, %s:%zu, %s:0x%x, %s:0x%x\n",
-		  "rctx total", rctx->total, "bufcnt", rctx->bufcnt,
+	AHASH_DBG(hace_dev, "%s:0x%x, %s:0x%x, %s:0x%x\n",
+		  "rctx total", rctx->total,
 		  "length", length, "remain", remain);
 
 	sg_len = dma_map_sg(hace_dev->dev, rctx->src_sg, rctx->src_nents,
@@ -198,13 +236,39 @@ static int aspeed_ahash_dma_prepare_sg(struct aspeed_hace_dev *hace_dev)
 		goto free_src_sg;
 	}
 
-	if (rctx->bufcnt != 0) {
-		u32 phy_addr;
-		u32 len;
+	for_each_sg(rctx->src_sg, s, sg_len, i) {
+		u32 phy_addr = sg_dma_address(s);
+		u32 len = sg_dma_len(s);
 
+		if (length > len)
+			length -= len;
+		else {
+			/* Last sg list */
+			len = length;
+			length = 0;
+		}
+
+		src_list[i].phy_addr = cpu_to_le32(phy_addr);
+		src_list[i].len = cpu_to_le32(len);
+	}
+
+	if (length != 0) {
+		rc = -EINVAL;
+		goto free_rctx_digest;
+	}
+
+	rctx->digcnt[0] += rctx->total - remain;
+	if (rctx->digcnt[0] < rctx->total - remain)
+		rctx->digcnt[1]++;
+
+	if (final) {
+		int len = aspeed_ahash_fill_padding(hace_dev, rctx,
+						    rctx->buffer);
+
+		rctx->total += len;
 		rctx->buffer_dma_addr = dma_map_single(hace_dev->dev,
 						       rctx->buffer,
-						       rctx->block_size * 2,
+						       sizeof(rctx->buffer),
 						       DMA_TO_DEVICE);
 		if (dma_mapping_error(hace_dev->dev, rctx->buffer_dma_addr)) {
 			dev_warn(hace_dev->dev, "dma_map() rctx buffer error\n");
@@ -212,54 +276,19 @@ static int aspeed_ahash_dma_prepare_sg(struct aspeed_hace_dev *hace_dev)
 			goto free_rctx_digest;
 		}
 
-		phy_addr = rctx->buffer_dma_addr;
-		len = rctx->bufcnt;
-		length -= len;
-
-		/* Last sg list */
-		if (length == 0)
-			len |= HASH_SG_LAST_LIST;
-
-		src_list[0].phy_addr = cpu_to_le32(phy_addr);
-		src_list[0].len = cpu_to_le32(len);
-		src_list++;
-	}
-
-	if (length != 0) {
-		for_each_sg(rctx->src_sg, s, sg_len, i) {
-			u32 phy_addr = sg_dma_address(s);
-			u32 len = sg_dma_len(s);
-
-			if (length > len)
-				length -= len;
-			else {
-				/* Last sg list */
-				len = length;
-				len |= HASH_SG_LAST_LIST;
-				length = 0;
-			}
-
-			src_list[i].phy_addr = cpu_to_le32(phy_addr);
-			src_list[i].len = cpu_to_le32(len);
-		}
-	}
-
-	if (length != 0) {
-		rc = -EINVAL;
-		goto free_rctx_buffer;
+		src_list[i].phy_addr = cpu_to_le32(rctx->buffer_dma_addr);
+		src_list[i].len = cpu_to_le32(len);
+		i++;
 	}
+	src_list[i - 1].len |= cpu_to_le32(HASH_SG_LAST_LIST);
 
 	rctx->offset = rctx->total - remain;
-	hash_engine->src_length = rctx->total + rctx->bufcnt - remain;
+	hash_engine->src_length = rctx->total - remain;
 	hash_engine->src_dma = hash_engine->ahash_src_dma_addr;
 	hash_engine->digest_dma = rctx->digest_dma_addr;
 
 	return 0;
 
-free_rctx_buffer:
-	if (rctx->bufcnt != 0)
-		dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr,
-				 rctx->block_size * 2, DMA_TO_DEVICE);
 free_rctx_digest:
 	dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
 			 SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
@@ -274,39 +303,21 @@ static int aspeed_ahash_complete(struct aspeed_hace_dev *hace_dev)
 {
 	struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
 	struct ahash_request *req = hash_engine->req;
+	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
 
 	AHASH_DBG(hace_dev, "\n");
 
 	hash_engine->flags &= ~CRYPTO_FLAGS_BUSY;
 
-	crypto_finalize_hash_request(hace_dev->crypt_engine_hash, req, 0);
+	if (rctx->flags & SHA_FLAGS_FINUP)
+		memcpy(req->result, rctx->digest, rctx->digsize);
+
+	crypto_finalize_hash_request(hace_dev->crypt_engine_hash, req,
+				     rctx->total - rctx->offset);
 
 	return 0;
 }
 
-/*
- * Copy digest to the corresponding request result.
- * This function will be called at final() stage.
- */
-static int aspeed_ahash_transfer(struct aspeed_hace_dev *hace_dev)
-{
-	struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
-	struct ahash_request *req = hash_engine->req;
-	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
-
-	AHASH_DBG(hace_dev, "\n");
-
-	dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
-			 SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
-
-	dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr,
-			 rctx->block_size * 2, DMA_TO_DEVICE);
-
-	memcpy(req->result, rctx->digest, rctx->digsize);
-
-	return aspeed_ahash_complete(hace_dev);
-}
-
 /*
  * Trigger hardware engines to do the math.
  */
@@ -340,51 +351,6 @@ static int aspeed_hace_ahash_trigger(struct aspeed_hace_dev *hace_dev,
 	return -EINPROGRESS;
 }
 
-static int aspeed_ahash_req_final(struct aspeed_hace_dev *hace_dev)
-{
-	struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
-	struct ahash_request *req = hash_engine->req;
-	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
-	int rc = 0;
-
-	AHASH_DBG(hace_dev, "\n");
-
-	rctx->bufcnt += aspeed_ahash_fill_padding(hace_dev, rctx,
-						  rctx->buffer + rctx->bufcnt);
-
-	rctx->digest_dma_addr = dma_map_single(hace_dev->dev,
-					       rctx->digest,
-					       SHA512_DIGEST_SIZE,
-					       DMA_BIDIRECTIONAL);
-	if (dma_mapping_error(hace_dev->dev, rctx->digest_dma_addr)) {
-		dev_warn(hace_dev->dev, "dma_map() rctx digest error\n");
-		rc = -ENOMEM;
-		goto end;
-	}
-
-	rctx->buffer_dma_addr = dma_map_single(hace_dev->dev,
-					       rctx->buffer,
-					       rctx->block_size * 2,
-					       DMA_TO_DEVICE);
-	if (dma_mapping_error(hace_dev->dev, rctx->buffer_dma_addr)) {
-		dev_warn(hace_dev->dev, "dma_map() rctx buffer error\n");
-		rc = -ENOMEM;
-		goto free_rctx_digest;
-	}
-
-	hash_engine->src_dma = rctx->buffer_dma_addr;
-	hash_engine->src_length = rctx->bufcnt;
-	hash_engine->digest_dma = rctx->digest_dma_addr;
-
-	return aspeed_hace_ahash_trigger(hace_dev, aspeed_ahash_transfer);
-
-free_rctx_digest:
-	dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
-			 SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
-end:
-	return rc;
-}
-
 static int aspeed_ahash_update_resume_sg(struct aspeed_hace_dev *hace_dev)
 {
 	struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
@@ -396,23 +362,15 @@ static int aspeed_ahash_update_resume_sg(struct aspeed_hace_dev *hace_dev)
 	dma_unmap_sg(hace_dev->dev, rctx->src_sg, rctx->src_nents,
 		     DMA_TO_DEVICE);
 
-	if (rctx->bufcnt != 0)
+	if (rctx->flags & SHA_FLAGS_FINUP)
 		dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr,
-				 rctx->block_size * 2,
-				 DMA_TO_DEVICE);
+				 sizeof(rctx->buffer), DMA_TO_DEVICE);
 
 	dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
 			 SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
 
-	scatterwalk_map_and_copy(rctx->buffer, rctx->src_sg, rctx->offset,
-				 rctx->total - rctx->offset, 0);
-
-	rctx->bufcnt = rctx->total - rctx->offset;
 	rctx->cmd &= ~HASH_CMD_HASH_SRC_SG_CTRL;
 
-	if (rctx->flags & SHA_FLAGS_FINUP)
-		return aspeed_ahash_req_final(hace_dev);
-
 	return aspeed_ahash_complete(hace_dev);
 }
 
@@ -427,9 +385,6 @@ static int aspeed_ahash_update_resume(struct aspeed_hace_dev *hace_dev)
 	dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
 			 SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
 
-	if (rctx->flags & SHA_FLAGS_FINUP)
-		memcpy(req->result, rctx->digest, rctx->digsize);
-
 	return aspeed_ahash_complete(hace_dev);
 }
 
@@ -468,21 +423,16 @@ static int aspeed_hace_hash_handle_queue(struct aspeed_hace_dev *hace_dev,
 static int aspeed_ahash_do_request(struct crypto_engine *engine, void *areq)
 {
 	struct ahash_request *req = ahash_request_cast(areq);
-	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 	struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
 	struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
 	struct aspeed_engine_hash *hash_engine;
-	int ret = 0;
+	int ret;
 
 	hash_engine = &hace_dev->hash_engine;
 	hash_engine->flags |= CRYPTO_FLAGS_BUSY;
 
-	if (rctx->op == SHA_OP_UPDATE)
-		ret = aspeed_ahash_req_update(hace_dev);
-	else if (rctx->op == SHA_OP_FINAL)
-		ret = aspeed_ahash_req_final(hace_dev);
-
+	ret = aspeed_ahash_req_update(hace_dev);
 	if (ret != -EINPROGRESS)
 		return ret;
 
@@ -513,20 +463,6 @@ static int aspeed_ahash_do_one(struct crypto_engine *engine, void *areq)
 	return aspeed_ahash_do_request(engine, areq);
 }
 
-static int aspeed_sham_final(struct ahash_request *req)
-{
-	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
-	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
-	struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
-
-	AHASH_DBG(hace_dev, "req->nbytes:%d, rctx->total:%d\n",
-		  req->nbytes, rctx->total);
-	rctx->op = SHA_OP_FINAL;
-
-	return aspeed_hace_hash_handle_queue(hace_dev, req);
-}
-
 static int aspeed_sham_update(struct ahash_request *req)
 {
 	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
@@ -539,22 +475,7 @@ static int aspeed_sham_update(struct ahash_request *req)
 	rctx->total = req->nbytes;
 	rctx->src_sg = req->src;
 	rctx->offset = 0;
-	rctx->src_nents = sg_nents(req->src);
-	rctx->op = SHA_OP_UPDATE;
-
-	rctx->digcnt[0] += rctx->total;
-	if (rctx->digcnt[0] < rctx->total)
-		rctx->digcnt[1]++;
-
-	if (rctx->bufcnt + rctx->total < rctx->block_size) {
-		scatterwalk_map_and_copy(rctx->buffer + rctx->bufcnt,
-					 rctx->src_sg, rctx->offset,
-					 rctx->total, 0);
-		rctx->bufcnt += rctx->total;
-
-		return rctx->flags & SHA_FLAGS_FINUP ?
-		       aspeed_sham_final(req) : 0;
-	}
+	rctx->src_nents = sg_nents_for_len(req->src, req->nbytes);
 
 	return aspeed_hace_hash_handle_queue(hace_dev, req);
 }
@@ -636,7 +557,6 @@ static int aspeed_sham_init(struct ahash_request *req)
 		return -EINVAL;
 	}
 
-	rctx->bufcnt = 0;
 	rctx->total = 0;
 	rctx->digcnt[0] = 0;
 	rctx->digcnt[1] = 0;
@@ -661,30 +581,11 @@ static int aspeed_sham_cra_init(struct crypto_ahash *tfm)
 	return 0;
 }
 
-static int aspeed_sham_export(struct ahash_request *req, void *out)
-{
-	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
-
-	memcpy(out, rctx, sizeof(*rctx));
-
-	return 0;
-}
-
-static int aspeed_sham_import(struct ahash_request *req, const void *in)
-{
-	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
-
-	memcpy(rctx, in, sizeof(*rctx));
-
-	return 0;
-}
-
 static struct aspeed_hace_alg aspeed_ahash_algs[] = {
 	{
 		.alg.ahash.base = {
 			.init	= aspeed_sham_init,
 			.update	= aspeed_sham_update,
-			.final	= aspeed_sham_final,
 			.finup	= aspeed_sham_finup,
 			.digest	= aspeed_sham_digest,
 			.export	= aspeed_sham_export,
@@ -699,6 +600,7 @@ static struct aspeed_hace_alg aspeed_ahash_algs[] = {
 					.cra_priority		= 300,
 					.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
 								  CRYPTO_ALG_ASYNC |
+								  CRYPTO_AHASH_ALG_BLOCK_ONLY |
 								  CRYPTO_ALG_KERN_DRIVER_ONLY,
 					.cra_blocksize		= SHA1_BLOCK_SIZE,
 					.cra_ctxsize		= sizeof(struct aspeed_sham_ctx),
@@ -716,7 +618,6 @@ static struct aspeed_hace_alg aspeed_ahash_algs[] = {
 		.alg.ahash.base = {
 			.init	= aspeed_sham_init,
 			.update	= aspeed_sham_update,
-			.final	= aspeed_sham_final,
 			.finup	= aspeed_sham_finup,
 			.digest	= aspeed_sham_digest,
 			.export	= aspeed_sham_export,
@@ -731,6 +632,7 @@ static struct aspeed_hace_alg aspeed_ahash_algs[] = {
 					.cra_priority		= 300,
 					.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
 								  CRYPTO_ALG_ASYNC |
+								  CRYPTO_AHASH_ALG_BLOCK_ONLY |
 								  CRYPTO_ALG_KERN_DRIVER_ONLY,
 					.cra_blocksize		= SHA256_BLOCK_SIZE,
 					.cra_ctxsize		= sizeof(struct aspeed_sham_ctx),
@@ -748,7 +650,6 @@ static struct aspeed_hace_alg aspeed_ahash_algs[] = {
 		.alg.ahash.base = {
 			.init	= aspeed_sham_init,
 			.update	= aspeed_sham_update,
-			.final	= aspeed_sham_final,
 			.finup	= aspeed_sham_finup,
 			.digest	= aspeed_sham_digest,
 			.export	= aspeed_sham_export,
@@ -763,6 +664,7 @@ static struct aspeed_hace_alg aspeed_ahash_algs[] = {
 					.cra_priority		= 300,
 					.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
 								  CRYPTO_ALG_ASYNC |
+								  CRYPTO_AHASH_ALG_BLOCK_ONLY |
 								  CRYPTO_ALG_KERN_DRIVER_ONLY,
 					.cra_blocksize		= SHA224_BLOCK_SIZE,
 					.cra_ctxsize		= sizeof(struct aspeed_sham_ctx),
@@ -783,7 +685,6 @@ static struct aspeed_hace_alg aspeed_ahash_algs_g6[] = {
 		.alg.ahash.base = {
 			.init	= aspeed_sham_init,
 			.update	= aspeed_sham_update,
-			.final	= aspeed_sham_final,
 			.finup	= aspeed_sham_finup,
 			.digest	= aspeed_sham_digest,
 			.export	= aspeed_sham_export,
@@ -798,6 +699,7 @@ static struct aspeed_hace_alg aspeed_ahash_algs_g6[] = {
 					.cra_priority		= 300,
 					.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
 								  CRYPTO_ALG_ASYNC |
+								  CRYPTO_AHASH_ALG_BLOCK_ONLY |
 								  CRYPTO_ALG_KERN_DRIVER_ONLY,
 					.cra_blocksize		= SHA384_BLOCK_SIZE,
 					.cra_ctxsize		= sizeof(struct aspeed_sham_ctx),
@@ -815,7 +717,6 @@ static struct aspeed_hace_alg aspeed_ahash_algs_g6[] = {
 		.alg.ahash.base = {
 			.init	= aspeed_sham_init,
 			.update	= aspeed_sham_update,
-			.final	= aspeed_sham_final,
 			.finup	= aspeed_sham_finup,
 			.digest	= aspeed_sham_digest,
 			.export	= aspeed_sham_export,
@@ -830,6 +731,7 @@ static struct aspeed_hace_alg aspeed_ahash_algs_g6[] = {
 					.cra_priority		= 300,
 					.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
 								  CRYPTO_ALG_ASYNC |
+								  CRYPTO_AHASH_ALG_BLOCK_ONLY |
 								  CRYPTO_ALG_KERN_DRIVER_ONLY,
 					.cra_blocksize		= SHA512_BLOCK_SIZE,
 					.cra_ctxsize		= sizeof(struct aspeed_sham_ctx),
diff --git a/drivers/crypto/aspeed/aspeed-hace.h b/drivers/crypto/aspeed/aspeed-hace.h
index ad39954251dd..b1d07730d543 100644
--- a/drivers/crypto/aspeed/aspeed-hace.h
+++ b/drivers/crypto/aspeed/aspeed-hace.h
@@ -172,7 +172,6 @@ struct aspeed_sham_reqctx {
 	u64			digcnt[2];
 
 	unsigned long		flags;		/* final update flag should no use*/
-	unsigned long		op;		/* final or update */
 	u32			cmd;		/* trigger cmd */
 
 	/* walk state */
@@ -185,14 +184,11 @@ struct aspeed_sham_reqctx {
 	size_t			block_size;
 	size_t			ivsize;
 
-	/* remain data buffer */
 	dma_addr_t		buffer_dma_addr;
-	size_t			bufcnt;		/* buffer counter */
-
 	dma_addr_t		digest_dma_addr;
 
 	/* This is DMA too but read-only for hardware. */
-	u8			buffer[SHA512_BLOCK_SIZE * 2];
+	u8			buffer[SHA512_BLOCK_SIZE + 16];
 };
 
 struct aspeed_engine_crypto {
-- 
2.39.5



^ permalink raw reply related	[flat|nested] 12+ messages in thread

* [PATCH 09/11] crypto: aspeed/hash - Add fallback
  2025-05-13  6:03 [PATCH 00/11] crypto: aspeed/hash - Convert to partial block API Herbert Xu
                   ` (7 preceding siblings ...)
  2025-05-13  6:04 ` [PATCH 08/11] crypto: aspeed/hash - Use API partial block handling Herbert Xu
@ 2025-05-13  6:04 ` Herbert Xu
  2025-05-13  6:04 ` [PATCH 10/11] crypto: aspeed/hash - Iterate on large hashes in dma_prepare Herbert Xu
  2025-05-13  6:04 ` [PATCH 11/11] crypto: aspeed/hash - Fix potential overflow in dma_prepare_sg Herbert Xu
  10 siblings, 0 replies; 12+ messages in thread
From: Herbert Xu @ 2025-05-13  6:04 UTC (permalink / raw)
  To: Linux Crypto Mailing List; +Cc: Neal Liu, linux-aspeed

If a hash request fails due to a DMA mapping error, or if it is too
large to fit in the driver buffer, use a fallback to do the hash
rather than failing.
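
For illustration, the shape of the fallback is a state round-trip
through the generic core export/import interface.  The sketch below
uses hypothetical example_* helper names standing in for the driver's
own export/import routines; the real code is in the diff further down:

	/* Sketch only: hand one request off to a software fallback. */
	static int example_hash_fallback(struct ahash_request *req,
					 struct ahash_request *fbreq,
					 u8 *state, bool finup)
	{
		int ret;

		/* Seed the software tfm with the hardware state. */
		ret = example_export_hw(req, state) ?:
		      crypto_ahash_import_core(fbreq, state);
		if (ret)
			return ret;

		if (finup)
			return crypto_ahash_finup(fbreq);

		/* Partial update: run it in software, then pull the
		 * state back so hardware can resume later.
		 */
		return crypto_ahash_update(fbreq) ?:
		       crypto_ahash_export_core(fbreq, state) ?:
		       example_import_hw(req, state);
	}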

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 drivers/crypto/aspeed/aspeed-hace-hash.c | 28 +++++++++++++++++++++++-
 1 file changed, 27 insertions(+), 1 deletion(-)

diff --git a/drivers/crypto/aspeed/aspeed-hace-hash.c b/drivers/crypto/aspeed/aspeed-hace-hash.c
index d7c7f867d6e1..3bfb7db96c40 100644
--- a/drivers/crypto/aspeed/aspeed-hace-hash.c
+++ b/drivers/crypto/aspeed/aspeed-hace-hash.c
@@ -420,6 +420,32 @@ static int aspeed_hace_hash_handle_queue(struct aspeed_hace_dev *hace_dev,
 			hace_dev->crypt_engine_hash, req);
 }
 
+static noinline int aspeed_ahash_fallback(struct ahash_request *req)
+{
+	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+	HASH_FBREQ_ON_STACK(fbreq, req);
+	u8 *state = rctx->buffer;
+	struct scatterlist sg[2];
+	struct scatterlist *ssg;
+	int ret;
+
+	ssg = scatterwalk_ffwd(sg, req->src, rctx->offset);
+	ahash_request_set_crypt(fbreq, ssg, req->result,
+				rctx->total - rctx->offset);
+
+	ret = aspeed_sham_export(req, state) ?:
+	      crypto_ahash_import_core(fbreq, state);
+
+	if (rctx->flags & SHA_FLAGS_FINUP)
+		ret = ret ?: crypto_ahash_finup(fbreq);
+	else
+		ret = ret ?: crypto_ahash_update(fbreq) ?:
+			     crypto_ahash_export_core(fbreq, state) ?:
+			     aspeed_sham_import(req, state);
+	HASH_REQUEST_ZERO(fbreq);
+	return ret;
+}
+
 static int aspeed_ahash_do_request(struct crypto_engine *engine, void *areq)
 {
 	struct ahash_request *req = ahash_request_cast(areq);
@@ -434,7 +460,7 @@ static int aspeed_ahash_do_request(struct crypto_engine *engine, void *areq)
 
 	ret = aspeed_ahash_req_update(hace_dev);
 	if (ret != -EINPROGRESS)
-		return ret;
+		return aspeed_ahash_fallback(req);
 
 	return 0;
 }
-- 
2.39.5



^ permalink raw reply related	[flat|nested] 12+ messages in thread

* [PATCH 10/11] crypto: aspeed/hash - Iterate on large hashes in dma_prepare
  2025-05-13  6:03 [PATCH 00/11] crypto: aspeed/hash - Convert to partial block API Herbert Xu
                   ` (8 preceding siblings ...)
  2025-05-13  6:04 ` [PATCH 09/11] crypto: aspeed/hash - Add fallback Herbert Xu
@ 2025-05-13  6:04 ` Herbert Xu
  2025-05-13  6:04 ` [PATCH 11/11] crypto: aspeed/hash - Fix potential overflow in dma_prepare_sg Herbert Xu
  10 siblings, 0 replies; 12+ messages in thread
From: Herbert Xu @ 2025-05-13  6:04 UTC (permalink / raw)
  To: Linux Crypto Mailing List; +Cc: Neal Liu, linux-aspeed

Rather than failing a hash larger than ASPEED_CRYPTO_SRC_DMA_BUF_LEN,
just hash it one buffer-sized chunk at a time until all of the data
has been processed.
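
For illustration, the per-pass length selection below amounts to the
following standalone sketch (hypothetical function name; the two
buffer-size constants and the round_up()/round_down() helpers are
used exactly as in the hunk):

	static unsigned int example_clamp_length(unsigned int len,
						 unsigned int bsize,
						 bool finup, bool *final)
	{
		*final = false;
		/* More than one buffer's worth: take a full buffer
		 * now (the buffer length is a block-size multiple).
		 */
		if (len > ASPEED_HASH_SRC_DMA_BUF_LEN)
			return ASPEED_HASH_SRC_DMA_BUF_LEN;
		if (finup) {
			/* Final padding may need up to one extra block. */
			if (round_up(len, bsize) + bsize >
			    ASPEED_CRYPTO_SRC_DMA_BUF_LEN)
				return round_down(len - 1, bsize);
			*final = true;	/* data plus padding fits */
			return len;
		}
		/* Not the last pass: hold back any partial block. */
		return len - (len % bsize);
	}

A caller repeats this, advancing rctx->offset by the returned length,
until the final pass (with padding) completes.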

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 drivers/crypto/aspeed/aspeed-hace-hash.c | 36 +++++++++++++++---------
 1 file changed, 22 insertions(+), 14 deletions(-)

diff --git a/drivers/crypto/aspeed/aspeed-hace-hash.c b/drivers/crypto/aspeed/aspeed-hace-hash.c
index 3bfb7db96c40..fc2154947ec8 100644
--- a/drivers/crypto/aspeed/aspeed-hace-hash.c
+++ b/drivers/crypto/aspeed/aspeed-hace-hash.c
@@ -155,26 +155,30 @@ static int aspeed_ahash_dma_prepare(struct aspeed_hace_dev *hace_dev)
 	struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
 	struct ahash_request *req = hash_engine->req;
 	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
-	bool final = rctx->flags & SHA_FLAGS_FINUP;
 	unsigned int length, remain;
+	bool final = false;
 
-	length = rctx->total;
-	remain = final ? 0 : length % rctx->block_size;
+	length = rctx->total - rctx->offset;
+	remain = length - round_down(length, rctx->block_size);
 
 	AHASH_DBG(hace_dev, "length:0x%x, remain:0x%x\n", length, remain);
 
-	if ((final ? round_up(length, rctx->block_size) + rctx->block_size :
-		     length) > ASPEED_CRYPTO_SRC_DMA_BUF_LEN) {
-		dev_warn(hace_dev->dev, "Hash data length is too large\n");
-		return -EINVAL;
-	}
-
+	if (length > ASPEED_HASH_SRC_DMA_BUF_LEN)
+		length = ASPEED_HASH_SRC_DMA_BUF_LEN;
+	else if (rctx->flags & SHA_FLAGS_FINUP) {
+		if (round_up(length, rctx->block_size) + rctx->block_size >
+		    ASPEED_CRYPTO_SRC_DMA_BUF_LEN)
+			length = round_down(length - 1, rctx->block_size);
+		else
+			final = true;
+	} else
+		length -= remain;
 	scatterwalk_map_and_copy(hash_engine->ahash_src_addr, rctx->src_sg,
-				 rctx->offset, rctx->total - remain, 0);
-	rctx->offset += rctx->total - remain;
+				 rctx->offset, length, 0);
+	rctx->offset += length;
 
-	rctx->digcnt[0] += rctx->total - remain;
-	if (rctx->digcnt[0] < rctx->total - remain)
+	rctx->digcnt[0] += length;
+	if (rctx->digcnt[0] < length)
 		rctx->digcnt[1]++;
 
 	if (final)
@@ -189,7 +193,7 @@ static int aspeed_ahash_dma_prepare(struct aspeed_hace_dev *hace_dev)
 		return -ENOMEM;
 	}
 
-	hash_engine->src_length = length - remain;
+	hash_engine->src_length = length;
 	hash_engine->src_dma = hash_engine->ahash_src_dma_addr;
 	hash_engine->digest_dma = rctx->digest_dma_addr;
 
@@ -385,6 +389,10 @@ static int aspeed_ahash_update_resume(struct aspeed_hace_dev *hace_dev)
 	dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
 			 SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
 
+	if (rctx->total - rctx->offset >= rctx->block_size ||
+	    (rctx->total != rctx->offset && rctx->flags & SHA_FLAGS_FINUP))
+		return aspeed_ahash_req_update(hace_dev);
+
 	return aspeed_ahash_complete(hace_dev);
 }
 
-- 
2.39.5



^ permalink raw reply related	[flat|nested] 12+ messages in thread

* [PATCH 11/11] crypto: aspeed/hash - Fix potential overflow in dma_prepare_sg
  2025-05-13  6:03 [PATCH 00/11] crypto: aspeed/hash - Convert to partial block API Herbert Xu
                   ` (9 preceding siblings ...)
  2025-05-13  6:04 ` [PATCH 10/11] crypto: aspeed/hash - Iterate on large hashes in dma_prepare Herbert Xu
@ 2025-05-13  6:04 ` Herbert Xu
  10 siblings, 0 replies; 12+ messages in thread
From: Herbert Xu @ 2025-05-13  6:04 UTC (permalink / raw)
  To: Linux Crypto Mailing List; +Cc: Neal Liu, linux-aspeed

The mapped SG lists are written to hash_engine->ahash_src_addr, which
has the size ASPEED_HASH_SRC_DMA_BUF_LEN.  Since scatterlists are not
bounded in size, make sure that size is not exceeded.

If the mapped SG list is larger than the buffer, simply iterate
over it as is done in the dma_prepare case.
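
Concretely, the cap works out as in this sketch (assumed standalone
form; aspeed_sg_list is the driver's two-word hardware descriptor,
and `final' reserves one slot for the padding block appended from
rctx->buffer):

	static unsigned int example_max_sg_nents(bool final)
	{
		/* One descriptor slot per mapped SG entry. */
		return ASPEED_HASH_SRC_DMA_BUF_LEN /
		       sizeof(struct aspeed_sg_list) - (final ? 1 : 0);
	}

sg_len is then clamped with min(sg_len, max_sg_nents) before the
descriptor table is built, and any remaining entries are picked up
on the next pass.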

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 drivers/crypto/aspeed/aspeed-hace-hash.c | 84 ++++++++++++------------
 1 file changed, 43 insertions(+), 41 deletions(-)

diff --git a/drivers/crypto/aspeed/aspeed-hace-hash.c b/drivers/crypto/aspeed/aspeed-hace-hash.c
index fc2154947ec8..e54b7dd03be3 100644
--- a/drivers/crypto/aspeed/aspeed-hace-hash.c
+++ b/drivers/crypto/aspeed/aspeed-hace-hash.c
@@ -146,6 +146,15 @@ static int aspeed_ahash_fill_padding(struct aspeed_hace_dev *hace_dev,
 	return padlen + bitslen;
 }
 
+static void aspeed_ahash_update_counter(struct aspeed_sham_reqctx *rctx,
+					unsigned int len)
+{
+	rctx->offset += len;
+	rctx->digcnt[0] += len;
+	if (rctx->digcnt[0] < len)
+		rctx->digcnt[1]++;
+}
+
 /*
  * Prepare DMA buffer before hardware engine
  * processing.
@@ -175,12 +184,7 @@ static int aspeed_ahash_dma_prepare(struct aspeed_hace_dev *hace_dev)
 		length -= remain;
 	scatterwalk_map_and_copy(hash_engine->ahash_src_addr, rctx->src_sg,
 				 rctx->offset, length, 0);
-	rctx->offset += length;
-
-	rctx->digcnt[0] += length;
-	if (rctx->digcnt[0] < length)
-		rctx->digcnt[1]++;
-
+	aspeed_ahash_update_counter(rctx, length);
 	if (final)
 		length += aspeed_ahash_fill_padding(
 			hace_dev, rctx, hash_engine->ahash_src_addr + length);
@@ -210,13 +214,16 @@ static int aspeed_ahash_dma_prepare_sg(struct aspeed_hace_dev *hace_dev)
 	struct ahash_request *req = hash_engine->req;
 	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
 	bool final = rctx->flags & SHA_FLAGS_FINUP;
+	int remain, sg_len, i, max_sg_nents;
+	unsigned int length, offset, total;
 	struct aspeed_sg_list *src_list;
 	struct scatterlist *s;
-	int length, remain, sg_len, i;
 	int rc = 0;
 
-	remain = final ? 0 : rctx->total % rctx->block_size;
-	length = rctx->total - remain;
+	offset = rctx->offset;
+	length = rctx->total - offset;
+	remain = final ? 0 : length - round_down(length, rctx->block_size);
+	length -= remain;
 
 	AHASH_DBG(hace_dev, "%s:0x%x, %s:0x%x, %s:0x%x\n",
 		  "rctx total", rctx->total,
@@ -230,6 +237,8 @@ static int aspeed_ahash_dma_prepare_sg(struct aspeed_hace_dev *hace_dev)
 		goto end;
 	}
 
+	max_sg_nents = ASPEED_HASH_SRC_DMA_BUF_LEN / sizeof(*src_list) - final;
+	sg_len = min(sg_len, max_sg_nents);
 	src_list = (struct aspeed_sg_list *)hash_engine->ahash_src_addr;
 	rctx->digest_dma_addr = dma_map_single(hace_dev->dev, rctx->digest,
 					       SHA512_DIGEST_SIZE,
@@ -240,10 +249,20 @@ static int aspeed_ahash_dma_prepare_sg(struct aspeed_hace_dev *hace_dev)
 		goto free_src_sg;
 	}
 
+	total = 0;
 	for_each_sg(rctx->src_sg, s, sg_len, i) {
 		u32 phy_addr = sg_dma_address(s);
 		u32 len = sg_dma_len(s);
 
+		if (len <= offset) {
+			offset -= len;
+			continue;
+		}
+
+		len -= offset;
+		phy_addr += offset;
+		offset = 0;
+
 		if (length > len)
 			length -= len;
 		else {
@@ -252,24 +271,22 @@ static int aspeed_ahash_dma_prepare_sg(struct aspeed_hace_dev *hace_dev)
 			length = 0;
 		}
 
+		total += len;
 		src_list[i].phy_addr = cpu_to_le32(phy_addr);
 		src_list[i].len = cpu_to_le32(len);
 	}
 
 	if (length != 0) {
-		rc = -EINVAL;
-		goto free_rctx_digest;
+		total = round_down(total, rctx->block_size);
+		final = false;
 	}
 
-	rctx->digcnt[0] += rctx->total - remain;
-	if (rctx->digcnt[0] < rctx->total - remain)
-		rctx->digcnt[1]++;
-
+	aspeed_ahash_update_counter(rctx, total);
 	if (final) {
 		int len = aspeed_ahash_fill_padding(hace_dev, rctx,
 						    rctx->buffer);
 
-		rctx->total += len;
+		total += len;
 		rctx->buffer_dma_addr = dma_map_single(hace_dev->dev,
 						       rctx->buffer,
 						       sizeof(rctx->buffer),
@@ -286,8 +303,7 @@ static int aspeed_ahash_dma_prepare_sg(struct aspeed_hace_dev *hace_dev)
 	}
 	src_list[i - 1].len |= cpu_to_le32(HASH_SG_LAST_LIST);
 
-	rctx->offset = rctx->total - remain;
-	hash_engine->src_length = rctx->total - remain;
+	hash_engine->src_length = total;
 	hash_engine->src_dma = hash_engine->ahash_src_dma_addr;
 	hash_engine->digest_dma = rctx->digest_dma_addr;
 
@@ -311,6 +327,13 @@ static int aspeed_ahash_complete(struct aspeed_hace_dev *hace_dev)
 
 	AHASH_DBG(hace_dev, "\n");
 
+	dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
+			 SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
+
+	if (rctx->total - rctx->offset >= rctx->block_size ||
+	    (rctx->total != rctx->offset && rctx->flags & SHA_FLAGS_FINUP))
+		return aspeed_ahash_req_update(hace_dev);
+
 	hash_engine->flags &= ~CRYPTO_FLAGS_BUSY;
 
 	if (rctx->flags & SHA_FLAGS_FINUP)
@@ -366,36 +389,15 @@ static int aspeed_ahash_update_resume_sg(struct aspeed_hace_dev *hace_dev)
 	dma_unmap_sg(hace_dev->dev, rctx->src_sg, rctx->src_nents,
 		     DMA_TO_DEVICE);
 
-	if (rctx->flags & SHA_FLAGS_FINUP)
+	if (rctx->flags & SHA_FLAGS_FINUP && rctx->total == rctx->offset)
 		dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr,
 				 sizeof(rctx->buffer), DMA_TO_DEVICE);
 
-	dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
-			 SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
-
 	rctx->cmd &= ~HASH_CMD_HASH_SRC_SG_CTRL;
 
 	return aspeed_ahash_complete(hace_dev);
 }
 
-static int aspeed_ahash_update_resume(struct aspeed_hace_dev *hace_dev)
-{
-	struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
-	struct ahash_request *req = hash_engine->req;
-	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
-
-	AHASH_DBG(hace_dev, "\n");
-
-	dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
-			 SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
-
-	if (rctx->total - rctx->offset >= rctx->block_size ||
-	    (rctx->total != rctx->offset && rctx->flags & SHA_FLAGS_FINUP))
-		return aspeed_ahash_req_update(hace_dev);
-
-	return aspeed_ahash_complete(hace_dev);
-}
-
 static int aspeed_ahash_req_update(struct aspeed_hace_dev *hace_dev)
 {
 	struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
@@ -411,7 +413,7 @@ static int aspeed_ahash_req_update(struct aspeed_hace_dev *hace_dev)
 		resume = aspeed_ahash_update_resume_sg;
 
 	} else {
-		resume = aspeed_ahash_update_resume;
+		resume = aspeed_ahash_complete;
 	}
 
 	ret = hash_engine->dma_prepare(hace_dev);
-- 
2.39.5



^ permalink raw reply related	[flat|nested] 12+ messages in thread

end of thread, other threads:[~2025-05-13  6:43 UTC | newest]

Thread overview: 12+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2025-05-13  6:03 [PATCH 00/11] crypto: aspeed/hash - Convert to partial block API Herbert Xu
2025-05-13  6:03 ` [PATCH 01/11] crypto: aspeed/hash - Remove purely software hmac implementation Herbert Xu
2025-05-13  6:03 ` [PATCH 02/11] crypto: aspeed/hash - Reorganise struct aspeed_sham_reqctx Herbert Xu
2025-05-13  6:03 ` [PATCH 03/11] crypto: aspeed/hash - Use init_tfm instead of cra_init Herbert Xu
2025-05-13  6:03 ` [PATCH 04/11] crypto: aspeed/hash - Provide rctx->buffer as argument to fill padding Herbert Xu
2025-05-13  6:03 ` [PATCH 05/11] crypto: aspeed/hash - Move sham_final call into sham_update Herbert Xu
2025-05-13  6:03 ` [PATCH 06/11] crypto: aspeed/hash - Move final padding into dma_prepare Herbert Xu
2025-05-13  6:04 ` [PATCH 07/11] crypto: aspeed/hash - Remove sha_iv Herbert Xu
2025-05-13  6:04 ` [PATCH 08/11] crypto: aspeed/hash - Use API partial block handling Herbert Xu
2025-05-13  6:04 ` [PATCH 09/11] crypto: aspeed/hash - Add fallback Herbert Xu
2025-05-13  6:04 ` [PATCH 10/11] crypto: aspeed/hash - Iterate on large hashes in dma_prepare Herbert Xu
2025-05-13  6:04 ` [PATCH 11/11] crypto: aspeed/hash - Fix potential overflow in dma_prepare_sg Herbert Xu
