public inbox for linux-crypto@vger.kernel.org
 help / color / mirror / Atom feed
* [PATCH 1/6] crypto: hash - Move core export and import into internal/hash.h
@ 2025-05-05 12:32 Herbert Xu
  2025-05-05 12:32 ` [PATCH 2/6] crypto: ahash - Handle partial blocks in API Herbert Xu
                   ` (4 more replies)
  0 siblings, 5 replies; 8+ messages in thread
From: Herbert Xu @ 2025-05-05 12:32 UTC (permalink / raw)
  To: Linux Crypto Mailing List

The core export and import functions are targeted at implementors
so move them into internal/hash.h.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 include/crypto/hash.h          | 48 ----------------------------------
 include/crypto/internal/hash.h | 48 ++++++++++++++++++++++++++++++++++
 2 files changed, 48 insertions(+), 48 deletions(-)

diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index 1760662ad70a..9fc9daaaaab4 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -506,18 +506,6 @@ int crypto_ahash_digest(struct ahash_request *req);
  */
 int crypto_ahash_export(struct ahash_request *req, void *out);
 
-/**
- * crypto_ahash_export_core() - extract core state for message digest
- * @req: reference to the ahash_request handle whose state is exported
- * @out: output buffer of sufficient size that can hold the hash state
- *
- * Export the hash state without the partial block buffer.
- *
- * Context: Softirq or process context.
- * Return: 0 if the export creation was successful; < 0 if an error occurred
- */
-int crypto_ahash_export_core(struct ahash_request *req, void *out);
-
 /**
  * crypto_ahash_import() - import message digest state
  * @req: reference to ahash_request handle the state is imported into
@@ -531,18 +519,6 @@ int crypto_ahash_export_core(struct ahash_request *req, void *out);
  */
 int crypto_ahash_import(struct ahash_request *req, const void *in);
 
-/**
- * crypto_ahash_import_core() - import core state
- * @req: reference to ahash_request handle the state is imported into
- * @in: buffer holding the state
- *
- * Import the hash state without the partial block buffer.
- *
- * Context: Softirq or process context.
- * Return: 0 if the import was successful; < 0 if an error occurred
- */
-int crypto_ahash_import_core(struct ahash_request *req, const void *in);
-
 /**
  * crypto_ahash_init() - (re)initialize message digest handle
  * @req: ahash_request handle that already is initialized with all necessary
@@ -933,18 +909,6 @@ int crypto_hash_digest(struct crypto_ahash *tfm, const u8 *data,
  */
 int crypto_shash_export(struct shash_desc *desc, void *out);
 
-/**
- * crypto_shash_export_core() - extract core state for message digest
- * @desc: reference to the operational state handle whose state is exported
- * @out: output buffer of sufficient size that can hold the hash state
- *
- * Export the hash state without the partial block buffer.
- *
- * Context: Softirq or process context.
- * Return: 0 if the export creation was successful; < 0 if an error occurred
- */
-int crypto_shash_export_core(struct shash_desc *desc, void *out);
-
 /**
  * crypto_shash_import() - import operational state
  * @desc: reference to the operational state handle the state imported into
@@ -959,18 +923,6 @@ int crypto_shash_export_core(struct shash_desc *desc, void *out);
  */
 int crypto_shash_import(struct shash_desc *desc, const void *in);
 
-/**
- * crypto_shash_import_core() - import core state
- * @desc: reference to the operational state handle the state imported into
- * @in: buffer holding the state
- *
- * Import the hash state without the partial block buffer.
- *
- * Context: Softirq or process context.
- * Return: 0 if the import was successful; < 0 if an error occurred
- */
-int crypto_shash_import_core(struct shash_desc *desc, const void *in);
-
 /**
  * crypto_shash_init() - (re)initialize message digest
  * @desc: operational state handle that is already filled
diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
index f2bbdb74e11a..ef5ea75ac5c8 100644
--- a/include/crypto/internal/hash.h
+++ b/include/crypto/internal/hash.h
@@ -305,5 +305,53 @@ static inline unsigned int crypto_shash_coresize(struct crypto_shash *tfm)
 #define HASH_REQUEST_ZERO(name) \
 	memzero_explicit(__##name##_req, sizeof(__##name##_req))
 
+/**
+ * crypto_ahash_export_core() - extract core state for message digest
+ * @req: reference to the ahash_request handle whose state is exported
+ * @out: output buffer of sufficient size that can hold the hash state
+ *
+ * Export the hash state without the partial block buffer.
+ *
+ * Context: Softirq or process context.
+ * Return: 0 if the export creation was successful; < 0 if an error occurred
+ */
+int crypto_ahash_export_core(struct ahash_request *req, void *out);
+
+/**
+ * crypto_ahash_import_core() - import core state
+ * @req: reference to ahash_request handle the state is imported into
+ * @in: buffer holding the state
+ *
+ * Import the hash state without the partial block buffer.
+ *
+ * Context: Softirq or process context.
+ * Return: 0 if the import was successful; < 0 if an error occurred
+ */
+int crypto_ahash_import_core(struct ahash_request *req, const void *in);
+
+/**
+ * crypto_shash_export_core() - extract core state for message digest
+ * @desc: reference to the operational state handle whose state is exported
+ * @out: output buffer of sufficient size that can hold the hash state
+ *
+ * Export the hash state without the partial block buffer.
+ *
+ * Context: Softirq or process context.
+ * Return: 0 if the export creation was successful; < 0 if an error occurred
+ */
+int crypto_shash_export_core(struct shash_desc *desc, void *out);
+
+/**
+ * crypto_shash_import_core() - import core state
+ * @desc: reference to the operational state handle the state imported into
+ * @in: buffer holding the state
+ *
+ * Import the hash state without the partial block buffer.
+ *
+ * Context: Softirq or process context.
+ * Return: 0 if the import was successful; < 0 if an error occurred
+ */
+int crypto_shash_import_core(struct shash_desc *desc, const void *in);
+
 #endif	/* _CRYPTO_INTERNAL_HASH_H */
 
-- 
2.39.5


^ permalink raw reply related	[flat|nested] 8+ messages in thread

* [PATCH 2/6] crypto: ahash - Handle partial blocks in API
  2025-05-05 12:32 [PATCH 1/6] crypto: hash - Move core export and import into internal/hash.h Herbert Xu
@ 2025-05-05 12:32 ` Herbert Xu
  2025-05-08 22:03   ` kernel test robot
  2025-05-05 12:32 ` [PATCH 3/6] crypto: hmac - Zero shash desc in setkey Herbert Xu
                   ` (3 subsequent siblings)
  4 siblings, 1 reply; 8+ messages in thread
From: Herbert Xu @ 2025-05-05 12:32 UTC (permalink / raw)
  To: Linux Crypto Mailing List

Provide an option to handle the partial blocks in the ahash API.
Almost every hash algorithm has a block size and is only able
to hash partial blocks on finalisation.

As a first step disable virtual address support for algorithms
with state sizes larger than HASH_MAX_STATESIZE.  This is OK as
virtual addresses are currently only used on synchronous fallbacks.

This means ahash_do_req_chain only needs to handle synchronous
fallbacks, removing the complexities of saving the request state.

Also move the saved request state into the ahash_request object
as nesting is no longer possible.

Add a scatterlist to ahash_request to store the partial block.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 crypto/ahash.c        | 542 ++++++++++++++++++++----------------------
 include/crypto/hash.h |  12 +-
 2 files changed, 265 insertions(+), 289 deletions(-)

diff --git a/crypto/ahash.c b/crypto/ahash.c
index 344bf1b43e71..2a29c4a73d36 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -12,11 +12,13 @@
  * Copyright (c) 2008 Loc Ho <lho@amcc.com>
  */
 
+#include <crypto/scatterwalk.h>
 #include <linux/cryptouser.h>
 #include <linux/err.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/module.h>
+#include <linux/scatterlist.h>
 #include <linux/slab.h>
 #include <linux/seq_file.h>
 #include <linux/string.h>
@@ -40,24 +42,47 @@ struct crypto_hash_walk {
 	struct scatterlist *sg;
 };
 
-struct ahash_save_req_state {
-	struct ahash_request *req0;
-	crypto_completion_t compl;
-	void *data;
-	struct scatterlist sg;
-	const u8 *src;
-	u8 *page;
-	unsigned int offset;
-	unsigned int nbytes;
-	bool update;
-};
-
-static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt);
-static void ahash_restore_req(struct ahash_request *req);
-static void ahash_def_finup_done1(void *data, int err);
-static int ahash_def_finup_finish1(struct ahash_request *req, int err);
 static int ahash_def_finup(struct ahash_request *req);
 
+static inline bool crypto_ahash_block_only(struct crypto_ahash *tfm)
+{
+	return crypto_ahash_alg(tfm)->halg.base.cra_flags &
+	       CRYPTO_AHASH_ALG_BLOCK_ONLY;
+}
+
+static inline bool crypto_ahash_final_nonzero(struct crypto_ahash *tfm)
+{
+	return crypto_ahash_alg(tfm)->halg.base.cra_flags &
+	       CRYPTO_AHASH_ALG_FINAL_NONZERO;
+}
+
+static inline bool crypto_ahash_need_fallback(struct crypto_ahash *tfm)
+{
+	return crypto_ahash_alg(tfm)->halg.base.cra_flags &
+	       CRYPTO_ALG_NEED_FALLBACK;
+}
+
+static inline void ahash_op_done(void *data, int err,
+				 int (*finish)(struct ahash_request *, int))
+{
+	struct ahash_request *areq = data;
+	crypto_completion_t compl;
+
+	compl = areq->saved_complete;
+	data = areq->saved_data;
+	if (err == -EINPROGRESS)
+		goto out;
+
+	areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
+	err = finish(areq, err);
+	if (err == -EINPROGRESS || err == -EBUSY)
+		return;
+
+out:
+	compl(data, err);
+}
+
 static int hash_walk_next(struct crypto_hash_walk *walk)
 {
 	unsigned int offset = walk->offset;
@@ -298,7 +323,7 @@ int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
 		int err;
 
 		err = alg->setkey(tfm, key, keylen);
-		if (!err && ahash_is_async(tfm))
+		if (!err && crypto_ahash_need_fallback(tfm))
 			err = crypto_ahash_setkey(crypto_ahash_fb(tfm),
 						  key, keylen);
 		if (unlikely(err)) {
@@ -311,159 +336,46 @@ int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_setkey);
 
-static int ahash_reqchain_virt(struct ahash_save_req_state *state,
-			       int err, u32 mask)
-{
-	struct ahash_request *req = state->req0;
-	struct crypto_ahash *tfm;
-
-	tfm = crypto_ahash_reqtfm(req);
-
-	for (;;) {
-		unsigned len = state->nbytes;
-
-		if (!state->offset)
-			break;
-
-		if (state->offset == len || err) {
-			u8 *result = req->result;
-
-			ahash_request_set_virt(req, state->src, result, len);
-			state->offset = 0;
-			break;
-		}
-
-		len -= state->offset;
-
-		len = min(PAGE_SIZE, len);
-		memcpy(state->page, state->src + state->offset, len);
-		state->offset += len;
-		req->nbytes = len;
-
-		err = crypto_ahash_alg(tfm)->update(req);
-		if (err == -EINPROGRESS) {
-			if (state->offset < state->nbytes)
-				err = -EBUSY;
-			break;
-		}
-
-		if (err == -EBUSY)
-			break;
-	}
-
-	return err;
-}
-
-static int ahash_reqchain_finish(struct ahash_request *req0,
-				 struct ahash_save_req_state *state,
-				 int err, u32 mask)
-{
-	u8 *page;
-
-	err = ahash_reqchain_virt(state, err, mask);
-	if (err == -EINPROGRESS || err == -EBUSY)
-		goto out;
-
-	page = state->page;
-	if (page) {
-		memset(page, 0, PAGE_SIZE);
-		free_page((unsigned long)page);
-	}
-	ahash_restore_req(req0);
-
-out:
-	return err;
-}
-
-static void ahash_reqchain_done(void *data, int err)
-{
-	struct ahash_save_req_state *state = data;
-	crypto_completion_t compl = state->compl;
-
-	data = state->data;
-
-	if (err == -EINPROGRESS) {
-		if (state->offset < state->nbytes)
-			return;
-		goto notify;
-	}
-
-	err = ahash_reqchain_finish(state->req0, state, err,
-				    CRYPTO_TFM_REQ_MAY_BACKLOG);
-	if (err == -EBUSY)
-		return;
-
-notify:
-	compl(data, err);
-}
-
 static int ahash_do_req_chain(struct ahash_request *req,
-			      int (*op)(struct ahash_request *req))
+			      int (*const *op)(struct ahash_request *req))
 {
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	bool update = op == crypto_ahash_alg(tfm)->update;
-	struct ahash_save_req_state *state;
-	struct ahash_save_req_state state0;
-	u8 *page = NULL;
 	int err;
 
-	if (crypto_ahash_req_virt(tfm) ||
-	    !update || !ahash_request_isvirt(req))
-		return op(req);
+	if (crypto_ahash_req_virt(tfm) || !ahash_request_isvirt(req))
+		return (*op)(req);
 
-	if (update && ahash_request_isvirt(req)) {
-		page = (void *)__get_free_page(GFP_ATOMIC);
-		err = -ENOMEM;
-		if (!page)
-			goto out;
-	}
+	if (crypto_ahash_statesize(tfm) > HASH_MAX_STATESIZE)
+		return -ENOSYS;
 
-	state = &state0;
-	if (ahash_is_async(tfm)) {
-		err = ahash_save_req(req, ahash_reqchain_done);
-		if (err)
-			goto out_free_page;
+	{
+		u8 state[HASH_MAX_STATESIZE];
 
-		state = req->base.data;
-	}
+		if (op == &crypto_ahash_alg(tfm)->digest) {
+			ahash_request_set_tfm(req, crypto_ahash_fb(tfm));
+			err = crypto_ahash_digest(req);
+			goto out_no_state;
+		}
 
-	state->update = update;
-	state->page = page;
-	state->offset = 0;
-	state->nbytes = 0;
+		err = crypto_ahash_export(req, state);
+		ahash_request_set_tfm(req, crypto_ahash_fb(tfm));
+		err = err ?: crypto_ahash_import(req, state);
 
-	if (page)
-		sg_init_one(&state->sg, page, PAGE_SIZE);
+		if (op == &crypto_ahash_alg(tfm)->finup) {
+			err = err ?: crypto_ahash_finup(req);
+			goto out_no_state;
+		}
 
-	if (update && ahash_request_isvirt(req) && req->nbytes) {
-		unsigned len = req->nbytes;
-		u8 *result = req->result;
+		err = err ?: crypto_ahash_update(req);
+			     crypto_ahash_export(req, state);
 
-		state->src = req->svirt;
-		state->nbytes = len;
+		ahash_request_set_tfm(req, tfm);
+		return err ?: crypto_ahash_import(req, state);
 
-		len = min(PAGE_SIZE, len);
-
-		memcpy(page, req->svirt, len);
-		state->offset = len;
-
-		ahash_request_set_crypt(req, &state->sg, result, len);
-	}
-
-	err = op(req);
-	if (err == -EINPROGRESS || err == -EBUSY) {
-		if (state->offset < state->nbytes)
-			err = -EBUSY;
+out_no_state:
+		ahash_request_set_tfm(req, tfm);
 		return err;
 	}
-
-	return ahash_reqchain_finish(req, state, err, ~0);
-
-out_free_page:
-	free_page((unsigned long)page);
-
-out:
-	return err;
 }
 
 int crypto_ahash_init(struct ahash_request *req)
@@ -476,144 +388,191 @@ int crypto_ahash_init(struct ahash_request *req)
 		return -ENOKEY;
 	if (ahash_req_on_stack(req) && ahash_is_async(tfm))
 		return -EAGAIN;
-	return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->init);
+	if (crypto_ahash_block_only(tfm)) {
+		u8 *buf = ahash_request_ctx(req);
+
+		buf += crypto_ahash_reqsize(tfm) - 1;
+		*buf = 0;
+	}
+	return crypto_ahash_alg(tfm)->init(req);
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_init);
 
-static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
+static void ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
 {
-	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	struct ahash_save_req_state *state;
-
-	if (!ahash_is_async(tfm))
-		return 0;
-
-	state = kmalloc(sizeof(*state), GFP_ATOMIC);
-	if (!state)
-		return -ENOMEM;
-
-	state->compl = req->base.complete;
-	state->data = req->base.data;
+	req->saved_complete = req->base.complete;
+	req->saved_data = req->base.data;
 	req->base.complete = cplt;
-	req->base.data = state;
-	state->req0 = req;
-
-	return 0;
+	req->base.data = req;
 }
 
 static void ahash_restore_req(struct ahash_request *req)
 {
-	struct ahash_save_req_state *state;
-	struct crypto_ahash *tfm;
+	req->base.complete = req->saved_complete;
+	req->base.data = req->saved_data;
+}
 
-	tfm = crypto_ahash_reqtfm(req);
-	if (!ahash_is_async(tfm))
-		return;
+static int ahash_update_finish(struct ahash_request *req, int err)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	bool nonzero = crypto_ahash_final_nonzero(tfm);
+	int bs = crypto_ahash_blocksize(tfm);
+	u8 *blenp = ahash_request_ctx(req);
+	int blen;
+	u8 *buf;
 
-	state = req->base.data;
+	blenp += crypto_ahash_reqsize(tfm) - 1;
+	blen = *blenp;
+	buf = blenp - bs;
 
-	req->base.complete = state->compl;
-	req->base.data = state->data;
-	kfree(state);
+	if (blen) {
+		req->src = req->sg_head + 1;
+		if (sg_is_chain(req->src))
+			req->src = sg_chain_ptr(req->src);
+	}
+
+	req->nbytes += nonzero - blen;
+
+	blen = err < 0 ? 0 : err + nonzero;
+	if (ahash_request_isvirt(req))
+		memcpy(buf, req->svirt + req->nbytes - blen, blen);
+	else
+		memcpy_from_sglist(buf, req->src, req->nbytes - blen, blen);
+	*blenp = blen;
+
+	ahash_restore_req(req);
+
+	return err;
+}
+
+static void ahash_update_done(void *data, int err)
+{
+	ahash_op_done(data, err, ahash_update_finish);
 }
 
 int crypto_ahash_update(struct ahash_request *req)
 {
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	bool nonzero = crypto_ahash_final_nonzero(tfm);
+	int bs = crypto_ahash_blocksize(tfm);
+	u8 *blenp = ahash_request_ctx(req);
+	int blen, err;
+	u8 *buf;
 
 	if (likely(tfm->using_shash))
 		return shash_ahash_update(req, ahash_request_ctx(req));
 	if (ahash_req_on_stack(req) && ahash_is_async(tfm))
 		return -EAGAIN;
-	return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->update);
+	if (!crypto_ahash_block_only(tfm))
+		return ahash_do_req_chain(req, &crypto_ahash_alg(tfm)->update);
+
+	blenp += crypto_ahash_reqsize(tfm) - 1;
+	blen = *blenp;
+	buf = blenp - bs;
+
+	if (blen + req->nbytes < bs + nonzero) {
+		if (ahash_request_isvirt(req))
+			memcpy(buf + blen, req->svirt, req->nbytes);
+		else
+			memcpy_from_sglist(buf + blen, req->src, 0,
+					   req->nbytes);
+
+		*blenp += req->nbytes;
+		return 0;
+	}
+
+	if (blen) {
+		memset(req->sg_head, 0, sizeof(req->sg_head[0]));
+		sg_set_buf(req->sg_head, buf, blen);
+		if (req->src != req->sg_head + 1)
+			sg_chain(req->sg_head, 2, req->src);
+		req->src = req->sg_head;
+		req->nbytes += blen;
+	}
+	req->nbytes -= nonzero;
+
+	ahash_save_req(req, ahash_update_done);
+
+	err = ahash_do_req_chain(req, &crypto_ahash_alg(tfm)->update);
+	if (err == -EINPROGRESS || err == -EBUSY)
+		return err;
+
+	return ahash_update_finish(req, err);
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_update);
 
-int crypto_ahash_final(struct ahash_request *req)
+static int ahash_finup_finish(struct ahash_request *req, int err)
 {
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	u8 *blenp = ahash_request_ctx(req);
+	int blen;
 
-	if (likely(tfm->using_shash))
-		return crypto_shash_final(ahash_request_ctx(req), req->result);
-	if (ahash_req_on_stack(req) && ahash_is_async(tfm))
-		return -EAGAIN;
-	return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->final);
+	blenp += crypto_ahash_reqsize(tfm) - 1;
+	blen = *blenp;
+
+	if (blen) {
+		if (sg_is_last(req->src))
+			req->src = NULL;
+		else {
+			req->src = req->sg_head + 1;
+			if (sg_is_chain(req->src))
+				req->src = sg_chain_ptr(req->src);
+		}
+		req->nbytes -= blen;
+	}
+
+	ahash_restore_req(req);
+
+	return err;
+}
+
+static void ahash_finup_done(void *data, int err)
+{
+	ahash_op_done(data, err, ahash_finup_finish);
 }
-EXPORT_SYMBOL_GPL(crypto_ahash_final);
 
 int crypto_ahash_finup(struct ahash_request *req)
 {
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	int bs = crypto_ahash_blocksize(tfm);
+	u8 *blenp = ahash_request_ctx(req);
+	int blen, err;
+	u8 *buf;
 
 	if (likely(tfm->using_shash))
 		return shash_ahash_finup(req, ahash_request_ctx(req));
 	if (ahash_req_on_stack(req) && ahash_is_async(tfm))
 		return -EAGAIN;
-	if (!crypto_ahash_alg(tfm)->finup ||
-	    (!crypto_ahash_req_virt(tfm) && ahash_request_isvirt(req)))
+	if (!crypto_ahash_alg(tfm)->finup)
 		return ahash_def_finup(req);
-	return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->finup);
+	if (!crypto_ahash_block_only(tfm))
+		return ahash_do_req_chain(req, &crypto_ahash_alg(tfm)->finup);
+
+	blenp += crypto_ahash_reqsize(tfm) - 1;
+	blen = *blenp;
+	buf = blenp - bs;
+
+	if (blen) {
+		memset(req->sg_head, 0, sizeof(req->sg_head[0]));
+		sg_set_buf(req->sg_head, buf, blen);
+		if (!req->src)
+			sg_mark_end(req->sg_head);
+		else if (req->src != req->sg_head + 1)
+			sg_chain(req->sg_head, 2, req->src);
+		req->src = req->sg_head;
+		req->nbytes += blen;
+	}
+
+	ahash_save_req(req, ahash_finup_done);
+
+	err = ahash_do_req_chain(req, &crypto_ahash_alg(tfm)->finup);
+	if (err == -EINPROGRESS || err == -EBUSY)
+		return err;
+
+	return ahash_finup_finish(req, err);
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_finup);
 
-static int ahash_def_digest_finish(struct ahash_request *req, int err)
-{
-	struct crypto_ahash *tfm;
-
-	if (err)
-		goto out;
-
-	tfm = crypto_ahash_reqtfm(req);
-	if (ahash_is_async(tfm))
-		req->base.complete = ahash_def_finup_done1;
-
-	err = crypto_ahash_update(req);
-	if (err == -EINPROGRESS || err == -EBUSY)
-		return err;
-
-	return ahash_def_finup_finish1(req, err);
-
-out:
-	ahash_restore_req(req);
-	return err;
-}
-
-static void ahash_def_digest_done(void *data, int err)
-{
-	struct ahash_save_req_state *state0 = data;
-	struct ahash_save_req_state state;
-	struct ahash_request *areq;
-
-	state = *state0;
-	areq = state.req0;
-	if (err == -EINPROGRESS)
-		goto out;
-
-	areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-
-	err = ahash_def_digest_finish(areq, err);
-	if (err == -EINPROGRESS || err == -EBUSY)
-		return;
-
-out:
-	state.compl(state.data, err);
-}
-
-static int ahash_def_digest(struct ahash_request *req)
-{
-	int err;
-
-	err = ahash_save_req(req, ahash_def_digest_done);
-	if (err)
-		return err;
-
-	err = crypto_ahash_init(req);
-	if (err == -EINPROGRESS || err == -EBUSY)
-		return err;
-
-	return ahash_def_digest_finish(req, err);
-}
-
 int crypto_ahash_digest(struct ahash_request *req)
 {
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
@@ -622,18 +581,15 @@ int crypto_ahash_digest(struct ahash_request *req)
 		return shash_ahash_digest(req, prepare_shash_desc(req, tfm));
 	if (ahash_req_on_stack(req) && ahash_is_async(tfm))
 		return -EAGAIN;
-	if (!crypto_ahash_req_virt(tfm) && ahash_request_isvirt(req))
-		return ahash_def_digest(req);
 	if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
 		return -ENOKEY;
-	return ahash_do_req_chain(req, crypto_ahash_alg(tfm)->digest);
+	return ahash_do_req_chain(req, &crypto_ahash_alg(tfm)->digest);
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_digest);
 
 static void ahash_def_finup_done2(void *data, int err)
 {
-	struct ahash_save_req_state *state = data;
-	struct ahash_request *areq = state->req0;
+	struct ahash_request *areq = data;
 
 	if (err == -EINPROGRESS)
 		return;
@@ -644,14 +600,10 @@ static void ahash_def_finup_done2(void *data, int err)
 
 static int ahash_def_finup_finish1(struct ahash_request *req, int err)
 {
-	struct crypto_ahash *tfm;
-
 	if (err)
 		goto out;
 
-	tfm = crypto_ahash_reqtfm(req);
-	if (ahash_is_async(tfm))
-		req->base.complete = ahash_def_finup_done2;
+	req->base.complete = ahash_def_finup_done2;
 
 	err = crypto_ahash_final(req);
 	if (err == -EINPROGRESS || err == -EBUSY)
@@ -664,32 +616,14 @@ static int ahash_def_finup_finish1(struct ahash_request *req, int err)
 
 static void ahash_def_finup_done1(void *data, int err)
 {
-	struct ahash_save_req_state *state0 = data;
-	struct ahash_save_req_state state;
-	struct ahash_request *areq;
-
-	state = *state0;
-	areq = state.req0;
-	if (err == -EINPROGRESS)
-		goto out;
-
-	areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
-
-	err = ahash_def_finup_finish1(areq, err);
-	if (err == -EINPROGRESS || err == -EBUSY)
-		return;
-
-out:
-	state.compl(state.data, err);
+	ahash_op_done(data, err, ahash_def_finup_finish1);
 }
 
 static int ahash_def_finup(struct ahash_request *req)
 {
 	int err;
 
-	err = ahash_save_req(req, ahash_def_finup_done1);
-	if (err)
-		return err;
+	ahash_save_req(req, ahash_def_finup_done1);
 
 	err = crypto_ahash_update(req);
 	if (err == -EINPROGRESS || err == -EBUSY)
@@ -714,6 +648,14 @@ int crypto_ahash_export(struct ahash_request *req, void *out)
 
 	if (likely(tfm->using_shash))
 		return crypto_shash_export(ahash_request_ctx(req), out);
+	if (crypto_ahash_block_only(tfm)) {
+		unsigned int plen = crypto_ahash_blocksize(tfm) + 1;
+		unsigned int reqsize = crypto_ahash_reqsize(tfm);
+		unsigned int ss = crypto_ahash_statesize(tfm);
+		u8 *buf = ahash_request_ctx(req);
+
+		memcpy(out + ss - plen, buf + reqsize - plen, plen);
+	}
 	return crypto_ahash_alg(tfm)->export(req, out);
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_export);
@@ -737,8 +679,12 @@ int crypto_ahash_import(struct ahash_request *req, const void *in)
 
 	if (likely(tfm->using_shash))
 		return crypto_shash_import(prepare_shash_desc(req, tfm), in);
-	if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
-		return -ENOKEY;
+	if (crypto_ahash_block_only(tfm)) {
+		unsigned int reqsize = crypto_ahash_reqsize(tfm);
+		u8 *buf = ahash_request_ctx(req);
+
+		buf[reqsize - 1] = 0;
+	}
 	return crypto_ahash_import_core(req, in);
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_import);
@@ -753,7 +699,7 @@ static void crypto_ahash_exit_tfm(struct crypto_tfm *tfm)
 	else if (tfm->__crt_alg->cra_exit)
 		tfm->__crt_alg->cra_exit(tfm);
 
-	if (ahash_is_async(hash))
+	if (crypto_ahash_need_fallback(hash))
 		crypto_free_ahash(crypto_ahash_fb(hash));
 }
 
@@ -770,9 +716,14 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
 	if (tfm->__crt_alg->cra_type == &crypto_shash_type)
 		return crypto_init_ahash_using_shash(tfm);
 
-	if (ahash_is_async(hash)) {
+	if (crypto_ahash_need_fallback(hash)) {
+		unsigned int block_only = crypto_ahash_block_only(hash) ?
+					  CRYPTO_AHASH_ALG_BLOCK_ONLY : 0;
+
 		fb = crypto_alloc_ahash(crypto_ahash_alg_name(hash),
-					0, CRYPTO_ALG_ASYNC);
+					CRYPTO_ALG_REQ_VIRT | block_only,
+					CRYPTO_ALG_ASYNC |
+					CRYPTO_ALG_REQ_VIRT | block_only);
 		if (IS_ERR(fb))
 			return PTR_ERR(fb);
 
@@ -797,6 +748,10 @@ static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
 				     MAX_SYNC_HASH_REQSIZE)
 		goto out_exit_tfm;
 
+	BUILD_BUG_ON(HASH_MAX_DESCSIZE > MAX_SYNC_HASH_REQSIZE);
+	if (crypto_ahash_reqsize(hash) < HASH_MAX_DESCSIZE)
+		crypto_ahash_set_reqsize(hash, HASH_MAX_DESCSIZE);
+
 	return 0;
 
 out_exit_tfm:
@@ -941,7 +896,7 @@ struct crypto_ahash *crypto_clone_ahash(struct crypto_ahash *hash)
 		return nhash;
 	}
 
-	if (ahash_is_async(hash)) {
+	if (crypto_ahash_need_fallback(hash)) {
 		fb = crypto_clone_ahash(crypto_ahash_fb(hash));
 		err = PTR_ERR(fb);
 		if (IS_ERR(fb))
@@ -993,9 +948,22 @@ static int ahash_prepare_alg(struct ahash_alg *alg)
 	base->cra_type = &crypto_ahash_type;
 	base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;
 
+	if ((base->cra_flags ^ CRYPTO_ALG_REQ_VIRT) &
+	    (CRYPTO_ALG_ASYNC | CRYPTO_ALG_REQ_VIRT))
+		base->cra_flags |= CRYPTO_ALG_NEED_FALLBACK;
+
 	if (!alg->setkey)
 		alg->setkey = ahash_nosetkey;
 
+	if (base->cra_flags & CRYPTO_AHASH_ALG_BLOCK_ONLY) {
+		BUILD_BUG_ON(MAX_ALGAPI_BLOCKSIZE >= 256);
+		if (!alg->finup)
+			return -EINVAL;
+
+		base->cra_reqsize += base->cra_blocksize + 1;
+		alg->halg.statesize += base->cra_blocksize + 1;
+	}
+
 	return 0;
 }
 
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index 9fc9daaaaab4..540e09ff395d 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -8,8 +8,8 @@
 #ifndef _CRYPTO_HASH_H
 #define _CRYPTO_HASH_H
 
-#include <linux/atomic.h>
 #include <linux/crypto.h>
+#include <linux/scatterlist.h>
 #include <linux/slab.h>
 #include <linux/string.h>
 
@@ -65,6 +65,10 @@ struct ahash_request {
 	};
 	u8 *result;
 
+	struct scatterlist sg_head[2];
+	crypto_completion_t saved_complete;
+	void *saved_data;
+
 	void *__ctx[] CRYPTO_MINALIGN_ATTR;
 };
 
@@ -478,7 +482,11 @@ int crypto_ahash_finup(struct ahash_request *req);
  * -EBUSY	if queue is full and request should be resubmitted later;
  * other < 0	if an error occurred
  */
-int crypto_ahash_final(struct ahash_request *req);
+static inline int crypto_ahash_final(struct ahash_request *req)
+{
+	req->nbytes = 0;
+	return crypto_ahash_finup(req);
+}
 
 /**
  * crypto_ahash_digest() - calculate message digest for a buffer
-- 
2.39.5


^ permalink raw reply related	[flat|nested] 8+ messages in thread

* [PATCH 3/6] crypto: hmac - Zero shash desc in setkey
  2025-05-05 12:32 [PATCH 1/6] crypto: hash - Move core export and import into internal/hash.h Herbert Xu
  2025-05-05 12:32 ` [PATCH 2/6] crypto: ahash - Handle partial blocks in API Herbert Xu
@ 2025-05-05 12:32 ` Herbert Xu
  2025-05-05 12:32 ` [PATCH 4/6] crypto: shash - Set reqsize in shash_alg Herbert Xu
                   ` (2 subsequent siblings)
  4 siblings, 0 replies; 8+ messages in thread
From: Herbert Xu @ 2025-05-05 12:32 UTC (permalink / raw)
  To: Linux Crypto Mailing List

The shash desc needs to be zeroed after use in setkey as it is
not finalised (finalisation automatically zeroes it).

Also remove the final function as it's been superseded by finup.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 crypto/hmac.c | 35 ++++++++++-------------------------
 1 file changed, 10 insertions(+), 25 deletions(-)

diff --git a/crypto/hmac.c b/crypto/hmac.c
index ba36ddf50037..4517e04bfbaa 100644
--- a/crypto/hmac.c
+++ b/crypto/hmac.c
@@ -13,13 +13,11 @@
 
 #include <crypto/hmac.h>
 #include <crypto/internal/hash.h>
-#include <crypto/scatterwalk.h>
 #include <linux/err.h>
 #include <linux/fips.h>
-#include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/scatterlist.h>
+#include <linux/slab.h>
 #include <linux/string.h>
 
 struct hmac_ctx {
@@ -39,7 +37,7 @@ static int hmac_setkey(struct crypto_shash *parent,
 	u8 *ipad = &tctx->pads[0];
 	u8 *opad = &tctx->pads[ss];
 	SHASH_DESC_ON_STACK(shash, hash);
-	unsigned int i;
+	int err, i;
 
 	if (fips_enabled && (keylen < 112 / 8))
 		return -EINVAL;
@@ -65,12 +63,14 @@ static int hmac_setkey(struct crypto_shash *parent,
 		opad[i] ^= HMAC_OPAD_VALUE;
 	}
 
-	return crypto_shash_init(shash) ?:
-	       crypto_shash_update(shash, ipad, bs) ?:
-	       crypto_shash_export(shash, ipad) ?:
-	       crypto_shash_init(shash) ?:
-	       crypto_shash_update(shash, opad, bs) ?:
-	       crypto_shash_export(shash, opad);
+	err = crypto_shash_init(shash) ?:
+	      crypto_shash_update(shash, ipad, bs) ?:
+	      crypto_shash_export(shash, ipad) ?:
+	      crypto_shash_init(shash) ?:
+	      crypto_shash_update(shash, opad, bs) ?:
+	      crypto_shash_export(shash, opad);
+	shash_desc_zero(shash);
+	return err;
 }
 
 static int hmac_export(struct shash_desc *pdesc, void *out)
@@ -105,20 +105,6 @@ static int hmac_update(struct shash_desc *pdesc,
 	return crypto_shash_update(desc, data, nbytes);
 }
 
-static int hmac_final(struct shash_desc *pdesc, u8 *out)
-{
-	struct crypto_shash *parent = pdesc->tfm;
-	int ds = crypto_shash_digestsize(parent);
-	int ss = crypto_shash_statesize(parent);
-	const struct hmac_ctx *tctx = crypto_shash_ctx(parent);
-	const u8 *opad = &tctx->pads[ss];
-	struct shash_desc *desc = shash_desc_ctx(pdesc);
-
-	return crypto_shash_final(desc, out) ?:
-	       crypto_shash_import(desc, opad) ?:
-	       crypto_shash_finup(desc, out, ds, out);
-}
-
 static int hmac_finup(struct shash_desc *pdesc, const u8 *data,
 		      unsigned int nbytes, u8 *out)
 {
@@ -222,7 +208,6 @@ static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb)
 	inst->alg.descsize = sizeof(struct shash_desc) + salg->descsize;
 	inst->alg.init = hmac_init;
 	inst->alg.update = hmac_update;
-	inst->alg.final = hmac_final;
 	inst->alg.finup = hmac_finup;
 	inst->alg.export = hmac_export;
 	inst->alg.import = hmac_import;
-- 
2.39.5


^ permalink raw reply related	[flat|nested] 8+ messages in thread

* [PATCH 4/6] crypto: shash - Set reqsize in shash_alg
  2025-05-05 12:32 [PATCH 1/6] crypto: hash - Move core export and import into internal/hash.h Herbert Xu
  2025-05-05 12:32 ` [PATCH 2/6] crypto: ahash - Handle partial blocks in API Herbert Xu
  2025-05-05 12:32 ` [PATCH 3/6] crypto: hmac - Zero shash desc in setkey Herbert Xu
@ 2025-05-05 12:32 ` Herbert Xu
  2025-05-05 12:32 ` [PATCH 5/6] crypto: algapi - Add driver template support to crypto_inst_setname Herbert Xu
  2025-05-05 12:32 ` [PATCH 6/6] crypto: hmac - Add ahash support Herbert Xu
  4 siblings, 0 replies; 8+ messages in thread
From: Herbert Xu @ 2025-05-05 12:32 UTC (permalink / raw)
  To: Linux Crypto Mailing List

Make reqsize static for shash algorithms.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 crypto/ahash.c | 1 -
 crypto/shash.c | 2 ++
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/crypto/ahash.c b/crypto/ahash.c
index 2a29c4a73d36..ec246cc37619 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -286,7 +286,6 @@ static int crypto_init_ahash_using_shash(struct crypto_tfm *tfm)
 
 	crypto_ahash_set_flags(crt, crypto_shash_get_flags(shash) &
 				    CRYPTO_TFM_NEED_KEY);
-	crt->reqsize = sizeof(struct shash_desc) + crypto_shash_descsize(shash);
 
 	return 0;
 }
diff --git a/crypto/shash.c b/crypto/shash.c
index dee391d47f51..dd3b7de89309 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -483,6 +483,8 @@ static int shash_prepare_alg(struct shash_alg *alg)
 	if (alg->statesize > HASH_MAX_STATESIZE)
 		return -EINVAL;
 
+	base->cra_reqsize = sizeof(struct shash_desc) + alg->descsize;
+
 	return 0;
 }
 
-- 
2.39.5


^ permalink raw reply related	[flat|nested] 8+ messages in thread

* [PATCH 5/6] crypto: algapi - Add driver template support to crypto_inst_setname
  2025-05-05 12:32 [PATCH 1/6] crypto: hash - Move core export and import into internel/hash.h Herbert Xu
                   ` (2 preceding siblings ...)
  2025-05-05 12:32 ` [PATCH 4/6] crypto: shash - Set reqsize in shash_alg Herbert Xu
@ 2025-05-05 12:32 ` Herbert Xu
  2025-05-05 12:32 ` [PATCH 6/6] crypto: hmac - Add ahash support Herbert Xu
  4 siblings, 0 replies; 8+ messages in thread
From: Herbert Xu @ 2025-05-05 12:32 UTC (permalink / raw)
  To: Linux Crypto Mailing List

Add support to crypto_inst_setname for having a driver template
name that differs from the algorithm template name.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 crypto/algapi.c         |  8 ++++----
 include/crypto/algapi.h | 12 ++++++++++--
 2 files changed, 14 insertions(+), 6 deletions(-)

diff --git a/crypto/algapi.c b/crypto/algapi.c
index 532d3efc3c7d..6618cab2a8f7 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -924,20 +924,20 @@ const char *crypto_attr_alg_name(struct rtattr *rta)
 }
 EXPORT_SYMBOL_GPL(crypto_attr_alg_name);
 
-int crypto_inst_setname(struct crypto_instance *inst, const char *name,
-			struct crypto_alg *alg)
+int __crypto_inst_setname(struct crypto_instance *inst, const char *name,
+			  const char *driver, struct crypto_alg *alg)
 {
 	if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, "%s(%s)", name,
 		     alg->cra_name) >= CRYPTO_MAX_ALG_NAME)
 		return -ENAMETOOLONG;
 
 	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
-		     name, alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
+		     driver, alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
 		return -ENAMETOOLONG;
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(crypto_inst_setname);
+EXPORT_SYMBOL_GPL(__crypto_inst_setname);
 
 void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen)
 {
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index 423e57eca351..188eface0a11 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -146,8 +146,16 @@ void *crypto_spawn_tfm2(struct crypto_spawn *spawn);
 struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb);
 int crypto_check_attr_type(struct rtattr **tb, u32 type, u32 *mask_ret);
 const char *crypto_attr_alg_name(struct rtattr *rta);
-int crypto_inst_setname(struct crypto_instance *inst, const char *name,
-			struct crypto_alg *alg);
+int __crypto_inst_setname(struct crypto_instance *inst, const char *name,
+			  const char *driver, struct crypto_alg *alg);
+
+#define crypto_inst_setname(inst, name, ...) \
+	CONCATENATE(crypto_inst_setname_, COUNT_ARGS(__VA_ARGS__))( \
+		inst, name, ##__VA_ARGS__)
+#define crypto_inst_setname_1(inst, name, alg) \
+	__crypto_inst_setname(inst, name, name, alg)
+#define crypto_inst_setname_2(inst, name, driver, alg) \
+	__crypto_inst_setname(inst, name, driver, alg)
 
 void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen);
 int crypto_enqueue_request(struct crypto_queue *queue,
-- 
2.39.5


^ permalink raw reply related	[flat|nested] 8+ messages in thread

* [PATCH 6/6] crypto: hmac - Add ahash support
  2025-05-05 12:32 [PATCH 1/6] crypto: hash - Move core export and import into internel/hash.h Herbert Xu
                   ` (3 preceding siblings ...)
  2025-05-05 12:32 ` [PATCH 5/6] crypto: algapi - Add driver template support to crypto_inst_setname Herbert Xu
@ 2025-05-05 12:32 ` Herbert Xu
  2025-05-10 15:42   ` kernel test robot
  4 siblings, 1 reply; 8+ messages in thread
From: Herbert Xu @ 2025-05-05 12:32 UTC (permalink / raw)
  To: Linux Crypto Mailing List

Add ahash support to hmac so that drivers that can't do hmac in
hardware do not have to implement duplicate copies of hmac.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 crypto/ahash.c                 |  10 +-
 crypto/hmac.c                  | 318 +++++++++++++++++++++++++++++++--
 include/crypto/hash.h          |   3 +-
 include/crypto/internal/hash.h |   9 +
 4 files changed, 326 insertions(+), 14 deletions(-)

diff --git a/crypto/ahash.c b/crypto/ahash.c
index ec246cc37619..ae3eac5c7c97 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -845,7 +845,7 @@ int crypto_has_ahash(const char *alg_name, u32 type, u32 mask)
 }
 EXPORT_SYMBOL_GPL(crypto_has_ahash);
 
-static bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg)
+bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg)
 {
 	struct crypto_alg *alg = &halg->base;
 
@@ -854,6 +854,7 @@ static bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg)
 
 	return __crypto_ahash_alg(alg)->setkey != ahash_nosetkey;
 }
+EXPORT_SYMBOL_GPL(crypto_hash_alg_has_setkey);
 
 struct crypto_ahash *crypto_clone_ahash(struct crypto_ahash *hash)
 {
@@ -1060,5 +1061,12 @@ int crypto_hash_digest(struct crypto_ahash *tfm, const u8 *data,
 }
 EXPORT_SYMBOL_GPL(crypto_hash_digest);
 
+void ahash_free_singlespawn_instance(struct ahash_instance *inst)
+{
+	crypto_drop_spawn(ahash_instance_ctx(inst));
+	kfree(inst);
+}
+EXPORT_SYMBOL_GPL(ahash_free_singlespawn_instance);
+
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Asynchronous cryptographic hash type");
diff --git a/crypto/hmac.c b/crypto/hmac.c
index 4517e04bfbaa..c9c635e48250 100644
--- a/crypto/hmac.c
+++ b/crypto/hmac.c
@@ -26,6 +26,12 @@ struct hmac_ctx {
 	u8 pads[];
 };
 
+struct ahash_hmac_ctx {
+	struct crypto_ahash *hash;
+	/* Contains 'u8 ipad[statesize];', then 'u8 opad[statesize];' */
+	u8 pads[];
+};
+
 static int hmac_setkey(struct crypto_shash *parent,
 		       const u8 *inkey, unsigned int keylen)
 {
@@ -157,21 +163,17 @@ static void hmac_exit_tfm(struct crypto_shash *parent)
 	crypto_free_shash(tctx->hash);
 }
 
-static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb)
+static int hmac_create_shash(struct crypto_template *tmpl, struct rtattr **tb,
+			     u32 mask)
 {
 	struct shash_instance *inst;
 	struct crypto_shash_spawn *spawn;
 	struct crypto_alg *alg;
 	struct shash_alg *salg;
-	u32 mask;
 	int err;
 	int ds;
 	int ss;
 
-	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask);
-	if (err)
-		return err;
-
 	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
 	if (!inst)
 		return -ENOMEM;
@@ -226,20 +228,312 @@ static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb)
 	return err;
 }
 
-static struct crypto_template hmac_tmpl = {
-	.name = "hmac",
-	.create = hmac_create,
-	.module = THIS_MODULE,
+static int hmac_setkey_ahash(struct crypto_ahash *parent,
+			     const u8 *inkey, unsigned int keylen)
+{
+	struct ahash_hmac_ctx *tctx = crypto_ahash_ctx(parent);
+	struct crypto_ahash *fb = crypto_ahash_fb(tctx->hash);
+	int ds = crypto_ahash_digestsize(parent);
+	int bs = crypto_ahash_blocksize(parent);
+	int ss = crypto_ahash_statesize(parent);
+	HASH_REQUEST_ON_STACK(req, fb);
+	u8 *opad = &tctx->pads[ss];
+	u8 *ipad = &tctx->pads[0];
+	int err, i;
+
+	if (fips_enabled && (keylen < 112 / 8))
+		return -EINVAL;
+
+	ahash_request_set_callback(req, 0, NULL, NULL);
+
+	if (keylen > bs) {
+		ahash_request_set_virt(req, inkey, ipad, keylen);
+		err = crypto_ahash_digest(req);
+		if (err)
+			goto out_zero_req;
+
+		keylen = ds;
+	} else
+		memcpy(ipad, inkey, keylen);
+
+	memset(ipad + keylen, 0, bs - keylen);
+	memcpy(opad, ipad, bs);
+
+	for (i = 0; i < bs; i++) {
+		ipad[i] ^= HMAC_IPAD_VALUE;
+		opad[i] ^= HMAC_OPAD_VALUE;
+	}
+
+	ahash_request_set_virt(req, ipad, NULL, bs);
+	err = crypto_ahash_init(req) ?:
+	      crypto_ahash_update(req) ?:
+	      crypto_ahash_export(req, ipad);
+
+	ahash_request_set_virt(req, opad, NULL, bs);
+	err = err ?:
+	      crypto_ahash_init(req) ?:
+	      crypto_ahash_update(req) ?:
+	      crypto_ahash_export(req, opad);
+
+out_zero_req:
+	HASH_REQUEST_ZERO(req);
+	return err;
+}
+
+static int hmac_export_ahash(struct ahash_request *preq, void *out)
+{
+	return crypto_ahash_export(ahash_request_ctx(preq), out);
+}
+
+static int hmac_import_ahash(struct ahash_request *preq, const void *in)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(preq);
+	struct ahash_hmac_ctx *tctx = crypto_ahash_ctx(tfm);
+	struct ahash_request *req = ahash_request_ctx(preq);
+
+	ahash_request_set_tfm(req, tctx->hash);
+	return crypto_ahash_import(req, in);
+}
+
+static int hmac_init_ahash(struct ahash_request *preq)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(preq);
+	struct ahash_hmac_ctx *tctx = crypto_ahash_ctx(tfm);
+
+	return hmac_import_ahash(preq, &tctx->pads[0]);
+}
+
+static int hmac_update_ahash(struct ahash_request *preq)
+{
+	struct ahash_request *req = ahash_request_ctx(preq);
+
+	ahash_request_set_callback(req, ahash_request_flags(preq),
+				   preq->base.complete, preq->base.data);
+	if (ahash_request_isvirt(preq))
+		ahash_request_set_virt(req, preq->svirt, NULL, preq->nbytes);
+	else
+		ahash_request_set_crypt(req, preq->src, NULL, preq->nbytes);
+	return crypto_ahash_update(req);
+}
+
+static int hmac_finup_finish(struct ahash_request *preq, unsigned int mask)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(preq);
+	struct ahash_request *req = ahash_request_ctx(preq);
+	struct ahash_hmac_ctx *tctx = crypto_ahash_ctx(tfm);
+	int ds = crypto_ahash_digestsize(tfm);
+	int ss = crypto_ahash_statesize(tfm);
+	const u8 *opad = &tctx->pads[ss];
+
+	ahash_request_set_callback(req, ahash_request_flags(preq) & ~mask,
+				   preq->base.complete, preq->base.data);
+	ahash_request_set_virt(req, preq->result, preq->result, ds);
+	return crypto_ahash_import(req, opad) ?:
+	       crypto_ahash_finup(req);
+
+}
+
+static void hmac_finup_done(void *data, int err)
+{
+	struct ahash_request *preq = data;
+
+	if (err)
+		goto out;
+
+	err = hmac_finup_finish(preq, CRYPTO_TFM_REQ_MAY_SLEEP);
+	if (err == -EINPROGRESS || err == -EBUSY)
+		return;
+
+out:
+	ahash_request_complete(preq, err);
+}
+
+static int hmac_finup_ahash(struct ahash_request *preq)
+{
+	struct ahash_request *req = ahash_request_ctx(preq);
+
+	ahash_request_set_callback(req, ahash_request_flags(preq),
+				   hmac_finup_done, preq);
+	if (ahash_request_isvirt(preq))
+		ahash_request_set_virt(req, preq->svirt, preq->result,
+				       preq->nbytes);
+	else
+		ahash_request_set_crypt(req, preq->src, preq->result,
+					preq->nbytes);
+	return crypto_ahash_finup(req) ?:
+	       hmac_finup_finish(preq, 0);
+}
+
+static int hmac_digest_ahash(struct ahash_request *preq)
+{
+	return hmac_init_ahash(preq) ?:
+	       hmac_finup_ahash(preq);
+}
+
+static int hmac_init_ahash_tfm(struct crypto_ahash *parent)
+{
+	struct ahash_instance *inst = ahash_alg_instance(parent);
+	struct ahash_hmac_ctx *tctx = crypto_ahash_ctx(parent);
+	struct crypto_ahash *hash;
+
+	hash = crypto_spawn_ahash(ahash_instance_ctx(inst));
+	if (IS_ERR(hash))
+		return PTR_ERR(hash);
+
+	if (crypto_ahash_reqsize(parent) < sizeof(struct ahash_request) +
+					   crypto_ahash_reqsize(hash))
+		return -EINVAL;
+
+	tctx->hash = hash;
+	return 0;
+}
+
+static int hmac_clone_ahash_tfm(struct crypto_ahash *dst,
+				struct crypto_ahash *src)
+{
+	struct ahash_hmac_ctx *sctx = crypto_ahash_ctx(src);
+	struct ahash_hmac_ctx *dctx = crypto_ahash_ctx(dst);
+	struct crypto_ahash *hash;
+
+	hash = crypto_clone_ahash(sctx->hash);
+	if (IS_ERR(hash))
+		return PTR_ERR(hash);
+
+	dctx->hash = hash;
+	return 0;
+}
+
+static void hmac_exit_ahash_tfm(struct crypto_ahash *parent)
+{
+	struct ahash_hmac_ctx *tctx = crypto_ahash_ctx(parent);
+
+	crypto_free_ahash(tctx->hash);
+}
+
+static int __hmac_create_ahash(struct crypto_template *tmpl,
+			       struct rtattr **tb, u32 mask)
+{
+	struct crypto_ahash_spawn *spawn;
+	struct ahash_instance *inst;
+	struct crypto_alg *alg;
+	struct hash_alg_common *halg;
+	int ds, ss, err;
+
+	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
+	if (!inst)
+		return -ENOMEM;
+	spawn = ahash_instance_ctx(inst);
+
+	err = crypto_grab_ahash(spawn, ahash_crypto_instance(inst),
+				crypto_attr_alg_name(tb[1]), 0, mask);
+	if (err)
+		goto err_free_inst;
+	halg = crypto_spawn_ahash_alg(spawn);
+	alg = &halg->base;
+
+	/* The underlying hash algorithm must not require a key */
+	err = -EINVAL;
+	if (crypto_hash_alg_needs_key(halg))
+		goto err_free_inst;
+
+	ds = halg->digestsize;
+	ss = halg->statesize;
+	if (ds > alg->cra_blocksize || ss < alg->cra_blocksize)
+		goto err_free_inst;
+
+	err = crypto_inst_setname(ahash_crypto_instance(inst), "hmac",
+				  "hmac-ahash", alg);
+	if (err)
+		goto err_free_inst;
+
+	inst->alg.halg.base.cra_flags = alg->cra_flags &
+					CRYPTO_ALG_INHERITED_FLAGS;
+	inst->alg.halg.base.cra_flags |= CRYPTO_ALG_REQ_VIRT;
+	inst->alg.halg.base.cra_priority = alg->cra_priority + 100;
+	inst->alg.halg.base.cra_blocksize = alg->cra_blocksize;
+	inst->alg.halg.base.cra_ctxsize = sizeof(struct ahash_hmac_ctx) +
+					  (ss * 2);
+	inst->alg.halg.base.cra_reqsize = sizeof(struct ahash_request) +
+					  alg->cra_reqsize;
+
+	inst->alg.halg.digestsize = ds;
+	inst->alg.halg.statesize = ss;
+	inst->alg.init = hmac_init_ahash;
+	inst->alg.update = hmac_update_ahash;
+	inst->alg.finup = hmac_finup_ahash;
+	inst->alg.digest = hmac_digest_ahash;
+	inst->alg.export = hmac_export_ahash;
+	inst->alg.import = hmac_import_ahash;
+	inst->alg.setkey = hmac_setkey_ahash;
+	inst->alg.init_tfm = hmac_init_ahash_tfm;
+	inst->alg.clone_tfm = hmac_clone_ahash_tfm;
+	inst->alg.exit_tfm = hmac_exit_ahash_tfm;
+
+	inst->free = ahash_free_singlespawn_instance;
+
+	err = ahash_register_instance(tmpl, inst);
+	if (err) {
+err_free_inst:
+		ahash_free_singlespawn_instance(inst);
+	}
+	return err;
+}
+
+static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb)
+{
+	struct crypto_attr_type *algt;
+	u32 mask;
+
+	algt = crypto_get_attr_type(tb);
+	if (IS_ERR(algt))
+		return PTR_ERR(algt);
+
+	mask = crypto_algt_inherited_mask(algt);
+
+	if (!((algt->type ^ CRYPTO_ALG_TYPE_AHASH) &
+	      algt->mask & CRYPTO_ALG_TYPE_MASK))
+		return __hmac_create_ahash(tmpl, tb, mask);
+
+	if ((algt->type ^ CRYPTO_ALG_TYPE_SHASH) &
+	    algt->mask & CRYPTO_ALG_TYPE_MASK)
+		return -EINVAL;
+
+	return hmac_create_shash(tmpl, tb, mask);
+}
+
+static int hmac_create_ahash(struct crypto_template *tmpl, struct rtattr **tb)
+{
+	u32 mask;
+	int err;
+
+	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AHASH, &mask);
+	if (err)
+		return err == -EINVAL ? -ENOENT : err;
+
+	return __hmac_create_ahash(tmpl, tb, mask);
+}
+
+static struct crypto_template hmac_tmpls[] = {
+	{
+		.name = "hmac",
+		.create = hmac_create,
+		.module = THIS_MODULE,
+	},
+	{
+		.name = "hmac-ahash",
+		.create = hmac_create_ahash,
+		.module = THIS_MODULE,
+	},
 };
 
 static int __init hmac_module_init(void)
 {
-	return crypto_register_template(&hmac_tmpl);
+	return crypto_register_templates(hmac_tmpls, ARRAY_SIZE(hmac_tmpls));
 }
 
 static void __exit hmac_module_exit(void)
 {
-	crypto_unregister_template(&hmac_tmpl);
+	crypto_unregister_templates(hmac_tmpls, ARRAY_SIZE(hmac_tmpls));
 }
 
 module_init(hmac_module_init);
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index 540e09ff395d..2d982995dd12 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -179,7 +179,8 @@ struct shash_desc {
  * containing a 'struct s390_sha_ctx'.
  */
 #define HASH_MAX_DESCSIZE	(sizeof(struct shash_desc) + 360)
-#define MAX_SYNC_HASH_REQSIZE	HASH_MAX_DESCSIZE
+#define MAX_SYNC_HASH_REQSIZE	(sizeof(struct ahash_request) + \
+				 HASH_MAX_DESCSIZE)
 
 #define SHASH_DESC_ON_STACK(shash, ctx)					     \
 	char __##shash##_desc[sizeof(struct shash_desc) + HASH_MAX_DESCSIZE] \
diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
index ef5ea75ac5c8..519e2de4bfba 100644
--- a/include/crypto/internal/hash.h
+++ b/include/crypto/internal/hash.h
@@ -64,6 +64,7 @@ int crypto_register_ahashes(struct ahash_alg *algs, int count);
 void crypto_unregister_ahashes(struct ahash_alg *algs, int count);
 int ahash_register_instance(struct crypto_template *tmpl,
 			    struct ahash_instance *inst);
+void ahash_free_singlespawn_instance(struct ahash_instance *inst);
 
 int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
 		    unsigned int keylen);
@@ -73,12 +74,20 @@ static inline bool crypto_shash_alg_has_setkey(struct shash_alg *alg)
 	return alg->setkey != shash_no_setkey;
 }
 
+bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg);
+
 static inline bool crypto_shash_alg_needs_key(struct shash_alg *alg)
 {
 	return crypto_shash_alg_has_setkey(alg) &&
 		!(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY);
 }
 
+static inline bool crypto_hash_alg_needs_key(struct hash_alg_common *alg)
+{
+	return crypto_hash_alg_has_setkey(alg) &&
+		!(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY);
+}
+
 int crypto_grab_ahash(struct crypto_ahash_spawn *spawn,
 		      struct crypto_instance *inst,
 		      const char *name, u32 type, u32 mask);
-- 
2.39.5


^ permalink raw reply related	[flat|nested] 8+ messages in thread

* Re: [PATCH 2/6] crypto: ahash - Handle partial blocks in API
  2025-05-05 12:32 ` [PATCH 2/6] crypto: ahash - Handle partial blocks in API Herbert Xu
@ 2025-05-08 22:03   ` kernel test robot
  0 siblings, 0 replies; 8+ messages in thread
From: kernel test robot @ 2025-05-08 22:03 UTC (permalink / raw)
  To: Herbert Xu, Linux Crypto Mailing List; +Cc: oe-kbuild-all

Hi Herbert,

kernel test robot noticed the following build warnings:

[auto build test WARNING on herbert-cryptodev-2.6/master]
[also build test WARNING on next-20250508]
[cannot apply to herbert-crypto-2.6/master linus/master v6.15-rc5]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/Herbert-Xu/crypto-ahash-Handle-partial-blocks-in-API/20250505-203411
base:   https://git.kernel.org/pub/scm/linux/kernel/git/herbert/cryptodev-2.6.git master
patch link:    https://lore.kernel.org/r/26a6ba5a71b8848c6e79757a596ecc3838bf320e.1746448291.git.herbert%40gondor.apana.org.au
patch subject: [PATCH 2/6] crypto: ahash - Handle partial blocks in API
config: nios2-randconfig-r073-20250509 (https://download.01.org/0day-ci/archive/20250509/202505090505.7uAKB19V-lkp@intel.com/config)
compiler: nios2-linux-gcc (GCC) 13.3.0

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202505090505.7uAKB19V-lkp@intel.com/

smatch warnings:
crypto/ahash.c:370 ahash_do_req_chain() warn: inconsistent indenting

vim +370 crypto/ahash.c

   338	
   339	static int ahash_do_req_chain(struct ahash_request *req,
   340				      int (*const *op)(struct ahash_request *req))
   341	{
   342		struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
   343		int err;
   344	
   345		if (crypto_ahash_req_virt(tfm) || !ahash_request_isvirt(req))
   346			return (*op)(req);
   347	
   348		if (crypto_ahash_statesize(tfm) > HASH_MAX_STATESIZE)
   349			return -ENOSYS;
   350	
   351		{
   352			u8 state[HASH_MAX_STATESIZE];
   353	
   354			if (op == &crypto_ahash_alg(tfm)->digest) {
   355				ahash_request_set_tfm(req, crypto_ahash_fb(tfm));
   356				err = crypto_ahash_digest(req);
   357				goto out_no_state;
   358			}
   359	
   360			err = crypto_ahash_export(req, state);
   361			ahash_request_set_tfm(req, crypto_ahash_fb(tfm));
   362			err = err ?: crypto_ahash_import(req, state);
   363	
   364			if (op == &crypto_ahash_alg(tfm)->finup) {
   365				err = err ?: crypto_ahash_finup(req);
   366				goto out_no_state;
   367			}
   368	
   369			err = err ?: crypto_ahash_update(req);
 > 370				     crypto_ahash_export(req, state);
   371	
   372			ahash_request_set_tfm(req, tfm);
   373			return err ?: crypto_ahash_import(req, state);
   374	
   375	out_no_state:
   376			ahash_request_set_tfm(req, tfm);
   377			return err;
   378		}
   379	}
   380	

-- 
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH 6/6] crypto: hmac - Add ahash support
  2025-05-05 12:32 ` [PATCH 6/6] crypto: hmac - Add ahash support Herbert Xu
@ 2025-05-10 15:42   ` kernel test robot
  0 siblings, 0 replies; 8+ messages in thread
From: kernel test robot @ 2025-05-10 15:42 UTC (permalink / raw)
  To: Herbert Xu, Linux Crypto Mailing List; +Cc: llvm, oe-kbuild-all

Hi Herbert,

kernel test robot noticed the following build warnings:

[auto build test WARNING on herbert-cryptodev-2.6/master]
[also build test WARNING on next-20250509]
[cannot apply to herbert-crypto-2.6/master linus/master v6.15-rc5]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/Herbert-Xu/crypto-ahash-Handle-partial-blocks-in-API/20250505-203411
base:   https://git.kernel.org/pub/scm/linux/kernel/git/herbert/cryptodev-2.6.git master
patch link:    https://lore.kernel.org/r/f67cc874594fd2cc873667530c83e239ae09db81.1746448291.git.herbert%40gondor.apana.org.au
patch subject: [PATCH 6/6] crypto: hmac - Add ahash support
config: mips-eyeq6_defconfig (https://download.01.org/0day-ci/archive/20250510/202505102347.IvvpwcPQ-lkp@intel.com/config)
compiler: clang version 21.0.0git (https://github.com/llvm/llvm-project f819f46284f2a79790038e1f6649172789734ae8)
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20250510/202505102347.IvvpwcPQ-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202505102347.IvvpwcPQ-lkp@intel.com/

All warnings (new ones prefixed by >>):

>> crypto/hmac.c:231:12: warning: stack frame size (1152) exceeds limit (1024) in 'hmac_setkey_ahash' [-Wframe-larger-than]
     231 | static int hmac_setkey_ahash(struct crypto_ahash *parent,
         |            ^
   1 warning generated.


vim +/hmac_setkey_ahash +231 crypto/hmac.c

   230	
 > 231	static int hmac_setkey_ahash(struct crypto_ahash *parent,
   232				     const u8 *inkey, unsigned int keylen)
   233	{
   234		struct ahash_hmac_ctx *tctx = crypto_ahash_ctx(parent);
   235		struct crypto_ahash *fb = crypto_ahash_fb(tctx->hash);
   236		int ds = crypto_ahash_digestsize(parent);
   237		int bs = crypto_ahash_blocksize(parent);
   238		int ss = crypto_ahash_statesize(parent);
   239		HASH_REQUEST_ON_STACK(req, fb);
   240		u8 *opad = &tctx->pads[ss];
   241		u8 *ipad = &tctx->pads[0];
   242		int err, i;
   243	
   244		if (fips_enabled && (keylen < 112 / 8))
   245			return -EINVAL;
   246	
   247		ahash_request_set_callback(req, 0, NULL, NULL);
   248	
   249		if (keylen > bs) {
   250			ahash_request_set_virt(req, inkey, ipad, keylen);
   251			err = crypto_ahash_digest(req);
   252			if (err)
   253				goto out_zero_req;
   254	
   255			keylen = ds;
   256		} else
   257			memcpy(ipad, inkey, keylen);
   258	
   259		memset(ipad + keylen, 0, bs - keylen);
   260		memcpy(opad, ipad, bs);
   261	
   262		for (i = 0; i < bs; i++) {
   263			ipad[i] ^= HMAC_IPAD_VALUE;
   264			opad[i] ^= HMAC_OPAD_VALUE;
   265		}
   266	
   267		ahash_request_set_virt(req, ipad, NULL, bs);
   268		err = crypto_ahash_init(req) ?:
   269		      crypto_ahash_update(req) ?:
   270		      crypto_ahash_export(req, ipad);
   271	
   272		ahash_request_set_virt(req, opad, NULL, bs);
   273		err = err ?:
   274		      crypto_ahash_init(req) ?:
   275		      crypto_ahash_update(req) ?:
   276		      crypto_ahash_export(req, opad);
   277	
   278	out_zero_req:
   279		HASH_REQUEST_ZERO(req);
   280		return err;
   281	}
   282	

-- 
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki

^ permalink raw reply	[flat|nested] 8+ messages in thread

end of thread, other threads:[~2025-05-10 15:43 UTC | newest]

Thread overview: 8+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2025-05-05 12:32 [PATCH 1/6] crypto: hash - Move core export and import into internel/hash.h Herbert Xu
2025-05-05 12:32 ` [PATCH 2/6] crypto: ahash - Handle partial blocks in API Herbert Xu
2025-05-08 22:03   ` kernel test robot
2025-05-05 12:32 ` [PATCH 3/6] crypto: hmac - Zero shash desc in setkey Herbert Xu
2025-05-05 12:32 ` [PATCH 4/6] crypto: shash - Set reqsize in shash_alg Herbert Xu
2025-05-05 12:32 ` [PATCH 5/6] crypto: algapi - Add driver template support to crypto_inst_setname Herbert Xu
2025-05-05 12:32 ` [PATCH 6/6] crypto: hmac - Add ahash support Herbert Xu
2025-05-10 15:42   ` kernel test robot

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox