From: Taehee Yoo <ap420073@gmail.com>
To: linux-crypto@vger.kernel.org, herbert@gondor.apana.org.au,
davem@davemloft.net, x86@kernel.org
Cc: elliott@hpe.com, jussi.kivilinna@iki.fi, ebiggers@kernel.org,
ap420073@gmail.com
Subject: [PATCH v8 1/4] crypto: aria: add keystream array into request ctx
Date: Sun, 1 Jan 2023 09:12:49 +0000 [thread overview]
Message-ID: <20230101091252.700117-2-ap420073@gmail.com> (raw)
In-Reply-To: <20230101091252.700117-1-ap420073@gmail.com>
The AVX accelerated ARIA module has been using a local keystream array
allocated on the kernel stack. However, that keystream array is too large
for the stack, so move it into the skcipher request context instead.
Signed-off-by: Taehee Yoo <ap420073@gmail.com>
---
v8:
- No changes.
v7:
- No changes.
v6:
- No changes.
v5:
- No changes.
v4:
- Add aria_avx_request_ctx for the keystream array
v3:
- No changes.
v2:
- Patch introduced.
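Note for reviewers (not part of the patch): the sketch below shows the general
per-request context pattern this change relies on, i.e. reserving extra space
with crypto_skcipher_set_reqsize() in the init callback and retrieving it with
skcipher_request_ctx() in the encrypt path. The example_* names and
EXAMPLE_KEYSTREAM_SIZE are hypothetical, for illustration only:

	#include <linux/types.h>
	#include <linux/string.h>
	#include <crypto/internal/skcipher.h>

	/* Mirrors ARIA_AESNI_PARALLEL_BLOCK_SIZE (16 blocks * 16 bytes). */
	#define EXAMPLE_KEYSTREAM_SIZE	256

	struct example_request_ctx {
		u8 keystream[EXAMPLE_KEYSTREAM_SIZE];
	};

	static int example_init_tfm(struct crypto_skcipher *tfm)
	{
		/* Ask the skcipher core to allocate this much extra space
		 * with every skcipher_request issued against this tfm.
		 */
		crypto_skcipher_set_reqsize(tfm,
					    sizeof(struct example_request_ctx));
		return 0;
	}

	static int example_encrypt(struct skcipher_request *req)
	{
		/* Per-request scratch area, used in place of a large
		 * on-stack keystream buffer.
		 */
		struct example_request_ctx *rctx = skcipher_request_ctx(req);

		memset(rctx->keystream, 0, sizeof(rctx->keystream));
		return 0;
	}

This is the same mechanism aria_avx_init_tfm() and skcipher_request_ctx()
provide in the diff below.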
arch/x86/crypto/aria_aesni_avx_glue.c | 39 ++++++++++++++++++---------
1 file changed, 26 insertions(+), 13 deletions(-)
diff --git a/arch/x86/crypto/aria_aesni_avx_glue.c b/arch/x86/crypto/aria_aesni_avx_glue.c
index c561ea4fefa5..5f97e442349f 100644
--- a/arch/x86/crypto/aria_aesni_avx_glue.c
+++ b/arch/x86/crypto/aria_aesni_avx_glue.c
@@ -33,6 +33,10 @@ asmlinkage void aria_aesni_avx_gfni_ctr_crypt_16way(const void *ctx, u8 *dst,
static struct aria_avx_ops aria_ops;
+struct aria_avx_request_ctx {
+ u8 keystream[ARIA_AESNI_PARALLEL_BLOCK_SIZE];
+};
+
static int ecb_do_encrypt(struct skcipher_request *req, const u32 *rkey)
{
ECB_WALK_START(req, ARIA_BLOCK_SIZE, ARIA_AESNI_PARALLEL_BLOCKS);
@@ -73,6 +77,7 @@ static int aria_avx_set_key(struct crypto_skcipher *tfm, const u8 *key,
static int aria_avx_ctr_encrypt(struct skcipher_request *req)
{
+ struct aria_avx_request_ctx *req_ctx = skcipher_request_ctx(req);
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct aria_ctx *ctx = crypto_skcipher_ctx(tfm);
struct skcipher_walk walk;
@@ -86,10 +91,9 @@ static int aria_avx_ctr_encrypt(struct skcipher_request *req)
u8 *dst = walk.dst.virt.addr;
while (nbytes >= ARIA_AESNI_PARALLEL_BLOCK_SIZE) {
- u8 keystream[ARIA_AESNI_PARALLEL_BLOCK_SIZE];
-
kernel_fpu_begin();
- aria_ops.aria_ctr_crypt_16way(ctx, dst, src, keystream,
+ aria_ops.aria_ctr_crypt_16way(ctx, dst, src,
+ &req_ctx->keystream[0],
walk.iv);
kernel_fpu_end();
dst += ARIA_AESNI_PARALLEL_BLOCK_SIZE;
@@ -98,28 +102,29 @@ static int aria_avx_ctr_encrypt(struct skcipher_request *req)
}
while (nbytes >= ARIA_BLOCK_SIZE) {
- u8 keystream[ARIA_BLOCK_SIZE];
-
- memcpy(keystream, walk.iv, ARIA_BLOCK_SIZE);
+ memcpy(&req_ctx->keystream[0], walk.iv, ARIA_BLOCK_SIZE);
crypto_inc(walk.iv, ARIA_BLOCK_SIZE);
- aria_encrypt(ctx, keystream, keystream);
+ aria_encrypt(ctx, &req_ctx->keystream[0],
+ &req_ctx->keystream[0]);
- crypto_xor_cpy(dst, src, keystream, ARIA_BLOCK_SIZE);
+ crypto_xor_cpy(dst, src, &req_ctx->keystream[0],
+ ARIA_BLOCK_SIZE);
dst += ARIA_BLOCK_SIZE;
src += ARIA_BLOCK_SIZE;
nbytes -= ARIA_BLOCK_SIZE;
}
if (walk.nbytes == walk.total && nbytes > 0) {
- u8 keystream[ARIA_BLOCK_SIZE];
-
- memcpy(keystream, walk.iv, ARIA_BLOCK_SIZE);
+ memcpy(&req_ctx->keystream[0], walk.iv,
+ ARIA_BLOCK_SIZE);
crypto_inc(walk.iv, ARIA_BLOCK_SIZE);
- aria_encrypt(ctx, keystream, keystream);
+ aria_encrypt(ctx, &req_ctx->keystream[0],
+ &req_ctx->keystream[0]);
- crypto_xor_cpy(dst, src, keystream, nbytes);
+ crypto_xor_cpy(dst, src, &req_ctx->keystream[0],
+ nbytes);
dst += nbytes;
src += nbytes;
nbytes = 0;
@@ -130,6 +135,13 @@ static int aria_avx_ctr_encrypt(struct skcipher_request *req)
return err;
}
+static int aria_avx_init_tfm(struct crypto_skcipher *tfm)
+{
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct aria_avx_request_ctx));
+
+ return 0;
+}
+
static struct skcipher_alg aria_algs[] = {
{
.base.cra_name = "__ecb(aria)",
@@ -160,6 +172,7 @@ static struct skcipher_alg aria_algs[] = {
.setkey = aria_avx_set_key,
.encrypt = aria_avx_ctr_encrypt,
.decrypt = aria_avx_ctr_encrypt,
+ .init = aria_avx_init_tfm,
}
};
--
2.34.1
Thread overview: 6+ messages
2023-01-01 9:12 [PATCH v8 0/4] crypto: aria: implement aria-avx2 and aria-avx512 Taehee Yoo
2023-01-01 9:12 ` [PATCH v8 1/4] crypto: aria: add keystream array into request ctx Taehee Yoo [this message]
2023-01-01 9:12 ` [PATCH v8 2/4] crypto: aria: do not use magic number offsets of aria_ctx Taehee Yoo
2023-01-01 9:12 ` [PATCH v8 3/4] crypto: aria: implement aria-avx2 Taehee Yoo
2023-01-01 9:12 ` [PATCH v8 4/4] crypto: aria: implement aria-avx512 Taehee Yoo
2023-01-06 15:19 ` [PATCH v8 0/4] crypto: aria: implement aria-avx2 and aria-avx512 Herbert Xu