From: ard.biesheuvel@linaro.org (Ard Biesheuvel)
To: linux-arm-kernel@lists.infradead.org
Subject: [PATCH resend 14/15] arm64/crypto: add voluntary preemption to Crypto Extensions SHA2
Date: Thu, 1 May 2014 17:51:25 +0200 [thread overview]
Message-ID: <1398959486-8222-5-git-send-email-ard.biesheuvel@linaro.org> (raw)
In-Reply-To: <1398959486-8222-1-git-send-email-ard.biesheuvel@linaro.org>
The Crypto Extensions based SHA2 implementation uses the NEON register file,
and hence runs with preemption disabled. This patch adds a TIF_NEED_RESCHED
check to its inner loop so we at least give up the CPU voluntarily when we
are running in process context and have been tagged for preemption by the
scheduler.
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
---
arch/arm64/crypto/sha2-ce-core.S | 19 ++++++++-------
arch/arm64/crypto/sha2-ce-glue.c | 51 ++++++++++++++++++++++++++++++----------
2 files changed, 50 insertions(+), 20 deletions(-)
diff --git a/arch/arm64/crypto/sha2-ce-core.S b/arch/arm64/crypto/sha2-ce-core.S
index 53e750614169..46b669d91c29 100644
--- a/arch/arm64/crypto/sha2-ce-core.S
+++ b/arch/arm64/crypto/sha2-ce-core.S
@@ -73,8 +73,8 @@
.word 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
/*
- * void sha2_ce_transform(int blocks, u8 const *src, u32 *state,
- * u8 *head, long bytes)
+ * int sha2_ce_transform(int blocks, u8 const *src, u32 *state,
+ * u8 *head, long bytes, struct thread_info *ti)
*/
ENTRY(sha2_ce_transform)
/* load round constants */
@@ -131,7 +131,14 @@ CPU_LE( rev32 v19.16b, v19.16b )
add dgbv.4s, dgbv.4s, dg1v.4s
/* handled all input blocks? */
- cbnz w0, 0b
+ cbz w0, 4f
+
+ /* should we exit early? */
+ b_if_no_resched x5, x8, 0b
+
+ /* store new state */
+3: stp dga, dgb, [x2]
+ ret
/*
* Final block: add padding and total bit count.
@@ -139,7 +146,7 @@ CPU_LE( rev32 v19.16b, v19.16b )
* size was not a round multiple of the block size, and the padding is
* handled by the C code.
*/
- cbz x4, 3f
+4: cbz x4, 3b
movi v17.2d, #0
mov x8, #0x80000000
movi v18.2d, #0
@@ -149,8 +156,4 @@ CPU_LE( rev32 v19.16b, v19.16b )
mov v19.d[0], xzr
mov v19.d[1], x7
b 2b
-
- /* store new state */
-3: stp dga, dgb, [x2]
- ret
ENDPROC(sha2_ce_transform)
diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c
index 81617262b3df..6566ad3fdf82 100644
--- a/arch/arm64/crypto/sha2-ce-glue.c
+++ b/arch/arm64/crypto/sha2-ce-glue.c
@@ -1,4 +1,4 @@
-/*
+/*
* sha2-ce-glue.c - SHA-224/SHA-256 using ARMv8 Crypto Extensions
*
* Copyright (C) 2014 Linaro Ltd <ard.biesheuvel@linaro.org>
@@ -20,8 +20,8 @@ MODULE_DESCRIPTION("SHA-224/SHA-256 secure hash using ARMv8 Crypto Extensions");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");
-asmlinkage void sha2_ce_transform(int blocks, u8 const *src, u32 *state,
- u8 *head, long bytes);
+asmlinkage int sha2_ce_transform(int blocks, u8 const *src, u32 *state,
+ u8 *head, long bytes, struct thread_info *ti);
static int sha224_init(struct shash_desc *desc)
{
@@ -58,6 +58,7 @@ static int sha2_update(struct shash_desc *desc, const u8 *data,
sctx->count += len;
if ((partial + len) >= SHA256_BLOCK_SIZE) {
+ struct thread_info *ti = NULL;
int blocks;
if (partial) {
@@ -68,16 +69,30 @@ static int sha2_update(struct shash_desc *desc, const u8 *data,
len -= p;
}
+ /*
+ * Pass current's thread info pointer to sha2_ce_transform()
+ * below if we want it to play nice under preemption.
+ */
+ if ((IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY) ||
+ IS_ENABLED(CONFIG_PREEMPT)) && !in_interrupt())
+ ti = current_thread_info();
+
blocks = len / SHA256_BLOCK_SIZE;
len %= SHA256_BLOCK_SIZE;
- kernel_neon_begin_partial(28);
- sha2_ce_transform(blocks, data, sctx->state,
- partial ? sctx->buf : NULL, 0);
- kernel_neon_end();
+ do {
+ int rem;
+
+ kernel_neon_begin_partial(28);
+ rem = sha2_ce_transform(blocks, data, sctx->state,
+ partial ? sctx->buf : NULL,
+ 0, ti);
+ kernel_neon_end();
- data += blocks * SHA256_BLOCK_SIZE;
- partial = 0;
+ data += (blocks - rem) * SHA256_BLOCK_SIZE;
+ blocks = rem;
+ partial = 0;
+ } while (unlikely(ti && blocks > 0));
}
if (len)
memcpy(sctx->buf + partial, data, len);
@@ -131,6 +146,7 @@ static void sha2_finup(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
struct sha256_state *sctx = shash_desc_ctx(desc);
+ struct thread_info *ti = NULL;
int blocks;
if (sctx->count || !len || (len % SHA256_BLOCK_SIZE)) {
@@ -147,9 +163,20 @@ static void sha2_finup(struct shash_desc *desc, const u8 *data,
*/
blocks = len / SHA256_BLOCK_SIZE;
- kernel_neon_begin_partial(28);
- sha2_ce_transform(blocks, data, sctx->state, NULL, len);
- kernel_neon_end();
+ if ((IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY) ||
+ IS_ENABLED(CONFIG_PREEMPT)) && !in_interrupt())
+ ti = current_thread_info();
+
+ do {
+ int rem;
+
+ kernel_neon_begin_partial(28);
+ rem = sha2_ce_transform(blocks, data, sctx->state,
+ NULL, len, ti);
+ kernel_neon_end();
+ data += (blocks - rem) * SHA256_BLOCK_SIZE;
+ blocks = rem;
+ } while (unlikely(ti && blocks > 0));
}
static int sha224_finup(struct shash_desc *desc, const u8 *data,
--
1.8.3.2
next prev parent reply other threads:[~2014-05-01 15:51 UTC|newest]
Thread overview: 8+ messages / expand[flat|nested] mbox.gz Atom feed top
2014-05-01 15:51 [PATCH resend 10/15] arm64: pull in <asm/simd.h> from asm-generic Ard Biesheuvel
2014-05-01 15:51 ` [PATCH resend 11/15] arm64/crypto: AES-ECB/CBC/CTR/XTS using ARMv8 NEON and Crypto Extensions Ard Biesheuvel
2014-05-01 15:51 ` [PATCH resend 12/15] arm64/crypto: add shared macro to test for NEED_RESCHED Ard Biesheuvel
2014-05-01 15:51 ` [PATCH resend 13/15] arm64/crypto: add voluntary preemption to Crypto Extensions SHA1 Ard Biesheuvel
2014-05-13 18:58 ` Jussi Kivilinna
2014-05-14 1:36 ` Herbert Xu
2014-05-01 15:51 ` Ard Biesheuvel [this message]
2014-05-01 15:51 ` [PATCH resend 15/15] arm64/crypto: add voluntary preemption to Crypto Extensions GHASH Ard Biesheuvel
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1398959486-8222-5-git-send-email-ard.biesheuvel@linaro.org \
--to=ard.biesheuvel@linaro.org \
--cc=linux-arm-kernel@lists.infradead.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).