linux-crypto.vger.kernel.org archive mirror
* [PATCH 0/2] sha1-ni-asm.S cleanups
From: Eric Biggers @ 2025-07-18 19:18 UTC
  To: linux-crypto
  Cc: linux-kernel, x86, Ard Biesheuvel, Jason A . Donenfeld,
	Eric Biggers

This series cleans up the x86_64 SHA-NI optimized SHA-1 code.

This is targeting libcrypto-next.

Eric Biggers (2):
  lib/crypto: x86/sha1-ni: Minor optimizations and cleanup
  lib/crypto: x86/sha1-ni: Convert to use rounds macros

 lib/crypto/x86/sha1-ni-asm.S | 253 ++++++++---------------------------
 1 file changed, 53 insertions(+), 200 deletions(-)


base-commit: 78792545ad68c05d1eb7b3c3b03c4309da921a04
-- 
2.50.1



* [PATCH 1/2] lib/crypto: x86/sha1-ni: Minor optimizations and cleanup
From: Eric Biggers @ 2025-07-18 19:18 UTC
  To: linux-crypto
  Cc: linux-kernel, x86, Ard Biesheuvel, Jason A . Donenfeld,
	Eric Biggers

- Store the previous state in %xmm8-%xmm9 instead of spilling it to the
  stack.  There are plenty of unused XMM registers here, so there is no
  reason to spill to the stack.  (While 32-bit code is limited to
  %xmm0-%xmm7, this is 64-bit code, so it's free to use %xmm8-%xmm15.)

- Remove the unnecessary check for nblocks == 0.  sha1_ni_transform() is
  always passed a positive nblocks.

- To get an XMM register with 'e' in the high dword and the rest zeroes,
  just zeroize the register using pxor, then load 'e'.  Previously the
  code loaded 'e', then zeroized the lower dwords by AND-ing with a
  constant, which was slightly less efficient.

- Instead of computing &DATA_PTR[NBLOCKS << 6] and stopping when
  DATA_PTR reaches that value, just decrement NBLOCKS on each iteration
  and stop when it reaches 0.  This needs fewer instructions.  (A rough
  C sketch of the two loop structures appears just below the diffstat.)

- Rename DIGEST_PTR to STATE_PTR.  It points to the SHA-1 internal
  state, not a SHA-1 digest value.

This commit shrinks the code size of sha1_ni_transform() from 624 bytes
to 589 bytes and also shrinks rodata by 16 bytes.

Signed-off-by: Eric Biggers <ebiggers@kernel.org>
---
 lib/crypto/x86/sha1-ni-asm.S | 68 +++++++++++++-----------------------
 1 file changed, 25 insertions(+), 43 deletions(-)
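
For illustration, here is a rough C sketch of the loop-control change
described above (the removed nblocks == 0 check and the switch from an
end-pointer comparison to counting NBLOCKS down).  It is a sketch only:
the struct layout is assumed, and process_block() is a hypothetical
stand-in for the 80 SHA-NI rounds; the real implementation is the
assembly below.

#include <stddef.h>
#include <stdint.h>

struct sha1_block_state { uint32_t h[5]; };     /* field layout assumed */

static void process_block(struct sha1_block_state *state, const uint8_t *data)
{
        /* placeholder for the 80 rounds done with SHA-NI instructions */
        (void)state;
        (void)data;
}

/* Old structure: check for zero blocks, then compare against an end pointer. */
static void transform_old(struct sha1_block_state *state,
                          const uint8_t *data, size_t nblocks)
{
        const uint8_t *end = data + (nblocks << 6);     /* 64 bytes per block */

        if (data == end)        /* the nblocks == 0 check that was removed */
                return;
        do {
                process_block(state, data);
                data += 64;
        } while (data != end);
}

/* New structure: the caller guarantees nblocks != 0, so just count down. */
static void transform_new(struct sha1_block_state *state,
                          const uint8_t *data, size_t nblocks)
{
        do {
                process_block(state, data);
                data += 64;
        } while (--nblocks);
}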

diff --git a/lib/crypto/x86/sha1-ni-asm.S b/lib/crypto/x86/sha1-ni-asm.S
index 3989b0642ff5f..1d08b2f364ce7 100644
--- a/lib/crypto/x86/sha1-ni-asm.S
+++ b/lib/crypto/x86/sha1-ni-asm.S
@@ -53,65 +53,56 @@
  *
  */
 
 #include <linux/linkage.h>
 
-#define DIGEST_PTR	%rdi	/* 1st arg */
+#define STATE_PTR	%rdi	/* 1st arg */
 #define DATA_PTR	%rsi	/* 2nd arg */
 #define NUM_BLKS	%rdx	/* 3rd arg */
 
-/* gcc conversion */
-#define FRAME_SIZE	32	/* space for 2x16 bytes */
-
 #define ABCD		%xmm0
 #define E0		%xmm1	/* Need two E's b/c they ping pong */
 #define E1		%xmm2
 #define MSG0		%xmm3
 #define MSG1		%xmm4
 #define MSG2		%xmm5
 #define MSG3		%xmm6
 #define SHUF_MASK	%xmm7
-
+#define ABCD_SAVED	%xmm8
+#define E0_SAVED	%xmm9
 
 /*
  * Intel SHA Extensions optimized implementation of a SHA-1 block function
  *
  * This function takes a pointer to the current SHA-1 state, a pointer to the
- * input data, and the number of 64-byte blocks to process.  Once all blocks
- * have been processed, the state is updated with the new state.  This function
- * only processes complete blocks.  State initialization, buffering of partial
+ * input data, and the number of 64-byte blocks to process.  The number of
+ * blocks to process is assumed to be nonzero.  Once all blocks have been
+ * processed, the state is updated with the new state.  This function only
+ * processes complete blocks.  State initialization, buffering of partial
  * blocks, and digest finalization are expected to be handled elsewhere.
  *
  * The indented lines in the loop are instructions related to rounds processing.
  * The non-indented lines are instructions related to the message schedule.
  *
  * void sha1_ni_transform(struct sha1_block_state *state,
  *			  const u8 *data, size_t nblocks)
  */
 .text
 SYM_FUNC_START(sha1_ni_transform)
-	push		%rbp
-	mov		%rsp, %rbp
-	sub		$FRAME_SIZE, %rsp
-	and		$~0xF, %rsp
-
-	shl		$6, NUM_BLKS		/* convert to bytes */
-	jz		.Ldone_hash
-	add		DATA_PTR, NUM_BLKS	/* pointer to end of data */
-
-	/* load initial hash values */
-	pinsrd		$3, 1*16(DIGEST_PTR), E0
-	movdqu		0*16(DIGEST_PTR), ABCD
-	pand		UPPER_WORD_MASK(%rip), E0
+
+	/* Load the initial state from STATE_PTR. */
+	pxor		E0, E0
+	pinsrd		$3, 16(STATE_PTR), E0
+	movdqu		(STATE_PTR), ABCD
 	pshufd		$0x1B, ABCD, ABCD
 
 	movdqa		PSHUFFLE_BYTE_FLIP_MASK(%rip), SHUF_MASK
 
-.Lloop0:
-	/* Save hash values for addition after rounds */
-	movdqa		E0, (0*16)(%rsp)
-	movdqa		ABCD, (1*16)(%rsp)
+.Lnext_block:
+	/* Save the state for addition after the rounds. */
+	movdqa		E0, E0_SAVED
+	movdqa		ABCD, ABCD_SAVED
 
 	/* Rounds 0-3 */
 	movdqu		0*16(DATA_PTR), MSG0
 	pshufb		SHUF_MASK, MSG0
 		paddd		MSG0, E0
@@ -265,35 +256,26 @@ SYM_FUNC_START(sha1_ni_transform)
 	/* Rounds 76-79 */
 		sha1nexte	MSG3, E1
 		movdqa		ABCD, E0
 		sha1rnds4	$3, E1, ABCD
 
-	/* Add current hash values with previously saved */
-	sha1nexte	(0*16)(%rsp), E0
-	paddd		(1*16)(%rsp), ABCD
+	/* Add the previous state (before the rounds) to the current state. */
+	sha1nexte	E0_SAVED, E0
+	paddd		ABCD_SAVED, ABCD
 
-	/* Increment data pointer and loop if more to process */
+	/* Advance to the next block, or break if there are no more blocks. */
 	add		$64, DATA_PTR
-	cmp		NUM_BLKS, DATA_PTR
-	jne		.Lloop0
+	dec		NUM_BLKS
+	jnz		.Lnext_block
 
-	/* Write hash values back in the correct order */
+	/* Store the new state to STATE_PTR. */
+	pextrd		$3, E0, 16(STATE_PTR)
 	pshufd		$0x1B, ABCD, ABCD
-	movdqu		ABCD, 0*16(DIGEST_PTR)
-	pextrd		$3, E0, 1*16(DIGEST_PTR)
-
-.Ldone_hash:
-	mov		%rbp, %rsp
-	pop		%rbp
+	movdqu		ABCD, (STATE_PTR)
 
 	RET
 SYM_FUNC_END(sha1_ni_transform)
 
 .section	.rodata.cst16.PSHUFFLE_BYTE_FLIP_MASK, "aM", @progbits, 16
 .align 16
 PSHUFFLE_BYTE_FLIP_MASK:
 	.octa 0x000102030405060708090a0b0c0d0e0f
-
-.section	.rodata.cst16.UPPER_WORD_MASK, "aM", @progbits, 16
-.align 16
-UPPER_WORD_MASK:
-	.octa 0xFFFFFFFF000000000000000000000000
-- 
2.50.1



* [PATCH 2/2] lib/crypto: x86/sha1-ni: Convert to use rounds macros
From: Eric Biggers @ 2025-07-18 19:19 UTC
  To: linux-crypto
  Cc: linux-kernel, x86, Ard Biesheuvel, Jason A . Donenfeld,
	Eric Biggers

The assembly code that does all 80 rounds of SHA-1 is highly repetitive.
Replace it with 20 expansions of a macro that does 4 rounds, using the
macro arguments and .if directives to handle the slight variations
between rounds.  This reduces the length of sha1-ni-asm.S by 129 lines
while still producing the exact same object file.  This mirrors
sha256-ni-asm.S, which uses the same strategy.

Signed-off-by: Eric Biggers <ebiggers@kernel.org>
---
 lib/crypto/x86/sha1-ni-asm.S | 187 ++++++-----------------------------
 1 file changed, 29 insertions(+), 158 deletions(-)
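
As an illustration of what the macro and the .irp block expand to, here
is a small standalone C program (a sketch, not kernel code) that prints,
for each 4-round group, which MSG and E registers fill each do_4rounds
argument and which conditional message-schedule steps the .if directives
enable:

#include <stdio.h>

int main(void)
{
        static const char *msg[4] = { "MSG0", "MSG1", "MSG2", "MSG3" };

        for (int i = 0; i < 80; i += 4) {
                int g = (i / 4) % 4;    /* the m0..m3 roles rotate each group */
                int e = (i / 4) % 2;    /* E0 and E1 ping-pong each group */

                printf("rounds %2d-%2d: m0=%s m1=%s m2=%s m3=%s e0=E%d e1=E%d imm=%d",
                       i, i + 3, msg[g], msg[(g + 1) % 4], msg[(g + 2) % 4],
                       msg[(g + 3) % 4], e, 1 - e, i / 20);
                if (i < 16)
                        printf(" [movdqu+pshufb]");
                if (i >= 4 && i < 68)
                        printf(" [sha1msg1]");
                if (i >= 8 && i < 72)
                        printf(" [pxor]");
                if (i >= 12 && i < 76)
                        printf(" [sha1msg2]");
                printf("\n");
        }
        return 0;
}

The imm column is the sha1rnds4 immediate (\i / 20), which is why the
hand-unrolled code used $0, $1, $2, and $3 in groups of 20 rounds.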

diff --git a/lib/crypto/x86/sha1-ni-asm.S b/lib/crypto/x86/sha1-ni-asm.S
index 1d08b2f364ce7..428f9b9605943 100644
--- a/lib/crypto/x86/sha1-ni-asm.S
+++ b/lib/crypto/x86/sha1-ni-asm.S
@@ -68,23 +68,43 @@
 #define MSG3		%xmm6
 #define SHUF_MASK	%xmm7
 #define ABCD_SAVED	%xmm8
 #define E0_SAVED	%xmm9
 
+.macro do_4rounds	i, m0, m1, m2, m3, e0, e1
+.if \i < 16
+	movdqu		\i*4(DATA_PTR), \m0
+	pshufb		SHUF_MASK, \m0
+.endif
+.if \i == 0
+	paddd		\m0, \e0
+.else
+	sha1nexte	\m0, \e0
+.endif
+	movdqa		ABCD, \e1
+.if \i >= 12 && \i < 76
+	sha1msg2	\m0, \m1
+.endif
+	sha1rnds4	$\i / 20, \e0, ABCD
+.if \i >= 4 && \i < 68
+	sha1msg1	\m0, \m3
+.endif
+.if \i >= 8 && \i < 72
+	pxor		\m0, \m2
+.endif
+.endm
+
 /*
  * Intel SHA Extensions optimized implementation of a SHA-1 block function
  *
  * This function takes a pointer to the current SHA-1 state, a pointer to the
  * input data, and the number of 64-byte blocks to process.  The number of
  * blocks to process is assumed to be nonzero.  Once all blocks have been
  * processed, the state is updated with the new state.  This function only
  * processes complete blocks.  State initialization, buffering of partial
  * blocks, and digest finalization are expected to be handled elsewhere.
  *
- * The indented lines in the loop are instructions related to rounds processing.
- * The non-indented lines are instructions related to the message schedule.
- *
  * void sha1_ni_transform(struct sha1_block_state *state,
  *			  const u8 *data, size_t nblocks)
  */
 .text
 SYM_FUNC_START(sha1_ni_transform)
@@ -100,165 +120,16 @@ SYM_FUNC_START(sha1_ni_transform)
 .Lnext_block:
 	/* Save the state for addition after the rounds. */
 	movdqa		E0, E0_SAVED
 	movdqa		ABCD, ABCD_SAVED
 
-	/* Rounds 0-3 */
-	movdqu		0*16(DATA_PTR), MSG0
-	pshufb		SHUF_MASK, MSG0
-		paddd		MSG0, E0
-		movdqa		ABCD, E1
-		sha1rnds4	$0, E0, ABCD
-
-	/* Rounds 4-7 */
-	movdqu		1*16(DATA_PTR), MSG1
-	pshufb		SHUF_MASK, MSG1
-		sha1nexte	MSG1, E1
-		movdqa		ABCD, E0
-		sha1rnds4	$0, E1, ABCD
-	sha1msg1	MSG1, MSG0
-
-	/* Rounds 8-11 */
-	movdqu		2*16(DATA_PTR), MSG2
-	pshufb		SHUF_MASK, MSG2
-		sha1nexte	MSG2, E0
-		movdqa		ABCD, E1
-		sha1rnds4	$0, E0, ABCD
-	sha1msg1	MSG2, MSG1
-	pxor		MSG2, MSG0
-
-	/* Rounds 12-15 */
-	movdqu		3*16(DATA_PTR), MSG3
-	pshufb		SHUF_MASK, MSG3
-		sha1nexte	MSG3, E1
-		movdqa		ABCD, E0
-	sha1msg2	MSG3, MSG0
-		sha1rnds4	$0, E1, ABCD
-	sha1msg1	MSG3, MSG2
-	pxor		MSG3, MSG1
-
-	/* Rounds 16-19 */
-		sha1nexte	MSG0, E0
-		movdqa		ABCD, E1
-	sha1msg2	MSG0, MSG1
-		sha1rnds4	$0, E0, ABCD
-	sha1msg1	MSG0, MSG3
-	pxor		MSG0, MSG2
-
-	/* Rounds 20-23 */
-		sha1nexte	MSG1, E1
-		movdqa		ABCD, E0
-	sha1msg2	MSG1, MSG2
-		sha1rnds4	$1, E1, ABCD
-	sha1msg1	MSG1, MSG0
-	pxor		MSG1, MSG3
-
-	/* Rounds 24-27 */
-		sha1nexte	MSG2, E0
-		movdqa		ABCD, E1
-	sha1msg2	MSG2, MSG3
-		sha1rnds4	$1, E0, ABCD
-	sha1msg1	MSG2, MSG1
-	pxor		MSG2, MSG0
-
-	/* Rounds 28-31 */
-		sha1nexte	MSG3, E1
-		movdqa		ABCD, E0
-	sha1msg2	MSG3, MSG0
-		sha1rnds4	$1, E1, ABCD
-	sha1msg1	MSG3, MSG2
-	pxor		MSG3, MSG1
-
-	/* Rounds 32-35 */
-		sha1nexte	MSG0, E0
-		movdqa		ABCD, E1
-	sha1msg2	MSG0, MSG1
-		sha1rnds4	$1, E0, ABCD
-	sha1msg1	MSG0, MSG3
-	pxor		MSG0, MSG2
-
-	/* Rounds 36-39 */
-		sha1nexte	MSG1, E1
-		movdqa		ABCD, E0
-	sha1msg2	MSG1, MSG2
-		sha1rnds4	$1, E1, ABCD
-	sha1msg1	MSG1, MSG0
-	pxor		MSG1, MSG3
-
-	/* Rounds 40-43 */
-		sha1nexte	MSG2, E0
-		movdqa		ABCD, E1
-	sha1msg2	MSG2, MSG3
-		sha1rnds4	$2, E0, ABCD
-	sha1msg1	MSG2, MSG1
-	pxor		MSG2, MSG0
-
-	/* Rounds 44-47 */
-		sha1nexte	MSG3, E1
-		movdqa		ABCD, E0
-	sha1msg2	MSG3, MSG0
-		sha1rnds4	$2, E1, ABCD
-	sha1msg1	MSG3, MSG2
-	pxor		MSG3, MSG1
-
-	/* Rounds 48-51 */
-		sha1nexte	MSG0, E0
-		movdqa		ABCD, E1
-	sha1msg2	MSG0, MSG1
-		sha1rnds4	$2, E0, ABCD
-	sha1msg1	MSG0, MSG3
-	pxor		MSG0, MSG2
-
-	/* Rounds 52-55 */
-		sha1nexte	MSG1, E1
-		movdqa		ABCD, E0
-	sha1msg2	MSG1, MSG2
-		sha1rnds4	$2, E1, ABCD
-	sha1msg1	MSG1, MSG0
-	pxor		MSG1, MSG3
-
-	/* Rounds 56-59 */
-		sha1nexte	MSG2, E0
-		movdqa		ABCD, E1
-	sha1msg2	MSG2, MSG3
-		sha1rnds4	$2, E0, ABCD
-	sha1msg1	MSG2, MSG1
-	pxor		MSG2, MSG0
-
-	/* Rounds 60-63 */
-		sha1nexte	MSG3, E1
-		movdqa		ABCD, E0
-	sha1msg2	MSG3, MSG0
-		sha1rnds4	$3, E1, ABCD
-	sha1msg1	MSG3, MSG2
-	pxor		MSG3, MSG1
-
-	/* Rounds 64-67 */
-		sha1nexte	MSG0, E0
-		movdqa		ABCD, E1
-	sha1msg2	MSG0, MSG1
-		sha1rnds4	$3, E0, ABCD
-	sha1msg1	MSG0, MSG3
-	pxor		MSG0, MSG2
-
-	/* Rounds 68-71 */
-		sha1nexte	MSG1, E1
-		movdqa		ABCD, E0
-	sha1msg2	MSG1, MSG2
-		sha1rnds4	$3, E1, ABCD
-	pxor		MSG1, MSG3
-
-	/* Rounds 72-75 */
-		sha1nexte	MSG2, E0
-		movdqa		ABCD, E1
-	sha1msg2	MSG2, MSG3
-		sha1rnds4	$3, E0, ABCD
-
-	/* Rounds 76-79 */
-		sha1nexte	MSG3, E1
-		movdqa		ABCD, E0
-		sha1rnds4	$3, E1, ABCD
+.irp i, 0, 16, 32, 48, 64
+	do_4rounds	(\i + 0),  MSG0, MSG1, MSG2, MSG3, E0, E1
+	do_4rounds	(\i + 4),  MSG1, MSG2, MSG3, MSG0, E1, E0
+	do_4rounds	(\i + 8),  MSG2, MSG3, MSG0, MSG1, E0, E1
+	do_4rounds	(\i + 12), MSG3, MSG0, MSG1, MSG2, E1, E0
+.endr
 
 	/* Add the previous state (before the rounds) to the current state. */
 	sha1nexte	E0_SAVED, E0
 	paddd		ABCD_SAVED, ABCD
 
-- 
2.50.1



* Re: [PATCH 0/2] sha1-ni-asm.S cleanups
From: Ard Biesheuvel @ 2025-07-21  4:30 UTC
  To: Eric Biggers; +Cc: linux-crypto, linux-kernel, x86, Jason A . Donenfeld

On Sat, 19 Jul 2025 at 05:20, Eric Biggers <ebiggers@kernel.org> wrote:
>
> This series cleans up the x86_64 SHA-NI optimized SHA-1 code.
>
> This is targeting libcrypto-next.
>
> Eric Biggers (2):
>   lib/crypto: x86/sha1-ni: Minor optimizations and cleanup
>   lib/crypto: x86/sha1-ni: Convert to use rounds macros
>

Reviewed-by: Ard Biesheuvel <ardb@kernel.org>
