From: Richard Henderson <richard.henderson@linaro.org>
To: qemu-devel@nongnu.org
Cc: berrange@redhat.com, ardb@kernel.org
Subject: [PATCH 18/18] host/include/aarch64: Implement clmul.h
Date: Thu, 13 Jul 2023 22:14:35 +0100
Message-ID: <20230713211435.13505-19-richard.henderson@linaro.org>
In-Reply-To: <20230713211435.13505-1-richard.henderson@linaro.org>
Detect PMULL in cpuinfo; implement the accel hooks.
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
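
For reference, the hooks below accelerate a carry-less (GF(2) polynomial)
multiply.  A scalar sketch of the 64x64->128 operation that the generic
clmul_64_gen fallback from earlier in the series computes; the clmul_64_ref
name and this exact formulation are illustrative only, not taken from the
series, and assume <stdint.h>:

    static inline void clmul_64_ref(uint64_t n, uint64_t m,
                                    uint64_t *lo, uint64_t *hi)
    {
        uint64_t rl = 0, rh = 0;

        /* XOR in a shifted copy of n for every set bit of m. */
        for (int i = 0; i < 64; i++) {
            if (m & (1ull << i)) {
                rl ^= n << i;
                rh ^= i ? n >> (64 - i) : 0;
            }
        }
        *lo = rl;
        *hi = rh;
    }

When FEAT_PMULL is present, clmul_64 below returns the same 128-bit value
from a single PMULL instruction.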
host/include/aarch64/host/cpuinfo.h | 1 +
host/include/aarch64/host/crypto/clmul.h | 230 +++++++++++++++++++++++
util/cpuinfo-aarch64.c | 4 +-
3 files changed, 234 insertions(+), 1 deletion(-)
create mode 100644 host/include/aarch64/host/crypto/clmul.h
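
clmul_16x4_packed_accel in the new header builds four 16x16->32 products
from two byte-wide expanding polynomial multiplies (vmull_p8), one with the
bytes of m reversed.  The identity it relies on, in scalar form; clmul8 and
clmul16_by_bytes are illustration-only helpers, not part of the patch, with
the same <stdint.h> assumption as above:

    static uint32_t clmul8(uint8_t x, uint8_t y)
    {
        uint32_t r = 0;

        for (int i = 0; i < 8; i++) {
            if (y & (1 << i)) {
                r ^= (uint32_t)x << i;
            }
        }
        return r;
    }

    /*
     * With n = a:b and m = c:d (high:low bytes), the carry-less product
     * is ac << 16 ^ (ad ^ bc) << 8 ^ bd -- the same four rows that
     * clmul_16x4_packed_accel sums from the two expanding multiplies.
     */
    static uint32_t clmul16_by_bytes(uint16_t n, uint16_t m)
    {
        uint8_t a = n >> 8, b = n, c = m >> 8, d = m;

        return clmul8(a, c) << 16 ^ (clmul8(a, d) ^ clmul8(b, c)) << 8
               ^ clmul8(b, d);
    }
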
diff --git a/host/include/aarch64/host/cpuinfo.h b/host/include/aarch64/host/cpuinfo.h
index 05feeb4f43..da268dce13 100644
--- a/host/include/aarch64/host/cpuinfo.h
+++ b/host/include/aarch64/host/cpuinfo.h
@@ -10,6 +10,7 @@
#define CPUINFO_LSE (1u << 1)
#define CPUINFO_LSE2 (1u << 2)
#define CPUINFO_AES (1u << 3)
+#define CPUINFO_PMULL (1u << 4)
/* Initialized with a constructor. */
extern unsigned cpuinfo;
diff --git a/host/include/aarch64/host/crypto/clmul.h b/host/include/aarch64/host/crypto/clmul.h
new file mode 100644
index 0000000000..7fd827898b
--- /dev/null
+++ b/host/include/aarch64/host/crypto/clmul.h
@@ -0,0 +1,230 @@
+/*
+ * AArch64 specific clmul acceleration.
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef AARCH64_HOST_CRYPTO_CLMUL_H
+#define AARCH64_HOST_CRYPTO_CLMUL_H
+
+#include "host/cpuinfo.h"
+#include <arm_neon.h>
+
+/* Both FEAT_AES and FEAT_PMULL are covered under the same macro. */
+#ifdef __ARM_FEATURE_AES
+# define HAVE_CLMUL_ACCEL true
+#else
+# define HAVE_CLMUL_ACCEL likely(cpuinfo & CPUINFO_PMULL)
+#endif
+#if !defined(__ARM_FEATURE_AES) && defined(CONFIG_ARM_AES_BUILTIN)
+# define ATTR_CLMUL_ACCEL __attribute__((target("+crypto")))
+#else
+# define ATTR_CLMUL_ACCEL
+#endif
+
+/*
+ * The 8x8->8 pmul and 8x8->16 pmull are available unconditionally.
+ */
+
+static inline uint64_t clmul_8x8_low(uint64_t n, uint64_t m)
+{
+ return (uint64_t)vmul_p8((poly8x8_t)n, (poly8x8_t)m);
+}
+
+static inline Int128 clmul_8x8_packed(uint64_t n, uint64_t m)
+{
+ union { poly16x8_t v; Int128 s; } u;
+ u.v = vmull_p8((poly8x8_t)n, (poly8x8_t)m);
+ return u.s;
+}
+
+static inline Int128 clmul_8x8_even(Int128 n, Int128 m)
+{
+ union { uint16x8_t v; Int128 s; } un, um;
+ uint8x8_t pn, pm;
+
+ un.s = n;
+ um.s = m;
+ pn = vmovn_u16(un.v);
+ pm = vmovn_u16(um.v);
+ return clmul_8x8_packed((uint64_t)pn, (uint64_t)pm);
+}
+
+static inline Int128 clmul_8x8_odd(Int128 n, Int128 m)
+{
+ union { uint8x16_t v; Int128 s; } un, um;
+ uint8x8_t pn, pm;
+
+ un.s = n;
+ um.s = m;
+ pn = vqtbl1_u8(un.v, (uint8x8_t){ 1, 3, 5, 7, 9, 11, 13, 15 });
+ pm = vqtbl1_u8(um.v, (uint8x8_t){ 1, 3, 5, 7, 9, 11, 13, 15 });
+ return clmul_8x8_packed((uint64_t)pn, (uint64_t)pm);
+}
+
+static inline uint64_t clmul_8x4_even(uint64_t n, uint64_t m)
+{
+ return int128_getlo(clmul_8x8_even(int128_make64(n), int128_make64(m)));
+}
+
+static inline uint64_t clmul_8x4_odd(uint64_t n, uint64_t m)
+{
+ return int128_getlo(clmul_8x8_odd(int128_make64(n), int128_make64(m)));
+}
+
+static inline Int128 clmul_16x4_packed_accel(uint16x4_t n, uint16x4_t m)
+{
+ union { uint32x4_t v; Int128 s; } u;
+ uint32x4_t r0, r1, r2;
+
+ /*
+ * Considering the per-byte multiplication:
+ * ab
+ * cd
+ * -----
+ * bd << 0
+ * bc << 8
+ * ad << 8
+ * ac << 16
+ *
+ * We get the ac and bd rows of the result for free from the expanding
+ * packed multiply. Reverse the two bytes in M, repeat, and we get the
+ * ad and bc results, but in the wrong column; shift to fix and sum all.
+ */
+ r0 = (uint32x4_t)vmull_p8((poly8x8_t)n, (poly8x8_t)m);
+ r1 = (uint32x4_t)vmull_p8((poly8x8_t)n, vrev16_p8((poly8x8_t)m));
+ r2 = r1 << 8; /* bc */
+ r1 = r1 >> 8; /* ad */
+ r1 &= (uint32x4_t){ 0x00ffff00, 0x00ffff00, 0x00ffff00, 0x00ffff00 };
+ r2 &= (uint32x4_t){ 0x00ffff00, 0x00ffff00, 0x00ffff00, 0x00ffff00 };
+ r0 = r0 ^ r1 ^ r2;
+
+ u.v = r0;
+ return u.s;
+}
+
+static inline Int128 clmul_16x4_even(Int128 n, Int128 m)
+{
+ union { uint32x4_t v; Int128 s; } um, un;
+ uint16x4_t pn, pm;
+
+ /* Extract even uint16_t. */
+ un.s = n;
+ um.s = m;
+ pn = vmovn_u32(un.v);
+ pm = vmovn_u32(um.v);
+ return clmul_16x4_packed_accel(pn, pm);
+}
+
+static inline Int128 clmul_16x4_odd(Int128 n, Int128 m)
+{
+ union { uint8x16_t v; Int128 s; } um, un;
+ uint16x4_t pn, pm;
+
+ /* Extract odd uint16_t. */
+ un.s = n;
+ um.s = m;
+ pn = (uint16x4_t)vqtbl1_u8(un.v, (uint8x8_t){ 2, 3, 6, 7, 10, 11, 14, 15 });
+ pm = (uint16x4_t)vqtbl1_u8(um.v, (uint8x8_t){ 2, 3, 6, 7, 10, 11, 14, 15 });
+ return clmul_16x4_packed_accel(pn, pm);
+}
+
+static inline uint64_t clmul_16x2_even(uint64_t n, uint64_t m)
+{
+ return int128_getlo(clmul_16x4_even(int128_make64(n), int128_make64(m)));
+}
+
+static inline uint64_t clmul_16x2_odd(uint64_t n, uint64_t m)
+{
+ return int128_getlo(clmul_16x4_odd(int128_make64(n), int128_make64(m)));
+}
+
+/*
+ * The 64x64->128 pmull is available with FEAT_PMULL.
+ */
+
+static inline Int128 ATTR_CLMUL_ACCEL
+clmul_64(uint64_t n, uint64_t m)
+{
+ union { poly128_t v; Int128 s; } u;
+
+ if (!HAVE_CLMUL_ACCEL) {
+ return clmul_64_gen(n, m);
+ }
+
+#ifdef CONFIG_ARM_AES_BUILTIN
+ u.v = vmull_p64((poly64_t)n, (poly64_t)m);
+#else
+ asm(".arch_extension aes\n\t"
+ "pmull %0.1q, %1.1d, %2.1d" : "=w"(u.v) : "w"(n), "w"(m));
+#endif
+ return u.s;
+}
+
+static inline uint64_t ATTR_CLMUL_ACCEL
+clmul_32(uint32_t n, uint32_t m)
+{
+ if (!HAVE_CLMUL_ACCEL) {
+ return clmul_32_gen(n, m);
+ }
+ return int128_getlo(clmul_64(n, m));
+}
+
+static inline Int128 ATTR_CLMUL_ACCEL
+clmul_32x2_even(Int128 n, Int128 m)
+{
+ union { uint64x2_t v; poly64_t h; Int128 s; } um, un, ur;
+ uint64x2_t r0, r2;
+
+ if (!HAVE_CLMUL_ACCEL) {
+ return clmul_32x2_even_gen(n, m);
+ }
+
+ un.s = n;
+ um.s = m;
+ un.v &= (uint64x2_t){ 0xffffffffu, 0xffffffffu };
+ um.v &= (uint64x2_t){ 0xffffffffu, 0xffffffffu };
+
+#ifdef CONFIG_ARM_AES_BUILTIN
+ r0 = (uint64x2_t)vmull_p64(un.h, um.h);
+ r2 = (uint64x2_t)vmull_high_p64((poly64x2_t)un.v, (poly64x2_t)um.v);
+#else
+ asm(".arch_extension aes\n\t"
+ "pmull %0.1q, %2.1d, %3.1d\n\t"
+ "pmull2 %1.1q, %2.2d, %3.2d"
+ : "=&w"(r0), "=w"(r2) : "w"(un.v), "w"(um.v));
+#endif
+
+ ur.v = vzip1q_u64(r0, r2);
+ return ur.s;
+}
+
+static inline Int128 ATTR_CLMUL_ACCEL
+clmul_32x2_odd(Int128 n, Int128 m)
+{
+ union { uint64x2_t v; poly64_t h; Int128 s; } um, un, ur;
+ uint64x2_t r0, r2;
+
+ if (!HAVE_CLMUL_ACCEL) {
+ return clmul_32x2_odd_gen(n, m);
+ }
+
+ un.s = n;
+ um.s = m;
+ un.v &= (uint64x2_t){ 0xffffffff00000000ull, 0xffffffff00000000ull };
+ um.v &= (uint64x2_t){ 0xffffffff00000000ull, 0xffffffff00000000ull };
+
+#ifdef CONFIG_ARM_AES_BUILTIN
+ r0 = (uint64x2_t)vmull_p64(un.h, um.h);
+ r2 = (uint64x2_t)vmull_high_p64((poly64x2_t)un.v, (poly64x2_t)um.v);
+#else
+ asm(".arch_extension aes\n\t"
+ "pmull %0.1q, %2.1d, %3.1d\n\t"
+ "pmull2 %1.1q, %2.2d, %3.2d"
+ : "=&w"(r0), "=w"(r2) : "w"(un.v), "w"(um.v));
+#endif
+
+ ur.v = vzip2q_u64(r0, r2);
+ return ur.s;
+}
+
+#endif /* AARCH64_HOST_CRYPTO_CLMUL_H */
diff --git a/util/cpuinfo-aarch64.c b/util/cpuinfo-aarch64.c
index ababc39550..1d565b8420 100644
--- a/util/cpuinfo-aarch64.c
+++ b/util/cpuinfo-aarch64.c
@@ -56,12 +56,14 @@ unsigned __attribute__((constructor)) cpuinfo_init(void)
unsigned long hwcap = qemu_getauxval(AT_HWCAP);
info |= (hwcap & HWCAP_ATOMICS ? CPUINFO_LSE : 0);
info |= (hwcap & HWCAP_USCAT ? CPUINFO_LSE2 : 0);
- info |= (hwcap & HWCAP_AES ? CPUINFO_AES: 0);
+ info |= (hwcap & HWCAP_AES ? CPUINFO_AES : 0);
+ info |= (hwcap & HWCAP_PMULL ? CPUINFO_PMULL : 0);
#endif
#ifdef CONFIG_DARWIN
info |= sysctl_for_bool("hw.optional.arm.FEAT_LSE") * CPUINFO_LSE;
info |= sysctl_for_bool("hw.optional.arm.FEAT_LSE2") * CPUINFO_LSE2;
info |= sysctl_for_bool("hw.optional.arm.FEAT_AES") * CPUINFO_AES;
+ info |= sysctl_for_bool("hw.optional.arm.FEAT_PMULL") * CPUINFO_PMULL;
#endif
cpuinfo = info;
--
2.34.1
Thread overview: 23+ messages
2023-07-13 21:14 [RFC PATCH for-8.2 00/18] crypto: Provide clmul.h and host accel Richard Henderson
2023-07-13 21:14 ` [PATCH 01/18] crypto: Add generic 8-bit carry-less multiply routines Richard Henderson
2023-07-13 21:14 ` [PATCH 02/18] target/arm: Use clmul_8* routines Richard Henderson
2023-07-13 21:43 ` Philippe Mathieu-Daudé
2023-07-13 21:14 ` [PATCH 03/18] target/s390x: " Richard Henderson
2023-07-13 21:14 ` [PATCH 04/18] target/ppc: " Richard Henderson
2023-07-13 21:14 ` [PATCH 05/18] crypto: Add generic 16-bit carry-less multiply routines Richard Henderson
2023-07-13 21:14 ` [PATCH 06/18] target/arm: Use clmul_16* routines Richard Henderson
2023-07-13 21:14 ` [PATCH 07/18] target/s390x: " Richard Henderson
2023-07-13 21:14 ` [PATCH 08/18] target/ppc: " Richard Henderson
2023-07-13 21:14 ` [PATCH 09/18] crypto: Add generic 32-bit carry-less multiply routines Richard Henderson
2023-07-13 21:14 ` [PATCH 10/18] target/arm: Use clmul_32* routines Richard Henderson
2023-07-13 21:14 ` [PATCH 11/18] target/s390x: " Richard Henderson
2023-07-13 21:14 ` [PATCH 12/18] target/ppc: " Richard Henderson
2023-07-13 21:14 ` [PATCH 13/18] crypto: Add generic 64-bit carry-less multiply routine Richard Henderson
2023-07-13 21:14 ` [PATCH 14/18] target/arm: Use clmul_64 Richard Henderson
2023-07-13 21:14 ` [PATCH 15/18] target/s390x: " Richard Henderson
2023-07-13 21:14 ` [PATCH 16/18] target/ppc: " Richard Henderson
2023-07-13 21:14 ` [PATCH 17/18] host/include/i386: Implement clmul.h Richard Henderson
2023-07-19 11:52 ` Ilya Leoshkevich
2023-07-22 11:47 ` Richard Henderson
2023-07-13 21:14 ` Richard Henderson [this message]
2023-08-03 14:02 ` [RFC PATCH for-8.2 00/18] crypto: Provide clmul.h and host accel Ard Biesheuvel