From: "Philippe Mathieu-Daudé" <philmd@linaro.org>
To: Richard Henderson <richard.henderson@linaro.org>,
qemu-devel@nongnu.org, Ard Biesheuvel <ardb@kernel.org>
Cc: qemu-arm@nongnu.org, qemu-riscv@nongnu.org, pbonzini@redhat.com,
eduardo@habkost.net, alistair.francis@wdc.com,
danielhb413@gmail.com
Subject: Re: [PATCH v4 13/37] host/include/aarch64: Implement aes-round.h
Date: Sat, 8 Jul 2023 19:35:46 +0200 [thread overview]
Message-ID: <5c78b1c8-0515-23ea-8f93-6563a46a8637@linaro.org> (raw)
In-Reply-To: <20230703100520.68224-14-richard.henderson@linaro.org>
+Ard
On 3/7/23 12:04, Richard Henderson wrote:
> Detect AES in cpuinfo; implement the accel hooks.
>
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
> ---
>   meson.build                                  |   9 +
>   host/include/aarch64/host/cpuinfo.h          |   1 +
>   host/include/aarch64/host/crypto/aes-round.h | 205 +++++++++++++++++++
>   util/cpuinfo-aarch64.c                       |   2 +
> 4 files changed, 217 insertions(+)
> create mode 100644 host/include/aarch64/host/crypto/aes-round.h
>
> diff --git a/meson.build b/meson.build
> index a9ba0bfab3..029c6c0048 100644
> --- a/meson.build
> +++ b/meson.build
> @@ -2674,6 +2674,15 @@ config_host_data.set('CONFIG_AVX512BW_OPT', get_option('avx512bw') \
> int main(int argc, char *argv[]) { return bar(argv[0]); }
> '''), error_message: 'AVX512BW not available').allowed())
>
> +# For both AArch64 and AArch32, detect if builtins are available.
> +config_host_data.set('CONFIG_ARM_AES_BUILTIN', cc.compiles('''
> + #include <arm_neon.h>
> + #ifndef __ARM_FEATURE_AES
> + __attribute__((target("+crypto")))
> + #endif
> + void foo(uint8x16_t *p) { *p = vaesmcq_u8(*p); }
> + '''))
> +
> have_pvrdma = get_option('pvrdma') \
> .require(rdma.found(), error_message: 'PVRDMA requires OpenFabrics libraries') \
> .require(cc.compiles(gnu_source_prefix + '''
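The compile test is neat: when the baseline already includes FEAT_AES,
__ARM_FEATURE_AES is defined and the attribute is skipped; otherwise it
probes whether the compiler accepts a per-function target("+crypto")
override. The two shapes being tested are effectively (sketch, function
names illustrative):

  #include <arm_neon.h>

  /* Baseline has AES: the plain intrinsic compiles as-is. */
  void foo(uint8x16_t *p) { *p = vaesmcq_u8(*p); }

  /* Baseline lacks AES: enable it for this one function only. */
  __attribute__((target("+crypto")))
  void bar(uint8x16_t *p) { *p = vaesmcq_u8(*p); }
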
> diff --git a/host/include/aarch64/host/cpuinfo.h b/host/include/aarch64/host/cpuinfo.h
> index 82227890b4..05feeb4f43 100644
> --- a/host/include/aarch64/host/cpuinfo.h
> +++ b/host/include/aarch64/host/cpuinfo.h
> @@ -9,6 +9,7 @@
> #define CPUINFO_ALWAYS (1u << 0) /* so cpuinfo is nonzero */
> #define CPUINFO_LSE (1u << 1)
> #define CPUINFO_LSE2 (1u << 2)
> +#define CPUINFO_AES (1u << 3)
>
> /* Initialized with a constructor. */
> extern unsigned cpuinfo;
> diff --git a/host/include/aarch64/host/crypto/aes-round.h b/host/include/aarch64/host/crypto/aes-round.h
> new file mode 100644
> index 0000000000..8b5f88d50c
> --- /dev/null
> +++ b/host/include/aarch64/host/crypto/aes-round.h
> @@ -0,0 +1,205 @@
> +/*
> + * AArch64-specific AES acceleration.
> + * SPDX-License-Identifier: GPL-2.0-or-later
> + */
> +
> +#ifndef AARCH64_HOST_CRYPTO_AES_ROUND_H
> +#define AARCH64_HOST_CRYPTO_AES_ROUND_H
> +
> +#include "host/cpuinfo.h"
> +#include <arm_neon.h>
> +
> +#ifdef __ARM_FEATURE_AES
> +# define HAVE_AES_ACCEL true
> +#else
> +# define HAVE_AES_ACCEL likely(cpuinfo & CPUINFO_AES)
> +#endif
> +#if !defined(__ARM_FEATURE_AES) && defined(CONFIG_ARM_AES_BUILTIN)
> +# define ATTR_AES_ACCEL __attribute__((target("+crypto")))
> +#else
> +# define ATTR_AES_ACCEL
> +#endif
> +
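For readers following along: HAVE_AES_ACCEL folds to a compile-time
'true' when the baseline has AES, and to a runtime cpuinfo test
otherwise. I assume the generic wrappers in crypto/aes-round.h then
dispatch on it along these lines (sketch only, the fallback helper
name is illustrative):

  static inline void aesenc_MC(AESState *r, const AESState *st, bool be)
  {
      if (HAVE_AES_ACCEL) {
          aesenc_MC_accel(r, st, be);
      } else {
          aesenc_MC_gen(r, st, be);   /* table-driven generic path */
      }
  }
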
> +static inline uint8x16_t aes_accel_bswap(uint8x16_t x)
> +{
> + return vqtbl1q_u8(x, (uint8x16_t){ 15, 14, 13, 12, 11, 10, 9, 8,
> + 7, 6, 5, 4, 3, 2, 1, 0, });
> +}
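(The descending index vector turns TBL into a whole-vector byte
reverse, i.e. out[i] = in[15 - i]; the helpers below use it to convert
between the guest's big-endian state layout and the byte order the
AESE/AESD instructions expect.)
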
> +
> +#ifdef CONFIG_ARM_AES_BUILTIN
> +# define aes_accel_aesd vaesdq_u8
> +# define aes_accel_aese vaeseq_u8
> +# define aes_accel_aesmc vaesmcq_u8
> +# define aes_accel_aesimc vaesimcq_u8
> +# define aes_accel_aesd_imc(S, K) vaesimcq_u8(vaesdq_u8(S, K))
> +# define aes_accel_aese_mc(S, K) vaesmcq_u8(vaeseq_u8(S, K))
> +#else
> +static inline uint8x16_t aes_accel_aesd(uint8x16_t d, uint8x16_t k)
> +{
> + asm(".arch_extension aes\n\t"
> + "aesd %0.16b, %1.16b" : "+w"(d) : "w"(k));
> + return d;
> +}
> +
> +static inline uint8x16_t aes_accel_aese(uint8x16_t d, uint8x16_t k)
> +{
> + asm(".arch_extension aes\n\t"
> + "aese %0.16b, %1.16b" : "+w"(d) : "w"(k));
> + return d;
> +}
> +
> +static inline uint8x16_t aes_accel_aesmc(uint8x16_t d)
> +{
> + asm(".arch_extension aes\n\t"
> + "aesmc %0.16b, %1.16b" : "=w"(d) : "w"(d));
> + return d;
> +}
> +
> +static inline uint8x16_t aes_accel_aesimc(uint8x16_t d)
> +{
> + asm(".arch_extension aes\n\t"
> + "aesimc %0.16b, %1.16b" : "=w"(d) : "w"(d));
> + return d;
> +}
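These asm fallbacks cover toolchains whose <arm_neon.h> lacks the
vaes* intrinsics; each is the direct analogue of the builtin above,
e.g. aes_accel_aesd(d, k) == vaesdq_u8(d, k). The ".arch_extension
aes" directive is what lets the assembler accept the instruction
without the whole file being built with +crypto -- which is also why
ATTR_AES_ACCEL is empty on this path.
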
> +
> +/* Most CPUs fuse AESD+AESIMC in the execution pipeline. */
> +static inline uint8x16_t aes_accel_aesd_imc(uint8x16_t d, uint8x16_t k)
> +{
> + asm(".arch_extension aes\n\t"
> + "aesd %0.16b, %1.16b\n\t"
> + "aesimc %0.16b, %0.16b" : "+w"(d) : "w"(k));
> + return d;
> +}
> +
> +/* Most CPUs fuse AESE+AESMC in the execution pipeline. */
> +static inline uint8x16_t aes_accel_aese_mc(uint8x16_t d, uint8x16_t k)
> +{
> + asm(".arch_extension aes\n\t"
> + "aese %0.16b, %1.16b\n\t"
> + "aesmc %0.16b, %0.16b" : "+w"(d) : "w"(k));
> + return d;
> +}
> +#endif /* CONFIG_ARM_AES_BUILTIN */
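With the fused helpers, a full encryption round stays a single
dependency chain in the hot loop; roughly (sketch, assuming an
expanded key schedule in rk[0..rounds]):

  uint8x16_t t = state;
  for (int i = 0; i < rounds - 1; i++) {
      t = aes_accel_aese_mc(t, rk[i]);    /* AESE+AESMC, fusable pair */
  }
  t = aes_accel_aese(t, rk[rounds - 1]);  /* last round: no MixColumns */
  t ^= rk[rounds];                        /* final AddRoundKey */
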
> +
> +static inline void ATTR_AES_ACCEL
> +aesenc_MC_accel(AESState *ret, const AESState *st, bool be)
> +{
> + uint8x16_t t = (uint8x16_t)st->v;
> +
> + if (be) {
> + t = aes_accel_bswap(t);
> + t = aes_accel_aesmc(t);
> + t = aes_accel_bswap(t);
> + } else {
> + t = aes_accel_aesmc(t);
> + }
> + ret->v = (AESStateVec)t;
> +}
> +
> +static inline void ATTR_AES_ACCEL
> +aesenc_SB_SR_AK_accel(AESState *ret, const AESState *st,
> + const AESState *rk, bool be)
> +{
> + uint8x16_t t = (uint8x16_t)st->v;
> + uint8x16_t z = { };
> +
> + if (be) {
> + t = aes_accel_bswap(t);
> + t = aes_accel_aese(t, z);
> + t = aes_accel_bswap(t);
> + } else {
> + t = aes_accel_aese(t, z);
> + }
> + ret->v = (AESStateVec)t ^ rk->v;
> +}
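The zero-key trick might deserve a comment in the code: AESE xors the
key into the state *before* SubBytes/ShiftRows, so passing a zero key
reduces it to plain SB+SR, and the real round key can be applied
afterwards to match the generic helper's ordering:

  AESE(st, 0) = ShiftRows(SubBytes(st)), hence
  AESE(st, 0) ^ rk = AddRoundKey(ShiftRows(SubBytes(st)), rk)
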
> +
> +static inline void ATTR_AES_ACCEL
> +aesenc_SB_SR_MC_AK_accel(AESState *ret, const AESState *st,
> + const AESState *rk, bool be)
> +{
> + uint8x16_t t = (uint8x16_t)st->v;
> + uint8x16_t z = { };
> +
> + if (be) {
> + t = aes_accel_bswap(t);
> + t = aes_accel_aese_mc(t, z);
> + t = aes_accel_bswap(t);
> + } else {
> + t = aes_accel_aese_mc(t, z);
> + }
> + ret->v = (AESStateVec)t ^ rk->v;
> +}
> +
> +static inline void ATTR_AES_ACCEL
> +aesdec_IMC_accel(AESState *ret, const AESState *st, bool be)
> +{
> + uint8x16_t t = (uint8x16_t)st->v;
> +
> + if (be) {
> + t = aes_accel_bswap(t);
> + t = aes_accel_aesimc(t);
> + t = aes_accel_bswap(t);
> + } else {
> + t = aes_accel_aesimc(t);
> + }
> + ret->v = (AESStateVec)t;
> +}
> +
> +static inline void ATTR_AES_ACCEL
> +aesdec_ISB_ISR_AK_accel(AESState *ret, const AESState *st,
> + const AESState *rk, bool be)
> +{
> + uint8x16_t t = (uint8x16_t)st->v;
> + uint8x16_t z = { };
> +
> + if (be) {
> + t = aes_accel_bswap(t);
> + t = aes_accel_aesd(t, z);
> + t = aes_accel_bswap(t);
> + } else {
> + t = aes_accel_aesd(t, z);
> + }
> + ret->v = (AESStateVec)t ^ rk->v;
> +}
> +
> +static inline void ATTR_AES_ACCEL
> +aesdec_ISB_ISR_AK_IMC_accel(AESState *ret, const AESState *st,
> + const AESState *rk, bool be)
> +{
> + uint8x16_t t = (uint8x16_t)st->v;
> + uint8x16_t k = (uint8x16_t)rk->v;
> + uint8x16_t z = { };
> +
> + if (be) {
> + t = aes_accel_bswap(t);
> + k = aes_accel_bswap(k);
> + t = aes_accel_aesd(t, z);
> + t ^= k;
> + t = aes_accel_aesimc(t);
> + t = aes_accel_bswap(t);
> + } else {
> + t = aes_accel_aesd(t, z);
> + t ^= k;
> + t = aes_accel_aesimc(t);
> + }
> + ret->v = (AESStateVec)t;
> +}
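Note for reviewers: this is the one variant where the round key is
consumed *between* the accelerated steps (AK before IMC, matching the
PPC helpers later in the series), so in the big-endian case rk has to
be byteswapped into the accelerated domain before the xor rather than
applied at the end.
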
> +
> +static inline void ATTR_AES_ACCEL
> +aesdec_ISB_ISR_IMC_AK_accel(AESState *ret, const AESState *st,
> + const AESState *rk, bool be)
> +{
> + uint8x16_t t = (uint8x16_t)st->v;
> + uint8x16_t z = { };
> +
> + if (be) {
> + t = aes_accel_bswap(t);
> + t = aes_accel_aesd_imc(t, z);
> + t = aes_accel_bswap(t);
> + } else {
> + t = aes_accel_aesd_imc(t, z);
> + }
> + ret->v = (AESStateVec)t ^ rk->v;
> +}
> +
> +#endif /* AARCH64_HOST_CRYPTO_AES_ROUND_H */
> diff --git a/util/cpuinfo-aarch64.c b/util/cpuinfo-aarch64.c
> index f99acb7884..ababc39550 100644
> --- a/util/cpuinfo-aarch64.c
> +++ b/util/cpuinfo-aarch64.c
> @@ -56,10 +56,12 @@ unsigned __attribute__((constructor)) cpuinfo_init(void)
> unsigned long hwcap = qemu_getauxval(AT_HWCAP);
> info |= (hwcap & HWCAP_ATOMICS ? CPUINFO_LSE : 0);
> info |= (hwcap & HWCAP_USCAT ? CPUINFO_LSE2 : 0);
> + info |= (hwcap & HWCAP_AES ? CPUINFO_AES : 0);
> #endif
> #ifdef CONFIG_DARWIN
> info |= sysctl_for_bool("hw.optional.arm.FEAT_LSE") * CPUINFO_LSE;
> info |= sysctl_for_bool("hw.optional.arm.FEAT_LSE2") * CPUINFO_LSE2;
> + info |= sysctl_for_bool("hw.optional.arm.FEAT_AES") * CPUINFO_AES;
> #endif
>
> cpuinfo = info;
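Detection looks right on both hosts: Linux via HWCAP_AES from
qemu_getauxval(AT_HWCAP), Darwin via the FEAT_AES sysctl. For anyone
unfamiliar with the Darwin side, sysctl_for_bool() earlier in this
file is presumably along these lines (sketch from memory):

  #include <sys/sysctl.h>

  static bool sysctl_for_bool(const char *name)
  {
      int val = 0;
      size_t len = sizeof(val);

      /* False if the sysctl is absent or reports zero. */
      return sysctlbyname(name, &val, &len, NULL, 0) == 0 && val != 0;
  }
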
Thread overview: 56+ messages
2023-07-03 10:04 [PATCH v4 00/37] crypto: Provide aes-round.h and host accel Richard Henderson
2023-07-03 10:04 ` [PATCH v4 01/37] util: Add cpuinfo-ppc.c Richard Henderson
2023-07-03 10:04 ` [PATCH v4 02/37] tests/multiarch: Add test-aes Richard Henderson
2023-07-03 12:08 ` Christoph Müllner
2023-07-05 14:28 ` Richard Henderson
2023-07-03 10:04 ` [PATCH v4 03/37] target/arm: Move aesmc and aesimc tables to crypto/aes.c Richard Henderson
2023-07-03 10:04 ` [PATCH v4 04/37] crypto/aes: Add AES_SH, AES_ISH macros Richard Henderson
2023-07-03 10:04 ` [PATCH v4 05/37] crypto: Add aesenc_SB_SR_AK Richard Henderson
2023-07-03 10:04 ` [PATCH v4 06/37] crypto: Add aesdec_ISB_ISR_AK Richard Henderson
2023-07-03 10:04 ` [PATCH v4 07/37] crypto: Add aesenc_MC Richard Henderson
2023-07-03 10:04 ` [PATCH v4 08/37] crypto: Add aesdec_IMC Richard Henderson
2023-07-03 10:04 ` [PATCH v4 09/37] crypto: Add aesenc_SB_SR_MC_AK Richard Henderson
2023-07-03 10:04 ` [PATCH v4 10/37] crypto: Add aesdec_ISB_ISR_IMC_AK Richard Henderson
2023-07-03 10:04 ` [PATCH v4 11/37] crypto: Add aesdec_ISB_ISR_AK_IMC Richard Henderson
2023-07-03 10:04 ` [PATCH v4 12/37] host/include/i386: Implement aes-round.h Richard Henderson
2023-07-03 10:04 ` [PATCH v4 13/37] host/include/aarch64: " Richard Henderson
2023-07-08 17:35 ` Philippe Mathieu-Daudé [this message]
2023-07-03 10:04 ` [PATCH v4 14/37] host/include/ppc: " Richard Henderson
2023-07-03 10:04 ` [PATCH v4 15/37] target/ppc: Use aesenc_SB_SR_AK Richard Henderson
2023-07-03 10:04 ` [PATCH v4 16/37] target/ppc: Use aesdec_ISB_ISR_AK Richard Henderson
2023-07-03 10:05 ` [PATCH v4 17/37] target/ppc: Use aesenc_SB_SR_MC_AK Richard Henderson
2023-07-07 19:50 ` Philippe Mathieu-Daudé
2023-07-03 10:05 ` [PATCH v4 18/37] target/ppc: Use aesdec_ISB_ISR_AK_IMC Richard Henderson
2023-07-03 10:05 ` [PATCH v4 19/37] target/i386: Use aesenc_SB_SR_AK Richard Henderson
2023-07-03 10:05 ` [PATCH v4 20/37] target/i386: Use aesdec_ISB_ISR_AK Richard Henderson
2023-07-03 10:05 ` [PATCH v4 21/37] target/i386: Use aesdec_IMC Richard Henderson
2023-07-07 19:48 ` Philippe Mathieu-Daudé
2023-07-03 10:05 ` [PATCH v4 22/37] target/i386: Use aesenc_SB_SR_MC_AK Richard Henderson
2023-07-07 19:50 ` Philippe Mathieu-Daudé
2023-07-03 10:05 ` [PATCH v4 23/37] target/i386: Use aesdec_ISB_ISR_IMC_AK Richard Henderson
2023-07-07 20:34 ` Philippe Mathieu-Daudé
2023-07-03 10:05 ` [PATCH v4 24/37] target/arm: Demultiplex AESE and AESMC Richard Henderson
2023-07-03 10:05 ` [PATCH v4 25/37] target/arm: Use aesenc_SB_SR_AK Richard Henderson
2023-07-08 17:18 ` Philippe Mathieu-Daudé
2023-07-03 10:05 ` [PATCH v4 26/37] target/arm: Use aesdec_ISB_ISR_AK Richard Henderson
2023-07-08 17:19 ` Philippe Mathieu-Daudé
2023-07-03 10:05 ` [PATCH v4 27/37] target/arm: Use aesenc_MC Richard Henderson
2023-07-08 17:20 ` Philippe Mathieu-Daudé
2023-07-03 10:05 ` [PATCH v4 28/37] target/arm: Use aesdec_IMC Richard Henderson
2023-07-08 17:20 ` Philippe Mathieu-Daudé
2023-07-03 10:05 ` [PATCH v4 29/37] target/riscv: Use aesenc_SB_SR_AK Richard Henderson
2023-07-07 20:38 ` Philippe Mathieu-Daudé
2023-07-03 10:05 ` [PATCH v4 30/37] target/riscv: Use aesdec_ISB_ISR_AK Richard Henderson
2023-07-07 20:32 ` Philippe Mathieu-Daudé
2023-07-03 10:05 ` [PATCH v4 31/37] target/riscv: Use aesdec_IMC Richard Henderson
2023-07-08 17:22 ` Philippe Mathieu-Daudé
2023-07-03 10:05 ` [PATCH v4 32/37] target/riscv: Use aesenc_SB_SR_MC_AK Richard Henderson
2023-07-07 20:26 ` Philippe Mathieu-Daudé
2023-07-03 10:05 ` [PATCH v4 33/37] target/riscv: Use aesdec_ISB_ISR_IMC_AK Richard Henderson
2023-07-08 17:33 ` Philippe Mathieu-Daudé
2023-07-03 10:05 ` [PATCH v4 34/37] crypto: Remove AES_shifts, AES_ishifts Richard Henderson
2023-07-03 10:05 ` [PATCH v4 35/37] crypto: Implement aesdec_IMC with AES_imc_rot Richard Henderson
2023-07-03 10:05 ` [PATCH v4 36/37] crypto: Remove AES_imc Richard Henderson
2023-07-03 10:05 ` [PATCH v4 37/37] crypto: Unexport AES_*_rot, AES_TeN, AES_TdN Richard Henderson
2023-07-07 17:30 ` [PATCH v4 00/37] crypto: Provide aes-round.h and host accel Daniel Henrique Barboza
2023-07-08 17:38 ` Philippe Mathieu-Daudé