From: Richard Henderson <richard.henderson@linaro.org>
To: qemu-devel@nongnu.org
Cc: berrange@redhat.com, ardb@kernel.org
Subject: [PATCH 01/18] crypto: Add generic 8-bit carry-less multiply routines
Date: Thu, 13 Jul 2023 22:14:18 +0100 [thread overview]
Message-ID: <20230713211435.13505-2-richard.henderson@linaro.org> (raw)
In-Reply-To: <20230713211435.13505-1-richard.henderson@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
host/include/generic/host/crypto/clmul.h | 17 ++++++
include/crypto/clmul.h | 61 +++++++++++++++++++
crypto/clmul.c | 76 ++++++++++++++++++++++++
crypto/meson.build | 9 ++-
4 files changed, 160 insertions(+), 3 deletions(-)
create mode 100644 host/include/generic/host/crypto/clmul.h
create mode 100644 include/crypto/clmul.h
create mode 100644 crypto/clmul.c
diff --git a/host/include/generic/host/crypto/clmul.h b/host/include/generic/host/crypto/clmul.h
new file mode 100644
index 0000000000..694705f703
--- /dev/null
+++ b/host/include/generic/host/crypto/clmul.h
@@ -0,0 +1,17 @@
+/*
+ * No host specific carry-less multiply acceleration.
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef GENERIC_HOST_CRYPTO_CLMUL_H
+#define GENERIC_HOST_CRYPTO_CLMUL_H
+
+/* Defer everything to the generic routines. */
+#define clmul_8x8_low clmul_8x8_low_gen
+#define clmul_8x4_even clmul_8x4_even_gen
+#define clmul_8x4_odd clmul_8x4_odd_gen
+#define clmul_8x8_even clmul_8x8_even_gen
+#define clmul_8x8_odd clmul_8x8_odd_gen
+#define clmul_8x8_packed clmul_8x8_packed_gen
+
+#endif /* GENERIC_HOST_CRYPTO_CLMUL_H */
diff --git a/include/crypto/clmul.h b/include/crypto/clmul.h
new file mode 100644
index 0000000000..7f19205d6f
--- /dev/null
+++ b/include/crypto/clmul.h
@@ -0,0 +1,61 @@
+/*
+ * Carry-less multiply
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * Copyright (C) 2023 Linaro, Ltd.
+ */
+
+#ifndef CRYPTO_CLMUL_H
+#define CRYPTO_CLMUL_H
+
+#include "qemu/int128.h"
+
+/**
+ * clmul_8x8_low:
+ *
+ * Perform eight 8x8->8 carry-less multiplies.
+ */
+uint64_t clmul_8x8_low_gen(uint64_t, uint64_t);
+
+/**
+ * clmul_8x4_even:
+ *
+ * Perform four 8x8->16 carry-less multiplies.
+ * The odd bytes of the inputs are ignored.
+ */
+uint64_t clmul_8x4_even_gen(uint64_t, uint64_t);
+
+/**
+ * clmul_8x4_odd:
+ *
+ * Perform four 8x8->16 carry-less multiplies.
+ * The even bytes of the inputs are ignored.
+ */
+uint64_t clmul_8x4_odd_gen(uint64_t, uint64_t);
+
+/**
+ * clmul_8x8_even:
+ *
+ * Perform eight 8x8->16 carry-less multiplies.
+ * The odd bytes of the inputs are ignored.
+ */
+Int128 clmul_8x8_even_gen(Int128, Int128);
+
+/**
+ * clmul_8x8_odd:
+ *
+ * Perform eight 8x8->16 carry-less multiplies.
+ * The even bytes of the inputs are ignored.
+ */
+Int128 clmul_8x8_odd_gen(Int128, Int128);
+
+/**
+ * clmul_8x8_packed:
+ *
+ * Perform eight 8x8->16 carry-less multiplies.
+ */
+Int128 clmul_8x8_packed_gen(uint64_t, uint64_t);
+
+#include "host/crypto/clmul.h"
+
+#endif /* CRYPTO_CLMUL_H */
diff --git a/crypto/clmul.c b/crypto/clmul.c
new file mode 100644
index 0000000000..866704e751
--- /dev/null
+++ b/crypto/clmul.c
@@ -0,0 +1,76 @@
+/*
+ * Carry-less multiply operations (generic implementations).
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "crypto/clmul.h"
+
+/*
+ * Eight independent 8x8->8 carry-less multiplies, one per byte lane,
+ * keeping only the low 8 bits of each product.
+ */
+uint64_t clmul_8x8_low_gen(uint64_t n, uint64_t m)
+{
+ uint64_t r = 0;
+
+ /* Classic shift-and-xor: bit i of each byte of n selects (m << i). */
+ for (int i = 0; i < 8; ++i) {
+ /* Broadcast bit 0 of every byte of n into a full 0xff/0x00 byte mask. */
+ uint64_t mask = (n & 0x0101010101010101ull) * 0xff;
+ r ^= m & mask;
+ /* Shift each byte lane of m left by one, dropping cross-lane carries. */
+ m = (m << 1) & 0xfefefefefefefefeull;
+ n >>= 1;
+ }
+ return r;
+}
+
+/*
+ * Four 8x8->16 carry-less multiplies on the even bytes of n and m;
+ * odd input bytes are ignored (masked off below).
+ */
+uint64_t clmul_8x4_even_gen(uint64_t n, uint64_t m)
+{
+ uint64_t r = 0;
+
+ /* Keep only the even bytes; each now sits in its own 16-bit lane. */
+ n &= 0x00ff00ff00ff00ffull;
+ m &= 0x00ff00ff00ff00ffull;
+
+ for (int i = 0; i < 8; ++i) {
+ /* Broadcast bit 0 of every 16-bit lane of n to a full lane mask. */
+ uint64_t mask = (n & 0x0001000100010001ull) * 0xffff;
+ r ^= m & mask;
+ n >>= 1;
+ /* Unmasked shift is safe: an 8x8 product tops out at bit 14 < 16. */
+ m <<= 1;
+ }
+ return r;
+}
+
+/*
+ * Four 8x8->16 carry-less multiplies on the odd bytes of n and m:
+ * shift the odd bytes down into even positions and reuse the even case.
+ */
+uint64_t clmul_8x4_odd_gen(uint64_t n, uint64_t m)
+{
+ return clmul_8x4_even_gen(n >> 8, m >> 8);
+}
+
+/*
+ * Eight 8x8->16 carry-less multiplies on the even bytes of n and m,
+ * performed as two 64-bit halves of the 128-bit inputs.
+ */
+Int128 clmul_8x8_even_gen(Int128 n, Int128 m)
+{
+ uint64_t rl, rh;
+
+ rl = clmul_8x4_even_gen(int128_getlo(n), int128_getlo(m));
+ rh = clmul_8x4_even_gen(int128_gethi(n), int128_gethi(m));
+ return int128_make128(rl, rh);
+}
+
+/*
+ * Eight 8x8->16 carry-less multiplies on the odd bytes of n and m,
+ * performed as two 64-bit halves of the 128-bit inputs.
+ */
+Int128 clmul_8x8_odd_gen(Int128 n, Int128 m)
+{
+ uint64_t rl, rh;
+
+ rl = clmul_8x4_odd_gen(int128_getlo(n), int128_getlo(m));
+ rh = clmul_8x4_odd_gen(int128_gethi(n), int128_gethi(m));
+ return int128_make128(rl, rh);
+}
+
+/*
+ * Zero-extend each byte of the low 32 bits of x into its own 16-bit
+ * lane of the result; the high 32 bits of x are ignored.
+ */
+static uint64_t unpack_8_to_16(uint64_t x)
+{
+ return (x & 0x000000ff)
+ | ((x & 0x0000ff00) << 8)
+ | ((x & 0x00ff0000) << 16)
+ | ((x & 0xff000000) << 24);
+}
+
+/*
+ * Eight 8x8->16 carry-less multiplies on packed 64-bit inputs:
+ * widen each 32-bit half into 16-bit lanes, then multiply lane-wise.
+ */
+Int128 clmul_8x8_packed_gen(uint64_t n, uint64_t m)
+{
+ uint64_t rl, rh;
+
+ rl = clmul_8x4_even_gen(unpack_8_to_16(n), unpack_8_to_16(m));
+ rh = clmul_8x4_even_gen(unpack_8_to_16(n >> 32), unpack_8_to_16(m >> 32));
+ return int128_make128(rl, rh);
+}
diff --git a/crypto/meson.build b/crypto/meson.build
index 5f03a30d34..9ac1a89802 100644
--- a/crypto/meson.build
+++ b/crypto/meson.build
@@ -48,9 +48,12 @@ if have_afalg
endif
crypto_ss.add(when: gnutls, if_true: files('tls-cipher-suites.c'))
-util_ss.add(files('sm4.c'))
-util_ss.add(files('aes.c'))
-util_ss.add(files('init.c'))
+util_ss.add(files(
+ 'aes.c',
+ 'clmul.c',
+ 'init.c',
+ 'sm4.c',
+))
if gnutls.found()
util_ss.add(gnutls)
endif
--
2.34.1
next prev parent reply other threads:[~2023-07-13 21:18 UTC|newest]
Thread overview: 23+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-07-13 21:14 [RFC PATCH for-8.2 00/18] crypto: Provide clmul.h and host accel Richard Henderson
2023-07-13 21:14 ` Richard Henderson [this message]
2023-07-13 21:14 ` [PATCH 02/18] target/arm: Use clmul_8* routines Richard Henderson
2023-07-13 21:43 ` Philippe Mathieu-Daudé
2023-07-13 21:14 ` [PATCH 03/18] target/s390x: " Richard Henderson
2023-07-13 21:14 ` [PATCH 04/18] target/ppc: " Richard Henderson
2023-07-13 21:14 ` [PATCH 05/18] crypto: Add generic 16-bit carry-less multiply routines Richard Henderson
2023-07-13 21:14 ` [PATCH 06/18] target/arm: Use clmul_16* routines Richard Henderson
2023-07-13 21:14 ` [PATCH 07/18] target/s390x: " Richard Henderson
2023-07-13 21:14 ` [PATCH 08/18] target/ppc: " Richard Henderson
2023-07-13 21:14 ` [PATCH 09/18] crypto: Add generic 32-bit carry-less multiply routines Richard Henderson
2023-07-13 21:14 ` [PATCH 10/18] target/arm: Use clmul_32* routines Richard Henderson
2023-07-13 21:14 ` [PATCH 11/18] target/s390x: " Richard Henderson
2023-07-13 21:14 ` [PATCH 12/18] target/ppc: " Richard Henderson
2023-07-13 21:14 ` [PATCH 13/18] crypto: Add generic 64-bit carry-less multiply routine Richard Henderson
2023-07-13 21:14 ` [PATCH 14/18] target/arm: Use clmul_64 Richard Henderson
2023-07-13 21:14 ` [PATCH 15/18] target/s390x: " Richard Henderson
2023-07-13 21:14 ` [PATCH 16/18] target/ppc: " Richard Henderson
2023-07-13 21:14 ` [PATCH 17/18] host/include/i386: Implement clmul.h Richard Henderson
2023-07-19 11:52 ` Ilya Leoshkevich
2023-07-22 11:47 ` Richard Henderson
2023-07-13 21:14 ` [PATCH 18/18] host/include/aarch64: " Richard Henderson
2023-08-03 14:02 ` [RFC PATCH for-8.2 00/18] crypto: Provide clmul.h and host accel Ard Biesheuvel
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20230713211435.13505-2-richard.henderson@linaro.org \
--to=richard.henderson@linaro.org \
--cc=ardb@kernel.org \
--cc=berrange@redhat.com \
--cc=qemu-devel@nongnu.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).