* [PATCH 0/8] crypto: more alignmask cleanups
From: Eric Biggers @ 2024-12-07 19:57 UTC
To: linux-crypto
Remove some of the remaining uses of cra_alignmask.
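The conversion pattern is the same throughout the series: instead of asking
the crypto API for aligned buffers via cra_alignmask and then casting them to
__be32/__le32, load and store the words with the unaligned access helpers and
drop the alignmask. Roughly, as a simplified sketch (the function names below
are illustrative only, not taken from any of the files touched by the series):

  #include <linux/types.h>
  #include <asm/byteorder.h>
  #include <linux/unaligned.h>

  /* Before: only safe because .cra_alignmask = 3 guaranteed that "in"
   * was 4-byte aligned by the time the cipher was invoked.
   */
  static u32 load_word_old(const u8 *in)
  {
          const __be32 *src = (const __be32 *)in;

          return be32_to_cpu(src[0]);
  }

  /* After: no alignment requirement on "in"; on CPUs that handle
   * unaligned accesses efficiently this typically compiles to a plain
   * (possibly byte-swapped) load.
   */
  static u32 load_word_new(const u8 *in)
  {
          return get_unaligned_be32(in);
  }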
Eric Biggers (8):
crypto: anubis - stop using cra_alignmask
crypto: aria - stop using cra_alignmask
crypto: tea - stop using cra_alignmask
crypto: khazad - stop using cra_alignmask
crypto: seed - stop using cra_alignmask
crypto: x86 - remove assignments of 0 to cra_alignmask
crypto: aegis - remove assignments of 0 to cra_alignmask
crypto: keywrap - remove assignment of 0 to cra_alignmask
arch/x86/crypto/aegis128-aesni-glue.c | 1 -
arch/x86/crypto/blowfish_glue.c | 1 -
arch/x86/crypto/camellia_glue.c | 1 -
arch/x86/crypto/des3_ede_glue.c | 1 -
arch/x86/crypto/twofish_glue.c | 1 -
crypto/aegis128-core.c | 2 -
crypto/anubis.c | 14 ++---
crypto/aria_generic.c | 37 ++++++------
crypto/keywrap.c | 1 -
crypto/khazad.c | 17 ++----
crypto/seed.c | 48 +++++++---------
crypto/tea.c | 83 +++++++++++----------------
12 files changed, 82 insertions(+), 125 deletions(-)
base-commit: b5f217084ab3ddd4bdd03cd437f8e3b7e2d1f5b6
--
2.47.1
* [PATCH 1/8] crypto: anubis - stop using cra_alignmask
From: Eric Biggers @ 2024-12-07 19:57 UTC
To: linux-crypto
From: Eric Biggers <ebiggers@google.com>
Instead of specifying a nonzero alignmask, use the unaligned access
helpers. This eliminates unnecessary alignment operations on most CPUs,
which can handle unaligned accesses efficiently, and brings us a step
closer to eventually removing support for the alignmask field.
Signed-off-by: Eric Biggers <ebiggers@google.com>
---
crypto/anubis.c | 14 +++++---------
1 file changed, 5 insertions(+), 9 deletions(-)
diff --git a/crypto/anubis.c b/crypto/anubis.c
index 9f0cf61bbc6e2..886e7c9136886 100644
--- a/crypto/anubis.c
+++ b/crypto/anubis.c
@@ -31,11 +31,11 @@
#include <crypto/algapi.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
-#include <asm/byteorder.h>
+#include <linux/unaligned.h>
#include <linux/types.h>
#define ANUBIS_MIN_KEY_SIZE 16
#define ANUBIS_MAX_KEY_SIZE 40
#define ANUBIS_BLOCK_SIZE 16
@@ -461,11 +461,10 @@ static const u32 rc[] = {
static int anubis_setkey(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
struct anubis_ctx *ctx = crypto_tfm_ctx(tfm);
- const __be32 *key = (const __be32 *)in_key;
int N, R, i, r;
u32 kappa[ANUBIS_MAX_N];
u32 inter[ANUBIS_MAX_N];
switch (key_len) {
@@ -480,11 +479,11 @@ static int anubis_setkey(struct crypto_tfm *tfm, const u8 *in_key,
N = ctx->key_len >> 5;
ctx->R = R = 8 + N;
/*
 * map cipher key to initial key state (mu):
 */
for (i = 0; i < N; i++)
- kappa[i] = be32_to_cpu(key[i]);
+ kappa[i] = get_unaligned_be32(&in_key[4 * i]);
/*
* generate R + 1 round keys:
*/
for (r = 0; r <= R; r++) {
@@ -568,24 +567,22 @@ static int anubis_setkey(struct crypto_tfm *tfm, const u8 *in_key,
return 0;
}
static void anubis_crypt(u32 roundKey[ANUBIS_MAX_ROUNDS + 1][4],
- u8 *ciphertext, const u8 *plaintext, const int R)
+ u8 *dst, const u8 *src, const int R)
{
- const __be32 *src = (const __be32 *)plaintext;
- __be32 *dst = (__be32 *)ciphertext;
int i, r;
u32 state[4];
u32 inter[4];
/*
* map plaintext block to cipher state (mu)
* and add initial round key (sigma[K^0]):
*/
for (i = 0; i < 4; i++)
- state[i] = be32_to_cpu(src[i]) ^ roundKey[0][i];
+ state[i] = get_unaligned_be32(&src[4 * i]) ^ roundKey[0][i];
/*
* R - 1 full rounds:
*/
@@ -652,11 +649,11 @@ static void anubis_crypt(u32 roundKey[ANUBIS_MAX_ROUNDS + 1][4],
/*
* map cipher state to ciphertext block (mu^{-1}):
*/
for (i = 0; i < 4; i++)
- dst[i] = cpu_to_be32(inter[i]);
+ put_unaligned_be32(inter[i], &dst[4 * i]);
}
static void anubis_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct anubis_ctx *ctx = crypto_tfm_ctx(tfm);
@@ -673,11 +670,10 @@ static struct crypto_alg anubis_alg = {
.cra_name = "anubis",
.cra_driver_name = "anubis-generic",
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = ANUBIS_BLOCK_SIZE,
.cra_ctxsize = sizeof (struct anubis_ctx),
- .cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_u = { .cipher = {
.cia_min_keysize = ANUBIS_MIN_KEY_SIZE,
.cia_max_keysize = ANUBIS_MAX_KEY_SIZE,
.cia_setkey = anubis_setkey,
--
2.47.1
* [PATCH 2/8] crypto: aria - stop using cra_alignmask
From: Eric Biggers @ 2024-12-07 19:57 UTC
To: linux-crypto
From: Eric Biggers <ebiggers@google.com>
Instead of specifying a nonzero alignmask, use the unaligned access
helpers. This eliminates unnecessary alignment operations on most CPUs,
which can handle unaligned accesses efficiently, and brings us a step
closer to eventually removing support for the alignmask field.
Signed-off-by: Eric Biggers <ebiggers@google.com>
---
crypto/aria_generic.c | 37 +++++++++++++++++--------------------
1 file changed, 17 insertions(+), 20 deletions(-)
diff --git a/crypto/aria_generic.c b/crypto/aria_generic.c
index d96dfc4fdde67..bd359d3313c22 100644
--- a/crypto/aria_generic.c
+++ b/crypto/aria_generic.c
@@ -13,10 +13,11 @@
*
* Public domain version is distributed above.
*/
#include <crypto/aria.h>
+#include <linux/unaligned.h>
static const u32 key_rc[20] = {
0x517cc1b7, 0x27220a94, 0xfe13abe8, 0xfa9a6ee0,
0x6db14acc, 0x9e21c820, 0xff28b1d5, 0xef5de2b0,
0xdb92371d, 0x2126e970, 0x03249775, 0x04e8c90e,
@@ -25,36 +26,35 @@ static const u32 key_rc[20] = {
};
static void aria_set_encrypt_key(struct aria_ctx *ctx, const u8 *in_key,
unsigned int key_len)
{
- const __be32 *key = (const __be32 *)in_key;
u32 w0[4], w1[4], w2[4], w3[4];
u32 reg0, reg1, reg2, reg3;
const u32 *ck;
int rkidx = 0;
ck = &key_rc[(key_len - 16) / 2];
- w0[0] = be32_to_cpu(key[0]);
- w0[1] = be32_to_cpu(key[1]);
- w0[2] = be32_to_cpu(key[2]);
- w0[3] = be32_to_cpu(key[3]);
+ w0[0] = get_unaligned_be32(&in_key[0]);
+ w0[1] = get_unaligned_be32(&in_key[4]);
+ w0[2] = get_unaligned_be32(&in_key[8]);
+ w0[3] = get_unaligned_be32(&in_key[12]);
reg0 = w0[0] ^ ck[0];
reg1 = w0[1] ^ ck[1];
reg2 = w0[2] ^ ck[2];
reg3 = w0[3] ^ ck[3];
aria_subst_diff_odd(&reg0, &reg1, &reg2, &reg3);
if (key_len > 16) {
- w1[0] = be32_to_cpu(key[4]);
- w1[1] = be32_to_cpu(key[5]);
+ w1[0] = get_unaligned_be32(&in_key[16]);
+ w1[1] = get_unaligned_be32(&in_key[20]);
if (key_len > 24) {
- w1[2] = be32_to_cpu(key[6]);
- w1[3] = be32_to_cpu(key[7]);
+ w1[2] = get_unaligned_be32(&in_key[24]);
+ w1[3] = get_unaligned_be32(&in_key[28]);
} else {
w1[2] = 0;
w1[3] = 0;
}
} else {
@@ -193,21 +193,19 @@ int aria_set_key(struct crypto_tfm *tfm, const u8 *in_key, unsigned int key_len)
EXPORT_SYMBOL_GPL(aria_set_key);
static void __aria_crypt(struct aria_ctx *ctx, u8 *out, const u8 *in,
u32 key[][ARIA_RD_KEY_WORDS])
{
- const __be32 *src = (const __be32 *)in;
- __be32 *dst = (__be32 *)out;
u32 reg0, reg1, reg2, reg3;
int rounds, rkidx = 0;
rounds = ctx->rounds;
- reg0 = be32_to_cpu(src[0]);
- reg1 = be32_to_cpu(src[1]);
- reg2 = be32_to_cpu(src[2]);
- reg3 = be32_to_cpu(src[3]);
+ reg0 = get_unaligned_be32(&in[0]);
+ reg1 = get_unaligned_be32(&in[4]);
+ reg2 = get_unaligned_be32(&in[8]);
+ reg3 = get_unaligned_be32(&in[12]);
aria_add_round_key(key[rkidx], &reg0, &reg1, &reg2, &reg3);
rkidx++;
aria_subst_diff_odd(&reg0, &reg1, &reg2, &reg3);
@@ -239,14 +237,14 @@ static void __aria_crypt(struct aria_ctx *ctx, u8 *out, const u8 *in,
reg3 = key[rkidx][3] ^ make_u32((u8)(x1[get_u8(reg3, 0)]),
(u8)(x2[get_u8(reg3, 1)] >> 8),
(u8)(s1[get_u8(reg3, 2)]),
(u8)(s2[get_u8(reg3, 3)]));
- dst[0] = cpu_to_be32(reg0);
- dst[1] = cpu_to_be32(reg1);
- dst[2] = cpu_to_be32(reg2);
- dst[3] = cpu_to_be32(reg3);
+ put_unaligned_be32(reg0, &out[0]);
+ put_unaligned_be32(reg1, &out[4]);
+ put_unaligned_be32(reg2, &out[8]);
+ put_unaligned_be32(reg3, &out[12]);
}
void aria_encrypt(void *_ctx, u8 *out, const u8 *in)
{
struct aria_ctx *ctx = (struct aria_ctx *)_ctx;
@@ -282,11 +280,10 @@ static struct crypto_alg aria_alg = {
.cra_driver_name = "aria-generic",
.cra_priority = 100,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = ARIA_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aria_ctx),
- .cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_u = {
.cipher = {
.cia_min_keysize = ARIA_MIN_KEY_SIZE,
.cia_max_keysize = ARIA_MAX_KEY_SIZE,
--
2.47.1
* [PATCH 3/8] crypto: tea - stop using cra_alignmask
From: Eric Biggers @ 2024-12-07 19:57 UTC
To: linux-crypto
From: Eric Biggers <ebiggers@google.com>
Instead of specifying a nonzero alignmask, use the unaligned access
helpers. This eliminates unnecessary alignment operations on most CPUs,
which can handle unaligned accesses efficiently, and brings us a step
closer to eventually removing support for the alignmask field.
Signed-off-by: Eric Biggers <ebiggers@google.com>
---
crypto/tea.c | 83 +++++++++++++++++++++-------------------------------
1 file changed, 33 insertions(+), 50 deletions(-)
diff --git a/crypto/tea.c b/crypto/tea.c
index 896f863f3067c..b315da8c89ebc 100644
--- a/crypto/tea.c
+++ b/crypto/tea.c
@@ -16,11 +16,11 @@
#include <crypto/algapi.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
-#include <asm/byteorder.h>
+#include <linux/unaligned.h>
#include <linux/types.h>
#define TEA_KEY_SIZE 16
#define TEA_BLOCK_SIZE 8
#define TEA_ROUNDS 32
@@ -41,31 +41,28 @@ struct xtea_ctx {
static int tea_setkey(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
struct tea_ctx *ctx = crypto_tfm_ctx(tfm);
- const __le32 *key = (const __le32 *)in_key;
- ctx->KEY[0] = le32_to_cpu(key[0]);
- ctx->KEY[1] = le32_to_cpu(key[1]);
- ctx->KEY[2] = le32_to_cpu(key[2]);
- ctx->KEY[3] = le32_to_cpu(key[3]);
+ ctx->KEY[0] = get_unaligned_le32(&in_key[0]);
+ ctx->KEY[1] = get_unaligned_le32(&in_key[4]);
+ ctx->KEY[2] = get_unaligned_le32(&in_key[8]);
+ ctx->KEY[3] = get_unaligned_le32(&in_key[12]);
return 0;
}
static void tea_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
u32 y, z, n, sum = 0;
u32 k0, k1, k2, k3;
struct tea_ctx *ctx = crypto_tfm_ctx(tfm);
- const __le32 *in = (const __le32 *)src;
- __le32 *out = (__le32 *)dst;
- y = le32_to_cpu(in[0]);
- z = le32_to_cpu(in[1]);
+ y = get_unaligned_le32(&src[0]);
+ z = get_unaligned_le32(&src[4]);
k0 = ctx->KEY[0];
k1 = ctx->KEY[1];
k2 = ctx->KEY[2];
k3 = ctx->KEY[3];
@@ -76,24 +73,22 @@ static void tea_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
sum += TEA_DELTA;
y += ((z << 4) + k0) ^ (z + sum) ^ ((z >> 5) + k1);
z += ((y << 4) + k2) ^ (y + sum) ^ ((y >> 5) + k3);
}
- out[0] = cpu_to_le32(y);
- out[1] = cpu_to_le32(z);
+ put_unaligned_le32(y, &dst[0]);
+ put_unaligned_le32(z, &dst[4]);
}
static void tea_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
u32 y, z, n, sum;
u32 k0, k1, k2, k3;
struct tea_ctx *ctx = crypto_tfm_ctx(tfm);
- const __le32 *in = (const __le32 *)src;
- __le32 *out = (__le32 *)dst;
- y = le32_to_cpu(in[0]);
- z = le32_to_cpu(in[1]);
+ y = get_unaligned_le32(&src[0]);
+ z = get_unaligned_le32(&src[4]);
k0 = ctx->KEY[0];
k1 = ctx->KEY[1];
k2 = ctx->KEY[2];
k3 = ctx->KEY[3];
@@ -106,123 +101,113 @@ static void tea_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
z -= ((y << 4) + k2) ^ (y + sum) ^ ((y >> 5) + k3);
y -= ((z << 4) + k0) ^ (z + sum) ^ ((z >> 5) + k1);
sum -= TEA_DELTA;
}
- out[0] = cpu_to_le32(y);
- out[1] = cpu_to_le32(z);
+ put_unaligned_le32(y, &dst[0]);
+ put_unaligned_le32(z, &dst[4]);
}
static int xtea_setkey(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
struct xtea_ctx *ctx = crypto_tfm_ctx(tfm);
- const __le32 *key = (const __le32 *)in_key;
- ctx->KEY[0] = le32_to_cpu(key[0]);
- ctx->KEY[1] = le32_to_cpu(key[1]);
- ctx->KEY[2] = le32_to_cpu(key[2]);
- ctx->KEY[3] = le32_to_cpu(key[3]);
+ ctx->KEY[0] = get_unaligned_le32(&in_key[0]);
+ ctx->KEY[1] = get_unaligned_le32(&in_key[4]);
+ ctx->KEY[2] = get_unaligned_le32(&in_key[8]);
+ ctx->KEY[3] = get_unaligned_le32(&in_key[12]);
return 0;
}
static void xtea_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
u32 y, z, sum = 0;
u32 limit = XTEA_DELTA * XTEA_ROUNDS;
struct xtea_ctx *ctx = crypto_tfm_ctx(tfm);
- const __le32 *in = (const __le32 *)src;
- __le32 *out = (__le32 *)dst;
- y = le32_to_cpu(in[0]);
- z = le32_to_cpu(in[1]);
+ y = get_unaligned_le32(&src[0]);
+ z = get_unaligned_le32(&src[4]);
while (sum != limit) {
y += ((z << 4 ^ z >> 5) + z) ^ (sum + ctx->KEY[sum&3]);
sum += XTEA_DELTA;
z += ((y << 4 ^ y >> 5) + y) ^ (sum + ctx->KEY[sum>>11 &3]);
}
- out[0] = cpu_to_le32(y);
- out[1] = cpu_to_le32(z);
+ put_unaligned_le32(y, &dst[0]);
+ put_unaligned_le32(z, &dst[4]);
}
static void xtea_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
u32 y, z, sum;
struct tea_ctx *ctx = crypto_tfm_ctx(tfm);
- const __le32 *in = (const __le32 *)src;
- __le32 *out = (__le32 *)dst;
- y = le32_to_cpu(in[0]);
- z = le32_to_cpu(in[1]);
+ y = get_unaligned_le32(&src[0]);
+ z = get_unaligned_le32(&src[4]);
sum = XTEA_DELTA * XTEA_ROUNDS;
while (sum) {
z -= ((y << 4 ^ y >> 5) + y) ^ (sum + ctx->KEY[sum>>11 & 3]);
sum -= XTEA_DELTA;
y -= ((z << 4 ^ z >> 5) + z) ^ (sum + ctx->KEY[sum & 3]);
}
- out[0] = cpu_to_le32(y);
- out[1] = cpu_to_le32(z);
+ put_unaligned_le32(y, &dst[0]);
+ put_unaligned_le32(z, &dst[4]);
}
static void xeta_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
u32 y, z, sum = 0;
u32 limit = XTEA_DELTA * XTEA_ROUNDS;
struct xtea_ctx *ctx = crypto_tfm_ctx(tfm);
- const __le32 *in = (const __le32 *)src;
- __le32 *out = (__le32 *)dst;
- y = le32_to_cpu(in[0]);
- z = le32_to_cpu(in[1]);
+ y = get_unaligned_le32(&src[0]);
+ z = get_unaligned_le32(&src[4]);
while (sum != limit) {
y += (z << 4 ^ z >> 5) + (z ^ sum) + ctx->KEY[sum&3];
sum += XTEA_DELTA;
z += (y << 4 ^ y >> 5) + (y ^ sum) + ctx->KEY[sum>>11 &3];
}
- out[0] = cpu_to_le32(y);
- out[1] = cpu_to_le32(z);
+ put_unaligned_le32(y, &dst[0]);
+ put_unaligned_le32(z, &dst[4]);
}
static void xeta_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
u32 y, z, sum;
struct tea_ctx *ctx = crypto_tfm_ctx(tfm);
- const __le32 *in = (const __le32 *)src;
- __le32 *out = (__le32 *)dst;
- y = le32_to_cpu(in[0]);
- z = le32_to_cpu(in[1]);
+ y = get_unaligned_le32(&src[0]);
+ z = get_unaligned_le32(&src[4]);
sum = XTEA_DELTA * XTEA_ROUNDS;
while (sum) {
z -= (y << 4 ^ y >> 5) + (y ^ sum) + ctx->KEY[sum>>11 & 3];
sum -= XTEA_DELTA;
y -= (z << 4 ^ z >> 5) + (z ^ sum) + ctx->KEY[sum & 3];
}
- out[0] = cpu_to_le32(y);
- out[1] = cpu_to_le32(z);
+ put_unaligned_le32(y, &dst[0]);
+ put_unaligned_le32(z, &dst[4]);
}
static struct crypto_alg tea_algs[3] = { {
.cra_name = "tea",
.cra_driver_name = "tea-generic",
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = TEA_BLOCK_SIZE,
.cra_ctxsize = sizeof (struct tea_ctx),
- .cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_u = { .cipher = {
.cia_min_keysize = TEA_KEY_SIZE,
.cia_max_keysize = TEA_KEY_SIZE,
.cia_setkey = tea_setkey,
@@ -232,11 +217,10 @@ static struct crypto_alg tea_algs[3] = { {
.cra_name = "xtea",
.cra_driver_name = "xtea-generic",
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = XTEA_BLOCK_SIZE,
.cra_ctxsize = sizeof (struct xtea_ctx),
- .cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_u = { .cipher = {
.cia_min_keysize = XTEA_KEY_SIZE,
.cia_max_keysize = XTEA_KEY_SIZE,
.cia_setkey = xtea_setkey,
@@ -246,11 +230,10 @@ static struct crypto_alg tea_algs[3] = { {
.cra_name = "xeta",
.cra_driver_name = "xeta-generic",
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = XTEA_BLOCK_SIZE,
.cra_ctxsize = sizeof (struct xtea_ctx),
- .cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_u = { .cipher = {
.cia_min_keysize = XTEA_KEY_SIZE,
.cia_max_keysize = XTEA_KEY_SIZE,
.cia_setkey = xtea_setkey,
--
2.47.1
* [PATCH 4/8] crypto: khazad - stop using cra_alignmask
From: Eric Biggers @ 2024-12-07 19:57 UTC
To: linux-crypto
From: Eric Biggers <ebiggers@google.com>
Instead of specifying a nonzero alignmask, use the unaligned access
helpers. This eliminates unnecessary alignment operations on most CPUs,
which can handle unaligned accesses efficiently, and brings us a step
closer to eventually removing support for the alignmask field.
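In khazad's case this also lets each pair of big-endian 32-bit loads that was
combined into a 64-bit word become a single 64-bit load. As an illustrative
sketch (k is a const u8 * pointing at 8 bytes; the variable names are made up,
not from the patch), the two expressions below produce the same value:

  /* building the value from two 32-bit big-endian loads ... */
  u64 from_pair = ((u64)get_unaligned_be32(k) << 32) | get_unaligned_be32(k + 4);

  /* ... yields the same value as one 64-bit big-endian load */
  u64 from_be64 = get_unaligned_be64(k);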
Signed-off-by: Eric Biggers <ebiggers@google.com>
---
crypto/khazad.c | 17 ++++++-----------
1 file changed, 6 insertions(+), 11 deletions(-)
diff --git a/crypto/khazad.c b/crypto/khazad.c
index 70cafe73f9740..7ad338ca2c18f 100644
--- a/crypto/khazad.c
+++ b/crypto/khazad.c
@@ -21,11 +21,11 @@
#include <crypto/algapi.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
-#include <asm/byteorder.h>
+#include <linux/unaligned.h>
#include <linux/types.h>
#define KHAZAD_KEY_SIZE 16
#define KHAZAD_BLOCK_SIZE 8
#define KHAZAD_ROUNDS 8
@@ -755,18 +755,16 @@ static const u64 c[KHAZAD_ROUNDS + 1] = {
static int khazad_setkey(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
struct khazad_ctx *ctx = crypto_tfm_ctx(tfm);
- const __be32 *key = (const __be32 *)in_key;
int r;
const u64 *S = T7;
u64 K2, K1;
- /* key is supposed to be 32-bit aligned */
- K2 = ((u64)be32_to_cpu(key[0]) << 32) | be32_to_cpu(key[1]);
- K1 = ((u64)be32_to_cpu(key[2]) << 32) | be32_to_cpu(key[3]);
+ K2 = get_unaligned_be64(&in_key[0]);
+ K1 = get_unaligned_be64(&in_key[8]);
/* setup the encrypt key */
for (r = 0; r <= KHAZAD_ROUNDS; r++) {
ctx->E[r] = T0[(int)(K1 >> 56) ] ^
T1[(int)(K1 >> 48) & 0xff] ^
@@ -798,18 +796,16 @@ static int khazad_setkey(struct crypto_tfm *tfm, const u8 *in_key,
return 0;
}
static void khazad_crypt(const u64 roundKey[KHAZAD_ROUNDS + 1],
- u8 *ciphertext, const u8 *plaintext)
+ u8 *dst, const u8 *src)
{
- const __be64 *src = (const __be64 *)plaintext;
- __be64 *dst = (__be64 *)ciphertext;
int r;
u64 state;
- state = be64_to_cpu(*src) ^ roundKey[0];
+ state = get_unaligned_be64(src) ^ roundKey[0];
for (r = 1; r < KHAZAD_ROUNDS; r++) {
state = T0[(int)(state >> 56) ] ^
T1[(int)(state >> 48) & 0xff] ^
T2[(int)(state >> 40) & 0xff] ^
@@ -829,11 +825,11 @@ static void khazad_crypt(const u64 roundKey[KHAZAD_ROUNDS + 1],
(T5[(int)(state >> 16) & 0xff] & 0x0000000000ff0000ULL) ^
(T6[(int)(state >> 8) & 0xff] & 0x000000000000ff00ULL) ^
(T7[(int)(state ) & 0xff] & 0x00000000000000ffULL) ^
roundKey[KHAZAD_ROUNDS];
- *dst = cpu_to_be64(state);
+ put_unaligned_be64(state, dst);
}
static void khazad_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct khazad_ctx *ctx = crypto_tfm_ctx(tfm);
@@ -850,11 +846,10 @@ static struct crypto_alg khazad_alg = {
.cra_name = "khazad",
.cra_driver_name = "khazad-generic",
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = KHAZAD_BLOCK_SIZE,
.cra_ctxsize = sizeof (struct khazad_ctx),
- .cra_alignmask = 7,
.cra_module = THIS_MODULE,
.cra_u = { .cipher = {
.cia_min_keysize = KHAZAD_KEY_SIZE,
.cia_max_keysize = KHAZAD_KEY_SIZE,
.cia_setkey = khazad_setkey,
--
2.47.1
* [PATCH 5/8] crypto: seed - stop using cra_alignmask
From: Eric Biggers @ 2024-12-07 19:57 UTC
To: linux-crypto
From: Eric Biggers <ebiggers@google.com>
Instead of specifying a nonzero alignmask, use the unaligned access
helpers. This eliminates unnecessary alignment operations on most CPUs,
which can handle unaligned accesses efficiently, and brings us a step
closer to eventually removing support for the alignmask field.
Signed-off-by: Eric Biggers <ebiggers@google.com>
---
crypto/seed.c | 48 +++++++++++++++++++++---------------------------
1 file changed, 21 insertions(+), 27 deletions(-)
diff --git a/crypto/seed.c b/crypto/seed.c
index d0506ade2a5f8..d05d8ed909fa7 100644
--- a/crypto/seed.c
+++ b/crypto/seed.c
@@ -11,11 +11,11 @@
#include <crypto/algapi.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
-#include <asm/byteorder.h>
+#include <linux/unaligned.h>
#define SEED_NUM_KCONSTANTS 16
#define SEED_KEY_SIZE 16
#define SEED_BLOCK_SIZE 16
#define SEED_KEYSCHED_LEN 32
@@ -327,17 +327,16 @@ static const u32 KC[SEED_NUM_KCONSTANTS] = {
static int seed_set_key(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
struct seed_ctx *ctx = crypto_tfm_ctx(tfm);
u32 *keyout = ctx->keysched;
- const __be32 *key = (const __be32 *)in_key;
u32 i, t0, t1, x1, x2, x3, x4;
- x1 = be32_to_cpu(key[0]);
- x2 = be32_to_cpu(key[1]);
- x3 = be32_to_cpu(key[2]);
- x4 = be32_to_cpu(key[3]);
+ x1 = get_unaligned_be32(&in_key[0]);
+ x2 = get_unaligned_be32(&in_key[4]);
+ x3 = get_unaligned_be32(&in_key[8]);
+ x4 = get_unaligned_be32(&in_key[12]);
for (i = 0; i < SEED_NUM_KCONSTANTS; i++) {
t0 = x1 + x3 - KC[i];
t1 = x2 + KC[i] - x4;
*(keyout++) = SS0[byte(t0, 0)] ^ SS1[byte(t0, 1)] ^
@@ -362,19 +361,17 @@ static int seed_set_key(struct crypto_tfm *tfm, const u8 *in_key,
/* encrypt a block of text */
static void seed_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
const struct seed_ctx *ctx = crypto_tfm_ctx(tfm);
- const __be32 *src = (const __be32 *)in;
- __be32 *dst = (__be32 *)out;
u32 x1, x2, x3, x4, t0, t1;
const u32 *ks = ctx->keysched;
- x1 = be32_to_cpu(src[0]);
- x2 = be32_to_cpu(src[1]);
- x3 = be32_to_cpu(src[2]);
- x4 = be32_to_cpu(src[3]);
+ x1 = get_unaligned_be32(&in[0]);
+ x2 = get_unaligned_be32(&in[4]);
+ x3 = get_unaligned_be32(&in[8]);
+ x4 = get_unaligned_be32(&in[12]);
OP(x1, x2, x3, x4, 0);
OP(x3, x4, x1, x2, 2);
OP(x1, x2, x3, x4, 4);
OP(x3, x4, x1, x2, 6);
@@ -389,30 +386,28 @@ static void seed_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
OP(x1, x2, x3, x4, 24);
OP(x3, x4, x1, x2, 26);
OP(x1, x2, x3, x4, 28);
OP(x3, x4, x1, x2, 30);
- dst[0] = cpu_to_be32(x3);
- dst[1] = cpu_to_be32(x4);
- dst[2] = cpu_to_be32(x1);
- dst[3] = cpu_to_be32(x2);
+ put_unaligned_be32(x3, &out[0]);
+ put_unaligned_be32(x4, &out[4]);
+ put_unaligned_be32(x1, &out[8]);
+ put_unaligned_be32(x2, &out[12]);
}
/* decrypt a block of text */
static void seed_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
const struct seed_ctx *ctx = crypto_tfm_ctx(tfm);
- const __be32 *src = (const __be32 *)in;
- __be32 *dst = (__be32 *)out;
u32 x1, x2, x3, x4, t0, t1;
const u32 *ks = ctx->keysched;
- x1 = be32_to_cpu(src[0]);
- x2 = be32_to_cpu(src[1]);
- x3 = be32_to_cpu(src[2]);
- x4 = be32_to_cpu(src[3]);
+ x1 = get_unaligned_be32(&in[0]);
+ x2 = get_unaligned_be32(&in[4]);
+ x3 = get_unaligned_be32(&in[8]);
+ x4 = get_unaligned_be32(&in[12]);
OP(x1, x2, x3, x4, 30);
OP(x3, x4, x1, x2, 28);
OP(x1, x2, x3, x4, 26);
OP(x3, x4, x1, x2, 24);
@@ -427,25 +422,24 @@ static void seed_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
OP(x1, x2, x3, x4, 6);
OP(x3, x4, x1, x2, 4);
OP(x1, x2, x3, x4, 2);
OP(x3, x4, x1, x2, 0);
- dst[0] = cpu_to_be32(x3);
- dst[1] = cpu_to_be32(x4);
- dst[2] = cpu_to_be32(x1);
- dst[3] = cpu_to_be32(x2);
+ put_unaligned_be32(x3, &out[0]);
+ put_unaligned_be32(x4, &out[4]);
+ put_unaligned_be32(x1, &out[8]);
+ put_unaligned_be32(x2, &out[12]);
}
static struct crypto_alg seed_alg = {
.cra_name = "seed",
.cra_driver_name = "seed-generic",
.cra_priority = 100,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = SEED_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct seed_ctx),
- .cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_u = {
.cipher = {
.cia_min_keysize = SEED_KEY_SIZE,
.cia_max_keysize = SEED_KEY_SIZE,
--
2.47.1
* [PATCH 6/8] crypto: x86 - remove assignments of 0 to cra_alignmask
From: Eric Biggers @ 2024-12-07 19:57 UTC
To: linux-crypto
From: Eric Biggers <ebiggers@google.com>
Struct fields are zero by default, so these lines of code have no
effect. Remove them to reduce the number of matches that are found when
grepping for cra_alignmask.
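This relies only on standard C initializer semantics: in the static algorithm
definitions touched below, any field not named in the designated initializer
is zero-initialized. A minimal illustration (not code from the kernel tree;
the name is made up):

  #include <linux/crypto.h>

  static struct crypto_alg example_alg = {
          .cra_priority = 200,
          /* cra_alignmask, like every other field not listed here,
           * is implicitly initialized to zero
           */
  };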
Signed-off-by: Eric Biggers <ebiggers@google.com>
---
arch/x86/crypto/aegis128-aesni-glue.c | 1 -
arch/x86/crypto/blowfish_glue.c | 1 -
arch/x86/crypto/camellia_glue.c | 1 -
arch/x86/crypto/des3_ede_glue.c | 1 -
arch/x86/crypto/twofish_glue.c | 1 -
5 files changed, 5 deletions(-)
diff --git a/arch/x86/crypto/aegis128-aesni-glue.c b/arch/x86/crypto/aegis128-aesni-glue.c
index c19d8e3d96a35..01fa568dc5fc4 100644
--- a/arch/x86/crypto/aegis128-aesni-glue.c
+++ b/arch/x86/crypto/aegis128-aesni-glue.c
@@ -238,11 +238,10 @@ static struct aead_alg crypto_aegis128_aesni_alg = {
.base = {
.cra_flags = CRYPTO_ALG_INTERNAL,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct aegis_ctx) +
__alignof__(struct aegis_ctx),
- .cra_alignmask = 0,
.cra_priority = 400,
.cra_name = "__aegis128",
.cra_driver_name = "__aegis128-aesni",
diff --git a/arch/x86/crypto/blowfish_glue.c b/arch/x86/crypto/blowfish_glue.c
index 552f2df0643f2..26c5f2ee5d103 100644
--- a/arch/x86/crypto/blowfish_glue.c
+++ b/arch/x86/crypto/blowfish_glue.c
@@ -92,11 +92,10 @@ static struct crypto_alg bf_cipher_alg = {
.cra_driver_name = "blowfish-asm",
.cra_priority = 200,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = BF_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct bf_ctx),
- .cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_u = {
.cipher = {
.cia_min_keysize = BF_MIN_KEY_SIZE,
.cia_max_keysize = BF_MAX_KEY_SIZE,
diff --git a/arch/x86/crypto/camellia_glue.c b/arch/x86/crypto/camellia_glue.c
index f110708c8038c..3bd37d6641216 100644
--- a/arch/x86/crypto/camellia_glue.c
+++ b/arch/x86/crypto/camellia_glue.c
@@ -1311,11 +1311,10 @@ static struct crypto_alg camellia_cipher_alg = {
.cra_driver_name = "camellia-asm",
.cra_priority = 200,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = CAMELLIA_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct camellia_ctx),
- .cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_u = {
.cipher = {
.cia_min_keysize = CAMELLIA_MIN_KEY_SIZE,
.cia_max_keysize = CAMELLIA_MAX_KEY_SIZE,
diff --git a/arch/x86/crypto/des3_ede_glue.c b/arch/x86/crypto/des3_ede_glue.c
index abb8b1fe123b4..e88439d3828ea 100644
--- a/arch/x86/crypto/des3_ede_glue.c
+++ b/arch/x86/crypto/des3_ede_glue.c
@@ -289,11 +289,10 @@ static struct crypto_alg des3_ede_cipher = {
.cra_driver_name = "des3_ede-asm",
.cra_priority = 200,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = DES3_EDE_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct des3_ede_x86_ctx),
- .cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_u = {
.cipher = {
.cia_min_keysize = DES3_EDE_KEY_SIZE,
.cia_max_keysize = DES3_EDE_KEY_SIZE,
diff --git a/arch/x86/crypto/twofish_glue.c b/arch/x86/crypto/twofish_glue.c
index 0614beece2793..4c67184dc573e 100644
--- a/arch/x86/crypto/twofish_glue.c
+++ b/arch/x86/crypto/twofish_glue.c
@@ -66,11 +66,10 @@ static struct crypto_alg alg = {
.cra_driver_name = "twofish-asm",
.cra_priority = 200,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = TF_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct twofish_ctx),
- .cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_u = {
.cipher = {
.cia_min_keysize = TF_MIN_KEY_SIZE,
.cia_max_keysize = TF_MAX_KEY_SIZE,
--
2.47.1
* [PATCH 7/8] crypto: aegis - remove assignments of 0 to cra_alignmask
From: Eric Biggers @ 2024-12-07 19:57 UTC
To: linux-crypto
From: Eric Biggers <ebiggers@google.com>
Struct fields are zero by default, so these lines of code have no
effect. Remove them to reduce the number of matches that are found when
grepping for cra_alignmask.
Signed-off-by: Eric Biggers <ebiggers@google.com>
---
crypto/aegis128-core.c | 2 --
1 file changed, 2 deletions(-)
diff --git a/crypto/aegis128-core.c b/crypto/aegis128-core.c
index 4fdb53435827e..6cbff298722b4 100644
--- a/crypto/aegis128-core.c
+++ b/crypto/aegis128-core.c
@@ -514,11 +514,10 @@ static struct aead_alg crypto_aegis128_alg_generic = {
.maxauthsize = AEGIS128_MAX_AUTH_SIZE,
.chunksize = AEGIS_BLOCK_SIZE,
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct aegis_ctx),
- .base.cra_alignmask = 0,
.base.cra_priority = 100,
.base.cra_name = "aegis128",
.base.cra_driver_name = "aegis128-generic",
.base.cra_module = THIS_MODULE,
};
@@ -533,11 +532,10 @@ static struct aead_alg crypto_aegis128_alg_simd = {
.maxauthsize = AEGIS128_MAX_AUTH_SIZE,
.chunksize = AEGIS_BLOCK_SIZE,
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct aegis_ctx),
- .base.cra_alignmask = 0,
.base.cra_priority = 200,
.base.cra_name = "aegis128",
.base.cra_driver_name = "aegis128-simd",
.base.cra_module = THIS_MODULE,
};
--
2.47.1
* [PATCH 8/8] crypto: keywrap - remove assignment of 0 to cra_alignmask
From: Eric Biggers @ 2024-12-07 19:57 UTC
To: linux-crypto
From: Eric Biggers <ebiggers@google.com>
Since this code is zero-initializing the algorithm struct, the
assignment of 0 to cra_alignmask is redundant. Remove it to reduce the
number of matches that are found when grepping for cra_alignmask.
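A hedged sketch of why the assignment is a no-op, assuming (as the commit
message states for this code path) that the instance memory comes back from a
zeroing allocation such as kzalloc(); the function name is illustrative only:

  #include <linux/crypto.h>
  #include <linux/slab.h>

  static struct crypto_alg *alloc_zeroed_alg(void)
  {
          struct crypto_alg *alg = kzalloc(sizeof(*alg), GFP_KERNEL);

          if (!alg)
                  return NULL;
          /* alg->cra_alignmask is already 0 here, so an explicit
           * "alg->cra_alignmask = 0;" would change nothing.
           */
          return alg;
  }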
Signed-off-by: Eric Biggers <ebiggers@google.com>
---
crypto/keywrap.c | 1 -
1 file changed, 1 deletion(-)
diff --git a/crypto/keywrap.c b/crypto/keywrap.c
index 385ffdfd5a9b4..5ec4f94d46bd0 100644
--- a/crypto/keywrap.c
+++ b/crypto/keywrap.c
@@ -277,11 +277,10 @@ static int crypto_kw_create(struct crypto_template *tmpl, struct rtattr **tb)
/* Section 5.1 requirement for KW */
if (alg->cra_blocksize != sizeof(struct crypto_kw_block))
goto out_free_inst;
inst->alg.base.cra_blocksize = SEMIBSIZE;
- inst->alg.base.cra_alignmask = 0;
inst->alg.ivsize = SEMIBSIZE;
inst->alg.encrypt = crypto_kw_encrypt;
inst->alg.decrypt = crypto_kw_decrypt;
--
2.47.1
* Re: [PATCH 0/8] crypto: more alignmask cleanups
From: Ard Biesheuvel @ 2024-12-09 10:23 UTC
To: Eric Biggers; +Cc: linux-crypto
On Sat, 7 Dec 2024 at 20:58, Eric Biggers <ebiggers@kernel.org> wrote:
>
> Remove some of the remaining uses of cra_alignmask.
>
> Eric Biggers (8):
> crypto: anubis - stop using cra_alignmask
> crypto: aria - stop using cra_alignmask
> crypto: tea - stop using cra_alignmask
> crypto: khazad - stop using cra_alignmask
> crypto: seed - stop using cra_alignmask
> crypto: x86 - remove assignments of 0 to cra_alignmask
> crypto: aegis - remove assignments of 0 to cra_alignmask
> crypto: keywrap - remove assignment of 0 to cra_alignmask
>
Acked-by: Ard Biesheuvel <ardb@kernel.org>
Is it time yet to remove anubis and khazad entirely?
> arch/x86/crypto/aegis128-aesni-glue.c | 1 -
> arch/x86/crypto/blowfish_glue.c | 1 -
> arch/x86/crypto/camellia_glue.c | 1 -
> arch/x86/crypto/des3_ede_glue.c | 1 -
> arch/x86/crypto/twofish_glue.c | 1 -
> crypto/aegis128-core.c | 2 -
> crypto/anubis.c | 14 ++---
> crypto/aria_generic.c | 37 ++++++------
> crypto/keywrap.c | 1 -
> crypto/khazad.c | 17 ++----
> crypto/seed.c | 48 +++++++---------
> crypto/tea.c | 83 +++++++++++----------------
> 12 files changed, 82 insertions(+), 125 deletions(-)
>
>
> base-commit: b5f217084ab3ddd4bdd03cd437f8e3b7e2d1f5b6
> --
> 2.47.1
>
>
* Re: [PATCH 0/8] crypto: more alignmask cleanups
From: Herbert Xu @ 2024-12-14 9:29 UTC
To: Eric Biggers; +Cc: linux-crypto
Eric Biggers <ebiggers@kernel.org> wrote:
> Remove some of the remaining uses of cra_alignmask.
>
> Eric Biggers (8):
> crypto: anubis - stop using cra_alignmask
> crypto: aria - stop using cra_alignmask
> crypto: tea - stop using cra_alignmask
> crypto: khazad - stop using cra_alignmask
> crypto: seed - stop using cra_alignmask
> crypto: x86 - remove assignments of 0 to cra_alignmask
> crypto: aegis - remove assignments of 0 to cra_alignmask
> crypto: keywrap - remove assignment of 0 to cra_alignmask
>
> arch/x86/crypto/aegis128-aesni-glue.c | 1 -
> arch/x86/crypto/blowfish_glue.c | 1 -
> arch/x86/crypto/camellia_glue.c | 1 -
> arch/x86/crypto/des3_ede_glue.c | 1 -
> arch/x86/crypto/twofish_glue.c | 1 -
> crypto/aegis128-core.c | 2 -
> crypto/anubis.c | 14 ++---
> crypto/aria_generic.c | 37 ++++++------
> crypto/keywrap.c | 1 -
> crypto/khazad.c | 17 ++----
> crypto/seed.c | 48 +++++++---------
> crypto/tea.c | 83 +++++++++++----------------
> 12 files changed, 82 insertions(+), 125 deletions(-)
>
>
> base-commit: b5f217084ab3ddd4bdd03cd437f8e3b7e2d1f5b6
All applied. Thanks.
--
Email: Herbert Xu <herbert@gondor.apana.org.au>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt