From: Stephan Mueller <smueller@chronox.de>
To: Herbert Xu <herbert@gondor.apana.org.au>
Cc: Daniel Borkmann <dborkman@redhat.com>,
quentin.gouchet@gmail.com, LKML <linux-kernel@vger.kernel.org>,
linux-crypto@vger.kernel.org, ABI/API <linux-api@vger.kernel.org>
Subject: [PATCH v2 04/10] crypto: AF_ALG: crypto API calls to inline functions
Date: Sun, 16 Nov 2014 03:25:49 +0100 [thread overview]
Message-ID: <8139637.nYqaFSIk4I@tachyon.chronox.de> (raw)
In-Reply-To: <5365136.g8vbXlhRyC@tachyon.chronox.de>
To avoid excessive branches and cluttering the code, all kernel crypto
API calls are extracted into separate inline functions. These functions
invoke either the ablkcipher or the aead crypto API functions, as
necessary.
Signed-off-by: Stephan Mueller <smueller@chronox.de>
---
crypto/algif_skcipher.c | 143 ++++++++++++++++++++++++++++++++++++++++++------
1 file changed, 125 insertions(+), 18 deletions(-)
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index 483ff97..d0e31ab 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -247,14 +247,121 @@ static void skcipher_data_wakeup(struct sock *sk)
rcu_read_unlock();
}
+static inline bool skcipher_is_aead(struct crypto_tfm *tfm)
+{
+ return ((crypto_tfm_alg_type(tfm) & CRYPTO_ALG_TYPE_MASK) ==
+ CRYPTO_ALG_TYPE_AEAD);
+}
+
+static inline unsigned int skcipher_crypto_ivsize(void *private)
+{
+ if (skcipher_is_aead(private))
+ return crypto_aead_ivsize(private);
+ else
+ return crypto_ablkcipher_ivsize(private);
+}
+
+static inline unsigned int skcipher_crypto_ivsize_ctx(struct skcipher_ctx *ctx)
+{
+ if (ctx->aead)
+ return crypto_aead_ivsize(crypto_aead_reqtfm(&ctx->u.aead_req));
+ else
+ return crypto_ablkcipher_ivsize(
+ crypto_ablkcipher_reqtfm(&ctx->u.ablkcipher_req));
+}
+
+static inline unsigned int skcipher_crypto_blocksize(struct skcipher_ctx *ctx)
+{
+ if (ctx->aead)
+ return crypto_aead_blocksize(
+ crypto_aead_reqtfm(&ctx->u.aead_req));
+ else
+ return crypto_ablkcipher_blocksize(
+ crypto_ablkcipher_reqtfm(&ctx->u.ablkcipher_req));
+}
+
+static inline unsigned int skcipher_crypto_reqsize(void *private)
+{
+ if (skcipher_is_aead(private))
+ return crypto_aead_reqsize(private);
+ else
+ return crypto_ablkcipher_reqsize(private);
+}
+
+static inline unsigned int skcipher_crypto_setkey(void *private, const u8 *key,
+ unsigned int keylen)
+{
+ if (skcipher_is_aead(private))
+ return crypto_aead_setkey(private, key, keylen);
+ else
+ return crypto_ablkcipher_setkey(private, key, keylen);
+}
+
+static inline void skcipher_crypto_free(void *private)
+{
+ if (skcipher_is_aead(private))
+ crypto_free_aead(private);
+ else
+ crypto_free_ablkcipher(private);
+}
+
+static inline void skcipher_request_set_tfm(struct skcipher_ctx *ctx, void *tfm)
+{
+ if (ctx->aead)
+ aead_request_set_tfm(&ctx->u.aead_req, tfm);
+ else
+ ablkcipher_request_set_tfm(&ctx->u.ablkcipher_req, tfm);
+}
+
+static inline int skcipher_crypto_encrypt(struct skcipher_ctx *ctx)
+{
+ if (ctx->aead)
+ return crypto_aead_encrypt(&ctx->u.aead_req);
+ else
+ return crypto_ablkcipher_encrypt(&ctx->u.ablkcipher_req);
+}
+
+static inline int skcipher_crypto_decrypt(struct skcipher_ctx *ctx)
+{
+ if (ctx->aead)
+ return crypto_aead_decrypt(&ctx->u.aead_req);
+ else
+ return crypto_ablkcipher_decrypt(&ctx->u.ablkcipher_req);
+}
+
+static inline void skcipher_crypto_set_crypt(struct skcipher_ctx *ctx,
+ struct scatterlist *src,
+ struct scatterlist *dst,
+ unsigned int cryptlen, u8 *iv)
+{
+ if (ctx->aead)
+ return aead_request_set_crypt(&ctx->u.aead_req, src, dst,
+ cryptlen, iv);
+ else
+ return ablkcipher_request_set_crypt(&ctx->u.ablkcipher_req, src,
+ dst, cryptlen, iv);
+}
+
+static inline void skcipher_request_set_callback(struct skcipher_ctx *ctx,
+ u32 flags,
+ crypto_completion_t complete,
+ void *data)
+{
+ if (ctx->aead)
+ aead_request_set_callback(&ctx->u.aead_req, flags, complete,
+ data);
+ else
+ ablkcipher_request_set_callback(&ctx->u.ablkcipher_req, flags,
+ complete, data);
+}
+
static int skcipher_sendmsg(struct kiocb *unused, struct socket *sock,
struct msghdr *msg, size_t size)
{
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
struct skcipher_ctx *ctx = ask->private;
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(&ctx->req);
- unsigned ivsize = crypto_ablkcipher_ivsize(tfm);
+ unsigned ivsize = skcipher_crypto_ivsize_ctx(ctx);
struct skcipher_sg_list *sgl;
struct af_alg_control con = {};
long copied = 0;
@@ -432,8 +539,7 @@ static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock,
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
struct skcipher_ctx *ctx = ask->private;
- unsigned bs = crypto_ablkcipher_blocksize(crypto_ablkcipher_reqtfm(
- &ctx->req));
+ unsigned bs = skcipher_crypto_blocksize(ctx);
struct skcipher_sg_list *sgl;
struct scatterlist *sg;
unsigned long iovlen;
@@ -483,8 +589,8 @@ static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock,
err = af_alg_wait_for_completion(
ctx->enc ?
- crypto_ablkcipher_encrypt(&ctx->req) :
- crypto_ablkcipher_decrypt(&ctx->req),
+ skcipher_crypto_encrypt(ctx) :
+ skcipher_crypto_decrypt(ctx),
&ctx->completion);
free:
@@ -603,23 +709,23 @@ static void *skcipher_bind(const char *name, u32 type, u32 mask)
static void skcipher_release(void *private)
{
- crypto_free_ablkcipher(private);
+ skcipher_crypto_free(private);
}
static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen)
{
- return crypto_ablkcipher_setkey(private, key, keylen);
+ return skcipher_crypto_setkey(private, key, keylen);
}
static void skcipher_sock_destruct(struct sock *sk)
{
struct alg_sock *ask = alg_sk(sk);
struct skcipher_ctx *ctx = ask->private;
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(&ctx->req);
+ unsigned int ivlen = skcipher_crypto_ivsize_ctx(ctx);
skcipher_free_sgl(sk);
- memzero_explicit(ctx->iv, crypto_ablkcipher_ivsize(tfm));
- sock_kfree_s(sk, ctx->iv, crypto_ablkcipher_ivsize(tfm));
+ memzero_explicit(ctx->iv, ivlen);
+ sock_kfree_s(sk, ctx->iv, ivlen);
sock_kfree_s(sk, ctx, ctx->len);
af_alg_release_parent(sk);
}
@@ -628,20 +734,20 @@ static int skcipher_accept_parent(void *private, struct sock *sk)
{
struct skcipher_ctx *ctx;
struct alg_sock *ask = alg_sk(sk);
- unsigned int len = sizeof(*ctx) + crypto_ablkcipher_reqsize(private);
+ unsigned int len = sizeof(*ctx) + skcipher_crypto_reqsize(private);
+ unsigned int ivlen = skcipher_crypto_ivsize(private);
ctx = sock_kmalloc(sk, len, GFP_KERNEL);
if (!ctx)
return -ENOMEM;
- ctx->iv = sock_kmalloc(sk, crypto_ablkcipher_ivsize(private),
- GFP_KERNEL);
+ ctx->iv = sock_kmalloc(sk, ivlen, GFP_KERNEL);
if (!ctx->iv) {
sock_kfree_s(sk, ctx, len);
return -ENOMEM;
}
- memset(ctx->iv, 0, crypto_ablkcipher_ivsize(private));
+ memset(ctx->iv, 0, ivlen);
INIT_LIST_HEAD(&ctx->tsgl);
ctx->len = len;
@@ -649,13 +755,14 @@ static int skcipher_accept_parent(void *private, struct sock *sk)
ctx->more = 0;
ctx->merge = 0;
ctx->enc = 0;
+ ctx->aead = skcipher_is_aead(private);
af_alg_init_completion(&ctx->completion);
ask->private = ctx;
- ablkcipher_request_set_tfm(&ctx->req, private);
- ablkcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- af_alg_complete, &ctx->completion);
+ skcipher_request_set_tfm(ctx, private);
+ skcipher_request_set_callback(ctx, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ af_alg_complete, &ctx->completion);
sk->sk_destruct = skcipher_sock_destruct;
--
2.1.0
next prev parent reply other threads:[~2014-11-16 2:25 UTC|newest]
Thread overview: 41+ messages / expand[flat|nested] mbox.gz Atom feed top
2014-11-16 2:23 [PATCH v2 00/10] crypto: AF_ALG: add AEAD and RNG support Stephan Mueller
2014-11-16 2:23 ` [PATCH v2 01/10] crypto: AF_ALG: add user space interface for AEAD Stephan Mueller
2014-11-18 14:06 ` Herbert Xu
[not found] ` <20141118140631.GA12100-lOAM2aK0SrRLBo1qDEOMRrpzq4S04n8Q@public.gmane.org>
2014-11-19 0:34 ` Stephan Mueller
2014-11-19 4:20 ` Stephan Mueller
[not found] ` <2398701.sGeMzIcHaz-PJstQz4BMNNP20K/wil9xYQuADTiUCJX@public.gmane.org>
2014-11-19 4:27 ` Herbert Xu
[not found] ` <20141119042704.GA19258-lOAM2aK0SrRLBo1qDEOMRrpzq4S04n8Q@public.gmane.org>
2014-11-19 6:30 ` Stephan Mueller
[not found] ` <12318471.ucMNmAKX0e-PJstQz4BMNNP20K/wil9xYQuADTiUCJX@public.gmane.org>
2014-11-19 6:45 ` Herbert Xu
2014-11-16 2:24 ` [PATCH v2 02/10] crypto: AF_ALG: user space interface for cipher info Stephan Mueller
2014-11-18 14:08 ` Herbert Xu
2014-11-19 1:02 ` Stephan Mueller
2014-11-19 1:05 ` Herbert Xu
[not found] ` <20141118140822.GB12100-lOAM2aK0SrRLBo1qDEOMRrpzq4S04n8Q@public.gmane.org>
2014-11-20 4:03 ` Stephan Mueller
2014-11-20 6:32 ` Steffen Klassert
[not found] ` <16101836.sTaxopCThb-PJstQz4BMNNP20K/wil9xYQuADTiUCJX@public.gmane.org>
2014-11-20 4:07 ` Herbert Xu
[not found] ` <20141120040748.GB28420-lOAM2aK0SrRLBo1qDEOMRrpzq4S04n8Q@public.gmane.org>
2014-11-20 4:14 ` Stephan Mueller
[not found] ` <1886827.KUDIsNtfuk-PJstQz4BMNNP20K/wil9xYQuADTiUCJX@public.gmane.org>
2014-11-20 4:18 ` Herbert Xu
[not found] ` <20141120041824.GA28612-lOAM2aK0SrRLBo1qDEOMRrpzq4S04n8Q@public.gmane.org>
2014-11-20 4:23 ` Stephan Mueller
[not found] ` <3538055.XH2t7HNEVj-PJstQz4BMNNP20K/wil9xYQuADTiUCJX@public.gmane.org>
2014-11-20 4:46 ` crypto: user - Allow get request with empty driver name Herbert Xu
2014-11-20 7:11 ` Steffen Klassert
2014-11-20 7:45 ` Herbert Xu
[not found] ` <20141120074526.GB29544-lOAM2aK0SrRLBo1qDEOMRrpzq4S04n8Q@public.gmane.org>
2014-11-20 8:04 ` Steffen Klassert
[not found] ` <20141120080406.GV6390-opNxpl+3fjRBDgjK7y7TUQ@public.gmane.org>
2014-11-20 13:07 ` Stephan Mueller
[not found] ` <20141120044650.GA28691-lOAM2aK0SrRLBo1qDEOMRrpzq4S04n8Q@public.gmane.org>
2014-11-20 13:02 ` Stephan Mueller
2014-11-20 13:10 ` Stephan Mueller
[not found] ` <2283056.kKnMJs7Bfg-PJstQz4BMNNP20K/wil9xYQuADTiUCJX@public.gmane.org>
2014-11-20 13:40 ` Herbert Xu
2014-11-20 16:08 ` Stephan Mueller
[not found] ` <6655976.jRkjMUZ19x-PJstQz4BMNNP20K/wil9xYQuADTiUCJX@public.gmane.org>
2014-11-21 2:31 ` Herbert Xu
[not found] ` <20141121023131.GA3873-lOAM2aK0SrRLBo1qDEOMRrpzq4S04n8Q@public.gmane.org>
2014-11-21 2:42 ` Stephan Mueller
2014-11-21 4:40 ` Stephan Mueller
2014-11-20 7:05 ` [PATCH v2 02/10] crypto: AF_ALG: user space interface for cipher info Steffen Klassert
2014-11-16 2:25 ` [PATCH v2 03/10] crypto: AF_ALG: extend data structures for AEAD Stephan Mueller
2014-11-16 2:25 ` Stephan Mueller [this message]
2014-11-16 2:26 ` [PATCH v2 05/10] crypto: AF_ALG: add AEAD support Stephan Mueller
[not found] ` <5365136.g8vbXlhRyC-PJstQz4BMNNP20K/wil9xYQuADTiUCJX@public.gmane.org>
2014-11-16 2:26 ` [PATCH v2 06/10] crypto: AF_ALG: make setkey optional Stephan Mueller
2014-11-18 14:10 ` Herbert Xu
2014-11-19 2:36 ` Stephan Mueller
2014-11-16 2:28 ` [PATCH v2 08/10] crypto: AF_ALG: enable RNG interface compilation Stephan Mueller
2014-11-16 2:27 ` [PATCH v2 07/10] crypto: AF_ALG: add random number generator support Stephan Mueller
2014-11-16 2:28 ` [PATCH v2 09/10] crypto: AF_ALG: user space interface for hash info Stephan Mueller
2014-11-16 2:29 ` [PATCH v2 10/10] crypto: AF_ALG: document the user space interface Stephan Mueller
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=8139637.nYqaFSIk4I@tachyon.chronox.de \
--to=smueller@chronox.de \
--cc=dborkman@redhat.com \
--cc=herbert@gondor.apana.org.au \
--cc=linux-api@vger.kernel.org \
--cc=linux-crypto@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=quentin.gouchet@gmail.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).