From: Ard Biesheuvel <ardb@kernel.org>
To: linux-crypto@vger.kernel.org
Cc: herbert@gondor.apana.org.au, ebiggers@kernel.org,
	Ard Biesheuvel <ardb@kernel.org>
Subject: [PATCH 3/3] crypto: tcrypt - include 1420 byte blocks in aead and skcipher benchmarks
Date: Mon,  9 Nov 2020 09:31:43 +0100
Message-ID: <20201109083143.2884-4-ardb@kernel.org>
In-Reply-To: <20201109083143.2884-1-ardb@kernel.org>

WireGuard and IPsec both typically operate on input blocks that are
~1420 bytes in size, given the default Ethernet MTU of 1500 bytes and
the overhead of the VPN metadata: WireGuard's default device MTU, for
instance, is 1420 bytes, i.e. 1500 minus 40 bytes of IPv6 header, 8
bytes of UDP header and 32 bytes of WireGuard framing.

Many aead and skcipher implementations are optimized for power-of-2
block sizes, and their performance on 1420 byte blocks cannot easily
be extrapolated from their performance on power-of-2 block sizes. So
let's add 1420 bytes explicitly, and round it up to the next multiple
of the block size of the algorithm in question if it does not support
1420 byte blocks.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
 crypto/tcrypt.c | 81 +++++++++++---------
 1 file changed, 44 insertions(+), 37 deletions(-)

diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index fc1f3e516694..a647bb298fbc 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -77,8 +77,8 @@ static const char *check[] = {
 	NULL
 };
 
-static u32 block_sizes[] = { 16, 64, 256, 1024, 1472, 8192, 0 };
-static u32 aead_sizes[] = { 16, 64, 256, 512, 1024, 2048, 4096, 8192, 0 };
+static const int block_sizes[] = { 16, 64, 256, 1024, 1420, 4096, 0 };
+static const int aead_sizes[] = { 16, 64, 256, 512, 1024, 1420, 4096, 8192, 0 };
 
 #define XBUFSIZE 8
 #define MAX_IVLEN 32
@@ -256,10 +256,10 @@ static void test_mb_aead_speed(const char *algo, int enc, int secs,
 	struct test_mb_aead_data *data;
 	struct crypto_aead *tfm;
 	unsigned int i, j, iv_len;
+	const int *b_size;
 	const char *key;
 	const char *e;
 	void *assoc;
-	u32 *b_size;
 	char *iv;
 	int ret;
 
@@ -337,15 +337,17 @@ static void test_mb_aead_speed(const char *algo, int enc, int secs,
 	do {
 		b_size = aead_sizes;
 		do {
-			if (*b_size + authsize > XBUFSIZE * PAGE_SIZE) {
+			int bs = round_up(*b_size, crypto_aead_blocksize(tfm));
+
+			if (bs + authsize > XBUFSIZE * PAGE_SIZE) {
 				pr_err("template (%u) too big for buffer (%lu)\n",
-				       authsize + *b_size,
+				       authsize + bs,
 				       XBUFSIZE * PAGE_SIZE);
 				goto out;
 			}
 
 			pr_info("test %u (%d bit key, %d byte blocks): ", i,
-				*keysize * 8, *b_size);
+				*keysize * 8, bs);
 
 			/* Set up tfm global state, i.e. the key */
 
@@ -380,11 +382,11 @@ static void test_mb_aead_speed(const char *algo, int enc, int secs,
 				memset(assoc, 0xff, aad_size);
 
 				sg_init_aead(cur->sg, cur->xbuf,
-					     *b_size + (enc ? 0 : authsize),
+					     bs + (enc ? 0 : authsize),
 					     assoc, aad_size);
 
 				sg_init_aead(cur->sgout, cur->xoutbuf,
-					     *b_size + (enc ? authsize : 0),
+					     bs + (enc ? authsize : 0),
 					     assoc, aad_size);
 
 				aead_request_set_ad(cur->req, aad_size);
@@ -394,7 +396,7 @@ static void test_mb_aead_speed(const char *algo, int enc, int secs,
 					aead_request_set_crypt(cur->req,
 							       cur->sgout,
 							       cur->sg,
-							       *b_size, iv);
+							       bs, iv);
 					ret = crypto_aead_encrypt(cur->req);
 					ret = do_one_aead_op(cur->req, ret);
 
@@ -406,18 +408,18 @@ static void test_mb_aead_speed(const char *algo, int enc, int secs,
 				}
 
 				aead_request_set_crypt(cur->req, cur->sg,
-						       cur->sgout, *b_size +
+						       cur->sgout, bs +
 						       (enc ? 0 : authsize),
 						       iv);
 
 			}
 
 			if (secs) {
-				ret = test_mb_aead_jiffies(data, enc, *b_size,
+				ret = test_mb_aead_jiffies(data, enc, bs,
 							   secs, num_mb);
 				cond_resched();
 			} else {
-				ret = test_mb_aead_cycles(data, enc, *b_size,
+				ret = test_mb_aead_cycles(data, enc, bs,
 							  num_mb);
 			}
 
@@ -534,7 +536,7 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
 	char *xbuf[XBUFSIZE];
 	char *xoutbuf[XBUFSIZE];
 	char *axbuf[XBUFSIZE];
-	unsigned int *b_size;
+	const int *b_size;
 	unsigned int iv_len;
 	struct crypto_wait wait;
 
@@ -590,12 +592,14 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
 	do {
 		b_size = aead_sizes;
 		do {
+			u32 bs = round_up(*b_size, crypto_aead_blocksize(tfm));
+
 			assoc = axbuf[0];
 			memset(assoc, 0xff, aad_size);
 
-			if ((*keysize + *b_size) > TVMEMSIZE * PAGE_SIZE) {
+			if ((*keysize + bs) > TVMEMSIZE * PAGE_SIZE) {
 				pr_err("template (%u) too big for tvmem (%lu)\n",
-				       *keysize + *b_size,
+				       *keysize + bs,
 					TVMEMSIZE * PAGE_SIZE);
 				goto out;
 			}
@@ -616,7 +620,7 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
 
 			crypto_aead_clear_flags(tfm, ~0);
 			printk(KERN_INFO "test %u (%d bit key, %d byte blocks): ",
-					i, *keysize * 8, *b_size);
+					i, *keysize * 8, bs);
 
 
 			memset(tvmem[0], 0xff, PAGE_SIZE);
@@ -627,11 +631,11 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
 				goto out;
 			}
 
-			sg_init_aead(sg, xbuf, *b_size + (enc ? 0 : authsize),
+			sg_init_aead(sg, xbuf, bs + (enc ? 0 : authsize),
 				     assoc, aad_size);
 
 			sg_init_aead(sgout, xoutbuf,
-				     *b_size + (enc ? authsize : 0), assoc,
+				     bs + (enc ? authsize : 0), assoc,
 				     aad_size);
 
 			aead_request_set_ad(req, aad_size);
@@ -644,7 +648,7 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
 				 * reversed (input <-> output) to calculate it
 				 */
 				aead_request_set_crypt(req, sgout, sg,
-						       *b_size, iv);
+						       bs, iv);
 				ret = do_one_aead_op(req,
 						     crypto_aead_encrypt(req));
 
@@ -656,15 +660,15 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
 			}
 
 			aead_request_set_crypt(req, sg, sgout,
-					       *b_size + (enc ? 0 : authsize),
+					       bs + (enc ? 0 : authsize),
 					       iv);
 
 			if (secs) {
-				ret = test_aead_jiffies(req, enc, *b_size,
+				ret = test_aead_jiffies(req, enc, bs,
 							secs);
 				cond_resched();
 			} else {
-				ret = test_aead_cycles(req, enc, *b_size);
+				ret = test_aead_cycles(req, enc, bs);
 			}
 
 			if (ret) {
@@ -1253,9 +1257,9 @@ static void test_mb_skcipher_speed(const char *algo, int enc, int secs,
 	struct test_mb_skcipher_data *data;
 	struct crypto_skcipher *tfm;
 	unsigned int i, j, iv_len;
+	const int *b_size;
 	const char *key;
 	const char *e;
-	u32 *b_size;
 	char iv[128];
 	int ret;
 
@@ -1316,14 +1320,16 @@ static void test_mb_skcipher_speed(const char *algo, int enc, int secs,
 	do {
 		b_size = block_sizes;
 		do {
-			if (*b_size > XBUFSIZE * PAGE_SIZE) {
+			u32 bs = round_up(*b_size, crypto_skcipher_blocksize(tfm));
+
+			if (bs > XBUFSIZE * PAGE_SIZE) {
 				pr_err("template (%u) too big for buffer (%lu)\n",
 				       *b_size, XBUFSIZE * PAGE_SIZE);
 				goto out;
 			}
 
 			pr_info("test %u (%d bit key, %d byte blocks): ", i,
-				*keysize * 8, *b_size);
+				*keysize * 8, bs);
 
 			/* Set up tfm global state, i.e. the key */
 
@@ -1353,7 +1359,7 @@ static void test_mb_skcipher_speed(const char *algo, int enc, int secs,
 
 			for (j = 0; j < num_mb; ++j) {
 				struct test_mb_skcipher_data *cur = &data[j];
-				unsigned int k = *b_size;
+				unsigned int k = bs;
 				unsigned int pages = DIV_ROUND_UP(k, PAGE_SIZE);
 				unsigned int p = 0;
 
@@ -1377,12 +1383,12 @@ static void test_mb_skcipher_speed(const char *algo, int enc, int secs,
 
 			if (secs) {
 				ret = test_mb_acipher_jiffies(data, enc,
-							      *b_size, secs,
+							      bs, secs,
 							      num_mb);
 				cond_resched();
 			} else {
 				ret = test_mb_acipher_cycles(data, enc,
-							     *b_size, num_mb);
+							     bs, num_mb);
 			}
 
 			if (ret) {
@@ -1497,8 +1503,8 @@ static void test_skcipher_speed(const char *algo, int enc, unsigned int secs,
 	char iv[128];
 	struct skcipher_request *req;
 	struct crypto_skcipher *tfm;
+	const int *b_size;
 	const char *e;
-	u32 *b_size;
 
 	if (enc == ENCRYPT)
 		e = "encryption";
@@ -1533,17 +1539,18 @@ static void test_skcipher_speed(const char *algo, int enc, unsigned int secs,
 		b_size = block_sizes;
 
 		do {
+			u32 bs = round_up(*b_size, crypto_skcipher_blocksize(tfm));
 			struct scatterlist sg[TVMEMSIZE];
 
-			if ((*keysize + *b_size) > TVMEMSIZE * PAGE_SIZE) {
+			if ((*keysize + bs) > TVMEMSIZE * PAGE_SIZE) {
 				pr_err("template (%u) too big for "
-				       "tvmem (%lu)\n", *keysize + *b_size,
+				       "tvmem (%lu)\n", *keysize + bs,
 				       TVMEMSIZE * PAGE_SIZE);
 				goto out_free_req;
 			}
 
 			pr_info("test %u (%d bit key, %d byte blocks): ", i,
-				*keysize * 8, *b_size);
+				*keysize * 8, bs);
 
 			memset(tvmem[0], 0xff, PAGE_SIZE);
 
@@ -1565,7 +1572,7 @@ static void test_skcipher_speed(const char *algo, int enc, unsigned int secs,
 				goto out_free_req;
 			}
 
-			k = *keysize + *b_size;
+			k = *keysize + bs;
 			sg_init_table(sg, DIV_ROUND_UP(k, PAGE_SIZE));
 
 			if (k > PAGE_SIZE) {
@@ -1582,22 +1589,22 @@ static void test_skcipher_speed(const char *algo, int enc, unsigned int secs,
 				sg_set_buf(sg + j, tvmem[j], k);
 				memset(tvmem[j], 0xff, k);
 			} else {
-				sg_set_buf(sg, tvmem[0] + *keysize, *b_size);
+				sg_set_buf(sg, tvmem[0] + *keysize, bs);
 			}
 
 			iv_len = crypto_skcipher_ivsize(tfm);
 			if (iv_len)
 				memset(&iv, 0xff, iv_len);
 
-			skcipher_request_set_crypt(req, sg, sg, *b_size, iv);
+			skcipher_request_set_crypt(req, sg, sg, bs, iv);
 
 			if (secs) {
 				ret = test_acipher_jiffies(req, enc,
-							   *b_size, secs);
+							   bs, secs);
 				cond_resched();
 			} else {
 				ret = test_acipher_cycles(req, enc,
-							  *b_size);
+							  bs);
 			}
 
 			if (ret) {
-- 
2.17.1
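
For context, the size tables above are zero terminated and each speed
test walks them with a loop of this shape. A simplified sketch of the
pattern used in tcrypt.c, not the literal code:

	static const int block_sizes[] = { 16, 64, 256, 1024, 1420, 4096, 0 };

	const int *b_size = block_sizes;
	do {
		/* Pad sizes the algo cannot handle up to its block size. */
		u32 bs = round_up(*b_size, crypto_skcipher_blocksize(tfm));

		/* ... set up and time bs byte requests here ... */

		b_size++;
	} while (*b_size);

These paths are exercised by loading tcrypt with its mode= and sec=
module parameters; the mode numbers selecting the individual speed
tests are defined in the big switch statement in tcrypt.c and differ
between kernel versions.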

