From: Jakub Kicinski <kuba@kernel.org>
To: davem@davemloft.net
Cc: netdev@vger.kernel.org, edumazet@google.com, pabeni@redhat.com,
	john.fastabend@gmail.com, borisp@nvidia.com,
	linux-doc@vger.kernel.org, linux-kselftest@vger.kernel.org,
	maximmi@nvidia.com, Jakub Kicinski <kuba@kernel.org>
Subject: [PATCH net-next 2/5] tls: rx: support optimistic decrypt to user buffer with TLS 1.3
Date: Tue,  5 Jul 2022 16:59:23 -0700	[thread overview]
Message-ID: <20220705235926.1035407-3-kuba@kernel.org> (raw)
In-Reply-To: <20220705235926.1035407-1-kuba@kernel.org>

We currently don't support decrypting into a user buffer with
TLS 1.3 because we don't know the record type or how much padding
the record contains before decryption. In practice data records
are by far the most common and padding is rarely used, so we can
optimistically assume a data record with no padding, and if that
turns out not to be the case - retry the crypto in place (decrypt
to the skb).
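
For reference, RFC 8446 (section 5.2) lays the inner plaintext out
as content || type || zero padding, so the real record type is the
last non-zero byte. A minimal flat-buffer sketch of that backward
scan (illustrative only - the in-tree helper walks the skb with
skb_copy_bits() instead, and tls13_padding_len() here is not a
real function):

	/* Sketch: find the TLS 1.3 record type and the number of
	 * zero-padding bytes in a decrypted inner plaintext.
	 * Returns the padding length and stores the type, or
	 * -EBADMSG if the record contains no type byte at all.
	 */
	static int tls13_padding_len(const u8 *buf, int len, u8 *type)
	{
		int pad = 0;

		while (len > 0 && buf[len - 1] == 0) {
			pad++;			/* skip zero padding */
			len--;
		}
		if (!len)
			return -EBADMSG;	/* all zeros: malformed */
		*type = buf[len - 1];		/* last non-zero byte */
		return pad;
	}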

To safeguard against the user overwriting the content type and
padding before we can check them, attach a 1B sg entry of kernel
memory where the last byte of the record will land.
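
In scatterlist terms the safeguard looks roughly like the sketch
below (illustrative; n_data stands for the number of user pages
mapped by tls_setup_from_iter(), and tail is a 1B kernel buffer
carved out of the same allocation as aad and iv):

	/* User pages occupy sgout[1]..sgout[n_data]; re-open the
	 * list and append one kernel-owned byte so the record's
	 * last byte lands in `tail`, not in memory the user could
	 * rewrite before we check it.
	 */
	sg_unmark_end(&sgout[n_data]);
	sg_set_buf(&sgout[n_data + 1], tail, 1);
	sg_mark_end(&sgout[n_data + 1]);

After decryption darg->tail holds that byte; decrypt_skb_update()
falls back to the in-place path whenever it is not
TLS_RECORD_TYPE_DATA.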

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
---
 net/tls/tls_sw.c | 38 +++++++++++++++++++++++++++++---------
 1 file changed, 29 insertions(+), 9 deletions(-)

diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 7fcb54e43a08..2bac57684429 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -47,6 +47,7 @@
 struct tls_decrypt_arg {
 	bool zc;
 	bool async;
+	u8 tail;
 };
 
 noinline void tls_err_abort(struct sock *sk, int err)
@@ -133,7 +134,8 @@ static int skb_nsg(struct sk_buff *skb, int offset, int len)
         return __skb_nsg(skb, offset, len, 0);
 }
 
-static int padding_length(struct tls_prot_info *prot, struct sk_buff *skb)
+static int tls_padding_length(struct tls_prot_info *prot, struct sk_buff *skb,
+			      struct tls_decrypt_arg *darg)
 {
 	struct strp_msg *rxm = strp_msg(skb);
 	struct tls_msg *tlm = tls_msg(skb);
@@ -142,7 +144,7 @@ static int padding_length(struct tls_prot_info *prot, struct sk_buff *skb)
 	/* Determine zero-padding length */
 	if (prot->version == TLS_1_3_VERSION) {
 		int offset = rxm->full_len - TLS_TAG_SIZE - 1;
-		char content_type = 0;
+		char content_type = darg->zc ? darg->tail : 0;
 		int err;
 
 		while (content_type == 0) {
@@ -1418,17 +1420,18 @@ static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
 	struct strp_msg *rxm = strp_msg(skb);
 	struct tls_msg *tlm = tls_msg(skb);
 	int n_sgin, n_sgout, nsg, mem_size, aead_size, err, pages = 0;
+	u8 *aad, *iv, *tail, *mem = NULL;
 	struct aead_request *aead_req;
 	struct sk_buff *unused;
-	u8 *aad, *iv, *mem = NULL;
 	struct scatterlist *sgin = NULL;
 	struct scatterlist *sgout = NULL;
 	const int data_len = rxm->full_len - prot->overhead_size;
+	int tail_pages = !!prot->tail_size;
 	int iv_offset = 0;
 
 	if (darg->zc && (out_iov || out_sg)) {
 		if (out_iov)
-			n_sgout = 1 +
+			n_sgout = 1 + tail_pages +
 				iov_iter_npages_cap(out_iov, INT_MAX, data_len);
 		else
 			n_sgout = sg_nents(out_sg);
@@ -1452,9 +1455,10 @@ static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
 	mem_size = aead_size + (nsg * sizeof(struct scatterlist));
 	mem_size = mem_size + prot->aad_size;
 	mem_size = mem_size + MAX_IV_SIZE;
+	mem_size = mem_size + prot->tail_size;
 
 	/* Allocate a single block of memory which contains
-	 * aead_req || sgin[] || sgout[] || aad || iv.
+	 * aead_req || sgin[] || sgout[] || aad || iv || tail.
 	 * This order achieves correct alignment for aead_req, sgin, sgout.
 	 */
 	mem = kmalloc(mem_size, sk->sk_allocation);
@@ -1467,6 +1471,7 @@ static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
 	sgout = sgin + n_sgin;
 	aad = (u8 *)(sgout + n_sgout);
 	iv = aad + prot->aad_size;
+	tail = iv + MAX_IV_SIZE;
 
 	/* For CCM based ciphers, first byte of nonce+iv is a constant */
 	switch (prot->cipher_type) {
@@ -1518,12 +1523,18 @@ static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
 			sg_init_table(sgout, n_sgout);
 			sg_set_buf(&sgout[0], aad, prot->aad_size);
 
-			err = tls_setup_from_iter(out_iov,
-						  data_len + prot->tail_size,
+			err = tls_setup_from_iter(out_iov, data_len,
 						  &pages, &sgout[1],
-						  (n_sgout - 1));
+						  (n_sgout - 1 - tail_pages));
 			if (err < 0)
 				goto fallback_to_reg_recv;
+
+			if (prot->tail_size) {
+				sg_unmark_end(&sgout[pages]);
+				sg_set_buf(&sgout[pages + 1], tail,
+					   prot->tail_size);
+				sg_mark_end(&sgout[pages + 1]);
+			}
 		} else if (out_sg) {
 			memcpy(sgout, out_sg, n_sgout * sizeof(*sgout));
 		} else {
@@ -1542,6 +1553,9 @@ static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
 	if (darg->async)
 		return 0;
 
+	if (prot->tail_size)
+		darg->tail = *tail;
+
 	/* Release the pages in case iov was mapped to pages */
 	for (; pages > 0; pages--)
 		put_page(sg_page(&sgout[pages]));
@@ -1583,9 +1597,15 @@ static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
 		return err;
 	if (darg->async)
 		goto decrypt_next;
+	/* If opportunistic TLS 1.3 ZC failed retry without ZC */
+	if (unlikely(darg->zc && prot->version == TLS_1_3_VERSION &&
+		     darg->tail != TLS_RECORD_TYPE_DATA)) {
+		darg->zc = false;
+		return decrypt_skb_update(sk, skb, dest, darg);
+	}
 
 decrypt_done:
-	pad = padding_length(prot, skb);
+	pad = tls_padding_length(prot, skb, darg);
 	if (pad < 0)
 		return pad;
 
-- 
2.36.1


Thread overview: 9+ messages
2022-07-05 23:59 [PATCH net-next 0/5] tls: rx: nopad and backlog flushing Jakub Kicinski
2022-07-05 23:59 ` [PATCH net-next 1/5] tls: rx: don't include tail size in data_len Jakub Kicinski
2022-07-05 23:59 ` Jakub Kicinski [this message]
2022-07-05 23:59 ` [PATCH net-next 3/5] tls: rx: add sockopt for enabling optimistic decrypt with TLS 1.3 Jakub Kicinski
2022-07-08 14:14   ` Maxim Mikityanskiy
2022-07-08 18:18     ` Jakub Kicinski
2022-07-05 23:59 ` [PATCH net-next 4/5] selftests: tls: add selftest variant for pad Jakub Kicinski
2022-07-05 23:59 ` [PATCH net-next 5/5] tls: rx: periodically flush socket backlog Jakub Kicinski
2022-07-06 12:10 ` [PATCH net-next 0/5] tls: rx: nopad and backlog flushing patchwork-bot+netdevbpf
