From: Rishikesh Jethwani <rjethwani@purestorage.com>
To: netdev@vger.kernel.org
Cc: saeedm@nvidia.com, tariqt@nvidia.com, mbloch@nvidia.com,
borisp@nvidia.com, john.fastabend@gmail.com, kuba@kernel.org,
sd@queasysnail.net, davem@davemloft.net, pabeni@redhat.com,
edumazet@google.com, leon@kernel.org,
Rishikesh Jethwani <rjethwani@purestorage.com>
Subject: [PATCH v14 5/9] tls: prep helpers and refactors for HW offload KeyUpdate
Date: Fri, 15 May 2026 15:27:11 -0600 [thread overview]
Message-ID: <20260515212715.3151307-6-rjethwani@purestorage.com> (raw)
In-Reply-To: <20260515212715.3151307-1-rjethwani@purestorage.com>
Preparatory refactors for TX and RX HW rekey support; no functional
change intended. (The start-marker insertion switches from
list_add_tail() to list_add_tail_rcu(), which only adds an RCU publish
barrier on the insertion.)
- Hoist cipher_context / tls_crypto_context above
tls_offload_context_tx so they can be embedded in offload
contexts.
- Add a tls_tx_cipher_ctx() accessor and factor out tls_sw_ctx_tx_init()
  so the TX path can redirect to a temporary SW context during
  rekey.
- Split tls_set_device_offload() into a dispatcher and
tls_set_device_offload_initial(); a _rekey() sibling follows.
- Factor out tls_device_dev_add_tx() and tls_device_commit_start_marker()
  so the rekey completion path can reuse them.
- Move crypto_aead_setauthsize() into the !*aead block so a fresh
AEAD is correctly configured when RX HW rekey allocates one.
Signed-off-by: Rishikesh Jethwani <rjethwani@purestorage.com>
---
include/net/tls.h | 38 +++++++-----
net/tls/tls.h | 1 +
net/tls/tls_device.c | 139 ++++++++++++++++++++++++++-----------------
net/tls/tls_sw.c | 33 +++++-----
4 files changed, 127 insertions(+), 84 deletions(-)
diff --git a/include/net/tls.h b/include/net/tls.h
index ebd2550280ae..2512a3799b21 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -151,6 +151,22 @@ struct tls_record_info {
skb_frag_t frags[MAX_SKB_FRAGS];
};
+struct cipher_context {
+ char iv[TLS_MAX_IV_SIZE + TLS_MAX_SALT_SIZE];
+ char rec_seq[TLS_MAX_REC_SEQ_SIZE];
+};
+
+union tls_crypto_context {
+ struct tls_crypto_info info;
+ union {
+ struct tls12_crypto_info_aes_gcm_128 aes_gcm_128;
+ struct tls12_crypto_info_aes_gcm_256 aes_gcm_256;
+ struct tls12_crypto_info_chacha20_poly1305 chacha20_poly1305;
+ struct tls12_crypto_info_sm4_gcm sm4_gcm;
+ struct tls12_crypto_info_sm4_ccm sm4_ccm;
+ };
+};
+
#define TLS_DRIVER_STATE_SIZE_TX 16
struct tls_offload_context_tx {
struct crypto_aead *aead_send;
@@ -191,22 +207,6 @@ enum tls_context_flags {
TLS_RX_DEV_CLOSED = 2,
};
-struct cipher_context {
- char iv[TLS_MAX_IV_SIZE + TLS_MAX_SALT_SIZE];
- char rec_seq[TLS_MAX_REC_SEQ_SIZE];
-};
-
-union tls_crypto_context {
- struct tls_crypto_info info;
- union {
- struct tls12_crypto_info_aes_gcm_128 aes_gcm_128;
- struct tls12_crypto_info_aes_gcm_256 aes_gcm_256;
- struct tls12_crypto_info_chacha20_poly1305 chacha20_poly1305;
- struct tls12_crypto_info_sm4_gcm sm4_gcm;
- struct tls12_crypto_info_sm4_ccm sm4_ccm;
- };
-};
-
struct tls_prot_info {
u16 version;
u16 cipher_type;
@@ -388,6 +388,12 @@ static inline struct tls_sw_context_tx *tls_sw_ctx_tx(
return (struct tls_sw_context_tx *)tls_ctx->priv_ctx_tx;
}
+static inline struct cipher_context *tls_tx_cipher_ctx(
+ const struct tls_context *tls_ctx)
+{
+ return (struct cipher_context *)&tls_ctx->tx;
+}
+
static inline struct tls_offload_context_tx *
tls_offload_ctx_tx(const struct tls_context *tls_ctx)
{
diff --git a/net/tls/tls.h b/net/tls/tls.h
index 44bedb0dfdda..cd992fc161e5 100644
--- a/net/tls/tls.h
+++ b/net/tls/tls.h
@@ -157,6 +157,7 @@ void tls_update_rx_zc_capable(struct tls_context *tls_ctx);
void tls_sw_strparser_arm(struct sock *sk, struct tls_context *ctx);
void tls_sw_strparser_done(struct tls_context *tls_ctx);
int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
+void tls_sw_ctx_tx_init(struct sock *sk, struct tls_sw_context_tx *sw_ctx);
void tls_sw_splice_eof(struct socket *sock);
void tls_sw_cancel_work_tx(struct tls_context *tls_ctx);
void tls_sw_release_resources_tx(struct sock *sk);
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index f22f8a550c82..7a98d2f6cbd3 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -138,6 +138,41 @@ static struct net_device *get_netdev_for_sock(struct sock *sk)
return lowest_dev;
}
+static int tls_device_dev_add_tx(struct sock *sk, struct net_device *netdev,
+ struct tls_crypto_info *crypto_info,
+ u32 write_seq)
+{
+ const struct tls_cipher_desc *cipher_desc;
+ char *rec_seq;
+ int rc;
+
+ cipher_desc = get_cipher_desc(crypto_info->cipher_type);
+ DEBUG_NET_WARN_ON_ONCE(!cipher_desc || !cipher_desc->offloadable);
+
+ rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_TX,
+ crypto_info, write_seq);
+ rec_seq = crypto_info_rec_seq(crypto_info, cipher_desc);
+ trace_tls_device_offload_set(sk, TLS_OFFLOAD_CTX_DIR_TX,
+ write_seq, rec_seq, rc);
+ return rc;
+}
+
+static void tls_device_commit_start_marker(struct sock *sk,
+ struct tls_offload_context_tx *offload_ctx,
+ struct tls_record_info *start_marker_record)
+{
+ start_marker_record->end_seq = tcp_sk(sk)->write_seq;
+ start_marker_record->len = 0;
+ start_marker_record->num_frags = 0;
+ list_add_tail_rcu(&start_marker_record->list, &offload_ctx->records_list);
+
+ /* TLS offload is greatly simplified if we don't send
+ * SKBs where only part of the payload needs to be encrypted.
+ * So mark the last skb in the write queue as end of record.
+ */
+ tcp_write_collapse_fence(sk);
+}
+
static void destroy_record(struct tls_record_info *record)
{
int i;
@@ -1068,57 +1103,31 @@ static struct tls_offload_context_tx *alloc_offload_ctx_tx(struct tls_context *c
return offload_ctx;
}
-int tls_set_device_offload(struct sock *sk)
+static int tls_set_device_offload_initial(struct sock *sk,
+ struct tls_context *ctx,
+ struct net_device *netdev,
+ struct tls_crypto_info *crypto_info,
+ const struct tls_cipher_desc *cipher_desc)
{
+ struct tls_prot_info *prot = &ctx->prot_info;
struct tls_record_info *start_marker_record;
struct tls_offload_context_tx *offload_ctx;
- const struct tls_cipher_desc *cipher_desc;
- struct tls_crypto_info *crypto_info;
- struct tls_prot_info *prot;
- struct net_device *netdev;
- struct tls_context *ctx;
char *iv, *rec_seq;
int rc;
- ctx = tls_get_ctx(sk);
- prot = &ctx->prot_info;
-
- if (ctx->priv_ctx_tx)
- return -EEXIST;
-
- netdev = get_netdev_for_sock(sk);
- if (!netdev) {
- pr_err_ratelimited("%s: netdev not found\n", __func__);
- return -EINVAL;
- }
-
- if (!(netdev->features & NETIF_F_HW_TLS_TX)) {
- rc = -EOPNOTSUPP;
- goto release_netdev;
- }
-
- crypto_info = &ctx->crypto_send.info;
- cipher_desc = get_cipher_desc(crypto_info->cipher_type);
- if (!cipher_desc || !cipher_desc->offloadable) {
- rc = -EINVAL;
- goto release_netdev;
- }
+ iv = crypto_info_iv(crypto_info, cipher_desc);
+ rec_seq = crypto_info_rec_seq(crypto_info, cipher_desc);
rc = init_prot_info(prot, crypto_info, cipher_desc);
if (rc)
- goto release_netdev;
-
- iv = crypto_info_iv(crypto_info, cipher_desc);
- rec_seq = crypto_info_rec_seq(crypto_info, cipher_desc);
+ return rc;
memcpy(ctx->tx.iv + cipher_desc->salt, iv, cipher_desc->iv);
memcpy(ctx->tx.rec_seq, rec_seq, cipher_desc->rec_seq);
start_marker_record = kmalloc_obj(*start_marker_record);
- if (!start_marker_record) {
- rc = -ENOMEM;
- goto release_netdev;
- }
+ if (!start_marker_record)
+ return -ENOMEM;
offload_ctx = alloc_offload_ctx_tx(ctx);
if (!offload_ctx) {
@@ -1130,20 +1139,11 @@ int tls_set_device_offload(struct sock *sk)
if (rc)
goto free_offload_ctx;
- start_marker_record->end_seq = tcp_sk(sk)->write_seq;
- start_marker_record->len = 0;
- start_marker_record->num_frags = 0;
- list_add_tail(&start_marker_record->list, &offload_ctx->records_list);
+ tls_device_commit_start_marker(sk, offload_ctx, start_marker_record);
clean_acked_data_enable(tcp_sk(sk), &tls_tcp_clean_acked);
ctx->push_pending_record = tls_device_push_pending_record;
- /* TLS offload is greatly simplified if we don't send
- * SKBs where only part of the payload needs to be encrypted.
- * So mark the last skb in the write queue as end of record.
- */
- tcp_write_collapse_fence(sk);
-
/* Avoid offloading if the device is down
* We don't want to offload new flows after
* the NETDEV_DOWN event
@@ -1159,11 +1159,8 @@ int tls_set_device_offload(struct sock *sk)
}
ctx->priv_ctx_tx = offload_ctx;
- rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_TX,
- &ctx->crypto_send.info,
- tcp_sk(sk)->write_seq);
- trace_tls_device_offload_set(sk, TLS_OFFLOAD_CTX_DIR_TX,
- tcp_sk(sk)->write_seq, rec_seq, rc);
+ rc = tls_device_dev_add_tx(sk, netdev, crypto_info,
+ tcp_sk(sk)->write_seq);
if (rc)
goto release_lock;
@@ -1175,7 +1172,6 @@ int tls_set_device_offload(struct sock *sk)
* by the netdev's xmit function.
*/
smp_store_release(&sk->sk_validate_xmit_skb, tls_validate_xmit_skb);
- dev_put(netdev);
return 0;
@@ -1188,6 +1184,43 @@ int tls_set_device_offload(struct sock *sk)
ctx->priv_ctx_tx = NULL;
free_marker_record:
kfree(start_marker_record);
+ return rc;
+}
+
+int tls_set_device_offload(struct sock *sk)
+{
+ const struct tls_cipher_desc *cipher_desc;
+ struct tls_crypto_info *crypto_info;
+ struct net_device *netdev;
+ struct tls_context *ctx;
+ int rc;
+
+ ctx = tls_get_ctx(sk);
+
+ if (ctx->priv_ctx_tx)
+ return -EEXIST;
+
+ netdev = get_netdev_for_sock(sk);
+ if (!netdev) {
+ pr_err_ratelimited("%s: netdev not found\n", __func__);
+ return -EINVAL;
+ }
+
+ if (!(netdev->features & NETIF_F_HW_TLS_TX)) {
+ rc = -EOPNOTSUPP;
+ goto release_netdev;
+ }
+
+ crypto_info = &ctx->crypto_send.info;
+ cipher_desc = get_cipher_desc(crypto_info->cipher_type);
+ if (!cipher_desc || !cipher_desc->offloadable) {
+ rc = -EINVAL;
+ goto release_netdev;
+ }
+
+ rc = tls_set_device_offload_initial(sk, ctx, netdev, crypto_info,
+ cipher_desc);
+
release_netdev:
dev_put(netdev);
return rc;
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index dd8e88cc2a36..434d68cbbd20 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -555,11 +555,11 @@ static int tls_do_encryption(struct sock *sk,
break;
}
- memcpy(&rec->iv_data[iv_offset], tls_ctx->tx.iv,
+ memcpy(&rec->iv_data[iv_offset], tls_tx_cipher_ctx(tls_ctx)->iv,
prot->iv_size + prot->salt_size);
tls_xor_iv_with_seq(prot, rec->iv_data + iv_offset,
- tls_ctx->tx.rec_seq);
+ tls_tx_cipher_ctx(tls_ctx)->rec_seq);
sge->offset += prot->prepend_size;
sge->length -= prot->prepend_size;
@@ -610,7 +610,7 @@ static int tls_do_encryption(struct sock *sk,
/* Unhook the record from context if encryption is not failure */
ctx->open_rec = NULL;
- tls_advance_record_sn(sk, prot, &tls_ctx->tx);
+ tls_advance_record_sn(sk, prot, tls_tx_cipher_ctx(tls_ctx));
return rc;
}
@@ -827,7 +827,7 @@ static int tls_push_record(struct sock *sk, int flags,
sg_chain(rec->sg_aead_out, 2, &msg_en->sg.data[i]);
tls_make_aad(rec->aad_space, msg_pl->sg.size + prot->tail_size,
- tls_ctx->tx.rec_seq, record_type, prot);
+ tls_tx_cipher_ctx(tls_ctx)->rec_seq, record_type, prot);
tls_fill_prepend(tls_ctx,
page_address(sg_page(&msg_en->sg.data[i])) +
@@ -2677,6 +2677,15 @@ static void tx_work_handler(struct work_struct *work)
}
}
+void tls_sw_ctx_tx_init(struct sock *sk, struct tls_sw_context_tx *sw_ctx)
+{
+ crypto_init_wait(&sw_ctx->async_wait);
+ atomic_set(&sw_ctx->encrypt_pending, 1);
+ INIT_LIST_HEAD(&sw_ctx->tx_list);
+ INIT_DELAYED_WORK(&sw_ctx->tx_work.work, tx_work_handler);
+ sw_ctx->tx_work.sk = sk;
+}
+
static bool tls_is_tx_ready(struct tls_sw_context_tx *ctx)
{
struct tls_rec *rec;
@@ -2728,11 +2737,7 @@ static struct tls_sw_context_tx *init_ctx_tx(struct tls_context *ctx, struct soc
sw_ctx_tx = ctx->priv_ctx_tx;
}
- crypto_init_wait(&sw_ctx_tx->async_wait);
- atomic_set(&sw_ctx_tx->encrypt_pending, 1);
- INIT_LIST_HEAD(&sw_ctx_tx->tx_list);
- INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler);
- sw_ctx_tx->tx_work.sk = sk;
+ tls_sw_ctx_tx_init(sk, sw_ctx_tx);
return sw_ctx_tx;
}
@@ -2859,6 +2864,10 @@ int tls_sw_ctx_init(struct sock *sk, int tx,
*aead = NULL;
goto free_priv;
}
+
+ rc = crypto_aead_setauthsize(*aead, prot->tag_size);
+ if (rc)
+ goto free_aead;
}
ctx->push_pending_record = tls_sw_push_pending_record;
@@ -2875,12 +2884,6 @@ int tls_sw_ctx_init(struct sock *sk, int tx,
goto free_aead;
}
- if (!new_crypto_info) {
- rc = crypto_aead_setauthsize(*aead, prot->tag_size);
- if (rc)
- goto free_aead;
- }
-
if (!tx && !new_crypto_info) {
tfm = crypto_aead_tfm(sw_ctx_rx->aead_recv);
--
2.25.1
next prev parent reply other threads:[~2026-05-15 21:28 UTC|newest]
Thread overview: 10+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-05-15 21:27 [PATCH net-next v14 0/9] tls: Add TLS 1.3 hardware offload support Rishikesh Jethwani
2026-05-15 21:27 ` [PATCH v14 1/9] net: tls: reject TLS 1.3 offload in chcr_ktls and nfp drivers Rishikesh Jethwani
2026-05-15 21:27 ` [PATCH v14 2/9] net/mlx5e: add TLS 1.3 hardware offload support Rishikesh Jethwani
2026-05-15 21:27 ` [PATCH v14 3/9] tls: " Rishikesh Jethwani
2026-05-15 21:27 ` [PATCH v14 4/9] tls: split tls_set_sw_offload into init and finalize stages Rishikesh Jethwani
2026-05-15 21:27 ` Rishikesh Jethwani [this message]
2026-05-15 21:27 ` [PATCH v14 6/9] tls: device: add TX KeyUpdate support Rishikesh Jethwani
2026-05-15 21:27 ` [PATCH v14 7/9] tls: device: add RX " Rishikesh Jethwani
2026-05-15 21:27 ` [PATCH v14 8/9] tls: device: add tracepoints for RX KeyUpdate path Rishikesh Jethwani
2026-05-15 21:27 ` [PATCH v14 9/9] selftests: net: add TLS hardware offload test Rishikesh Jethwani
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260515212715.3151307-6-rjethwani@purestorage.com \
--to=rjethwani@purestorage.com \
--cc=borisp@nvidia.com \
--cc=davem@davemloft.net \
--cc=edumazet@google.com \
--cc=john.fastabend@gmail.com \
--cc=kuba@kernel.org \
--cc=leon@kernel.org \
--cc=mbloch@nvidia.com \
--cc=netdev@vger.kernel.org \
--cc=pabeni@redhat.com \
--cc=saeedm@nvidia.com \
--cc=sd@queasysnail.net \
--cc=tariqt@nvidia.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox