public inbox for netdev@vger.kernel.org
From: Dao Zhong Ma <cz1346219@gmail.com>
To: linux-kernel@vger.kernel.org, netdev@vger.kernel.org,
	antonio@openvpn.net
Cc: sd@queasysnail.net, andrew+netdev@lunn.ch, davem@davemloft.net,
	edumazet@google.com, kuba@kernel.org, pabeni@redhat.com,
	Dao Zhong Ma <cz1346219@gmail.com>
Subject: [PATCH 1/1] ovpn: tcp - defer TX from softirq to workqueue
Date: Fri,  1 May 2026 22:54:25 +0800
Message-ID: <20260501145425.757147-2-cz1346219@gmail.com>
In-Reply-To: <20260501145425.757147-1-cz1346219@gmail.com>

ovpn_tcp_send_skb() performs the full TCP send in softirq context while
holding sk->sk_lock.slock. With a large skb the spinlock can be held for
a long time, blocking lock_sock() users; this can starve the RCU GP
kthread and trigger RCU stall warnings and hung tasks.
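
For reference, the pre-patch path reduces to roughly the following
(a simplified sketch, identifiers as in drivers/net/ovpn/tcp.c,
queueing branch omitted):

	/* softirq context: full TCP send under the spinlock */
	spin_lock_nested(&sk->sk_lock.slock, OVPN_TCP_DEPTH_NESTING);
	if (!sock_owned_by_user(sk))
		ovpn_tcp_send_sock_skb(peer, sk, skb);
	spin_unlock(&sk->sk_lock.slock);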

Defer the TCP send operation to process context:
- In interrupt context, only enqueue the skb under the spinlock and
  schedule tcp_tx_work.
- In process context, dequeue and flush the send queue under
  lock_sock().

This reduces the softirq critical section to the enqueue itself,
allowing lock_sock() users to make progress and preventing RCU stalls.
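
In sketch form (simplified from the patch below; the RCU lookup of the
ovpn_socket and the NULL checks are omitted):

	/* softirq: only enqueue under the spinlock and kick the worker */
	spin_lock_nested(&sk->sk_lock.slock, OVPN_TCP_DEPTH_NESTING);
	queued = ovpn_tcp_queue_skb(peer, skb);
	spin_unlock(&sk->sk_lock.slock);
	if (queued)
		schedule_work(&sock->tcp_tx_work);

	/* workqueue: flush the queue while owning the socket */
	lock_sock(sock->sk);
	ovpn_tcp_tx_flush(sock->peer, sock->sk);
	release_sock(sock->sk);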

Signed-off-by: Dao Zhong Ma <cz1346219@gmail.com>
---
 drivers/net/ovpn/tcp.c | 77 ++++++++++++++++++++++++++++++-----------
 1 file changed, 57 insertions(+), 20 deletions(-)

diff --git a/drivers/net/ovpn/tcp.c b/drivers/net/ovpn/tcp.c
index 65054cc84be5..d75ad0c22a30 100644
--- a/drivers/net/ovpn/tcp.c
+++ b/drivers/net/ovpn/tcp.c
@@ -6,6 +6,7 @@
  *  Author:	Antonio Quartulli <antonio@openvpn.net>
  */

+#include <linux/interrupt.h>
 #include <linux/skbuff.h>
 #include <net/hotdata.h>
 #include <net/inet_common.h>
@@ -312,6 +313,40 @@ static void ovpn_tcp_send_sock(struct ovpn_peer *peer, struct sock *sk)
 	peer->tcp.tx_in_progress = false;
 }

+/* Caller must hold sk->sk_lock.slock. */
+static bool ovpn_tcp_queue_skb(struct ovpn_peer *peer, struct sk_buff *skb)
+{
+	if (skb_queue_len(&peer->tcp.out_queue) >=
+	    READ_ONCE(net_hotdata.max_backlog)) {
+		dev_dstats_tx_dropped(peer->ovpn->dev);
+		kfree_skb(skb);
+		return false;
+	}
+
+	__skb_queue_tail(&peer->tcp.out_queue, skb);
+	return true;
+}
+
+/* Caller must own the socket (lock_sock() or release_cb context). */
+static void ovpn_tcp_tx_flush(struct ovpn_peer *peer, struct sock *sk)
+{
+	struct sk_buff *skb;
+
+	if (peer->tcp.out_msg.skb)
+		ovpn_tcp_send_sock(peer, sk);
+
+	while (!peer->tcp.out_msg.skb) {
+		skb = __skb_dequeue(&peer->tcp.out_queue);
+		if (!skb)
+			break;
+
+		peer->tcp.out_msg.skb = skb;
+		peer->tcp.out_msg.len = skb->len;
+		peer->tcp.out_msg.offset = 0;
+		ovpn_tcp_send_sock(peer, sk);
+	}
+}
+
 void ovpn_tcp_tx_work(struct work_struct *work)
 {
 	struct ovpn_socket *sock;
@@ -320,7 +355,7 @@ void ovpn_tcp_tx_work(struct work_struct *work)

 	lock_sock(sock->sk);
 	if (sock->peer)
-		ovpn_tcp_send_sock(sock->peer, sock->sk);
+		ovpn_tcp_tx_flush(sock->peer, sock->sk);
 	release_sock(sock->sk);
 }

@@ -345,32 +380,38 @@ static void ovpn_tcp_send_sock_skb(struct ovpn_peer *peer, struct sock *sk,
 void ovpn_tcp_send_skb(struct ovpn_peer *peer, struct sock *sk,
 		       struct sk_buff *skb)
 {
+	struct ovpn_socket *sock;
 	u16 len = skb->len;
+	bool queued;

 	*(__be16 *)__skb_push(skb, sizeof(u16)) = htons(len);

-	spin_lock_nested(&sk->sk_lock.slock, OVPN_TCP_DEPTH_NESTING);
-	if (sock_owned_by_user(sk)) {
-		if (skb_queue_len(&peer->tcp.out_queue) >=
-		    READ_ONCE(net_hotdata.max_backlog)) {
-			dev_dstats_tx_dropped(peer->ovpn->dev);
-			kfree_skb(skb);
-			goto unlock;
-		}
-		__skb_queue_tail(&peer->tcp.out_queue, skb);
-	} else {
-		ovpn_tcp_send_sock_skb(peer, sk, skb);
+	if (unlikely(in_interrupt())) {
+		spin_lock_nested(&sk->sk_lock.slock, OVPN_TCP_DEPTH_NESTING);
+		queued = ovpn_tcp_queue_skb(peer, skb);
+		spin_unlock(&sk->sk_lock.slock);
+		if (!queued)
+			return;
+
+		rcu_read_lock();
+		sock = rcu_dereference_sk_user_data(sk);
+		if (sock)
+			schedule_work(&sock->tcp_tx_work);
+		rcu_read_unlock();
+		return;
 	}
-unlock:
-	spin_unlock(&sk->sk_lock.slock);
+
+	lock_sock_nested(sk, OVPN_TCP_DEPTH_NESTING);
+	queued = ovpn_tcp_queue_skb(peer, skb);
+	if (queued)
+		ovpn_tcp_tx_flush(peer, sk);
+	release_sock(sk);
 }

 static void ovpn_tcp_release(struct sock *sk)
 {
-	struct sk_buff_head queue;
 	struct ovpn_socket *sock;
 	struct ovpn_peer *peer;
-	struct sk_buff *skb;

 	rcu_read_lock();
 	sock = rcu_dereference_sk_user_data(sk);
@@ -390,11 +431,7 @@ static void ovpn_tcp_release(struct sock *sk)
 	}
 	rcu_read_unlock();

-	__skb_queue_head_init(&queue);
-	skb_queue_splice_init(&peer->tcp.out_queue, &queue);
-
-	while ((skb = __skb_dequeue(&queue)))
-		ovpn_tcp_send_sock_skb(peer, sk, skb);
+	ovpn_tcp_tx_flush(peer, sk);

 	peer->tcp.sk_cb.prot->release_cb(sk);
 	ovpn_peer_put(peer);
--
2.54.0


Thread overview: 4+ messages
2026-05-01 14:54 [PATCH 0/1] ovpn: tcp - defer TX from softirq to workqueue Dao Zhong Ma
2026-05-01 14:54 ` Dao Zhong Ma [this message]
2026-05-01 23:12   ` [PATCH 1/1] " Jakub Kicinski
2026-05-03 21:14   ` Antonio Quartulli
