* [Patch bpf v2] sock_map: move cancel_work_sync() out of sock lock
From: Cong Wang @ 2022-11-02  4:34 UTC
  To: netdev; +Cc: bpf, Cong Wang, Stanislav Fomichev, John Fastabend,
	Jakub Sitnicki

From: Cong Wang <cong.wang@bytedance.com>

Stanislav reported a lockdep warning, triggered by calling
cancel_work_sync() under the sock lock in sock_map_close(), as
analyzed below by Jakub:

psock->work.func = sk_psock_backlog()
  ACQUIRE psock->work_mutex
    sk_psock_handle_skb()
      skb_send_sock()
        __skb_send_sock()
          sendpage_unlocked()
            kernel_sendpage()
              sock->ops->sendpage = inet_sendpage()
                sk->sk_prot->sendpage = tcp_sendpage()
                  ACQUIRE sk->sk_lock
                    tcp_sendpage_locked()
                  RELEASE sk->sk_lock
  RELEASE psock->work_mutex

sock_map_close()
  ACQUIRE sk->sk_lock
  sk_psock_stop()
    sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED)
    cancel_work_sync()
      __cancel_work_timer()
        __flush_work()
          // wait for psock->work to finish
  RELEASE sk->sk_lock

We can move cancel_work_sync() out of the sock lock protection,
while still calling it before saved_close().
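
The underlying pattern is easy to reproduce in a minimal userspace
analogue (pthreads, not kernel code; the names below are hypothetical
and only mirror the roles in the traces above): the close path holds
the lock while waiting for a worker that itself needs that lock.

#include <pthread.h>

static pthread_mutex_t sk_lock = PTHREAD_MUTEX_INITIALIZER;

/* Plays the role of sk_psock_backlog(): the work item ends up
 * taking sk_lock, as tcp_sendpage() does in the trace above.
 */
static void *backlog_worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&sk_lock);
	/* ... transmit queued data ... */
	pthread_mutex_unlock(&sk_lock);
	return NULL;
}

int main(void)
{
	pthread_t worker;

	pthread_mutex_lock(&sk_lock);	/* sock_map_close(): lock_sock(sk) */
	pthread_create(&worker, NULL, backlog_worker, NULL);
	pthread_join(worker, NULL);	/* cancel_work_sync(): waits for a
					 * worker that is blocked on sk_lock,
					 * so neither side can make progress */
	pthread_mutex_unlock(&sk_lock);
	return 0;
}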

Fixes: 799aa7f98d53 ("skmsg: Avoid lock_sock() in sk_psock_backlog()")
Reported-by: Stanislav Fomichev <sdf@google.com>
Cc: John Fastabend <john.fastabend@gmail.com>
Cc: Jakub Sitnicki <jakub@cloudflare.com>
Signed-off-by: Cong Wang <cong.wang@bytedance.com>
---
 include/linux/skmsg.h | 2 +-
 net/core/skmsg.c      | 7 ++-----
 net/core/sock_map.c   | 7 ++++---
 3 files changed, 7 insertions(+), 9 deletions(-)

diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
index 48f4b645193b..70d6cb94e580 100644
--- a/include/linux/skmsg.h
+++ b/include/linux/skmsg.h
@@ -376,7 +376,7 @@ static inline void sk_psock_report_error(struct sk_psock *psock, int err)
 }
 
 struct sk_psock *sk_psock_init(struct sock *sk, int node);
-void sk_psock_stop(struct sk_psock *psock, bool wait);
+void sk_psock_stop(struct sk_psock *psock);
 
 #if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
 int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock);
diff --git a/net/core/skmsg.c b/net/core/skmsg.c
index 1efdc47a999b..e6b9ced3eda8 100644
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -803,16 +803,13 @@ static void sk_psock_link_destroy(struct sk_psock *psock)
 	}
 }
 
-void sk_psock_stop(struct sk_psock *psock, bool wait)
+void sk_psock_stop(struct sk_psock *psock)
 {
 	spin_lock_bh(&psock->ingress_lock);
 	sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
 	sk_psock_cork_free(psock);
 	__sk_psock_zap_ingress(psock);
 	spin_unlock_bh(&psock->ingress_lock);
-
-	if (wait)
-		cancel_work_sync(&psock->work);
 }
 
 static void sk_psock_done_strp(struct sk_psock *psock);
@@ -850,7 +847,7 @@ void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
 		sk_psock_stop_verdict(sk, psock);
 	write_unlock_bh(&sk->sk_callback_lock);
 
-	sk_psock_stop(psock, false);
+	sk_psock_stop(psock);
 
 	INIT_RCU_WORK(&psock->rwork, sk_psock_destroy);
 	queue_rcu_work(system_wq, &psock->rwork);
diff --git a/net/core/sock_map.c b/net/core/sock_map.c
index a660baedd9e7..81beb16ab1eb 100644
--- a/net/core/sock_map.c
+++ b/net/core/sock_map.c
@@ -1596,7 +1596,7 @@ void sock_map_destroy(struct sock *sk)
 	saved_destroy = psock->saved_destroy;
 	sock_map_remove_links(sk, psock);
 	rcu_read_unlock();
-	sk_psock_stop(psock, false);
+	sk_psock_stop(psock);
 	sk_psock_put(sk, psock);
 	saved_destroy(sk);
 }
@@ -1619,9 +1619,10 @@ void sock_map_close(struct sock *sk, long timeout)
 	saved_close = psock->saved_close;
 	sock_map_remove_links(sk, psock);
 	rcu_read_unlock();
-	sk_psock_stop(psock, true);
-	sk_psock_put(sk, psock);
+	sk_psock_stop(psock);
 	release_sock(sk);
+	cancel_work_sync(&psock->work);
+	sk_psock_put(sk, psock);
 	saved_close(sk, timeout);
 }
 EXPORT_SYMBOL_GPL(sock_map_close);
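
For reference, this is the resulting flow of sock_map_close() after the
patch, reconstructed from the diff above; the comments are one reading
of why each step sits where it does, not authoritative documentation:

	lock_sock(sk);
	...
	sk_psock_stop(psock);		/* clears SK_PSOCK_TX_ENABLED under
					 * ingress_lock, so the backlog work
					 * will not be scheduled again */
	release_sock(sk);		/* drop sk_lock before waiting ... */
	cancel_work_sync(&psock->work);	/* ... so a running sk_psock_backlog()
					 * can take sk_lock and complete */
	sk_psock_put(sk, psock);
	saved_close(sk, timeout);	/* the work is finished before the
					 * original close callback runs */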
-- 
2.34.1



* RE: [Patch bpf v2] sock_map: move cancel_work_sync() out of sock lock
From: John Fastabend @ 2022-11-03  5:41 UTC
  To: Cong Wang, netdev
  Cc: bpf, Cong Wang, Stanislav Fomichev, John Fastabend,
	Jakub Sitnicki

Cong Wang wrote:
> From: Cong Wang <cong.wang@bytedance.com>
> 
> Stanislav reported a lockdep warning, triggered by calling
> cancel_work_sync() under the sock lock in sock_map_close(), as
> analyzed below by Jakub:
> 
> psock->work.func = sk_psock_backlog()
>   ACQUIRE psock->work_mutex
>     sk_psock_handle_skb()
>       skb_send_sock()
>         __skb_send_sock()
>           sendpage_unlocked()
>             kernel_sendpage()
>               sock->ops->sendpage = inet_sendpage()
>                 sk->sk_prot->sendpage = tcp_sendpage()
>                   ACQUIRE sk->sk_lock
>                     tcp_sendpage_locked()
>                   RELEASE sk->sk_lock
>   RELEASE psock->work_mutex
> 
> sock_map_close()
>   ACQUIRE sk->sk_lock
>   sk_psock_stop()
>     sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED)
>     cancel_work_sync()
>       __cancel_work_timer()
>         __flush_work()
>           // wait for psock->work to finish
>   RELEASE sk->sk_lock
> 
> We can move cancel_work_sync() out of the sock lock protection,
> while still calling it before saved_close().
> 
> Fixes: 799aa7f98d53 ("skmsg: Avoid lock_sock() in sk_psock_backlog()")
> Reported-by: Stanislav Fomichev <sdf@google.com>
> Cc: John Fastabend <john.fastabend@gmail.com>
> Cc: Jakub Sitnicki <jakub@cloudflare.com>
> Signed-off-by: Cong Wang <cong.wang@bytedance.com>
> ---

LGTM. Thanks.

Acked-by: John Fastabend <john.fastabend@gmail.com>


* Re: [Patch bpf v2] sock_map: move cancel_work_sync() out of sock lock
From: Jakub Sitnicki @ 2022-11-03 12:37 UTC
  To: Cong Wang; +Cc: netdev, bpf, Cong Wang, Stanislav Fomichev, John Fastabend

On Tue, Nov 01, 2022 at 09:34 PM -07, Cong Wang wrote:
> From: Cong Wang <cong.wang@bytedance.com>
>
> Stanislav reported a lockdep warning, triggered by calling
> cancel_work_sync() under the sock lock in sock_map_close(), as
> analyzed below by Jakub:
>
> psock->work.func = sk_psock_backlog()
>   ACQUIRE psock->work_mutex
>     sk_psock_handle_skb()
>       skb_send_sock()
>         __skb_send_sock()
>           sendpage_unlocked()
>             kernel_sendpage()
>               sock->ops->sendpage = inet_sendpage()
>                 sk->sk_prot->sendpage = tcp_sendpage()
>                   ACQUIRE sk->sk_lock
>                     tcp_sendpage_locked()
>                   RELEASE sk->sk_lock
>   RELEASE psock->work_mutex
>
> sock_map_close()
>   ACQUIRE sk->sk_lock
>   sk_psock_stop()
>     sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED)
>     cancel_work_sync()
>       __cancel_work_timer()
>         __flush_work()
>           // wait for psock->work to finish
>   RELEASE sk->sk_lock
>
> We can move cancel_work_sync() out of the sock lock protection,
> while still calling it before saved_close().
>
> Fixes: 799aa7f98d53 ("skmsg: Avoid lock_sock() in sk_psock_backlog()")
> Reported-by: Stanislav Fomichev <sdf@google.com>
> Cc: John Fastabend <john.fastabend@gmail.com>
> Cc: Jakub Sitnicki <jakub@cloudflare.com>
> Signed-off-by: Cong Wang <cong.wang@bytedance.com>
> ---

[...]

Thanks!

Acked-by: Jakub Sitnicki <jakub@cloudflare.com>
Tested-by: Jakub Sitnicki <jakub@cloudflare.com>


* Re: [Patch bpf v2] sock_map: move cancel_work_sync() out of sock lock
From: patchwork-bot+netdevbpf @ 2022-11-03 13:00 UTC
  To: Cong Wang; +Cc: netdev, bpf, cong.wang, sdf, john.fastabend, jakub

Hello:

This patch was applied to bpf/bpf.git (master)
by Daniel Borkmann <daniel@iogearbox.net>:

On Tue,  1 Nov 2022 21:34:17 -0700 you wrote:
> From: Cong Wang <cong.wang@bytedance.com>
> 
> Stanislav reported a lockdep warning, triggered by calling
> cancel_work_sync() under the sock lock in sock_map_close(), as
> analyzed below by Jakub:
> 
> psock->work.func = sk_psock_backlog()
>   ACQUIRE psock->work_mutex
>     sk_psock_handle_skb()
>       skb_send_sock()
>         __skb_send_sock()
>           sendpage_unlocked()
>             kernel_sendpage()
>               sock->ops->sendpage = inet_sendpage()
>                 sk->sk_prot->sendpage = tcp_sendpage()
>                   ACQUIRE sk->sk_lock
>                     tcp_sendpage_locked()
>                   RELEASE sk->sk_lock
>   RELEASE psock->work_mutex
> 
> [...]

Here is the summary with links:
  - [bpf,v2] sock_map: move cancel_work_sync() out of sock lock
    https://git.kernel.org/bpf/bpf/c/8bbabb3fddcd

You are awesome, thank you!
-- 
Deet-doot-dot, I am a bot.
https://korg.docs.kernel.org/patchwork/pwbot.html



