* [PATCH net-next] net: use cmpxchg() in lock_sock_nested()
@ 2026-02-25 3:32 Eric Dumazet
2026-02-25 7:19 ` Kuniyuki Iwashima
2026-02-25 9:21 ` David Laight
0 siblings, 2 replies; 6+ messages in thread
From: Eric Dumazet @ 2026-02-25 3:32 UTC (permalink / raw)
To: David S . Miller, Jakub Kicinski, Paolo Abeni
Cc: Simon Horman, Kuniyuki Iwashima, netdev, eric.dumazet,
Eric Dumazet
Add a fast path in lock_sock_nested(), to avoid acquiring
the socket spinlock only to set @owned to one:
spin_lock_bh(&sk->sk_lock.slock);
if (unlikely(sock_owned_by_user_nocheck(sk)))
__lock_sock(sk);
sk->sk_lock.owned = 1;
spin_unlock_bh(&sk->sk_lock.slock);
On x86_64 compiler generates something quite efficient:
ffffffff81e96100 <lock_sock_nested>:
ffffffff81e96100: f3 0f 1e fa endbr64
ffffffff81e96104: 0f 1f 44 00 00 nopl 0x0(%rax,%rax,1)
ffffffff81e96109: b9 01 00 00 00 mov $0x1,%ecx
ffffffff81e9610e: 31 c0 xor %eax,%eax
ffffffff81e96110: f0 48 0f b1 8f 48 01 lock cmpxchg %rcx,0x148(%rdi)
ffffffff81e96117: 00 00
ffffffff81e96119: 48 85 c0 test %rax,%rax
ffffffff81e9611c: 74 33 je ffffffff81e96151 <lock_sock_nested+0x51>
...// slow path
ffffffff81e96151: 2e e9 19 c4 33 00 cs jmp ffffffff821d2570 <__pi___x86_return_thunk>
Signed-off-by: Eric Dumazet <edumazet@google.com>
---
include/net/sock.h | 9 +++++++--
net/core/sock.c | 10 ++++++++++
2 files changed, 17 insertions(+), 2 deletions(-)
diff --git a/include/net/sock.h b/include/net/sock.h
index 55b61e4b0d8318887d527e919fc1103d78ac6d14..84c21fb38a28406e387dc33e0fb5decd18893950 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -81,8 +81,13 @@
* mini-semaphore synchronizes multiple users amongst themselves.
*/
typedef struct {
- spinlock_t slock;
- int owned;
+ union {
+ struct slock_owned {
+ int owned;
+ spinlock_t slock;
+ };
+ long combined;
+ };
wait_queue_head_t wq;
/*
* We express the mutex-alike socket_lock semantics
diff --git a/net/core/sock.c b/net/core/sock.c
index cfb2a6209946089669882cdbd5d1b36c53838989..2a03d1432111f2dcea9d700f80efb7b0bb4a47d8 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -3780,6 +3780,16 @@ void noinline lock_sock_nested(struct sock *sk, int subclass)
mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
might_sleep();
+#ifdef CONFIG_64BIT
+ if (sizeof(struct slock_owned) == sizeof(long)) {
+ socket_lock_t tmp;
+
+ tmp.slock = __SPIN_LOCK_UNLOCKED(tmp.slock);
+ tmp.owned = 1;
+ if (likely(!cmpxchg(&sk->sk_lock.combined, 0, tmp.combined)))
+ return;
+ }
+#endif
spin_lock_bh(&sk->sk_lock.slock);
if (unlikely(sock_owned_by_user_nocheck(sk)))
__lock_sock(sk);
--
2.53.0.414.gf7e9f6c205-goog
^ permalink raw reply related [flat|nested] 6+ messages in thread

* Re: [PATCH net-next] net: use cmpxchg() in lock_sock_nested()
2026-02-25 3:32 [PATCH net-next] net: use cmpxchg() in lock_sock_nested() Eric Dumazet
@ 2026-02-25 7:19 ` Kuniyuki Iwashima
2026-02-25 7:23 ` Eric Dumazet
2026-02-25 9:21 ` David Laight
1 sibling, 1 reply; 6+ messages in thread
From: Kuniyuki Iwashima @ 2026-02-25 7:19 UTC (permalink / raw)
To: Eric Dumazet
Cc: David S . Miller, Jakub Kicinski, Paolo Abeni, Simon Horman,
netdev, eric.dumazet
On Tue, Feb 24, 2026 at 7:32 PM Eric Dumazet <edumazet@google.com> wrote:
>
> Add a fast path in lock_sock_nested(), to avoid acquiring
> the socket spinlock only to set @owned to one:
>
> spin_lock_bh(&sk->sk_lock.slock);
> if (unlikely(sock_owned_by_user_nocheck(sk)))
> __lock_sock(sk);
> sk->sk_lock.owned = 1;
> spin_unlock_bh(&sk->sk_lock.slock);
>
> On x86_64 compiler generates something quite efficient:
>
> ffffffff81e96100 <lock_sock_nested>:
> ffffffff81e96100: f3 0f 1e fa endbr64
> ffffffff81e96104: 0f 1f 44 00 00 nopl 0x0(%rax,%rax,1)
> ffffffff81e96109: b9 01 00 00 00 mov $0x1,%ecx
> ffffffff81e9610e: 31 c0 xor %eax,%eax
> ffffffff81e96110: f0 48 0f b1 8f 48 01 lock cmpxchg %rcx,0x148(%rdi)
> ffffffff81e96117: 00 00
> ffffffff81e96119: 48 85 c0 test %rax,%rax
> ffffffff81e9611c: 74 33 je ffffffff81e96151 <lock_sock_nested+0x51>
> ...// slow path
> ffffffff81e96151: 2e e9 19 c4 33 00 cs jmp ffffffff821d2570 <__pi___x86_return_thunk>
Wow, this is interesting !
>
> Signed-off-by: Eric Dumazet <edumazet@google.com>
> ---
> include/net/sock.h | 9 +++++++--
> net/core/sock.c | 10 ++++++++++
> 2 files changed, 17 insertions(+), 2 deletions(-)
>
> diff --git a/include/net/sock.h b/include/net/sock.h
> index 55b61e4b0d8318887d527e919fc1103d78ac6d14..84c21fb38a28406e387dc33e0fb5decd18893950 100644
> --- a/include/net/sock.h
> +++ b/include/net/sock.h
> @@ -81,8 +81,13 @@
> * mini-semaphore synchronizes multiple users amongst themselves.
> */
> typedef struct {
> - spinlock_t slock;
> - int owned;
> + union {
> + struct slock_owned {
> + int owned;
> + spinlock_t slock;
> + };
> + long combined;
> + };
> wait_queue_head_t wq;
> /*
> * We express the mutex-alike socket_lock semantics
> diff --git a/net/core/sock.c b/net/core/sock.c
> index cfb2a6209946089669882cdbd5d1b36c53838989..2a03d1432111f2dcea9d700f80efb7b0bb4a47d8 100644
> --- a/net/core/sock.c
> +++ b/net/core/sock.c
> @@ -3780,6 +3780,16 @@ void noinline lock_sock_nested(struct sock *sk, int subclass)
> mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
>
> might_sleep();
> +#ifdef CONFIG_64BIT
> + if (sizeof(struct slock_owned) == sizeof(long)) {
> + socket_lock_t tmp;
> +
> + tmp.slock = __SPIN_LOCK_UNLOCKED(tmp.slock);
> + tmp.owned = 1;
> + if (likely(!cmpxchg(&sk->sk_lock.combined, 0, tmp.combined)))
Should we use __SPIN_LOCK_UNLOCKED() for the old
value too ? Looks like parisc assigns a non-zero value for it.
arch/parisc/include/asm/spinlock_types.h:5:#define
__ARCH_SPIN_LOCK_UNLOCKED_VAL 0x1a46
> + return;
> + }
> +#endif
> spin_lock_bh(&sk->sk_lock.slock);
> if (unlikely(sock_owned_by_user_nocheck(sk)))
> __lock_sock(sk);
> --
> 2.53.0.414.gf7e9f6c205-goog
>
^ permalink raw reply [flat|nested] 6+ messages in thread

* Re: [PATCH net-next] net: use cmpxchg() in lock_sock_nested()
2026-02-25 7:19 ` Kuniyuki Iwashima
@ 2026-02-25 7:23 ` Eric Dumazet
2026-02-25 7:29 ` Eric Dumazet
0 siblings, 1 reply; 6+ messages in thread
From: Eric Dumazet @ 2026-02-25 7:23 UTC (permalink / raw)
To: Kuniyuki Iwashima
Cc: David S . Miller, Jakub Kicinski, Paolo Abeni, Simon Horman,
netdev, eric.dumazet
On Wed, Feb 25, 2026 at 8:20 AM Kuniyuki Iwashima <kuniyu@google.com> wrote:
>
> On Tue, Feb 24, 2026 at 7:32 PM Eric Dumazet <edumazet@google.com> wrote:
> >
> > Add a fast path in lock_sock_nested(), to avoid acquiring
> > the socket spinlock only to set @owned to one:
> >
> > spin_lock_bh(&sk->sk_lock.slock);
> > if (unlikely(sock_owned_by_user_nocheck(sk)))
> > __lock_sock(sk);
> > sk->sk_lock.owned = 1;
> > spin_unlock_bh(&sk->sk_lock.slock);
> >
> > On x86_64 compiler generates something quite efficient:
> >
> > ffffffff81e96100 <lock_sock_nested>:
> > ffffffff81e96100: f3 0f 1e fa endbr64
> > ffffffff81e96104: 0f 1f 44 00 00 nopl 0x0(%rax,%rax,1)
> > ffffffff81e96109: b9 01 00 00 00 mov $0x1,%ecx
> > ffffffff81e9610e: 31 c0 xor %eax,%eax
> > ffffffff81e96110: f0 48 0f b1 8f 48 01 lock cmpxchg %rcx,0x148(%rdi)
> > ffffffff81e96117: 00 00
> > ffffffff81e96119: 48 85 c0 test %rax,%rax
> > ffffffff81e9611c: 74 33 je ffffffff81e96151 <lock_sock_nested+0x51>
> > ...// slow path
> > ffffffff81e96151: 2e e9 19 c4 33 00 cs jmp ffffffff821d2570 <__pi___x86_return_thunk>
>
> Wow, this is interesting !
And using try_cmpxchg() might remove the "test %rax,%rax" after the
"lock cmpxchg"
>
> Should we use __SPIN_LOCK_UNLOCKED() for the old
> value too ? Looks like parisc assigns a non-zero value for it.
>
> arch/parisc/include/asm/spinlock_types.h:5:#define
> __ARCH_SPIN_LOCK_UNLOCKED_VAL 0x1a46
>
I will take a look thanks !
^ permalink raw reply [flat|nested] 6+ messages in thread
* Re: [PATCH net-next] net: use cmpxchg() in lock_sock_nested()
2026-02-25 7:23 ` Eric Dumazet
@ 2026-02-25 7:29 ` Eric Dumazet
2026-02-25 7:43 ` Kuniyuki Iwashima
0 siblings, 1 reply; 6+ messages in thread
From: Eric Dumazet @ 2026-02-25 7:29 UTC (permalink / raw)
To: Kuniyuki Iwashima
Cc: David S . Miller, Jakub Kicinski, Paolo Abeni, Simon Horman,
netdev, eric.dumazet
On Wed, Feb 25, 2026 at 8:23 AM Eric Dumazet <edumazet@google.com> wrote:
>
> On Wed, Feb 25, 2026 at 8:20 AM Kuniyuki Iwashima <kuniyu@google.com> wrote:
> >
> > On Tue, Feb 24, 2026 at 7:32 PM Eric Dumazet <edumazet@google.com> wrote:
> > >
> > > Add a fast path in lock_sock_nested(), to avoid acquiring
> > > the socket spinlock only to set @owned to one:
> > >
> > > spin_lock_bh(&sk->sk_lock.slock);
> > > if (unlikely(sock_owned_by_user_nocheck(sk)))
> > > __lock_sock(sk);
> > > sk->sk_lock.owned = 1;
> > > spin_unlock_bh(&sk->sk_lock.slock);
> > >
> > > On x86_64 compiler generates something quite efficient:
> > >
> > > ffffffff81e96100 <lock_sock_nested>:
> > > ffffffff81e96100: f3 0f 1e fa endbr64
> > > ffffffff81e96104: 0f 1f 44 00 00 nopl 0x0(%rax,%rax,1)
> > > ffffffff81e96109: b9 01 00 00 00 mov $0x1,%ecx
> > > ffffffff81e9610e: 31 c0 xor %eax,%eax
> > > ffffffff81e96110: f0 48 0f b1 8f 48 01 lock cmpxchg %rcx,0x148(%rdi)
> > > ffffffff81e96117: 00 00
> > > ffffffff81e96119: 48 85 c0 test %rax,%rax
> > > ffffffff81e9611c: 74 33 je ffffffff81e96151 <lock_sock_nested+0x51>
> > > ...// slow path
> > > ffffffff81e96151: 2e e9 19 c4 33 00 cs jmp ffffffff821d2570 <__pi___x86_return_thunk>
> >
> > Wow, this is interesting !
>
> And using try_cmpxchg() might remove the "test %rax,%rax" after the
> "lock cmpxchg"
>
> >
> > Should we use __SPIN_LOCK_UNLOCKED() for the old
> > value too ? Looks like parisc assigns a non-zero value for it.
> >
> > arch/parisc/include/asm/spinlock_types.h:5:#define
> > __ARCH_SPIN_LOCK_UNLOCKED_VAL 0x1a46
> >
>
> I will take a look thanks !
With this diff on top of V1 we indeed remove one test.
diff --git a/net/core/sock.c b/net/core/sock.c
index 2a03d1432111f2dcea9d700f80efb7b0bb4a47d8..86185f194d64b4eea0835ee0f9b53fda184c73cb
100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -3782,11 +3782,14 @@ void noinline lock_sock_nested(struct sock
*sk, int subclass)
might_sleep();
#ifdef CONFIG_64BIT
if (sizeof(struct slock_owned) == sizeof(long)) {
- socket_lock_t tmp;
+ socket_lock_t tmp, old;
tmp.slock = __SPIN_LOCK_UNLOCKED(tmp.slock);
tmp.owned = 1;
- if (likely(!cmpxchg(&sk->sk_lock.combined, 0, tmp.combined)))
+ old.slock = __SPIN_LOCK_UNLOCKED(old.slock);
+ old.owned = 0;
+ if (likely(try_cmpxchg(&sk->sk_lock.combined,
+ &old.combined, tmp.combined)))
return;
}
#endif
00000000000077c0 <lock_sock_nested>:
77c0: f3 0f 1e fa endbr64
77c4: e8 00 00 00 00 call 77c9 <lock_sock_nested+0x9>
77c5: R_X86_64_PLT32 __fentry__-0x4
77c9: b9 01 00 00 00 mov $0x1,%ecx
77ce: 31 c0 xor %eax,%eax
77d0: f0 48 0f b1 8f 48 01 lock cmpxchg %rcx,0x148(%rdi)
77d7: 00 00
77d9: 75 06 jne 77e1
<lock_sock_nested+0x21> // slow path
77db: 2e e9 00 00 00 00 cs jmp 77e1 <lock_sock_nested+0x21>
77dd: R_X86_64_PLT32 __x86_return_thunk-0x4
^ permalink raw reply [flat|nested] 6+ messages in thread

* Re: [PATCH net-next] net: use cmpxchg() in lock_sock_nested()
2026-02-25 7:29 ` Eric Dumazet
@ 2026-02-25 7:43 ` Kuniyuki Iwashima
0 siblings, 0 replies; 6+ messages in thread
From: Kuniyuki Iwashima @ 2026-02-25 7:43 UTC (permalink / raw)
To: Eric Dumazet
Cc: David S . Miller, Jakub Kicinski, Paolo Abeni, Simon Horman,
netdev, eric.dumazet
On Tue, Feb 24, 2026 at 11:29 PM Eric Dumazet <edumazet@google.com> wrote:
>
> On Wed, Feb 25, 2026 at 8:23 AM Eric Dumazet <edumazet@google.com> wrote:
> >
> > On Wed, Feb 25, 2026 at 8:20 AM Kuniyuki Iwashima <kuniyu@google.com> wrote:
> > >
> > > On Tue, Feb 24, 2026 at 7:32 PM Eric Dumazet <edumazet@google.com> wrote:
> > > >
> > > > Add a fast path in lock_sock_nested(), to avoid acquiring
> > > > the socket spinlock only to set @owned to one:
> > > >
> > > > spin_lock_bh(&sk->sk_lock.slock);
> > > > if (unlikely(sock_owned_by_user_nocheck(sk)))
> > > > __lock_sock(sk);
> > > > sk->sk_lock.owned = 1;
> > > > spin_unlock_bh(&sk->sk_lock.slock);
> > > >
> > > > On x86_64 compiler generates something quite efficient:
> > > >
> > > > ffffffff81e96100 <lock_sock_nested>:
> > > > ffffffff81e96100: f3 0f 1e fa endbr64
> > > > ffffffff81e96104: 0f 1f 44 00 00 nopl 0x0(%rax,%rax,1)
> > > > ffffffff81e96109: b9 01 00 00 00 mov $0x1,%ecx
> > > > ffffffff81e9610e: 31 c0 xor %eax,%eax
> > > > ffffffff81e96110: f0 48 0f b1 8f 48 01 lock cmpxchg %rcx,0x148(%rdi)
> > > > ffffffff81e96117: 00 00
> > > > ffffffff81e96119: 48 85 c0 test %rax,%rax
> > > > ffffffff81e9611c: 74 33 je ffffffff81e96151 <lock_sock_nested+0x51>
> > > > ...// slow path
> > > > ffffffff81e96151: 2e e9 19 c4 33 00 cs jmp ffffffff821d2570 <__pi___x86_return_thunk>
> > >
> > > Wow, this is interesting !
> >
> > And using try_cmpxchg() might remove the "test %rax,%rax" after the
> > "lock cmpxchg"
> >
> > >
> > > Should we use __SPIN_LOCK_UNLOCKED() for the old
> > > value too ? Looks like parisc assigns a non-zero value for it.
> > >
> > > arch/parisc/include/asm/spinlock_types.h:5:#define
> > > __ARCH_SPIN_LOCK_UNLOCKED_VAL 0x1a46
> > >
> >
> > I will take a look thanks !
>
> WIth this diff on top of V1 we indeed remove one test.
Cool, I was wondering why the test was needed and
understood it was just to clear ZF which we can reuse
with try_cmpxchg() !
With that diff:
Reviewed-by: Kuniyuki Iwashima <kuniyu@google.com>
Thanks!
>
> diff --git a/net/core/sock.c b/net/core/sock.c
> index 2a03d1432111f2dcea9d700f80efb7b0bb4a47d8..86185f194d64b4eea0835ee0f9b53fda184c73cb
> 100644
> --- a/net/core/sock.c
> +++ b/net/core/sock.c
> @@ -3782,11 +3782,14 @@ void noinline lock_sock_nested(struct sock
> *sk, int subclass)
> might_sleep();
> #ifdef CONFIG_64BIT
> if (sizeof(struct slock_owned) == sizeof(long)) {
> - socket_lock_t tmp;
> + socket_lock_t tmp, old;
>
> tmp.slock = __SPIN_LOCK_UNLOCKED(tmp.slock);
> tmp.owned = 1;
> - if (likely(!cmpxchg(&sk->sk_lock.combined, 0, tmp.combined)))
> + old.slock = __SPIN_LOCK_UNLOCKED(old.slock);
> + old.owned = 0;
> + if (likely(try_cmpxchg(&sk->sk_lock.combined,
> + &old.combined, tmp.combined)))
> return;
> }
> #endif
>
> 00000000000077c0 <lock_sock_nested>:
> 77c0: f3 0f 1e fa endbr64
> 77c4: e8 00 00 00 00 call 77c9 <lock_sock_nested+0x9>
> 77c5: R_X86_64_PLT32 __fentry__-0x4
> 77c9: b9 01 00 00 00 mov $0x1,%ecx
> 77ce: 31 c0 xor %eax,%eax
> 77d0: f0 48 0f b1 8f 48 01 lock cmpxchg %rcx,0x148(%rdi)
> 77d7: 00 00
> 77d9: 75 06 jne 77e1
> <lock_sock_nested+0x21> // slow path
> 77db: 2e e9 00 00 00 00 cs jmp 77e1 <lock_sock_nested+0x21>
> 77dd: R_X86_64_PLT32 __x86_return_thunk-0x4
^ permalink raw reply [flat|nested] 6+ messages in thread
* Re: [PATCH net-next] net: use cmpxchg() in lock_sock_nested()
2026-02-25 3:32 [PATCH net-next] net: use cmpxchg() in lock_sock_nested() Eric Dumazet
2026-02-25 7:19 ` Kuniyuki Iwashima
@ 2026-02-25 9:21 ` David Laight
1 sibling, 0 replies; 6+ messages in thread
From: David Laight @ 2026-02-25 9:21 UTC (permalink / raw)
To: Eric Dumazet
Cc: David S . Miller, Jakub Kicinski, Paolo Abeni, Simon Horman,
Kuniyuki Iwashima, netdev, eric.dumazet
On Wed, 25 Feb 2026 03:32:16 +0000
Eric Dumazet <edumazet@google.com> wrote:
> Add a fast path in lock_sock_nested(), to avoid acquiring
> the socket spinlock only to set @owned to one:
>
> spin_lock_bh(&sk->sk_lock.slock);
> if (unlikely(sock_owned_by_user_nocheck(sk)))
> __lock_sock(sk);
> sk->sk_lock.owned = 1;
> spin_unlock_bh(&sk->sk_lock.slock);
There has to be a better commit message that just quoting the old code.
The whole thing is quite subtle.
Not helped by the 'helper' sock_owned_by_user_nocheck() obfuscating things
and that __lock_sock() is 'wait for socket to be unlocked' and doesn't
actually lock it at all.
David
>
> On x86_64 compiler generates something quite efficient:
>
> ffffffff81e96100 <lock_sock_nested>:
> ffffffff81e96100: f3 0f 1e fa endbr64
> ffffffff81e96104: 0f 1f 44 00 00 nopl 0x0(%rax,%rax,1)
> ffffffff81e96109: b9 01 00 00 00 mov $0x1,%ecx
> ffffffff81e9610e: 31 c0 xor %eax,%eax
> ffffffff81e96110: f0 48 0f b1 8f 48 01 lock cmpxchg %rcx,0x148(%rdi)
> ffffffff81e96117: 00 00
> ffffffff81e96119: 48 85 c0 test %rax,%rax
> ffffffff81e9611c: 74 33 je ffffffff81e96151 <lock_sock_nested+0x51>
> ...// slow path
> ffffffff81e96151: 2e e9 19 c4 33 00 cs jmp ffffffff821d2570 <__pi___x86_return_thunk>
>
> Signed-off-by: Eric Dumazet <edumazet@google.com>
> ---
> include/net/sock.h | 9 +++++++--
> net/core/sock.c | 10 ++++++++++
> 2 files changed, 17 insertions(+), 2 deletions(-)
>
> diff --git a/include/net/sock.h b/include/net/sock.h
> index 55b61e4b0d8318887d527e919fc1103d78ac6d14..84c21fb38a28406e387dc33e0fb5decd18893950 100644
> --- a/include/net/sock.h
> +++ b/include/net/sock.h
> @@ -81,8 +81,13 @@
> * mini-semaphore synchronizes multiple users amongst themselves.
> */
> typedef struct {
> - spinlock_t slock;
> - int owned;
> + union {
> + struct slock_owned {
> + int owned;
> + spinlock_t slock;
> + };
> + long combined;
> + };
> wait_queue_head_t wq;
> /*
> * We express the mutex-alike socket_lock semantics
> diff --git a/net/core/sock.c b/net/core/sock.c
> index cfb2a6209946089669882cdbd5d1b36c53838989..2a03d1432111f2dcea9d700f80efb7b0bb4a47d8 100644
> --- a/net/core/sock.c
> +++ b/net/core/sock.c
> @@ -3780,6 +3780,16 @@ void noinline lock_sock_nested(struct sock *sk, int subclass)
> mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
>
> might_sleep();
> +#ifdef CONFIG_64BIT
> + if (sizeof(struct slock_owned) == sizeof(long)) {
> + socket_lock_t tmp;
> +
> + tmp.slock = __SPIN_LOCK_UNLOCKED(tmp.slock);
> + tmp.owned = 1;
> + if (likely(!cmpxchg(&sk->sk_lock.combined, 0, tmp.combined)))
> + return;
> + }
> +#endif
> spin_lock_bh(&sk->sk_lock.slock);
> if (unlikely(sock_owned_by_user_nocheck(sk)))
> __lock_sock(sk);
^ permalink raw reply [flat|nested] 6+ messages in thread
end of thread, other threads:[~2026-02-25 9:21 UTC | newest]
Thread overview: 6+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2026-02-25 3:32 [PATCH net-next] net: use cmpxchg() in lock_sock_nested() Eric Dumazet
2026-02-25 7:19 ` Kuniyuki Iwashima
2026-02-25 7:23 ` Eric Dumazet
2026-02-25 7:29 ` Eric Dumazet
2026-02-25 7:43 ` Kuniyuki Iwashima
2026-02-25 9:21 ` David Laight
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox