From: Waiman Long <waiman.long@hp.com>
To: Peter Zijlstra <peterz@infradead.org>
Cc: doug.hatch@hp.com, tglx@linutronix.de, mingo@kernel.org,
linux-kernel@vger.kernel.org, scott.norton@hp.com,
torvalds@linux-foundation.org, hpa@zytor.com,
linux-tip-commits@vger.kernel.org
Subject: Re: [tip:locking/core] locking/pvqspinlock: Replace xchg() by the more descriptive set_mb()
Date: Mon, 11 May 2015 12:50:29 -0400 [thread overview]
Message-ID: <5550DDD5.1040004@hp.com> (raw)
In-Reply-To: <20150511145408.GU27504@twins.programming.kicks-ass.net>
On 05/11/2015 10:54 AM, Peter Zijlstra wrote:
> On Mon, May 11, 2015 at 05:43:55AM -0700, tip-bot for Waiman Long wrote:
>> - (void)xchg(&pn->state, vcpu_halted);
>> + set_mb(pn->state, vcpu_halted);
> Hmm, so I looked at the set_mb() definitions and I figure we want to do
> something like the below, right?
Yes, I think we should do that just to be safe from unexpected compiler
optimizations.
> ---
> arch/arm/include/asm/barrier.h | 2 +-
> arch/arm64/include/asm/barrier.h | 2 +-
> arch/ia64/include/asm/barrier.h | 2 +-
> arch/metag/include/asm/barrier.h | 2 +-
> arch/mips/include/asm/barrier.h | 2 +-
> arch/powerpc/include/asm/barrier.h | 2 +-
> arch/s390/include/asm/barrier.h | 2 +-
> arch/sparc/include/asm/barrier_64.h | 2 +-
> arch/x86/include/asm/barrier.h | 2 +-
> include/asm-generic/barrier.h | 2 +-
> 10 files changed, 10 insertions(+), 10 deletions(-)
>
> diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
> index d2f81e6b8c1c..993150aea681 100644
> --- a/arch/arm/include/asm/barrier.h
> +++ b/arch/arm/include/asm/barrier.h
> @@ -81,7 +81,7 @@ do { \
> #define read_barrier_depends() do { } while(0)
> #define smp_read_barrier_depends() do { } while(0)
>
> -#define set_mb(var, value) do { var = value; smp_mb(); } while (0)
> +#define set_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0)
>
> #define smp_mb__before_atomic() smp_mb()
> #define smp_mb__after_atomic() smp_mb()
> diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
> index 71f19c4dc0de..ff7de78d01b8 100644
> --- a/arch/arm64/include/asm/barrier.h
> +++ b/arch/arm64/include/asm/barrier.h
> @@ -114,7 +114,7 @@ do { \
> #define read_barrier_depends() do { } while(0)
> #define smp_read_barrier_depends() do { } while(0)
>
> -#define set_mb(var, value) do { var = value; smp_mb(); } while (0)
> +#define set_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0)
> #define nop() asm volatile("nop");
>
> #define smp_mb__before_atomic() smp_mb()
> diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h
> index f6769eb2bbf9..03117e7b2ab8 100644
> --- a/arch/ia64/include/asm/barrier.h
> +++ b/arch/ia64/include/asm/barrier.h
> @@ -82,7 +82,7 @@ do { \
> * acquire vs release semantics but we can't discuss this stuff with
> * Linus just yet. Grrr...
> */
> -#define set_mb(var, value) do { (var) = (value); mb(); } while (0)
> +#define set_mb(var, value) do { WRITE_ONCE(var, value); mb(); } while (0)
>
> /*
> * The group barrier in front of the rsm& ssm are necessary to ensure
> diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h
> index d703d8e26a65..97eb018a2933 100644
> --- a/arch/metag/include/asm/barrier.h
> +++ b/arch/metag/include/asm/barrier.h
> @@ -84,7 +84,7 @@ static inline void fence(void)
> #define read_barrier_depends() do { } while (0)
> #define smp_read_barrier_depends() do { } while (0)
>
> -#define set_mb(var, value) do { var = value; smp_mb(); } while (0)
> +#define set_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0)
>
> #define smp_store_release(p, v) \
> do { \
> diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
> index 2b8bbbcb9be0..cff1bbdaa74a 100644
> --- a/arch/mips/include/asm/barrier.h
> +++ b/arch/mips/include/asm/barrier.h
> @@ -113,7 +113,7 @@
> #endif
>
> #define set_mb(var, value) \
> - do { var = value; smp_mb(); } while (0)
> + do { WRITE_ONCE(var, value); smp_mb(); } while (0)
>
> #define smp_llsc_mb() __asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
>
> diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
> index a3bf5be111ff..2a072e48780d 100644
> --- a/arch/powerpc/include/asm/barrier.h
> +++ b/arch/powerpc/include/asm/barrier.h
> @@ -34,7 +34,7 @@
> #define rmb() __asm__ __volatile__ ("sync" : : : "memory")
> #define wmb() __asm__ __volatile__ ("sync" : : : "memory")
>
> -#define set_mb(var, value) do { var = value; mb(); } while (0)
> +#define set_mb(var, value) do { WRITE_ONCE(var, value); mb(); } while (0)
>
> #ifdef __SUBARCH_HAS_LWSYNC
> # define SMPWMB LWSYNC
> diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
> index 8d724718ec21..b66cd53d35fc 100644
> --- a/arch/s390/include/asm/barrier.h
> +++ b/arch/s390/include/asm/barrier.h
> @@ -36,7 +36,7 @@
> #define smp_mb__before_atomic() smp_mb()
> #define smp_mb__after_atomic() smp_mb()
>
> -#define set_mb(var, value) do { var = value; mb(); } while (0)
> +#define set_mb(var, value) do { WRITE_ONCE(var, value); mb(); } while (0)
>
> #define smp_store_release(p, v) \
> do { \
> diff --git a/arch/sparc/include/asm/barrier_64.h b/arch/sparc/include/asm/barrier_64.h
> index 76648941fea7..125fec7512f4 100644
> --- a/arch/sparc/include/asm/barrier_64.h
> +++ b/arch/sparc/include/asm/barrier_64.h
> @@ -41,7 +41,7 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
> #define dma_wmb() wmb()
>
> #define set_mb(__var, __value) \
> - do { __var = __value; membar_safe("#StoreLoad"); } while(0)
> + do { WRITE_ONCE(__var, __value); membar_safe("#StoreLoad"); } while(0)
>
> #ifdef CONFIG_SMP
> #define smp_mb() mb()
> diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
> index 959e45b81fe2..9de5cde133a1 100644
> --- a/arch/x86/include/asm/barrier.h
> +++ b/arch/x86/include/asm/barrier.h
> @@ -40,7 +40,7 @@
> #define smp_mb() barrier()
> #define smp_rmb() barrier()
> #define smp_wmb() barrier()
> -#define set_mb(var, value) do { var = value; barrier(); } while (0)
> +#define set_mb(var, value) do { WRITE_ONCE(var, value); barrier(); } while (0)
That part is in the !CONFIG_SMP portion. I don't think we need to change it.
Cheers,
Longman
next prev parent reply other threads:[~2015-05-11 16:50 UTC|newest]
Thread overview: 7+ messages / expand[flat|nested] mbox.gz Atom feed top
[not found] <tip-52c9d2badd1ae4d11c29de57d4e964e48afd3cb4@git.kernel.org>
2015-05-11 14:54 ` [tip:locking/core] locking/pvqspinlock: Replace xchg() by the more descriptive set_mb() Peter Zijlstra
2015-05-11 16:50 ` Waiman Long [this message]
2015-05-11 17:50 ` Linus Torvalds
2015-05-12 8:45 ` Peter Zijlstra
2015-05-12 13:00 ` Peter Zijlstra
2015-05-12 8:53 ` Peter Zijlstra
2015-05-12 14:59 ` Peter Zijlstra
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=5550DDD5.1040004@hp.com \
--to=waiman.long@hp.com \
--cc=doug.hatch@hp.com \
--cc=hpa@zytor.com \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-tip-commits@vger.kernel.org \
--cc=mingo@kernel.org \
--cc=peterz@infradead.org \
--cc=scott.norton@hp.com \
--cc=tglx@linutronix.de \
--cc=torvalds@linux-foundation.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox