From: Christophe Leroy <christophe.leroy@csgroup.eu>
To: Nicholas Piggin <npiggin@gmail.com>, linuxppc-dev@lists.ozlabs.org
Subject: Re: [PATCH v2] powerpc: add compile-time support for lbarx, lharx
Date: Wed, 7 Sep 2022 19:14:10 +0200 [thread overview]
Message-ID: <fc904a84-0b64-476e-c0ff-963eb3c1e73b@csgroup.eu> (raw)
In-Reply-To: <20210623032838.825897-1-npiggin@gmail.com>
On 23/06/2021 at 05:28, Nicholas Piggin wrote:
> ISA v2.06 (POWER7 and up) as well as e6500 support lbarx and lharx.
> Add a compile option that allows code to use them, and add support for
> 8 and 16 bit cmpxchg and xchg without shifting and masking.
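
Context for anyone new to the thread: without lbarx/lharx, a sub-word
xchg/cmpxchg has to be emulated on the containing aligned 32-bit word
with an lwarx/stwcx. loop plus shifting and masking. A rough, purely
illustrative C sketch of that fallback (not the kernel's actual
XCHG_GEN macro; names are made up, a GCC builtin stands in for the
reservation loop, and a little-endian byte layout is assumed):

	#include <stdint.h>

	/* Illustrative only: emulate an 8-bit exchange with a 32-bit CAS on
	 * the containing aligned word, standing in for lwarx/stwcx. retry.
	 */
	static inline uint8_t xchg_u8_emulated(volatile uint8_t *p, uint8_t val)
	{
		uint32_t *wp = (uint32_t *)((uintptr_t)p & ~(uintptr_t)3);
		unsigned int shift = ((uintptr_t)p & 3) * 8;	/* little-endian */
		uint32_t mask = 0xffu << shift;
		uint32_t old = *wp, new;

		do {
			/* splice the new byte into the word, keep the rest */
			new = (old & ~mask) | ((uint32_t)val << shift);
		} while (!__atomic_compare_exchange_n(wp, &old, new, false,
						      __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST));

		/* old holds the word as it was before the exchange */
		return (old & mask) >> shift;
	}

The point of the patch is that lbarx/stbcx. and lharx/sthcx. place the
reservation on the byte or halfword directly, so none of that shifting
and masking is needed for the 8 and 16 bit cases.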
Is this patch still relevant?
If so, it should be rebased because it conflicts badly.
Thanks
Christophe
>
> Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
> ---
> v2: Fixed lwarx->lharx typo, switched to PPC_HAS_
>
> arch/powerpc/Kconfig | 3 +
> arch/powerpc/include/asm/cmpxchg.h | 236 ++++++++++++++++++++++++-
> arch/powerpc/lib/sstep.c | 21 +--
> arch/powerpc/platforms/Kconfig.cputype | 5 +
> 4 files changed, 254 insertions(+), 11 deletions(-)
>
> diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
> index 088dd2afcfe4..dc17f4d51a79 100644
> --- a/arch/powerpc/Kconfig
> +++ b/arch/powerpc/Kconfig
> @@ -278,6 +278,9 @@ config PPC_BARRIER_NOSPEC
> default y
> depends on PPC_BOOK3S_64 || PPC_FSL_BOOK3E
>
> +config PPC_HAS_LBARX_LHARX
> + bool
> +
> config EARLY_PRINTK
> bool
> default y
> diff --git a/arch/powerpc/include/asm/cmpxchg.h b/arch/powerpc/include/asm/cmpxchg.h
> index cf091c4c22e5..28fbd57db1ec 100644
> --- a/arch/powerpc/include/asm/cmpxchg.h
> +++ b/arch/powerpc/include/asm/cmpxchg.h
> @@ -77,10 +77,76 @@ u32 __cmpxchg_##type##sfx(volatile void *p, u32 old, u32 new) \
> * the previous value stored there.
> */
>
> +#ifndef CONFIG_PPC_HAS_LBARX_LHARX
> XCHG_GEN(u8, _local, "memory");
> XCHG_GEN(u8, _relaxed, "cc");
> XCHG_GEN(u16, _local, "memory");
> XCHG_GEN(u16, _relaxed, "cc");
> +#else
> +static __always_inline unsigned long
> +__xchg_u8_local(volatile void *p, unsigned long val)
> +{
> + unsigned long prev;
> +
> + __asm__ __volatile__(
> +"1: lbarx %0,0,%2 \n"
> +" stbcx. %3,0,%2 \n\
> + bne- 1b"
> + : "=&r" (prev), "+m" (*(volatile unsigned char *)p)
> + : "r" (p), "r" (val)
> + : "cc", "memory");
> +
> + return prev;
> +}
> +
> +static __always_inline unsigned long
> +__xchg_u8_relaxed(u8 *p, unsigned long val)
> +{
> + unsigned long prev;
> +
> + __asm__ __volatile__(
> +"1: lbarx %0,0,%2\n"
> +" stbcx. %3,0,%2\n"
> +" bne- 1b"
> + : "=&r" (prev), "+m" (*p)
> + : "r" (p), "r" (val)
> + : "cc");
> +
> + return prev;
> +}
> +
> +static __always_inline unsigned long
> +__xchg_u16_local(volatile void *p, unsigned long val)
> +{
> + unsigned long prev;
> +
> + __asm__ __volatile__(
> +"1: lharx %0,0,%2 \n"
> +" sthcx. %3,0,%2 \n\
> + bne- 1b"
> + : "=&r" (prev), "+m" (*(volatile unsigned short *)p)
> + : "r" (p), "r" (val)
> + : "cc", "memory");
> +
> + return prev;
> +}
> +
> +static __always_inline unsigned long
> +__xchg_u16_relaxed(u16 *p, unsigned long val)
> +{
> + unsigned long prev;
> +
> + __asm__ __volatile__(
> +"1: lharx %0,0,%2\n"
> +" sthcx. %3,0,%2\n"
> +" bne- 1b"
> + : "=&r" (prev), "+m" (*p)
> + : "r" (p), "r" (val)
> + : "cc");
> +
> + return prev;
> +}
> +#endif
>
> static __always_inline unsigned long
> __xchg_u32_local(volatile void *p, unsigned long val)
> @@ -198,11 +264,12 @@ __xchg_relaxed(void *ptr, unsigned long x, unsigned int size)
> (__typeof__(*(ptr))) __xchg_relaxed((ptr), \
> (unsigned long)_x_, sizeof(*(ptr))); \
> })
> +
> /*
> * Compare and exchange - if *p == old, set it to new,
> * and return the old value of *p.
> */
> -
> +#ifndef CONFIG_PPC_HAS_LBARX_LHARX
> CMPXCHG_GEN(u8, , PPC_ATOMIC_ENTRY_BARRIER, PPC_ATOMIC_EXIT_BARRIER, "memory");
> CMPXCHG_GEN(u8, _local, , , "memory");
> CMPXCHG_GEN(u8, _acquire, , PPC_ACQUIRE_BARRIER, "memory");
> @@ -211,6 +278,173 @@ CMPXCHG_GEN(u16, , PPC_ATOMIC_ENTRY_BARRIER, PPC_ATOMIC_EXIT_BARRIER, "memory");
> CMPXCHG_GEN(u16, _local, , , "memory");
> CMPXCHG_GEN(u16, _acquire, , PPC_ACQUIRE_BARRIER, "memory");
> CMPXCHG_GEN(u16, _relaxed, , , "cc");
> +#else
> +static __always_inline unsigned long
> +__cmpxchg_u8(volatile unsigned char *p, unsigned long old, unsigned long new)
> +{
> + unsigned int prev;
> +
> + __asm__ __volatile__ (
> + PPC_ATOMIC_ENTRY_BARRIER
> +"1: lbarx %0,0,%2 # __cmpxchg_u8\n\
> + cmpw 0,%0,%3\n\
> + bne- 2f\n"
> +" stbcx. %4,0,%2\n\
> + bne- 1b"
> + PPC_ATOMIC_EXIT_BARRIER
> + "\n\
> +2:"
> + : "=&r" (prev), "+m" (*p)
> + : "r" (p), "r" (old), "r" (new)
> + : "cc", "memory");
> +
> + return prev;
> +}
> +
> +static __always_inline unsigned long
> +__cmpxchg_u8_local(volatile unsigned char *p, unsigned long old,
> + unsigned long new)
> +{
> + unsigned int prev;
> +
> + __asm__ __volatile__ (
> +"1: lbarx %0,0,%2 # __cmpxchg_u8\n\
> + cmpw 0,%0,%3\n\
> + bne- 2f\n"
> +" stbcx. %4,0,%2\n\
> + bne- 1b"
> + "\n\
> +2:"
> + : "=&r" (prev), "+m" (*p)
> + : "r" (p), "r" (old), "r" (new)
> + : "cc", "memory");
> +
> + return prev;
> +}
> +
> +static __always_inline unsigned long
> +__cmpxchg_u8_relaxed(u8 *p, unsigned long old, unsigned long new)
> +{
> + unsigned long prev;
> +
> + __asm__ __volatile__ (
> +"1: lbarx %0,0,%2 # __cmpxchg_u8_relaxed\n"
> +" cmpw 0,%0,%3\n"
> +" bne- 2f\n"
> +" stbcx. %4,0,%2\n"
> +" bne- 1b\n"
> +"2:"
> + : "=&r" (prev), "+m" (*p)
> + : "r" (p), "r" (old), "r" (new)
> + : "cc");
> +
> + return prev;
> +}
> +
> +static __always_inline unsigned long
> +__cmpxchg_u8_acquire(u8 *p, unsigned long old, unsigned long new)
> +{
> + unsigned long prev;
> +
> + __asm__ __volatile__ (
> +"1: lbarx %0,0,%2 # __cmpxchg_u8_acquire\n"
> +" cmpw 0,%0,%3\n"
> +" bne- 2f\n"
> +" stbcx. %4,0,%2\n"
> +" bne- 1b\n"
> + PPC_ACQUIRE_BARRIER
> + "\n"
> +"2:"
> + : "=&r" (prev), "+m" (*p)
> + : "r" (p), "r" (old), "r" (new)
> + : "cc", "memory");
> +
> + return prev;
> +}
> +
> +static __always_inline unsigned long
> +__cmpxchg_u16(volatile unsigned short *p, unsigned long old, unsigned long new)
> +{
> + unsigned int prev;
> +
> + __asm__ __volatile__ (
> + PPC_ATOMIC_ENTRY_BARRIER
> +"1: lharx %0,0,%2 # __cmpxchg_u16\n\
> + cmpw 0,%0,%3\n\
> + bne- 2f\n"
> +" sthcx. %4,0,%2\n\
> + bne- 1b"
> + PPC_ATOMIC_EXIT_BARRIER
> + "\n\
> +2:"
> + : "=&r" (prev), "+m" (*p)
> + : "r" (p), "r" (old), "r" (new)
> + : "cc", "memory");
> +
> + return prev;
> +}
> +
> +static __always_inline unsigned long
> +__cmpxchg_u16_local(volatile unsigned short *p, unsigned long old,
> + unsigned long new)
> +{
> + unsigned int prev;
> +
> + __asm__ __volatile__ (
> +"1: lharx %0,0,%2 # __cmpxchg_u16\n\
> + cmpw 0,%0,%3\n\
> + bne- 2f\n"
> +" sthcx. %4,0,%2\n\
> + bne- 1b"
> + "\n\
> +2:"
> + : "=&r" (prev), "+m" (*p)
> + : "r" (p), "r" (old), "r" (new)
> + : "cc", "memory");
> +
> + return prev;
> +}
> +
> +static __always_inline unsigned long
> +__cmpxchg_u16_relaxed(u16 *p, unsigned long old, unsigned long new)
> +{
> + unsigned long prev;
> +
> + __asm__ __volatile__ (
> +"1: lharx %0,0,%2 # __cmpxchg_u16_relaxed\n"
> +" cmpw 0,%0,%3\n"
> +" bne- 2f\n"
> +" sthcx. %4,0,%2\n"
> +" bne- 1b\n"
> +"2:"
> + : "=&r" (prev), "+m" (*p)
> + : "r" (p), "r" (old), "r" (new)
> + : "cc");
> +
> + return prev;
> +}
> +
> +static __always_inline unsigned long
> +__cmpxchg_u16_acquire(u16 *p, unsigned long old, unsigned long new)
> +{
> + unsigned long prev;
> +
> + __asm__ __volatile__ (
> +"1: lharx %0,0,%2 # __cmpxchg_u16_acquire\n"
> +" cmpw 0,%0,%3\n"
> +" bne- 2f\n"
> +" sthcx. %4,0,%2\n"
> +" bne- 1b\n"
> + PPC_ACQUIRE_BARRIER
> + "\n"
> +"2:"
> + : "=&r" (prev), "+m" (*p)
> + : "r" (p), "r" (old), "r" (new)
> + : "cc", "memory");
> +
> + return prev;
> +}
> +#endif
>
> static __always_inline unsigned long
> __cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
> diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
> index f998e655b570..3a03c8ce1e95 100644
> --- a/arch/powerpc/platforms/Kconfig.cputype
> +++ b/arch/powerpc/platforms/Kconfig.cputype
> @@ -131,6 +131,7 @@ config GENERIC_CPU
> bool "Generic (POWER8 and above)"
> depends on PPC64 && CPU_LITTLE_ENDIAN
> select ARCH_HAS_FAST_MULTIPLIER
> + select PPC_HAS_LBARX_LHARX
>
> config GENERIC_CPU
> bool "Generic 32 bits powerpc"
> @@ -152,16 +153,19 @@ config POWER7_CPU
> bool "POWER7"
> depends on PPC_BOOK3S_64
> select ARCH_HAS_FAST_MULTIPLIER
> + select PPC_HAS_LBARX_LHARX
>
> config POWER8_CPU
> bool "POWER8"
> depends on PPC_BOOK3S_64
> select ARCH_HAS_FAST_MULTIPLIER
> + select PPC_HAS_LBARX_LHARX
>
> config POWER9_CPU
> bool "POWER9"
> depends on PPC_BOOK3S_64
> select ARCH_HAS_FAST_MULTIPLIER
> + select PPC_HAS_LBARX_LHARX
>
> config E5500_CPU
> bool "Freescale e5500"
> @@ -170,6 +174,7 @@ config E5500_CPU
> config E6500_CPU
> bool "Freescale e6500"
> depends on E500
> + select PPC_HAS_LBARX_LHARX
>
> config 860_CPU
> bool "8xx family"