From mboxrd@z Thu Jan  1 00:00:00 1970
From: mathieu.desnoyers@polymtl.ca (Mathieu Desnoyers)
Date: Thu, 18 Mar 2010 09:50:08 -0400
Subject: [PATCH 1/1] [RFC] arm: add half-word __xchg
In-Reply-To: <1268919221-2748-1-git-send-email-virtuoso@slind.org>
References: <20100318123223.GA6855@Krystal>
	<1268919221-2748-1-git-send-email-virtuoso@slind.org>
Message-ID: <20100318135008.GA11800@Krystal>
To: linux-arm-kernel@lists.infradead.org
List-Id: linux-arm-kernel.lists.infradead.org

* Alexander Shishkin (virtuoso at slind.org) wrote:
> On systems where ldrexh/strexh are not available, use a generic local
> version or __bad_xchg() on SMP.
> 
> Signed-off-by: Alexander Shishkin
> CC: linux-arm-kernel-bounces at lists.infradead.org
> CC: Imre Deak
> CC: Mathieu Desnoyers
> ---
>  arch/arm/include/asm/system.h |   65 ++++++++++++++++++++++++++++++++++------
>  1 files changed, 55 insertions(+), 10 deletions(-)
> 
> diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
> index d65b2f5..82248ae 100644
> --- a/arch/arm/include/asm/system.h
> +++ b/arch/arm/include/asm/system.h
> @@ -218,6 +218,39 @@ do {									\
>  	last = __switch_to(prev,task_thread_info(prev), task_thread_info(next));	\
>  } while (0)
> 
> +static inline unsigned long __xchg_local_generic(unsigned long x,
> +						 volatile void *ptr, int size)
> +{
> +	extern void __bad_xchg(volatile void *, int);
> +	unsigned long ret;
> +	unsigned long flags;
> +
> +	switch (size) {
> +	case 1:
> +		raw_local_irq_save(flags);
> +		ret = *(volatile unsigned char *)ptr;
> +		*(volatile unsigned char *)ptr = x;
> +		raw_local_irq_restore(flags);
> +		break;
> +
> +	case 2:
> +		raw_local_irq_save(flags);
> +		ret = *(volatile unsigned short *)ptr;
> +		*(volatile unsigned short *)ptr = x;
> +		raw_local_irq_restore(flags);
> +		break;
> +
> +	case 4:
> +		raw_local_irq_save(flags);
> +		ret = *(volatile unsigned long *)ptr;
> +		*(volatile unsigned long *)ptr = x;
> +		raw_local_irq_restore(flags);
> +		break;
> +	}
> +
> +	return ret;
> +}
> +
>  #if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
>  /*
>   * On the StrongARM, "swp" is terminally broken since it bypasses the
> @@ -262,6 +295,26 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
>  			: "r" (x), "r" (ptr)
>  			: "memory", "cc");
>  		break;
> +#ifdef CONFIG_CPU_32v6K
> +	case 2:
> +		asm volatile("@	__xchg2\n"
> +		"1:	ldrexh	%0, [%3]\n"
> +		"	strexh	%1, %2, [%3]\n"
> +		"	teq	%1, #0\n"
> +		"	bne	1b"
> +			: "=&r" (ret), "=&r" (tmp)
> +			: "r" (x), "r" (ptr)
> +			: "memory", "cc");
> +		break;
> +#else
> +	case 2:
> +#ifdef CONFIG_SMP
> +		__bad_xchg(ptr, size), ret = 0;

You don't need to put this one explicitly. See the default case of the
switch.

But, thinking about it: it's bad to have a 2-byte xchg primitive that
only works on UP and breaks the build on SMP. We should instead
implement a workaround based on __cmpxchg4 to perform the 2-byte
xchg().
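
Roughly along these lines (an untested user-space sketch of the idea,
not a proposed patch: the helper name is made up, GCC's
__sync_val_compare_and_swap stands in for the kernel's 4-byte cmpxchg
primitive, and the shift computation assumes little-endian):

#include <stdint.h>
#include <stdio.h>

static uint16_t xchg16_via_cmpxchg32(volatile uint16_t *ptr, uint16_t val)
{
	/* Aligned 32-bit word that contains the target halfword. */
	volatile uint32_t *wp =
		(volatile uint32_t *)((uintptr_t)ptr & ~(uintptr_t)3);
	/* Bit offset of the halfword within that word (little-endian). */
	unsigned int shift = ((uintptr_t)ptr & 2) * 8;
	uint32_t mask = (uint32_t)0xffff << shift;
	uint32_t old, new;

	do {
		old = *wp;
		/* Replace only our halfword, keep the neighbour intact. */
		new = (old & ~mask) | ((uint32_t)val << shift);
	} while (__sync_val_compare_and_swap(wp, old, new) != old);

	return (uint16_t)((old & mask) >> shift);
}

int main(void)
{
	/* Two halfwords sharing one aligned 32-bit word. */
	uint16_t v[2] __attribute__((aligned(4))) = { 0x1111, 0x2222 };
	uint16_t prev = xchg16_via_cmpxchg32(&v[1], 0xbeef);

	printf("prev=%#x now=%#x neighbour=%#x\n", prev, v[1], v[0]);
	return 0;
}

The neighbouring halfword is never clobbered: only the masked bits are
replaced, and any concurrent update of the containing word makes the
compare-and-swap fail and the loop retry.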

Thanks,

Mathieu

> +#else
> +		ret = __xchg_local_generic(x, ptr, 2);
> +#endif
> +		break;
> +#endif
>  	case 4:
>  		asm volatile("@	__xchg4\n"
>  		"1:	ldrex	%0, [%3]\n"
>  		"	strex	%1, %2, [%3]\n"
>  		"	teq	%1, #0\n"
>  		"	bne	1b"
>  			: "=&r" (ret), "=&r" (tmp)
>  			: "r" (x), "r" (ptr)
>  			: "memory", "cc");
>  		break;
> @@ -277,17 +330,9 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
>  #error SMP is not supported on this platform
>  #endif
>  	case 1:
> -		raw_local_irq_save(flags);
> -		ret = *(volatile unsigned char *)ptr;
> -		*(volatile unsigned char *)ptr = x;
> -		raw_local_irq_restore(flags);
> -		break;
> -
> +	case 2:
>  	case 4:
> -		raw_local_irq_save(flags);
> -		ret = *(volatile unsigned long *)ptr;
> -		*(volatile unsigned long *)ptr = x;
> -		raw_local_irq_restore(flags);
> +		ret = __xchg_local_generic(x, ptr, size);
>  		break;
>  #else
>  	case 1:
> -- 
> 1.6.3.3

-- 
Mathieu Desnoyers
Operating System Efficiency Consultant
EfficiOS Inc.
http://www.efficios.com