linux-kernel.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH 4/4] x86: Final unification of local_{32|64}.h
@ 2007-12-16 12:04 Harvey Harrison
  2007-12-16 15:31 ` Ingo Molnar
  0 siblings, 1 reply; 7+ messages in thread
From: Harvey Harrison @ 2007-12-16 12:04 UTC (permalink / raw)
  To: Ingo Molnar; +Cc: H. Peter Anvin, LKML, Thomas Gleixner

No differences except for the definition of local_add_return on
X86_64. The X86_32 version is just fine as it is protected with
ifdef CONFIG_M386 so use it directly.

Signed-off-by: Harvey Harrison <harvey.harrison@gmail.com>
---
 include/asm-x86/local.h    |  149 ++++++++++++++++++++++++++++++++++++++++++-
 include/asm-x86/local_32.h |  150 --------------------------------------------
 include/asm-x86/local_64.h |  134 ---------------------------------------
 3 files changed, 145 insertions(+), 288 deletions(-)

diff --git a/include/asm-x86/local.h b/include/asm-x86/local.h
index 8839c36..3939859 100644
--- a/include/asm-x86/local.h
+++ b/include/asm-x86/local.h
@@ -14,6 +14,7 @@ typedef struct
 
 #define local_read(l)	atomic_long_read(&(l)->a)
 #define local_set(l,i)	atomic_long_set(&(l)->a, (i))
+
 /*
  * X86_32 uses longs
  * X86_64 uses quads
@@ -32,11 +33,151 @@ typedef struct
 # define ASM_XADD xaddq
 #endif
 
-#ifdef CONFIG_X86_32
-# include "local_32.h"
-#else
-# include "local_64.h"
+static inline void local_inc(local_t *l)
+{
+	__asm__ __volatile__(
+		"ASM_INC %0"
+		:"+m" (l->a.counter));
+}
+
+static inline void local_dec(local_t *l)
+{
+	__asm__ __volatile__(
+		"ASM_DEC %0"
+		:"+m" (l->a.counter));
+}
+
+static inline void local_add(long i, local_t *l)
+{
+	__asm__ __volatile__(
+		"ASM_ADD %1,%0"
+		:"+m" (l->a.counter)
+		:"ir" (i));
+}
+
+static inline void local_sub(long i, local_t *l)
+{
+	__asm__ __volatile__(
+		"ASM_SUB %1,%0"
+		:"+m" (l->a.counter)
+		:"ir" (i));
+}
+
+/**
+ * local_sub_and_test - subtract value from variable and test result
+ * @i: integer value to subtract
+ * @l: pointer to type local_t
+ *
+ * Atomically subtracts @i from @l and returns
+ * true if the result is zero, or false for all
+ * other cases.
+ */
+static inline int local_sub_and_test(long i, local_t *l)
+{
+	unsigned char c;
+
+	__asm__ __volatile__(
+		"ASM_SUB %2,%0; sete %1"
+		:"+m" (l->a.counter), "=qm" (c)
+		:"ir" (i) : "memory");
+	return c;
+}
+
+/**
+ * local_dec_and_test - decrement and test
+ * @l: pointer to type local_t
+ *
+ * Atomically decrements @l by 1 and
+ * returns true if the result is 0, or false for all other
+ * cases.
+ */
+static inline int local_dec_and_test(local_t *l)
+{
+	unsigned char c;
+
+	__asm__ __volatile__(
+		"ASM_DEC %0; sete %1"
+		:"+m" (l->a.counter), "=qm" (c)
+		: : "memory");
+	return c != 0;
+}
+
+/**
+ * local_inc_and_test - increment and test
+ * @l: pointer to type local_t
+ *
+ * Atomically increments @l by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+static inline int local_inc_and_test(local_t *l)
+{
+	unsigned char c;
+
+	__asm__ __volatile__(
+		"ASM_INC %0; sete %1"
+		:"+m" (l->a.counter), "=qm" (c)
+		: : "memory");
+	return c != 0;
+}
+
+/**
+ * local_add_negative - add and test if negative
+ * @i: integer value to add
+ * @l: pointer to type local_t
+ *
+ * Atomically adds @i to @l and returns true
+ * if the result is negative, or false when
+ * result is greater than or equal to zero.
+ */
+static inline int local_add_negative(long i, local_t *l)
+{
+	unsigned char c;
+
+	__asm__ __volatile__(
+		"ASM_ADD %2,%0; sets %1"
+		:"+m" (l->a.counter), "=qm" (c)
+		:"ir" (i) : "memory");
+	return c;
+}
+
+/**
+ * local_add_return - add and return
+ * @i: integer value to add
+ * @l: pointer to type local_t
+ *
+ * Atomically adds @i to @l and returns @i + @l
+ */
+static inline long local_add_return(long i, local_t *l)
+{
+	long __i;
+#ifdef CONFIG_M386
+	unsigned long flags;
+	if(unlikely(boot_cpu_data.x86 <= 3))
+		goto no_xadd;
+#endif
+	/* Modern 486+ processor including X86_64*/
+	__i = i;
+	__asm__ __volatile__(
+		"ASM_XADD %0, %1;"
+		:"+r" (i), "+m" (l->a.counter)
+		: : "memory");
+	return i + __i;
+
+#ifdef CONFIG_M386
+no_xadd: /* Legacy 386 processor */
+	local_irq_save(flags);
+	__i = local_read(l);
+	local_set(l, i + __i);
+	local_irq_restore(flags);
+	return i + __i;
 #endif
+}
+
+static inline long local_sub_return(long i, local_t *l)
+{
+	return local_add_return(-i,l);
+}
 
 #define local_inc_return(l)  (local_add_return(1,l))
 #define local_dec_return(l)  (local_sub_return(1,l))
diff --git a/include/asm-x86/local_32.h b/include/asm-x86/local_32.h
deleted file mode 100644
index ff6d1d2..0000000
--- a/include/asm-x86/local_32.h
+++ /dev/null
@@ -1,150 +0,0 @@
-#ifndef _ARCH_I386_LOCAL_H
-#define _ARCH_I386_LOCAL_H
-
-static inline void local_inc(local_t *l)
-{
-	__asm__ __volatile__(
-		"ASM_INC %0"
-		:"+m" (l->a.counter));
-}
-
-static inline void local_dec(local_t *l)
-{
-	__asm__ __volatile__(
-		"ASM_DEC %0"
-		:"+m" (l->a.counter));
-}
-
-static inline void local_add(long i, local_t *l)
-{
-	__asm__ __volatile__(
-		"ASM_ADD %1,%0"
-		:"+m" (l->a.counter)
-		:"ir" (i));
-}
-
-static inline void local_sub(long i, local_t *l)
-{
-	__asm__ __volatile__(
-		"ASM_SUB %1,%0"
-		:"+m" (l->a.counter)
-		:"ir" (i));
-}
-
-/**
- * local_sub_and_test - subtract value from variable and test result
- * @i: integer value to subtract
- * @l: pointer to type local_t
- *
- * Atomically subtracts @i from @l and returns
- * true if the result is zero, or false for all
- * other cases.
- */
-static inline int local_sub_and_test(long i, local_t *l)
-{
-	unsigned char c;
-
-	__asm__ __volatile__(
-		"ASM_SUB %2,%0; sete %1"
-		:"+m" (l->a.counter), "=qm" (c)
-		:"ir" (i) : "memory");
-	return c;
-}
-
-/**
- * local_dec_and_test - decrement and test
- * @l: pointer to type local_t
- *
- * Atomically decrements @l by 1 and
- * returns true if the result is 0, or false for all other
- * cases.
- */
-static inline int local_dec_and_test(local_t *l)
-{
-	unsigned char c;
-
-	__asm__ __volatile__(
-		"ASM_DEC %0; sete %1"
-		:"+m" (l->a.counter), "=qm" (c)
-		: : "memory");
-	return c != 0;
-}
-
-/**
- * local_inc_and_test - increment and test
- * @l: pointer to type local_t
- *
- * Atomically increments @l by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-static inline int local_inc_and_test(local_t *l)
-{
-	unsigned char c;
-
-	__asm__ __volatile__(
-		"ASM_INC %0; sete %1"
-		:"+m" (l->a.counter), "=qm" (c)
-		: : "memory");
-	return c != 0;
-}
-
-/**
- * local_add_negative - add and test if negative
- * @i: integer value to add
- * @l: pointer to type local_t
- *
- * Atomically adds @i to @l and returns true
- * if the result is negative, or false when
- * result is greater than or equal to zero.
- */
-static inline int local_add_negative(long i, local_t *l)
-{
-	unsigned char c;
-
-	__asm__ __volatile__(
-		"ASM_ADD %2,%0; sets %1"
-		:"+m" (l->a.counter), "=qm" (c)
-		:"ir" (i) : "memory");
-	return c;
-}
-
-/**
- * local_add_return - add and return
- * @i: integer value to add
- * @l: pointer to type local_t
- *
- * Atomically adds @i to @l and returns @i + @l
- */
-static inline long local_add_return(long i, local_t *l)
-{
-	long __i;
-#ifdef CONFIG_M386
-	unsigned long flags;
-	if(unlikely(boot_cpu_data.x86 <= 3))
-		goto no_xadd;
-#endif
-	/* Modern 486+ processor */
-	__i = i;
-	__asm__ __volatile__(
-		"ASM_XADD %0, %1;"
-		:"+r" (i), "+m" (l->a.counter)
-		: : "memory");
-	return i + __i;
-
-#ifdef CONFIG_M386
-no_xadd: /* Legacy 386 processor */
-	local_irq_save(flags);
-	__i = local_read(l);
-	local_set(l, i + __i);
-	local_irq_restore(flags);
-	return i + __i;
-#endif
-}
-
-static inline long local_sub_return(long i, local_t *l)
-{
-	return local_add_return(-i,l);
-}
-
-#endif /* _ARCH_I386_LOCAL_H */
diff --git a/include/asm-x86/local_64.h b/include/asm-x86/local_64.h
deleted file mode 100644
index 0e92bc6..0000000
--- a/include/asm-x86/local_64.h
+++ /dev/null
@@ -1,134 +0,0 @@
-#ifndef _ARCH_X8664_LOCAL_H
-#define _ARCH_X8664_LOCAL_H
-
-static inline void local_inc(local_t *l)
-{
-	__asm__ __volatile__(
-		"ASM_INC %0"
-		:"+m" (l->a.counter));
-}
-
-static inline void local_dec(local_t *l)
-{
-	__asm__ __volatile__(
-		"ASM_DEC %0"
-		:"+m" (l->a.counter));
-}
-
-static inline void local_add(long i, local_t *l)
-{
-	__asm__ __volatile__(
-		"ASM_ADD %1,%0"
-		:"+m" (l->a.counter)
-		:"ir" (i));
-}
-
-static inline void local_sub(long i, local_t *l)
-{
-	__asm__ __volatile__(
-		"ASM_SUB %1,%0"
-		:"+m" (l->a.counter)
-		:"ir" (i));
-}
-
-/**
- * local_sub_and_test - subtract value from variable and test result
- * @i: integer value to subtract
- * @l: pointer to type local_t
- *
- * Atomically subtracts @i from @l and returns
- * true if the result is zero, or false for all
- * other cases.
- */
-static inline int local_sub_and_test(long i, local_t *l)
-{
-	unsigned char c;
-
-	__asm__ __volatile__(
-		"ASM_SUB %2,%0; sete %1"
-		:"+m" (l->a.counter), "=qm" (c)
-		:"ir" (i) : "memory");
-	return c;
-}
-
-/**
- * local_dec_and_test - decrement and test
- * @l: pointer to type local_t
- *
- * Atomically decrements @l by 1 and
- * returns true if the result is 0, or false for all other
- * cases.
- */
-static inline int local_dec_and_test(local_t *l)
-{
-	unsigned char c;
-
-	__asm__ __volatile__(
-		"ASM_DEC %0; sete %1"
-		:"+m" (l->a.counter), "=qm" (c)
-		: : "memory");
-	return c != 0;
-}
-
-/**
- * local_inc_and_test - increment and test
- * @l: pointer to type local_t
- *
- * Atomically increments @l by 1
- * and returns true if the result is zero, or false for all
- * other cases.
- */
-static inline int local_inc_and_test(local_t *l)
-{
-	unsigned char c;
-
-	__asm__ __volatile__(
-		"ASM_INC %0; sete %1"
-		:"+m" (l->a.counter), "=qm" (c)
-		: : "memory");
-	return c != 0;
-}
-
-/**
- * local_add_negative - add and test if negative
- * @i: integer value to add
- * @l: pointer to type local_t
- *
- * Atomically adds @i to @l and returns true
- * if the result is negative, or false when
- * result is greater than or equal to zero.
- */
-static inline int local_add_negative(long i, local_t *l)
-{
-	unsigned char c;
-
-	__asm__ __volatile__(
-		"ASM_ADD %2,%0; sets %1"
-		:"+m" (l->a.counter), "=qm" (c)
-		:"ir" (i) : "memory");
-	return c;
-}
-
-/**
- * local_add_return - add and return
- * @i: integer value to add
- * @l: pointer to type local_t
- *
- * Atomically adds @i to @l and returns @i + @l
- */
-static inline long local_add_return(long i, local_t *l)
-{
-	long __i = i;
-	__asm__ __volatile__(
-		"ASM_XADD %0, %1;"
-		:"+r" (i), "+m" (l->a.counter)
-		: : "memory");
-	return i + __i;
-}
-
-static inline long local_sub_return(long i, local_t *l)
-{
-	return local_add_return(-i,l);
-}
-
-#endif /* _ARCH_X8664_LOCAL_H */
-- 
1.5.4.rc0.1083.gf568


^ permalink raw reply related	[flat|nested] 7+ messages in thread

* Re: [PATCH 4/4] x86: Final unification of local_{32|64}.h
  2007-12-16 12:04 [PATCH 4/4] x86: Final unification of local_{32|64}.h Harvey Harrison
@ 2007-12-16 15:31 ` Ingo Molnar
  2007-12-16 22:48   ` H. Peter Anvin
  0 siblings, 1 reply; 7+ messages in thread
From: Ingo Molnar @ 2007-12-16 15:31 UTC (permalink / raw)
  To: Harvey Harrison; +Cc: H. Peter Anvin, LKML, Thomas Gleixner


* Harvey Harrison <harvey.harrison@gmail.com> wrote:

> No differences except for the definition of local_add_return on X86_64. 
> The X86_32 version is just fine as it is protected with ifdef 
> CONFIG_M386 so use it directly.

thanks, i've applied your 4 patches to x86.git.

btw., now that we have a single unified file, it might make sense to fix 
these checkpatch complaints:

  total: 10 errors, 1 warnings, 257 lines checked

in case you are interested ;-)

	Ingo

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH 4/4] x86: Final unification of local_{32|64}.h
  2007-12-16 15:31 ` Ingo Molnar
@ 2007-12-16 22:48   ` H. Peter Anvin
  2007-12-16 23:38     ` Harvey Harrison
  0 siblings, 1 reply; 7+ messages in thread
From: H. Peter Anvin @ 2007-12-16 22:48 UTC (permalink / raw)
  To: Ingo Molnar; +Cc: Harvey Harrison, LKML, Thomas Gleixner

Ingo Molnar wrote:
> * Harvey Harrison <harvey.harrison@gmail.com> wrote:
> 
>> No differences except for the definition of local_add_return on X86_64. 
>> The X86_32 version is just fine as it is protected with ifdef 
>> CONFIG_M386 so use it directly.
> 
> thanks, i've applied your 4 patches to x86.git.
> 
> btw., now that we have a single unified file, it might make sense to fix 
> these checkpatch complaints:
> 
>   total: 10 errors, 1 warnings, 257 lines checked
> 
> in case you are interested ;-)
> 

Please pull them pending revision, they're broken (macros don't expand 
inside strings...)

	-hpa

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH 4/4] x86: Final unification of local_{32|64}.h
  2007-12-16 22:48   ` H. Peter Anvin
@ 2007-12-16 23:38     ` Harvey Harrison
  2007-12-16 23:48       ` H. Peter Anvin
  0 siblings, 1 reply; 7+ messages in thread
From: Harvey Harrison @ 2007-12-16 23:38 UTC (permalink / raw)
  To: H. Peter Anvin; +Cc: Ingo Molnar, LKML, Thomas Gleixner

On Sun, 2007-12-16 at 14:48 -0800, H. Peter Anvin wrote:
> Ingo Molnar wrote:
> > * Harvey Harrison <harvey.harrison@gmail.com> wrote:
> > 
> >> No differences except for the definition of local_add_return on X86_64. 
> >> The X86_32 version is just fine as it is protected with ifdef 
> >> CONFIG_M386 so use it directly.
> > 
> > thanks, i've applied your 4 patches to x86.git.
> > 
> > btw., now that we have a single unified file, it might make sense to fix 
> > these checkpatch complaints:
> > 
> >   total: 10 errors, 1 warnings, 257 lines checked
> > 
> > in case you are interested ;-)
> > 
> 
> Please pull them pending revision, they're broken (macros don't expand 
> inside strings...)
> 

Do you have a stylistic preference between these two options:

Option 1) Rely on CPP string constant concatenation

// possibly include trailing space here to avoid remembering
// leading space on the register names
# define _ASM_INC "incl" 

static inline void local_inc(local_t *l)
{
	__asm__ __volatile__(
		_ASM_INC " %0"
		:"+m" (l->a.counter));
}

Option 2) Macro Expansion

# define _ASM_INC(r) \"incl\ ##r\"

static inline void local_inc(local_t *l)
{
	__asm__ __volatile__(
		_ASM_INC(%0);
		:"+m" (l->a.counter));
}

Or some other suggestion.

Harvey




^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH 4/4] x86: Final unification of local_{32|64}.h
  2007-12-16 23:38     ` Harvey Harrison
@ 2007-12-16 23:48       ` H. Peter Anvin
  2007-12-16 23:59         ` Harvey Harrison
  0 siblings, 1 reply; 7+ messages in thread
From: H. Peter Anvin @ 2007-12-16 23:48 UTC (permalink / raw)
  To: Harvey Harrison; +Cc: Ingo Molnar, LKML, Thomas Gleixner

Harvey Harrison wrote:
> 
> Do you have a stylistic preference between these two options:
> 
> Option 1) Rely on CPP string constant concatenation
> 
> // possibly include trailing space here to avoid remembering
> // leading space on the register names
> # define _ASM_INC "incl" 
> 
> static inline void local_inc(local_t *l)
> {
> 	__asm__ __volatile__(
> 		_ASM_INC " %0"
> 		:"+m" (l->a.counter));
> }
> 

This is what I have used up to this point, except including the space in 
the macro:

#define _ASM_INC " incl "

	_ASM_INC "%0"

	-hpa

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH 4/4] x86: Final unification of local_{32|64}.h
  2007-12-16 23:48       ` H. Peter Anvin
@ 2007-12-16 23:59         ` Harvey Harrison
  2007-12-17  0:06           ` H. Peter Anvin
  0 siblings, 1 reply; 7+ messages in thread
From: Harvey Harrison @ 2007-12-16 23:59 UTC (permalink / raw)
  To: H. Peter Anvin; +Cc: Ingo Molnar, LKML, Thomas Gleixner

On Sun, 2007-12-16 at 15:48 -0800, H. Peter Anvin wrote:
> Harvey Harrison wrote:
> > 
> > Do you have a stylistic preference between these two options:
> > 
> > Option 1) Rely on CPP string constant concatenation
> > 
> > // possibly include trailing space here to avoid remembering
> > // leading space on the register names
> > # define _ASM_INC "incl" 
> > 
> > static inline void local_inc(local_t *l)
> > {
> > 	__asm__ __volatile__(
> > 		_ASM_INC " %0"
> > 		:"+m" (l->a.counter));
> > }
> > 
> 
> This is what I have used up to this point, except including the space in 
> the macro:
> 
> #define _ASM_INC " incl "
> 
> 	_ASM_INC "%0"

Not sure if you were just tossing a space on the end of my example,
but do you also put a leading space on the " incl " in addition to
the trailing space?

Harvey


^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH 4/4] x86: Final unification of local_{32|64}.h
  2007-12-16 23:59         ` Harvey Harrison
@ 2007-12-17  0:06           ` H. Peter Anvin
  0 siblings, 0 replies; 7+ messages in thread
From: H. Peter Anvin @ 2007-12-17  0:06 UTC (permalink / raw)
  To: Harvey Harrison; +Cc: Ingo Molnar, LKML, Thomas Gleixner

Harvey Harrison wrote:
>>
>> #define _ASM_INC " incl "
>>
>> 	_ASM_INC "%0"
> 
> Not sure if you were just tossing a space on the end of my example,
> but do you also put a leading space on the " incl " in addition to
> the trailing space?
> 

That is what I have, again, just to make mistakes harder.

	-hpa

^ permalink raw reply	[flat|nested] 7+ messages in thread

end of thread, other threads:[~2007-12-17  0:07 UTC | newest]

Thread overview: 7+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2007-12-16 12:04 [PATCH 4/4] x86: Final unification of local_{32|64}.h Harvey Harrison
2007-12-16 15:31 ` Ingo Molnar
2007-12-16 22:48   ` H. Peter Anvin
2007-12-16 23:38     ` Harvey Harrison
2007-12-16 23:48       ` H. Peter Anvin
2007-12-16 23:59         ` Harvey Harrison
2007-12-17  0:06           ` H. Peter Anvin

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).