* [PATCH 1/3] bitops: add ifdef CONFIG_GENERIC_FIND_BIT_LE guard
@ 2011-04-20 14:20 Akinobu Mita
2011-04-20 14:20 ` [PATCH 2/3] arm: use asm-generic/bitops/le.h Akinobu Mita
2011-04-20 14:31 ` [PATCH 1/3] bitops: add ifdef CONFIG_GENERIC_FIND_BIT_LE guard Arnd Bergmann
0 siblings, 2 replies; 5+ messages in thread
From: Akinobu Mita @ 2011-04-20 14:20 UTC (permalink / raw)
To: linux-arm-kernel
Some architectures have optimized find_*_bit_le() as static inline
functions, but their other little-endian bitops are identical to the
generic versions.
This adds an #ifdef CONFIG_GENERIC_FIND_BIT_LE guard around the
find_*_bit_le() declarations in asm-generic/bitops/le.h so that those
architectures can use this header file.
Signed-off-by: Akinobu Mita <akinobu.mita@gmail.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: linux-arch@vger.kernel.org
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: linux390@de.ibm.com
Cc: linux-s390@vger.kernel.org
Cc: Russell King <linux@arm.linux.org.uk>
Cc: linux-arm-kernel@lists.infradead.org
---
include/asm-generic/bitops/le.h | 4 ++++
1 files changed, 4 insertions(+), 0 deletions(-)
diff --git a/include/asm-generic/bitops/le.h b/include/asm-generic/bitops/le.h
index 946a21b..bd2253e 100644
--- a/include/asm-generic/bitops/le.h
+++ b/include/asm-generic/bitops/le.h
@@ -30,6 +30,8 @@ static inline unsigned long find_first_zero_bit_le(const void *addr,
#define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7)
+#ifdef CONFIG_GENERIC_FIND_BIT_LE
+
extern unsigned long find_next_zero_bit_le(const void *addr,
unsigned long size, unsigned long offset);
extern unsigned long find_next_bit_le(const void *addr,
@@ -38,6 +40,8 @@ extern unsigned long find_next_bit_le(const void *addr,
#define find_first_zero_bit_le(addr, size) \
find_next_zero_bit_le((addr), (size), 0)
+#endif /* CONFIG_GENERIC_FIND_BIT_LE */
+
#else
#error "Please fix <asm/byteorder.h>"
#endif
--
1.7.4.4
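For context, a minimal sketch of the situation this guard is meant to support,
using a hypothetical architecture header (the "foo" name and helper are made up
for illustration and do not come from any real port):

/* arch/foo/include/asm/bitops.h -- hypothetical architecture */

/* The architecture supplies its own optimized find_*_bit_le(). */
extern unsigned long foo_find_next_zero_bit_le(const void *addr,
		unsigned long size, unsigned long offset);

static inline unsigned long find_next_zero_bit_le(const void *addr,
		unsigned long size, unsigned long offset)
{
	return foo_find_next_zero_bit_le(addr, size, offset);
}

/*
 * With CONFIG_GENERIC_FIND_BIT_LE left unset, the guard added by the
 * patch above hides the generic find_*_bit_le() declarations, so this
 * include no longer clashes with the definitions above while the
 * remaining little-endian bitops (test_bit_le() and friends) are still
 * picked up from the generic header.
 */
#include <asm-generic/bitops/le.h>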
* [PATCH 2/3] arm: use asm-generic/bitops/le.h
2011-04-20 14:20 [PATCH 1/3] bitops: add ifdef CONFIG_GENERIC_FIND_BIT_LE guard Akinobu Mita
@ 2011-04-20 14:20 ` Akinobu Mita
2011-04-20 14:31 ` [PATCH 1/3] bitops: add ifdef CONFIG_GENERIC_FIND_BIT_LE guard Arnd Bergmann
1 sibling, 0 replies; 5+ messages in thread
From: Akinobu Mita @ 2011-04-20 14:20 UTC (permalink / raw)
To: linux-arm-kernel
The previous change makes it possible to use asm-generic/bitops/le.h on ARM.
Signed-off-by: Akinobu Mita <akinobu.mita@gmail.com>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: linux-arm-kernel@lists.infradead.org
---
arch/arm/include/asm/bitops.h | 41 ++++-------------------------------------
1 files changed, 4 insertions(+), 37 deletions(-)
diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h
index ec2e0d4..4615e1b 100644
--- a/arch/arm/include/asm/bitops.h
+++ b/arch/arm/include/asm/bitops.h
@@ -203,8 +203,6 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
#define find_first_bit(p,sz) _find_first_bit_le(p,sz)
#define find_next_bit(p,sz,off) _find_next_bit_le(p,sz,off)
-#define WORD_BITOFF_TO_LE(x) ((x))
-
#else
/*
* These are the big endian, atomic definitions.
@@ -214,8 +212,6 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
#define find_first_bit(p,sz) _find_first_bit_be(p,sz)
#define find_next_bit(p,sz,off) _find_next_bit_be(p,sz,off)
-#define WORD_BITOFF_TO_LE(x) ((x) ^ 0x18)
-
#endif
#if __LINUX_ARM_ARCH__ < 5
@@ -287,40 +283,9 @@ static inline int fls(int x)
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>
-static inline void __set_bit_le(int nr, void *addr)
-{
- __set_bit(WORD_BITOFF_TO_LE(nr), addr);
-}
-
-static inline void __clear_bit_le(int nr, void *addr)
-{
- __clear_bit(WORD_BITOFF_TO_LE(nr), addr);
-}
-
-static inline int __test_and_set_bit_le(int nr, void *addr)
-{
- return __test_and_set_bit(WORD_BITOFF_TO_LE(nr), addr);
-}
+#include <asm-generic/bitops/le.h>
-static inline int test_and_set_bit_le(int nr, void *addr)
-{
- return test_and_set_bit(WORD_BITOFF_TO_LE(nr), addr);
-}
-
-static inline int __test_and_clear_bit_le(int nr, void *addr)
-{
- return __test_and_clear_bit(WORD_BITOFF_TO_LE(nr), addr);
-}
-
-static inline int test_and_clear_bit_le(int nr, void *addr)
-{
- return test_and_clear_bit(WORD_BITOFF_TO_LE(nr), addr);
-}
-
-static inline int test_bit_le(int nr, const void *addr)
-{
- return test_bit(WORD_BITOFF_TO_LE(nr), addr);
-}
+#ifdef __ARMEB__
static inline int find_first_zero_bit_le(const void *p, unsigned size)
{
@@ -337,6 +302,8 @@ static inline int find_next_bit_le(const void *p, int size, int offset)
return _find_next_bit_le(p, size, offset);
}
+#endif
+
/*
* Ext2 is defined to use little-endian byte ordering.
*/
--
1.7.4.4
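The wrappers removed above can come from the generic header because its
big-endian swizzle constant works out to the same value as the dropped
WORD_BITOFF_TO_LE(). A rough sketch of the equivalence on 32-bit ARM; the
test_bit_le() shown here is a simplified illustration of how the generic
header applies the swizzle, not a verbatim copy of it:

/* include/asm-generic/bitops/le.h, big-endian branch: */
#define BITOP_LE_SWIZZLE	((BITS_PER_LONG-1) & ~0x7)

/*
 * With BITS_PER_LONG == 32 this is (31 & ~7) == 24 == 0x18, the same
 * XOR constant as the removed WORD_BITOFF_TO_LE(x) ((x) ^ 0x18) used
 * on big-endian ARM; on little-endian no swizzle is applied, matching
 * WORD_BITOFF_TO_LE(x) ((x)).
 */

/* Simplified illustration of a generic little-endian bitop: */
static inline int test_bit_le(int nr, const void *addr)
{
	return test_bit(nr ^ BITOP_LE_SWIZZLE, addr);
}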
* [PATCH 1/3] bitops: add ifdef CONFIG_GENERIC_FIND_BIT_LE guard
2011-04-20 14:20 [PATCH 1/3] bitops: add ifdef CONFIG_GENERIC_FIND_BIT_LE guard Akinobu Mita
2011-04-20 14:20 ` [PATCH 2/3] arm: use asm-generic/bitops/le.h Akinobu Mita
@ 2011-04-20 14:31 ` Arnd Bergmann
2011-04-20 22:59 ` Akinobu Mita
1 sibling, 1 reply; 5+ messages in thread
From: Arnd Bergmann @ 2011-04-20 14:31 UTC (permalink / raw)
To: linux-arm-kernel
On Wednesday 20 April 2011, Akinobu Mita wrote:
> index 946a21b..bd2253e 100644
> --- a/include/asm-generic/bitops/le.h
> +++ b/include/asm-generic/bitops/le.h
> @@ -30,6 +30,8 @@ static inline unsigned long find_first_zero_bit_le(const void *addr,
>
> #define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7)
>
> +#ifdef CONFIG_GENERIC_FIND_BIT_LE
> +
> extern unsigned long find_next_zero_bit_le(const void *addr,
> unsigned long size, unsigned long offset);
> extern unsigned long find_next_bit_le(const void *addr,
> @@ -38,6 +40,8 @@ extern unsigned long find_next_bit_le(const void *addr,
> #define find_first_zero_bit_le(addr, size) \
> find_next_zero_bit_le((addr), (size), 0)
>
> +#endif /* CONFIG_GENERIC_FIND_BIT_LE */
> +
> #else
> #error "Please fix <asm/byteorder.h>"
> #endif
The style that we normally use in asm-generic is to test the macro itself
for existence, so in asm-generic, do:
#ifndef find_next_zero_bit_le
extern unsigned long find_next_zero_bit_le(const void *addr,
unsigned long size, unsigned long offset);
#endif
and in the architectures, write
static inline unsigned long find_next_zero_bit_le(const void *addr,
unsigned long size, unsigned long offset)
#define find_next_zero_bit_le find_next_zero_bit_le
I guess we can do the #ifdef separately for each of the three macros,
or choose one of them to use as a key.
Arnd
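A minimal end-to-end sketch of the convention Arnd describes; the
foo_find_next_zero_bit_le() helper on the architecture side is a made-up name
used only for illustration:

/* include/asm-generic/bitops/le.h: declare the generic version only
 * if no architecture override has already claimed the name. */
#ifndef find_next_zero_bit_le
extern unsigned long find_next_zero_bit_le(const void *addr,
		unsigned long size, unsigned long offset);
#endif

/* <asm/bitops.h> of an architecture with an optimized version: define
 * the function and claim the name so the generic declaration above is
 * skipped. */
extern unsigned long foo_find_next_zero_bit_le(const void *addr,
		unsigned long size, unsigned long offset);

static inline unsigned long find_next_zero_bit_le(const void *addr,
		unsigned long size, unsigned long offset)
{
	return foo_find_next_zero_bit_le(addr, size, offset);
}
#define find_next_zero_bit_le find_next_zero_bit_le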
* [PATCH 1/3] bitops: add ifdef CONFIG_GENERIC_FIND_BIT_LE guard
2011-04-20 14:31 ` [PATCH 1/3] bitops: add ifdef CONFIG_GENERIC_FIND_BIT_LE guard Arnd Bergmann
@ 2011-04-20 22:59 ` Akinobu Mita
2011-04-21 9:03 ` Arnd Bergmann
0 siblings, 1 reply; 5+ messages in thread
From: Akinobu Mita @ 2011-04-20 22:59 UTC (permalink / raw)
To: linux-arm-kernel
2011/4/20 Arnd Bergmann <arnd@arndb.de>:
> On Wednesday 20 April 2011, Akinobu Mita wrote:
>> index 946a21b..bd2253e 100644
>> --- a/include/asm-generic/bitops/le.h
>> +++ b/include/asm-generic/bitops/le.h
>> @@ -30,6 +30,8 @@ static inline unsigned long find_first_zero_bit_le(const void *addr,
>>
>> #define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7)
>>
>> +#ifdef CONFIG_GENERIC_FIND_BIT_LE
>> +
>> extern unsigned long find_next_zero_bit_le(const void *addr,
>> unsigned long size, unsigned long offset);
>> extern unsigned long find_next_bit_le(const void *addr,
>> @@ -38,6 +40,8 @@ extern unsigned long find_next_bit_le(const void *addr,
>> #define find_first_zero_bit_le(addr, size) \
>> find_next_zero_bit_le((addr), (size), 0)
>>
>> +#endif /* CONFIG_GENERIC_FIND_BIT_LE */
>> +
>> #else
>> #error "Please fix <asm/byteorder.h>"
>> #endif
>
> The style that we normally use in asm-generic is to test the macro itself
> for existence, so in asm-generic, do:
>
> #ifndef find_next_zero_bit_le
> extern unsigned long find_next_zero_bit_le(const void *addr,
> unsigned long size, unsigned long offset);
> #endif
>
> and in the architectures, write
>
> static inline unsigned long find_next_zero_bit_le(const void *addr,
> unsigned long size, unsigned long offset)
> #define find_next_zero_bit_le find_next_zero_bit_le
>
> I guess we can do the #ifdef separately for each of the three macros,
> or choose one of them to use as a key.
I see.
Should we also kill the CONFIG_GENERIC_FIND_BIT_LE option completely,
then add the #ifdef for each find_*() in lib/find_next_bit.c, and build
it unconditionally?
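A rough sketch of what that proposal could look like, reusing the #ifndef
convention from Arnd's reply (the bit-by-bit loop is for illustration only;
the real generic code works a word at a time, and this is not necessarily
what was eventually merged):

/* lib/find_next_bit.c, built unconditionally */
#include <linux/bitops.h>
#include <linux/module.h>

#ifndef find_next_bit_le
unsigned long find_next_bit_le(const void *addr, unsigned long size,
		unsigned long offset)
{
	/* Simplified bit-by-bit scan; see the note above. */
	while (offset < size) {
		if (test_bit_le(offset, addr))
			return offset;
		offset++;
	}
	return size;
}
EXPORT_SYMBOL(find_next_bit_le);
#endif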
* [PATCH 1/3] bitops: add ifdef CONFIG_GENERIC_FIND_BIT_LE guard
2011-04-20 22:59 ` Akinobu Mita
@ 2011-04-21 9:03 ` Arnd Bergmann
0 siblings, 0 replies; 5+ messages in thread
From: Arnd Bergmann @ 2011-04-21 9:03 UTC (permalink / raw)
To: linux-arm-kernel
On Thursday 21 April 2011, Akinobu Mita wrote:
> Should we also kill CONFIG_GENERIC_FIND_BIT_LE option comletely,
> then add the #ifdef for each find_*() in lib/find_next_bit.c and always build
> it unconditionally ?
I think that would be more consistent. It's not a big difference anyway, so
do it only if you feel motivated. There are a lot of other things that could
be changed this way; I'd only change it while reworking other aspects, in
order to avoid accidentally breaking things.
Arnd