* [PATCH 1/2] byteorder: add a new include/linux/swab.h to define byteswapping functions
@ 2008-07-15 19:01 Harvey Harrison
  2008-07-17 22:59 ` Andrew Morton
  0 siblings, 1 reply; 3+ messages in thread
From: Harvey Harrison @ 2008-07-15 19:01 UTC (permalink / raw)
  To: Linus Torvalds; +Cc: Andrew Morton, linux-arch

Collect the implementations from include/linux/byteorder/swab.h and swabb.h
into swab.h.

The functionality provided covers:
u16 swab16(u16 val) - return a byteswapped 16 bit value
u32 swab32(u32 val) - return a byteswapped 32 bit value
u64 swab64(u64 val) - return a byteswapped 64 bit value
u32 swahw32(u32 val) - return a wordswapped 32 bit value
u32 swahb32(u32 val) - return a high/low byteswapped 32 bit value

Similar to above, but return swapped value from a naturally-aligned pointer
u16 swab16p(u16 *p)
u32 swab32p(u32 *p)
u64 swab64p(u64 *p)
u32 swahw32p(u32 *p)
u32 swahb32p(u32 *p)

Similar to above, but swap the value in-place (in-situ)
void swab16s(u16 *p)
void swab32s(u32 *p)
void swab64s(u64 *p)
void swahw32s(u32 *p)
void swahb32s(u32 *p)
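
For illustration, a minimal usage sketch of the three flavours above (the
values and the buffer are made up, and this assumes the arch is already wired
up to the new header):

	#include <linux/swab.h>

	void swab_example(void)
	{
		__u16 a;
		__u32 b;
		__u32 buf[2] = { 0x12345678, 0xdeadbeef };

		a = swab16(0x1234);		/* returns 0x3412 */

		/* read a swapped value through a naturally-aligned pointer */
		b = swab32p(&buf[0]);		/* returns 0x78563412 */

		/* swap in place */
		swab32s(&buf[1]);		/* buf[1] is now 0xefbeadde */

		(void)a; (void)b;
	}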

Arches can override any of these with an optimized version by defining the
corresponding HAVE_ARCH_* symbol and an inline in their asm/byteorder.h
(example given for swab16()):

#define HAVE_ARCH_SWAB16
static inline __u16 __arch_swab16(__u16 val) { ... }

Signed-off-by: Harvey Harrison <harvey.harrison@gmail.com>
---
Linus, please apply these two; they can't break anything because they only add
two headers that nothing uses yet.  I'll submit the arch patches through the
individual arch maintainers once these hit mainline.

 include/linux/swab.h |  309 ++++++++++++++++++++++++++++++++++++++++++++++++++
 1 files changed, 309 insertions(+), 0 deletions(-)

diff --git a/include/linux/swab.h b/include/linux/swab.h
new file mode 100644
index 0000000..8f955b2
--- /dev/null
+++ b/include/linux/swab.h
@@ -0,0 +1,309 @@
+#ifndef _LINUX_SWAB_H
+#define _LINUX_SWAB_H
+
+#include <linux/types.h>
+#include <linux/compiler.h>
+#include <asm/byteorder.h>
+
+/*
+ * casts are necessary for constants, because we never know for sure how
+ * U/UL/ULL map to __u16, __u32, __u64. At least not in a portable way.
+ */
+#define __const_swab16(x) ((__u16)(				\
+	(((__u16)(x) & (__u16)0x00ffU) << 8) |			\
+	(((__u16)(x) & (__u16)0xff00U) >> 8)))
+
+#define __const_swab32(x) ((__u32)(				\
+	(((__u32)(x) & (__u32)0x000000ffUL) << 24) |		\
+	(((__u32)(x) & (__u32)0x0000ff00UL) <<  8) |		\
+	(((__u32)(x) & (__u32)0x00ff0000UL) >>  8) |		\
+	(((__u32)(x) & (__u32)0xff000000UL) >> 24)))
+
+#define __const_swab64(x) ((__u64)(				\
+	(((__u64)(x) & (__u64)0x00000000000000ffULL) << 56) |	\
+	(((__u64)(x) & (__u64)0x000000000000ff00ULL) << 40) |	\
+	(((__u64)(x) & (__u64)0x0000000000ff0000ULL) << 24) |	\
+	(((__u64)(x) & (__u64)0x00000000ff000000ULL) <<  8) |	\
+	(((__u64)(x) & (__u64)0x000000ff00000000ULL) >>  8) |	\
+	(((__u64)(x) & (__u64)0x0000ff0000000000ULL) >> 24) |	\
+	(((__u64)(x) & (__u64)0x00ff000000000000ULL) >> 40) |	\
+	(((__u64)(x) & (__u64)0xff00000000000000ULL) >> 56)))
+
+#define __const_swahw32(x) ((__u32)(				\
+	(((__u32)(x) & (__u32)0x0000ffffUL) << 16) |		\
+	(((__u32)(x) & (__u32)0xffff0000UL) >> 16)))
+
+#define __const_swahb32(x) ((__u32)(				\
+	(((__u32)(x) & (__u32)0x00ff00ffUL) << 8) |		\
+	(((__u32)(x) & (__u32)0xff00ff00UL) >> 8)))
+
+/*
+ * Implement the following as inlines, but define the interface using
+ * macros to allow constant folding when possible:
+ * ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32
+ */
+
+static inline __attribute_const__ __u16 ___swab16(__u16 val)
+{
+#ifdef HAVE_ARCH_SWAB16
+	return __arch_swab16(val);
+#elif defined(HAVE_ARCH_SWAB16P)
+	return __arch_swab16p(&val);
+#else
+	return __const_swab16(val);
+#endif
+}
+
+static inline __attribute_const__ __u32 ___swab32(__u32 val)
+{
+#ifdef HAVE_ARCH_SWAB32
+	return __arch_swab32(val);
+#elif defined(HAVE_ARCH_SWAB32P)
+	return __arch_swab32p(&val);
+#else
+	return __const_swab32(val);
+#endif
+}
+
+static inline __attribute_const__ __u64 ___swab64(__u64 val)
+{
+#ifdef HAVE_ARCH_SWAB64
+	return __arch_swab64(val);
+#elif defined(HAVE_ARCH_SWAB64P)
+	return __arch_swab64p(&val);
+#elif defined(__SWAB_64_THRU_32__)
+	__u32 h = val >> 32;
+	__u32 l = val & ((1ULL << 32) - 1);
+	return (((__u64)___swab32(l)) << 32) | ((__u64)(___swab32(h)));
+#else
+	return __const_swab64(val);
+#endif
+}
+
+static inline __attribute_const__ __u32 ___swahw32(__u32 val)
+{
+#ifdef HAVE_ARCH_SWAHW32
+	return __arch_swahw32(val);
+#elif defined(HAVE_ARCH_SWAHW32P)
+	return __arch_swahw32p(&val);
+#else
+	return __const_swahw32(val);
+#endif
+}
+
+static inline __attribute_const__ __u32 ___swahb32(__u32 val)
+{
+#ifdef HAVE_ARCH_SWAHB32
+	return __arch_swahb32(val);
+#elif defined(HAVE_ARCH_SWAHB32P)
+	return __arch_swahb32p(&val);
+#else
+	return __const_swahb32(val);
+#endif
+}
+
+/**
+ * __swab16 - return a byteswapped 16-bit value
+ * @x: value to byteswap
+ */
+#define __swab16(x)				\
+	(__builtin_constant_p((__u16)(x)) ?	\
+	__const_swab16((x)) :			\
+	___swab16((x)))
+
+/**
+ * __swab32 - return a byteswapped 32-bit value
+ * @x: value to byteswap
+ */
+#define __swab32(x)				\
+	(__builtin_constant_p((__u32)(x)) ?	\
+	__const_swab32((x)) :			\
+	___swab32((x)))
+
+/**
+ * __swab64 - return a byteswapped 64-bit value
+ * @x: value to byteswap
+ */
+#define __swab64(x)				\
+	(__builtin_constant_p((__u64)(x)) ?	\
+	__const_swab64((x)) :			\
+	___swab64((x)))
+
+/**
+ * __swahw32 - return a word-swapped 32-bit value
+ * @x: value to wordswap
+ *
+ * __swahw32(0x12340000) is 0x00001234
+ */
+#define __swahw32(x)				\
+	(__builtin_constant_p((__u32)(x)) ?	\
+	__const_swahw32((x)) :			\
+	___swahw32((x)))
+
+/**
+ * __swahb32 - return a high and low byte-swapped 32-bit value
+ * @x: value to byteswap
+ *
+ * __swahb32(0x12345678) is 0x34127856
+ */
+#define __swahb32(x)				\
+	(__builtin_constant_p((__u32)(x)) ?	\
+	__const_swahb32((x)) :			\
+	___swahb32((x)))
+
+/**
+ * __swab16p - return a byteswapped 16-bit value from a pointer
+ * @p: pointer to a naturally-aligned 16-bit value
+ */
+static inline __u16 __swab16p(const __u16 *p)
+{
+#ifdef HAVE_ARCH_SWAB16P
+	return __arch_swab16p(p);
+#else
+	return __swab16(*p);
+#endif
+}
+
+/**
+ * __swab32p - return a byteswapped 32-bit value from a pointer
+ * @p: pointer to a naturally-aligned 32-bit value
+ */
+static inline __u32 __swab32p(const __u32 *p)
+{
+#ifdef HAVE_ARCH_SWAB32P
+	return __arch_swab32p(p);
+#else
+	return __swab32(*p);
+#endif
+}
+
+/**
+ * __swab64p - return a byteswapped 64-bit value from a pointer
+ * @p: pointer to a naturally-aligned 64-bit value
+ */
+static inline __u64 __swab64p(const __u64 *p)
+{
+#ifdef HAVE_ARCH_SWAB64P
+	return __arch_swab64p(p);
+#else
+	return __swab64(*p);
+#endif
+}
+
+/**
+ * __swahw32p - return a wordswapped 32-bit value from a pointer
+ * @p: pointer to a naturally-aligned 32-bit value
+ *
+ * See __swahw32() for details of wordswapping.
+ */
+static inline __u32 __swahw32p(const __u32 *p)
+{
+#ifdef HAVE_ARCH_SWAHW32P
+	return __arch_swahw32p(p);
+#else
+	return __swahw32(*p);
+#endif
+}
+
+/**
+ * __swahb32p - return a high and low byteswapped 32-bit value from a pointer
+ * @p: pointer to a naturally-aligned 32-bit value
+ *
+ * See __swahb32() for details of high/low byteswapping.
+ */
+static inline __u32 __swahb32p(const __u32 *p)
+{
+#ifdef HAVE_ARCH_SWAHB32P
+	return __arch_swahb32p(p);
+#else
+	return __swahb32(*p);
+#endif
+}
+
+/**
+ * __swab16s - byteswap a 16-bit value in-place
+ * @p: pointer to a naturally-aligned 16-bit value
+ */
+static inline void __swab16s(__u16 *p)
+{
+#ifdef HAVE_ARCH_SWAB16S
+	__arch_swab16s(p);
+#else
+	*p = __swab16p(p);
+#endif
+}
+/**
+ * __swab32s - byteswap a 32-bit value in-place
+ * @p: pointer to a naturally-aligned 32-bit value
+ */
+static inline void __swab32s(__u32 *p)
+{
+#ifdef HAVE_ARCH_SWAB32S
+	__arch_swab32s(p);
+#else
+	*p = __swab32p(p);
+#endif
+}
+
+/**
+ * __swab64s - byteswap a 64-bit value in-place
+ * @p: pointer to a naturally-aligned 64-bit value
+ */
+static inline void __swab64s(__u64 *p)
+{
+#ifdef HAVE_ARCH_SWAB64S
+	__arch_swab64s(p);
+#else
+	*p = __swab64p(p);
+#endif
+}
+
+/**
+ * __swahw32s - wordswap a 32-bit value in-place
+ * @p: pointer to a naturally-aligned 32-bit value
+ *
+ * See __swahw32() for details of wordswapping
+ */
+static inline void __swahw32s(__u32 *p)
+{
+#ifdef HAVE_ARCH_SWAHW32S
+	__arch_swahw32s(p);
+#else
+	*p = __swahw32p(p);
+#endif
+}
+
+/**
+ * __swahb32s - high and low byteswap a 32-bit value in-place
+ * @p: pointer to a naturally-aligned 32-bit value
+ *
+ * See __swahb32() for details of high and low byte swapping
+ */
+static inline void __swahb32s(__u32 *p)
+{
+#ifdef HAVE_ARCH_SWAHB32S
+	__arch_swahb32s(p);
+#else
+	*p = __swahb32p(p);
+#endif
+}
+
+#ifdef __KERNEL__
+# define swab16 __swab16
+# define swab32 __swab32
+# define swab64 __swab64
+# define swahw32 __swahw32
+# define swahb32 __swahb32
+# define swab16p __swab16p
+# define swab32p __swab32p
+# define swab64p __swab64p
+# define swahw32p __swahw32p
+# define swahb32p __swahb32p
+# define swab16s __swab16s
+# define swab32s __swab32s
+# define swab64s __swab64s
+# define swahw32s __swahw32s
+# define swahb32s __swahb32s
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_SWAB_H */
-- 
1.5.6.3.471.gfd34


* Re: [PATCH 1/2] byteorder: add a new include/linux/swab.h to define byteswapping functions
  2008-07-15 19:01 [PATCH 1/2] byteorder: add a new include/linux/swab.h to define byteswapping functions Harvey Harrison
@ 2008-07-17 22:59 ` Andrew Morton
  2008-07-17 23:23   ` Harvey Harrison
  0 siblings, 1 reply; 3+ messages in thread
From: Andrew Morton @ 2008-07-17 22:59 UTC (permalink / raw)
  To: Harvey Harrison; +Cc: torvalds, linux-arch

On Tue, 15 Jul 2008 12:01:49 -0700
Harvey Harrison <harvey.harrison@gmail.com> wrote:

> Collect the implementations from include/linux/byteorder/swab.h and swabb.h
> into swab.h.

I'm afraid I've been basically ignoring the storm of byteorder and
related patches simply because I do not have a large enough brain to
work out wth is going on.

I get the impression that there's a great storm of random patches, some
of which are repeats, with a distressing number of updates all with no
overall plan.  Probably I'm wrong about that, but making it all not
_look_ like that would really help.

So ho hum.  I merged these two into -mm, at the tail of the queue, in a
new "byteorder" "tree", probably for 2.6.28.  We could bump them up
into 2.6.27-rc1 if that would help merging followup stuff out into the
subsystem trees during the 2.6.27-rcX cycle.

But please be aware that this ongoing patchstorm is quite confusing
from where I sit, and I just haven't been able to justify expending all
the time which it seems that it requires for me to work out just what
the heck is going on.

So please, send the patches less frequently, in larger batches, after
lots of testing.  Each series should have some overall theme which is
clearly explained in an easy-to-follow fashion.

Ta.


* Re: [PATCH 1/2] byteorder: add a new include/linux/swab.h to define byteswapping functions
  2008-07-17 22:59 ` Andrew Morton
@ 2008-07-17 23:23   ` Harvey Harrison
  0 siblings, 0 replies; 3+ messages in thread
From: Harvey Harrison @ 2008-07-17 23:23 UTC (permalink / raw)
  To: Andrew Morton; +Cc: torvalds, linux-arch

On Thu, 2008-07-17 at 15:59 -0700, Andrew Morton wrote:
> On Tue, 15 Jul 2008 12:01:49 -0700
> Harvey Harrison <harvey.harrison@gmail.com> wrote:
> 
> > Collect the implementations from include/linux/byteorder/swab.h and swabb.h
> > into swab.h.
> 
> I'm afraid I've been basically ignoring the storm of byteorder and
> related patches simply because I do not have a large enough brain to
> work out wth is going on.
> 
> I get the impression that there's a great storm of random patches, some
> of which are repeats, with a distressing number of updates all with no
> overall plan.  Probably I'm wrong about that, but making it all not
> _look_ like that would really help.
> 
> So ho hum.  I merged these two into -mm, at the tail of the queue, in a
> new "byteorder" "tree", probably for 2.6.28.  We could bump them up
> into 2.6.27-rc1 if that would help merging followup stuff out into the
> subsystem trees during the 2.6.27-rcX cycle.
> 
> But please be aware that this ongoing patchstorm is quite confusing
> from where I sit, and I just haven't been able to justify expending all
> the time which it seems that it requires for me to work out just what
> the heck is going on.
> 
> So please, send the patches less frequently, in larger batches, after
> lots of testing.  Each series should have some overall theme which is
> clearly explained in an easy-to-follow fashion.

A little context then:

With these two merged, I can start moving each arch over to the new byteorder
headers one at a time through the arch maintainers, so I would appreciate it
if they went upstream soonish.

The new headers are not used, and nothing at all changes for arches that do
not opt in.

The advantages of the new headers are:
1) there is a _standard_ way for each arch to provide optimized byteswapping
routines (swab16() and friends), and all of the endian-dependent cpu_to_*
helpers are pulled up into linux/byteorder.h

2) we can now rely on __LITTLE_ENDIAN/__BIG_ENDIAN being defined in one place,
and the checks for setting both or neither are unified in the linux/ header.

3) The linux/byteorder/ directory is consolidated and then removed, and the few
direct includes of byteorder/swabb.h are dropped, as the implementation now
provides this functionality unconditionally.

4) With the implementation all moving to linux/byteorder, asm/byteorder is no
longer the best include, so I have a series that compatibly replaces all asm/
includes with linux/.  There is no flag day; once all the asm/ includes are
gone we can remove one include in the linux/ header.

5) The new implementation of cpu_to_* etc. does compile-time constant folding,
removing the need for all of the __constant_* versions, which another series
then removes (see the sketch after this list).  Again, no flag day; it can
proceed at its own pace without breaking anything.

6) Checks throughout the tree for __BIG_ENDIAN/__LITTLE_ENDIAN can be removed/simplified
as the linux header now does this check centrally.
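
To illustrate point 5, a hedged sketch of what the folding buys (the table
name and values are hypothetical; cpu_to_le32() is the existing helper, now
backed by the same __builtin_constant_p dispatch shown in the swab.h patch
above):

	#include <linux/byteorder.h>	/* new header from this series */

	/*
	 * Previously a static initializer needed __constant_cpu_to_le32();
	 * with compile-time folding the plain helper works everywhere.
	 */
	static const __le32 magic_table[] = {
		cpu_to_le32(0xdeadbeef),
		cpu_to_le32(0x0000cafe),
	};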

I've been rebasing the 65-patch series against linux-next each day; let me
know when you want it.  It breaks down to:

2 patches which you just merged
1 patch wiring all the arches...can be easily split.
<nothing further until all arches have moved over>
1 patch removing direct includes of linux/byteorder/swabb.h
1 patch removing linux/byteorder/*
2 patches consolidating the endian tests and adding an include to linux/byteorder.h
so that _either_ asm/ or linux/ is OK to include directly
36 patches changing asm/byteorder to linux/byteorder throughout the tree and
finally making linux/byteorder.h the intended header for direct includes.

The rest of the patches replace uses of the __constant_* endian helpers with
the regular versions, which now do the compile-time folding, and finally
remove the __constant_* versions entirely.


Cheers,

Harvey

