public inbox for linux-arch@vger.kernel.org
 help / color / mirror / Atom feed
* [PATCH 1/8] kernel: add common infrastructure for unaligned access
@ 2008-04-10 19:44 Harvey Harrison
  2008-04-10 19:44 ` Harvey Harrison
                   ` (2 more replies)
  0 siblings, 3 replies; 24+ messages in thread
From: Harvey Harrison @ 2008-04-10 19:44 UTC (permalink / raw)
  To: Andrew Morton; +Cc: linux-arch

Create a linux/unaligned folder similar in spirit to the linux/byteorder
folder to hold generic implementations collected from various arches.

Currently there are five implementations:
1) cpu_endian.h: C-struct based, heavily based on asm-generic/unaligned.h
2) little_endian.h: Open coded byte-swapping, heavily based on asm-arm
3) big_endian.h: Open coded byte-swapping, heavily based on asm-arm
4) no_builtin_memcpy.h: taken from multiple implementations in tree
5) access_ok.h: taken from x86 and others, unaligned access is ok.

API additions:

get_unaligned_{le16|le32|le64|be16|be32|be64}(p) which is meant to replace
code of the form:
le16_to_cpu(get_unaligned((__le16 *)p));

put_unaligned_{le16|le32|le64|be16|be32|be64}(val, pointer) which is meant to
replace code of the form:
put_unaligned(cpu_to_le16(val), (__le16 *)p);

Headers to create these based on the selected implementation and defining the
appropriate get_unaligned() and put_unaligned() macros are:

generic_le.h: Use the C-struct for get/put_unaligned and the le helpers, use the
opencoded be byteswapping implementation for big-endian.

generic_be.h: Use the C-struct for get/put_unaligned and the be helpers, use the
opencoded le byteswapping implementation for little-endian.

generic.h: Use opencoded byteswapping for all helpers, leaves it to the arch to
define get/put_unaligned.

Signed-off-by: Harvey Harrison <harvey.harrison-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
---
 include/linux/unaligned/access_ok.h         |   70 +++++++++++++++++++++
 include/linux/unaligned/big_endian.h        |   84 +++++++++++++++++++++++++
 include/linux/unaligned/cpu_endian.h        |   90 +++++++++++++++++++++++++++
 include/linux/unaligned/generic.h           |   67 ++++++++++++++++++++
 include/linux/unaligned/generic_be.h        |   70 +++++++++++++++++++++
 include/linux/unaligned/generic_le.h        |   70 +++++++++++++++++++++
 include/linux/unaligned/little_endian.h     |   84 +++++++++++++++++++++++++
 include/linux/unaligned/no_builtin_memcpy.h |   80 ++++++++++++++++++++++++
 8 files changed, 615 insertions(+), 0 deletions(-)

diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
new file mode 100644
index 0000000..e9d8ff4
--- /dev/null
+++ b/include/linux/unaligned/access_ok.h
@@ -0,0 +1,70 @@
+#ifndef _LINUX_UNALIGNED_ACCESS_OK_H_
+#define _LINUX_UNALIGNED_ACCESS_OK_H_
+
+#include <linux/kernel.h>
+#include <asm/byteorder.h>
+
+#define get_unaligned(ptr) (*(ptr))
+#define put_unaligned(val, ptr) ((void)( *(ptr) = (val) ))
+
+static inline u16 get_unaligned_le16(const __le16 *p)
+{
+	return le16_to_cpup(p);
+}
+
+static inline u32 get_unaligned_le32(const __le32 *p)
+{
+	return le32_to_cpup(p);
+}
+
+static inline u64 get_unaligned_le64(const __le64 *p)
+{
+	return le64_to_cpup(p);
+}
+
+static inline u16 get_unaligned_be16(const __be16 *p)
+{
+	return be16_to_cpup(p);
+}
+
+static inline u32 get_unaligned_be32(const __be32 *p)
+{
+	return be32_to_cpup(p);
+}
+
+static inline u64 get_unaligned_be64(const __be64 *p)
+{
+	return be64_to_cpup(p);
+}
+
+static inline void put_unaligned_le16(u16 val, void *p)
+{
+	*((__le16 *)p) = cpu_to_le16(val);
+}
+
+static inline void put_unaligned_le32(u32 val, void *p)
+{
+	*((__le32 *)p) = cpu_to_le32(val);
+}
+
+static inline void put_unaligned_le64(u64 val, void *p)
+{
+	*((__le64 *)p) = cpu_to_le64(val);
+}
+
+static inline void put_unaligned_be16(u16 val, void *p)
+{
+	*((__be16 *)p) = cpu_to_be16(val);
+}
+
+static inline void put_unaligned_be32(u32 val, void *p)
+{
+	*((__be32 *)p) = cpu_to_be32(val);
+}
+
+static inline void put_unaligned_be64(u64 val, void *p)
+{
+	*((__be64 *)p) = cpu_to_be64(val);
+}
+
+#endif /* _LINUX_UNALIGNED_ACCESS_OK_H_ */
diff --git a/include/linux/unaligned/big_endian.h b/include/linux/unaligned/big_endian.h
new file mode 100644
index 0000000..48802be
--- /dev/null
+++ b/include/linux/unaligned/big_endian.h
@@ -0,0 +1,84 @@
+#ifndef _LINUX_UNALIGNED_BIG_ENDIAN_H_
+#define _LINUX_UNALIGNED_BIG_ENDIAN_H_
+
+#include <linux/kernel.h>
+
+static inline u16 __get_unaligned_be16(const u8 *p)
+{
+	return (u16)(p[0] << 8 | p[1]);
+}
+
+static inline u32 __get_unaligned_be32(const u8 *p)
+{
+	return (u32)(p[0] << 24 | p[1] << 16 | p[2] << 8 | p[3]);
+}
+
+static inline u64 __get_unaligned_be64(const u8 *p)
+{
+	return ((u64)__get_unaligned_be32(p) << 32) |
+	       __get_unaligned_be32(p + 4);
+}
+
+#define __get_unaligned_be(ptr) ({			\
+	const void *__gu_p = (ptr);			\
+	typeof(*(ptr)) __val;				\
+	switch (sizeof(*(ptr))) {			\
+	case 1:						\
+		__val = *(const u8 *)__gu_p;		\
+		break;					\
+	case 2:						\
+		__val = __get_unaligned_be16(__gu_p);	\
+		break;					\
+	case 4:						\
+		__val = __get_unaligned_be32(__gu_p);	\
+		break;					\
+	case 8:						\
+		__val = __get_unaligned_be64(__gu_p);	\
+		break;					\
+	default:					\
+		BUILD_BUG_ON(1);			\
+		break;					\
+	};						\
+	__val; })
+
+static inline void __put_unaligned_be16(u16 val, u8 *p)
+{
+	*p++ = val >> 8;
+	*p++ = val;
+}
+
+static inline void __put_unaligned_be32(u32 val, u8 *p)
+{
+	__put_unaligned_be16(val >> 16, p);
+	__put_unaligned_be16(val, p + 2);
+}
+
+static inline void __put_unaligned_be64(u64 val, u8 *p)
+{
+	__put_unaligned_be32(val >> 32, p);
+	__put_unaligned_be32(val, p + 4);
+}
+
+#define __put_unaligned_be(val, ptr) ({					\
+	(void)sizeof(*(ptr) = (val));					\
+	void *__gu_p = (ptr);						\
+	switch (sizeof(*(ptr))) {					\
+	case 1:								\
+		*(u8 *)__gu_p = (__force u8)(val);			\
+		break;							\
+	case 2:								\
+		__put_unaligned_be16((__force u16)(val), __gu_p);	\
+		break;							\
+	case 4:								\
+		__put_unaligned_be32((__force u32)(val), __gu_p);	\
+		break;							\
+	case 8:								\
+		__put_unaligned_be64((__force u64)(val), __gu_p);	\
+		break;							\
+	default:							\
+		BUILD_BUG_ON(1);					\
+		break;							\
+	}								\
+	(void)0; })
+
+#endif /* _LINUX_UNALIGNED_BIG_ENDIAN_H_ */
diff --git a/include/linux/unaligned/cpu_endian.h b/include/linux/unaligned/cpu_endian.h
new file mode 100644
index 0000000..8189286
--- /dev/null
+++ b/include/linux/unaligned/cpu_endian.h
@@ -0,0 +1,90 @@
+#ifndef _LINUX_UNALIGNED_CPU_ENDIAN_H_
+#define _LINUX_UNALIGNED_CPU_ENDIAN_H_
+
+#include <linux/kernel.h>
+
+struct __una_u16 { u16 x __attribute__((packed)); };
+struct __una_u32 { u32 x __attribute__((packed)); };
+struct __una_u64 { u64 x __attribute__((packed)); };
+
+static inline u16 __get_unaligned_cpu16(const void *p)
+{
+	const struct __una_u16 *ptr = (const struct __una_u16 *)p;
+	return ptr->x;
+}
+
+static inline u32 __get_unaligned_cpu32(const void *p)
+{
+	const struct __una_u32 *ptr = (const struct __una_u32 *)p;
+	return ptr->x;
+}
+
+static inline u64 __get_unaligned_cpu64(const void *p)
+{
+	const struct __una_u64 *ptr = (const struct __una_u64 *)p;
+	return ptr->x;
+}
+
+#define __get_unaligned_cpu(ptr) ({			\
+	const void *__gu_p = (ptr);			\
+	typeof(*(ptr)) __val;				\
+	switch (sizeof(*(ptr))) {			\
+	case 1:						\
+		__val = *(const u8 *)__gu_p;		\
+		break;					\
+	case 2:						\
+		__val = __get_unaligned_cpu16(__gu_p);	\
+		break;					\
+	case 4:						\
+		__val = __get_unaligned_cpu32(__gu_p);	\
+		break;					\
+	case 8:						\
+		__val = __get_unaligned_cpu64(__gu_p);	\
+		break;					\
+	default:					\
+		BUILD_BUG_ON(1);			\
+		break;					\
+	};						\
+	__val; })
+
+static inline void __put_unaligned_cpu16(u16 val, void *p)
+{
+	struct __una_u16 *ptr = (struct __una_u16 *)p;
+	ptr->x = val;
+}
+
+static inline void __put_unaligned_cpu32(u32 val, void *p)
+{
+	struct __una_u32 *ptr = (struct __una_u32 *)p;
+	ptr->x = val;
+}
+
+static inline void __put_unaligned_cpu64(u64 val, void *p)
+{
+	struct __una_u64 *ptr = (struct __una_u64 *)p;
+	ptr->x = val;
+}
+
+#define __put_unaligned_cpu(val, ptr) ({				\
+	(void)sizeof(*(ptr) = (val));					\
+	void *__gu_p = (ptr);						\
+	switch (sizeof(*(ptr))) {					\
+	case 1:								\
+		*(u8 *)__gu_p = (__force u8)(val);			\
+		break;							\
+	case 2:								\
+		__put_unaligned_cpu16((__force u16)(val), __gu_p);	\
+		break;							\
+	case 4:								\
+		__put_unaligned_cpu32((__force u32)(val), __gu_p);	\
+		break;							\
+	case 8:								\
+		__put_unaligned_cpu64((__force u64)(val), __gu_p);	\
+		break;							\
+	default:							\
+		BUILD_BUG_ON(1);					\
+		break;							\
+	}								\
+	(void)0; })
+
+#endif /* _LINUX_UNALIGNED_CPU_ENDIAN_H_ */
diff --git a/include/linux/unaligned/generic.h b/include/linux/unaligned/generic.h
new file mode 100644
index 0000000..9cd3fab
--- /dev/null
+++ b/include/linux/unaligned/generic.h
@@ -0,0 +1,67 @@
+#ifndef _LINUX_UNALIGNED_GENERIC_H_
+#define _LINUX_UNALIGNED_GENERIC_H_
+
+#include <linux/unaligned/little_endian.h>
+#include <linux/unaligned/big_endian.h>
+
+static inline u16 get_unaligned_le16(const __le16 *p)
+{
+	return __get_unaligned_le16((const u8 *)p);
+}
+
+static inline u32 get_unaligned_le32(const __le32 *p)
+{
+	return __get_unaligned_le32((const u8 *)p);
+}
+
+static inline u64 get_unaligned_le64(const __le64 *p)
+{
+	return __get_unaligned_le64((const u8 *)p);
+}
+
+static inline u16 get_unaligned_be16(const __be16 *p)
+{
+	return __get_unaligned_be16((const u8 *)p);
+}
+
+static inline u32 get_unaligned_be32(const __be32 *p)
+{
+	return __get_unaligned_be32((const u8 *)p);
+}
+
+static inline u64 get_unaligned_be64(const __be64 *p)
+{
+	return __get_unaligned_be64((const u8 *)p);
+}
+
+static inline void put_unaligned_le16(u16 val, void *p)
+{
+	__put_unaligned_le16(val, p);
+}
+
+static inline void put_unaligned_le32(u32 val, void *p)
+{
+	__put_unaligned_le32(val, p);
+}
+
+static inline void put_unaligned_le64(u64 val, void *p)
+{
+	__put_unaligned_le64(val, p);
+}
+
+static inline void put_unaligned_be16(u16 val, void *p)
+{
+	__put_unaligned_be16(val, p);
+}
+
+static inline void put_unaligned_be32(u32 val, void *p)
+{
+	__put_unaligned_be32(val, p);
+}
+
+static inline void put_unaligned_be64(u64 val, void *p)
+{
+	__put_unaligned_be64(val, p);
+}
+
+#endif /* _LINUX_UNALIGNED_GENERIC_H_ */
diff --git a/include/linux/unaligned/generic_be.h b/include/linux/unaligned/generic_be.h
new file mode 100644
index 0000000..dd7e323
--- /dev/null
+++ b/include/linux/unaligned/generic_be.h
@@ -0,0 +1,70 @@
+#ifndef _LINUX_UNALIGNED_GENERIC_BE_H_
+#define _LINUX_UNALIGNED_GENERIC_BE_H_
+
+#include <linux/unaligned/cpu_endian.h>
+#include <linux/unaligned/little_endian.h>
+
+#define get_unaligned	__get_unaligned_cpu
+#define put_unaligned	__put_unaligned_cpu
+
+static inline u16 get_unaligned_le16(const __le16 *p)
+{
+	return __get_unaligned_le16((const u8 *)p);
+}
+
+static inline u32 get_unaligned_le32(const __le32 *p)
+{
+	return __get_unaligned_le32((const u8 *)p);
+}
+
+static inline u64 get_unaligned_le64(const __le64 *p)
+{
+	return __get_unaligned_le64((const u8 *)p);
+}
+
+static inline u16 get_unaligned_be16(const __be16 *p)
+{
+	return __get_unaligned_cpu16((const u8 *)p);
+}
+
+static inline u32 get_unaligned_be32(const __be32 *p)
+{
+	return __get_unaligned_cpu32((const u8 *)p);
+}
+
+static inline u64 get_unaligned_be64(const __be64 *p)
+{
+	return __get_unaligned_cpu64((const u8 *)p);
+}
+
+static inline void put_unaligned_le16(u16 val, void *p)
+{
+	__put_unaligned_le16(val, p);
+}
+
+static inline void put_unaligned_le32(u32 val, void *p)
+{
+	__put_unaligned_le32(val, p);
+}
+
+static inline void put_unaligned_le64(u64 val, void *p)
+{
+	__put_unaligned_le64(val, p);
+}
+
+static inline void put_unaligned_be16(u16 val, void *p)
+{
+	__put_unaligned_cpu16(val, p);
+}
+
+static inline void put_unaligned_be32(u32 val, void *p)
+{
+	__put_unaligned_cpu32(val, p);
+}
+
+static inline void put_unaligned_be64(u64 val, void *p)
+{
+	__put_unaligned_cpu64(val, p);
+}
+
+#endif /* _LINUX_UNALIGNED_GENERIC_BE_H_ */
diff --git a/include/linux/unaligned/generic_le.h b/include/linux/unaligned/generic_le.h
new file mode 100644
index 0000000..7b7de52
--- /dev/null
+++ b/include/linux/unaligned/generic_le.h
@@ -0,0 +1,70 @@
+#ifndef _LINUX_UNALIGNED_GENERIC_LE_H_
+#define _LINUX_UNALIGNED_GENERIC_LE_H_
+
+#include <linux/unaligned/cpu_endian.h>
+#include <linux/unaligned/big_endian.h>
+
+#define get_unaligned	__get_unaligned_cpu
+#define put_unaligned	__put_unaligned_cpu
+
+static inline u16 get_unaligned_le16(const __le16 *p)
+{
+	return __get_unaligned_cpu16((const u8 *)p);
+}
+
+static inline u32 get_unaligned_le32(const __le32 *p)
+{
+	return __get_unaligned_cpu32((const u8 *)p);
+}
+
+static inline u64 get_unaligned_le64(const __le64 *p)
+{
+	return __get_unaligned_cpu64((const u8 *)p);
+}
+
+static inline u16 get_unaligned_be16(const __be16 *p)
+{
+	return __get_unaligned_be16((const u8 *)p);
+}
+
+static inline u32 get_unaligned_be32(const __be32 *p)
+{
+	return __get_unaligned_be32((const u8 *)p);
+}
+
+static inline u64 get_unaligned_be64(const __be64 *p)
+{
+	return __get_unaligned_be64((const u8 *)p);
+}
+
+static inline void put_unaligned_le16(u16 val, void *p)
+{
+	__put_unaligned_cpu16(val, p);
+}
+
+static inline void put_unaligned_le32(u32 val, void *p)
+{
+	__put_unaligned_cpu32(val, p);
+}
+
+static inline void put_unaligned_le64(u64 val, void *p)
+{
+	__put_unaligned_cpu64(val, p);
+}
+
+static inline void put_unaligned_be16(u16 val, void *p)
+{
+	__put_unaligned_be16(val, p);
+}
+
+static inline void put_unaligned_be32(u32 val, void *p)
+{
+	__put_unaligned_be32(val, p);
+}
+
+static inline void put_unaligned_be64(u64 val, void *p)
+{
+	__put_unaligned_be64(val, p);
+}
+
+#endif /* _LINUX_UNALIGNED_GENERIC_LE_H_ */
diff --git a/include/linux/unaligned/little_endian.h b/include/linux/unaligned/little_endian.h
new file mode 100644
index 0000000..38de5c1
--- /dev/null
+++ b/include/linux/unaligned/little_endian.h
@@ -0,0 +1,84 @@
+#ifndef _LINUX_UNALIGNED_LITTLE_ENDIAN_H_
+#define _LINUX_UNALIGNED_LITTLE_ENDIAN_H_
+
+#include <linux/kernel.h>
+
+static inline u16 __get_unaligned_le16(const u8 *p)
+{
+	return (u16)(p[0] | p[1] << 8);
+}
+
+static inline u32 __get_unaligned_le32(const u8 *p)
+{
+	return (u32)(p[0] | p[1] << 8 | p[2] << 16 | p[3] << 24);
+}
+
+static inline u64 __get_unaligned_le64(const u8 *p)
+{
+	return ((u64)__get_unaligned_le32(p + 4) << 32) |
+	       __get_unaligned_le32(p);
+}
+
+#define __get_unaligned_le(ptr) ({			\
+	const void *__gu_p = (ptr);			\
+	typeof(*(ptr)) __val;				\
+	switch (sizeof(*(ptr))) {			\
+	case 1:						\
+		__val = *(const u8 *)__gu_p;		\
+		break;					\
+	case 2:						\
+		__val = __get_unaligned_le16(__gu_p);	\
+		break;					\
+	case 4:						\
+		__val = __get_unaligned_le32(__gu_p);	\
+		break;					\
+	case 8:						\
+		__val = __get_unaligned_le64(__gu_p);	\
+		break;					\
+	default:					\
+		BUILD_BUG_ON(1);			\
+		break;					\
+	};						\
+	__val; })
+
+static inline void __put_unaligned_le16(u16 val, u8 *p)
+{
+	*p++ = val;
+	*p++ = val >> 8;
+}
+
+static inline void __put_unaligned_le32(u32 val, u8 *p)
+{
+	__put_unaligned_le16(val >> 16, p + 2);
+	__put_unaligned_le16(val, p);
+}
+
+static inline void __put_unaligned_le64(u64 val, u8 *p)
+{
+	__put_unaligned_le32(val >> 32, p + 4);
+	__put_unaligned_le32(val, p);
+}
+
+#define __put_unaligned_le(val, ptr) ({					\
+	(void)sizeof(*(ptr) = (val));					\
+	void *__gu_p = (ptr);						\
+	switch (sizeof(*(ptr))) {					\
+	case 1:								\
+		*(u8 *)__gu_p = (__force u8)(val);			\
+		break;							\
+	case 2:								\
+		__put_unaligned_le16((__force u16)(val), __gu_p);	\
+		break;							\
+	case 4:								\
+		__put_unaligned_le32((__force u32)(val), __gu_p);	\
+		break;							\
+	case 8:								\
+		__put_unaligned_le64((__force u64)(val), __gu_p);	\
+		break;							\
+	default:							\
+		BUILD_BUG_ON(1);					\
+		break;							\
+	}								\
+	(void)0; })
+
+#endif /* _LINUX_UNALIGNED_LITTLE_ENDIAN_H_ */
diff --git a/include/linux/unaligned/no_builtin_memcpy.h b/include/linux/unaligned/no_builtin_memcpy.h
new file mode 100644
index 0000000..9365807
--- /dev/null
+++ b/include/linux/unaligned/no_builtin_memcpy.h
@@ -0,0 +1,80 @@
+#ifndef _LINUX_UNALIGNED_NO_BUILTIN_MEMCPY_H_
+#define _LINUX_UNALIGNED_NO_BUILTIN_MEMCPY_H_
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <asm/byteorder.h>
+
+/* Use memmove here, so gcc does not insert a __builtin_memcpy. */
+
+#define get_unaligned(ptr) ({				\
+	__typeof__(*(ptr)) __tmp;			\
+	memmove(&__tmp, (ptr), sizeof(*(ptr)));		\
+	__tmp; })
+
+#define put_unaligned(val, ptr) ({			\
+	__typeof__(*(ptr)) __tmp = (val);		\
+	memmove((ptr), &__tmp, sizeof(*(ptr)));		\
+	(void)0; })
+
+static inline u16 get_unaligned_le16(const __le16 *p)
+{
+	return le16_to_cpu(get_unaligned(p));
+}
+
+static inline u32 get_unaligned_le32(const __le32 *p)
+{
+	return le32_to_cpu(get_unaligned(p));
+}
+
+static inline u64 get_unaligned_le64(const __le64 *p)
+{
+	return le64_to_cpu(get_unaligned(p));
+}
+
+static inline u16 get_unaligned_be16(const __be16 *p)
+{
+	return be16_to_cpu(get_unaligned(p));
+}
+
+static inline u32 get_unaligned_be32(const __be32 *p)
+{
+	return be32_to_cpu(get_unaligned(p));
+}
+
+static inline u64 get_unaligned_be64(const __be64 *p)
+{
+	return be64_to_cpu(get_unaligned(p));
+}
+
+static inline void put_unaligned_le16(u16 val, void *p)
+{
+	put_unaligned(cpu_to_le16(val), (__le16 *)p);
+}
+
+static inline void put_unaligned_le32(u32 val, void *p)
+{
+	put_unaligned(cpu_to_le32(val), (__le32 *)p);
+}
+
+static inline void put_unaligned_le64(u64 val, void *p)
+{
+	put_unaligned(cpu_to_le64(val), (__le64 *)p);
+}
+
+static inline void put_unaligned_be16(u16 val, void *p)
+{
+	put_unaligned(cpu_to_be16(val), (__be16 *)p);
+}
+
+static inline void put_unaligned_be32(u32 val, void *p)
+{
+	put_unaligned(cpu_to_be32(val), (__be32 *)p);
+}
+
+static inline void put_unaligned_be64(u64 val, void *p)
+{
+	put_unaligned(cpu_to_be64(val), (__be64 *)p);
+}
+
+#endif
-- 
1.5.5.144.g3e42

^ permalink raw reply related	[flat|nested] 24+ messages in thread

* [PATCH 1/8] kernel: add common infrastructure for unaligned access
  2008-04-10 19:44 [PATCH 1/8] kernel: add common infrastructure for unaligned access Harvey Harrison
@ 2008-04-10 19:44 ` Harvey Harrison
  2008-04-10 21:43 ` David Howells
  2008-04-11  0:06 ` [PATCHv2 " Harvey Harrison
  2 siblings, 0 replies; 24+ messages in thread
From: Harvey Harrison @ 2008-04-10 19:44 UTC (permalink / raw)
  To: Andrew Morton; +Cc: linux-arch

Create a linux/unaligned folder similar in spirit to the linux/byteorder
folder to hold generic implementations collected from various arches.

Currently there are five implementations:
1) cpu_endian.h: C-struct based, heavily based on asm-generic/unaligned.h
2) little_endian.h: Open coded byte-swapping, heavily based on asm-arm
3) big_endian.h: Open coded byte-swapping, heavily based on asm-arm
4) no_builtin_memcpy.h: taken from multiple implementations in tree
5) access_ok.h: taken from x86 and others, unaligned access is ok.

API additions:

get_unaligned_{le16|le32|le64|be16|be32|be64}(p) which is meant to replace
code of the form:
le16_to_cpu(get_unaligned((__le16 *)p));

put_unaligned_{le16|le32|le64|be16|be32|be64}(val, pointer) which is meant to
replace code of the form:
put_unaligned(cpu_to_le16(val), (__le16 *)p);

Headers to create these based on the selected implementation and defining the
appropriate get_unaligned() and put_unaligned() macros are:

generic_le.h: Use the C-struct for get/put_unaligned and the le helpers, use the
opencoded be byteswapping implementation for big-endian.

generic_be.h: Use the C-struct for get/put_unaligned and the be helpers, use the
opencoded le byteswapping implementation for little-endian.

generic.h: Use opencoded byteswapping for all helpers, leaves it to the arch to
define get/put_unaligned.

Signed-off-by: Harvey Harrison <harvey.harrison@gmail.com>
---
 include/linux/unaligned/access_ok.h         |   70 +++++++++++++++++++++
 include/linux/unaligned/big_endian.h        |   84 +++++++++++++++++++++++++
 include/linux/unaligned/cpu_endian.h        |   90 +++++++++++++++++++++++++++
 include/linux/unaligned/generic.h           |   67 ++++++++++++++++++++
 include/linux/unaligned/generic_be.h        |   70 +++++++++++++++++++++
 include/linux/unaligned/generic_le.h        |   70 +++++++++++++++++++++
 include/linux/unaligned/little_endian.h     |   84 +++++++++++++++++++++++++
 include/linux/unaligned/no_builtin_memcpy.h |   80 ++++++++++++++++++++++++
 8 files changed, 615 insertions(+), 0 deletions(-)

diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
new file mode 100644
index 0000000..e9d8ff4
--- /dev/null
+++ b/include/linux/unaligned/access_ok.h
@@ -0,0 +1,70 @@
+#ifndef _LINUX_UNALIGNED_ACCESS_OK_H_
+#define _LINUX_UNALIGNED_ACCESS_OK_H_
+
+#include <linux/kernel.h>
+#include <asm/byteorder.h>
+
+#define get_unaligned(ptr) (*(ptr))
+#define put_unaligned(val, ptr) ((void)( *(ptr) = (val) ))
+
+static inline u16 get_unaligned_le16(const __le16 *p)
+{
+	return le16_to_cpup(p);
+}
+
+static inline u32 get_unaligned_le32(const __le32 *p)
+{
+	return le32_to_cpup(p);
+}
+
+static inline u64 get_unaligned_le64(const __le64 *p)
+{
+	return le64_to_cpup(p);
+}
+
+static inline u16 get_unaligned_be16(const __be16 *p)
+{
+	return be16_to_cpup(p);
+}
+
+static inline u32 get_unaligned_be32(const __be32 *p)
+{
+	return be32_to_cpup(p);
+}
+
+static inline u64 get_unaligned_be64(const __be64 *p)
+{
+	return be64_to_cpup(p);
+}
+
+static inline void put_unaligned_le16(u16 val, void *p)
+{
+	*((__le16 *)p) = cpu_to_le16(val);
+}
+
+static inline void put_unaligned_le32(u32 val, void *p)
+{
+	*((__le32 *)p) = cpu_to_le32(val);
+}
+
+static inline void put_unaligned_le64(u64 val, void *p)
+{
+	*((__le64 *)p) = cpu_to_le64(val);
+}
+
+static inline void put_unaligned_be16(u16 val, void *p)
+{
+	*((__be16 *)p) = cpu_to_be16(val);
+}
+
+static inline void put_unaligned_be32(u32 val, void *p)
+{
+	*((__be32 *)p) = cpu_to_be32(val);
+}
+
+static inline void put_unaligned_be64(u64 val, void *p)
+{
+	*((__be64 *)p) = cpu_to_be64(val);
+}
+
+#endif /* _LINUX_UNALIGNED_ACCESS_OK_H_ */
diff --git a/include/linux/unaligned/big_endian.h b/include/linux/unaligned/big_endian.h
new file mode 100644
index 0000000..48802be
--- /dev/null
+++ b/include/linux/unaligned/big_endian.h
@@ -0,0 +1,84 @@
+#ifndef _LINUX_UNALIGNED_BIG_ENDIAN_H_
+#define _LINUX_UNALIGNED_BIG_ENDIAN_H_
+
+#include <linux/kernel.h>
+
+static inline u16 __get_unaligned_be16(const u8 *p)
+{
+	return (u16)(p[0] << 8 | p[1]);
+}
+
+static inline u32 __get_unaligned_be32(const u8 *p)
+{
+	return (u32)(p[0] << 24 | p[1] << 16 | p[2] << 8 | p[3]);
+}
+
+static inline u64 __get_unaligned_be64(const u8 *p)
+{
+	return ((u64)__get_unaligned_be32(p) << 32) |
+	       __get_unaligned_be32(p + 4);
+}
+
+#define __get_unaligned_be(ptr) ({			\
+	const void *__gu_p = (ptr);			\
+	typeof(*(ptr)) __val;				\
+	switch (sizeof(*(ptr))) {			\
+	case 1:						\
+		__val = *(const u8 *)__gu_p;		\
+		break;					\
+	case 2:						\
+		__val = __get_unaligned_be16(__gu_p);	\
+		break;					\
+	case 4:						\
+		__val = __get_unaligned_be32(__gu_p);	\
+		break;					\
+	case 8:						\
+		__val = __get_unaligned_be64(__gu_p);	\
+		break;					\
+	default:					\
+		BUILD_BUG_ON(1);			\
+		break;					\
+	};						\
+	__val; })
+
+static inline void __put_unaligned_be16(u16 val, u8 *p)
+{
+	*p++ = val >> 8;
+	*p++ = val;
+}
+
+static inline void __put_unaligned_be32(u32 val, u8 *p)
+{
+	__put_unaligned_be16(val >> 16, p);
+	__put_unaligned_be16(val, p + 2);
+}
+
+static inline void __put_unaligned_be64(u64 val, u8 *p)
+{
+	__put_unaligned_be32(val >> 32, p);
+	__put_unaligned_be32(val, p + 4);
+}
+
+#define __put_unaligned_be(val, ptr) ({					\
+	(void)sizeof(*(ptr) = (val));					\
+	void *__gu_p = (ptr);						\
+	switch (sizeof(*(ptr))) {					\
+	case 1:								\
+		*(u8 *)__gu_p = (__force u8)(val);			\
+		break;							\
+	case 2:								\
+		__put_unaligned_be16((__force u16)(val), __gu_p);	\
+		break;							\
+	case 4:								\
+		__put_unaligned_be32((__force u32)(val), __gu_p);	\
+		break;							\
+	case 8:								\
+		__put_unaligned_be64((__force u64)(val), __gu_p);	\
+		break;							\
+	default:							\
+		BUILD_BUG_ON(1);					\
+		break;							\
+	}								\
+	(void)0; })
+
+#endif /* _LINUX_UNALIGNED_BIG_ENDIAN_H_ */
diff --git a/include/linux/unaligned/cpu_endian.h b/include/linux/unaligned/cpu_endian.h
new file mode 100644
index 0000000..8189286
--- /dev/null
+++ b/include/linux/unaligned/cpu_endian.h
@@ -0,0 +1,90 @@
+#ifndef _LINUX_UNALIGNED_CPU_ENDIAN_H_
+#define _LINUX_UNALIGNED_CPU_ENDIAN_H_
+
+#include <linux/kernel.h>
+
+struct __una_u16 { u16 x __attribute__((packed)); };
+struct __una_u32 { u32 x __attribute__((packed)); };
+struct __una_u64 { u64 x __attribute__((packed)); };
+
+static inline u16 __get_unaligned_cpu16(const void *p)
+{
+	const struct __una_u16 *ptr = (const struct __una_u16 *)p;
+	return ptr->x;
+}
+
+static inline u32 __get_unaligned_cpu32(const void *p)
+{
+	const struct __una_u32 *ptr = (const struct __una_u32 *)p;
+	return ptr->x;
+}
+
+static inline u64 __get_unaligned_cpu64(const void *p)
+{
+	const struct __una_u64 *ptr = (const struct __una_u64 *)p;
+	return ptr->x;
+}
+
+#define __get_unaligned_cpu(ptr) ({			\
+	const void *__gu_p = (ptr);			\
+	typeof(*(ptr)) __val;				\
+	switch (sizeof(*(ptr))) {			\
+	case 1:						\
+		__val = *(const u8 *)__gu_p;		\
+		break;					\
+	case 2:						\
+		__val = __get_unaligned_cpu16(__gu_p);	\
+		break;					\
+	case 4:						\
+		__val = __get_unaligned_cpu32(__gu_p);	\
+		break;					\
+	case 8:						\
+		__val = __get_unaligned_cpu64(__gu_p);	\
+		break;					\
+	default:					\
+		BUILD_BUG_ON(1);			\
+		break;					\
+	};						\
+	__val; })
+
+static inline void __put_unaligned_cpu16(u16 val, void *p)
+{
+	struct __una_u16 *ptr = (struct __una_u16 *)p;
+	ptr->x = val;
+}
+
+static inline void __put_unaligned_cpu32(u32 val, void *p)
+{
+	struct __una_u32 *ptr = (struct __una_u32 *)p;
+	ptr->x = val;
+}
+
+static inline void __put_unaligned_cpu64(u64 val, void *p)
+{
+	struct __una_u64 *ptr = (struct __una_u64 *)p;
+	ptr->x = val;
+}
+
+#define __put_unaligned_cpu(val, ptr) ({				\
+	(void)sizeof(*(ptr) = (val));					\
+	void *__gu_p = (ptr);						\
+	switch (sizeof(*(ptr))) {					\
+	case 1:								\
+		*(u8 *)__gu_p = (__force u8)(val);			\
+		break;							\
+	case 2:								\
+		__put_unaligned_cpu16((__force u16)(val), __gu_p);	\
+		break;							\
+	case 4:								\
+		__put_unaligned_cpu32((__force u32)(val), __gu_p);	\
+		break;							\
+	case 8:								\
+		__put_unaligned_cpu64((__force u64)(val), __gu_p);	\
+		break;							\
+	default:							\
+		BUILD_BUG_ON(1);					\
+		break;							\
+	}								\
+	(void)0; })
+
+#endif /* _LINUX_UNALIGNED_CPU_ENDIAN_H_ */
diff --git a/include/linux/unaligned/generic.h b/include/linux/unaligned/generic.h
new file mode 100644
index 0000000..9cd3fab
--- /dev/null
+++ b/include/linux/unaligned/generic.h
@@ -0,0 +1,67 @@
+#ifndef _LINUX_UNALIGNED_GENERIC_H_
+#define _LINUX_UNALIGNED_GENERIC_H_
+
+#include <linux/unaligned/little_endian.h>
+#include <linux/unaligned/big_endian.h>
+
+static inline u16 get_unaligned_le16(const __le16 *p)
+{
+	return __get_unaligned_le16((const u8 *)p);
+}
+
+static inline u32 get_unaligned_le32(const __le32 *p)
+{
+	return __get_unaligned_le32((const u8 *)p);
+}
+
+static inline u64 get_unaligned_le64(const __le64 *p)
+{
+	return __get_unaligned_le64((const u8 *)p);
+}
+
+static inline u16 get_unaligned_be16(const __be16 *p)
+{
+	return __get_unaligned_be16((const u8 *)p);
+}
+
+static inline u32 get_unaligned_be32(const __be32 *p)
+{
+	return __get_unaligned_be32((const u8 *)p);
+}
+
+static inline u64 get_unaligned_be64(const __be64 *p)
+{
+	return __get_unaligned_be64((const u8 *)p);
+}
+
+static inline void put_unaligned_le16(u16 val, void *p)
+{
+	__put_unaligned_le16(val, p);
+}
+
+static inline void put_unaligned_le32(u32 val, void *p)
+{
+	__put_unaligned_le32(val, p);
+}
+
+static inline void put_unaligned_le64(u64 val, void *p)
+{
+	__put_unaligned_le64(val, p);
+}
+
+static inline void put_unaligned_be16(u16 val, void *p)
+{
+	__put_unaligned_be16(val, p);
+}
+
+static inline void put_unaligned_be32(u32 val, void *p)
+{
+	__put_unaligned_be32(val, p);
+}
+
+static inline void put_unaligned_be64(u64 val, void *p)
+{
+	__put_unaligned_be64(val, p);
+}
+
+#endif /* _LINUX_UNALIGNED_GENERIC_H_ */
diff --git a/include/linux/unaligned/generic_be.h b/include/linux/unaligned/generic_be.h
new file mode 100644
index 0000000..dd7e323
--- /dev/null
+++ b/include/linux/unaligned/generic_be.h
@@ -0,0 +1,70 @@
+#ifndef _LINUX_UNALIGNED_GENERIC_BE_H_
+#define _LINUX_UNALIGNED_GENERIC_BE_H_
+
+#include <linux/unaligned/cpu_endian.h>
+#include <linux/unaligned/little_endian.h>
+
+#define get_unaligned	__get_unaligned_cpu
+#define put_unaligned	__put_unaligned_cpu
+
+static inline u16 get_unaligned_le16(const __le16 *p)
+{
+	return __get_unaligned_le16((const u8 *)p);
+}
+
+static inline u32 get_unaligned_le32(const __le32 *p)
+{
+	return __get_unaligned_le32((const u8 *)p);
+}
+
+static inline u64 get_unaligned_le64(const __le64 *p)
+{
+	return __get_unaligned_le64((const u8 *)p);
+}
+
+static inline u16 get_unaligned_be16(const __be16 *p)
+{
+	return __get_unaligned_cpu16((const u8 *)p);
+}
+
+static inline u32 get_unaligned_be32(const __be32 *p)
+{
+	return __get_unaligned_cpu32((const u8 *)p);
+}
+
+static inline u64 get_unaligned_be64(const __be64 *p)
+{
+	return __get_unaligned_cpu64((const u8 *)p);
+}
+
+static inline void put_unaligned_le16(u16 val, void *p)
+{
+	__put_unaligned_le16(val, p);
+}
+
+static inline void put_unaligned_le32(u32 val, void *p)
+{
+	__put_unaligned_le32(val, p);
+}
+
+static inline void put_unaligned_le64(u64 val, void *p)
+{
+	__put_unaligned_le64(val, p);
+}
+
+static inline void put_unaligned_be16(u16 val, void *p)
+{
+	__put_unaligned_cpu16(val, p);
+}
+
+static inline void put_unaligned_be32(u32 val, void *p)
+{
+	__put_unaligned_cpu32(val, p);
+}
+
+static inline void put_unaligned_be64(u64 val, void *p)
+{
+	__put_unaligned_cpu64(val, p);
+}
+
+#endif /* _LINUX_UNALIGNED_GENERIC_BE_H_ */
diff --git a/include/linux/unaligned/generic_le.h b/include/linux/unaligned/generic_le.h
new file mode 100644
index 0000000..7b7de52
--- /dev/null
+++ b/include/linux/unaligned/generic_le.h
@@ -0,0 +1,70 @@
+#ifndef _LINUX_UNALIGNED_GENERIC_LE_H_
+#define _LINUX_UNALIGNED_GENERIC_LE_H_
+
+#include <linux/unaligned/cpu_endian.h>
+#include <linux/unaligned/big_endian.h>
+
+#define get_unaligned	__get_unaligned_cpu
+#define put_unaligned	__put_unaligned_cpu
+
+static inline u16 get_unaligned_le16(const __le16 *p)
+{
+	return __get_unaligned_cpu16((const u8 *)p);
+}
+
+static inline u32 get_unaligned_le32(const __le32 *p)
+{
+	return __get_unaligned_cpu32((const u8 *)p);
+}
+
+static inline u64 get_unaligned_le64(const __le64 *p)
+{
+	return __get_unaligned_cpu64((const u8 *)p);
+}
+
+static inline u16 get_unaligned_be16(const __be16 *p)
+{
+	return __get_unaligned_be16((const u8 *)p);
+}
+
+static inline u32 get_unaligned_be32(const __be32 *p)
+{
+	return __get_unaligned_be32((const u8 *)p);
+}
+
+static inline u64 get_unaligned_be64(const __be64 *p)
+{
+	return __get_unaligned_be64((const u8 *)p);
+}
+
+static inline void put_unaligned_le16(u16 val, void *p)
+{
+	__put_unaligned_cpu16(val, p);
+}
+
+static inline void put_unaligned_le32(u32 val, void *p)
+{
+	__put_unaligned_cpu32(val, p);
+}
+
+static inline void put_unaligned_le64(u64 val, void *p)
+{
+	__put_unaligned_cpu64(val, p);
+}
+
+static inline void put_unaligned_be16(u16 val, void *p)
+{
+	__put_unaligned_be16(val, p);
+}
+
+static inline void put_unaligned_be32(u32 val, void *p)
+{
+	__put_unaligned_be32(val, p);
+}
+
+static inline void put_unaligned_be64(u64 val, void *p)
+{
+	__put_unaligned_be64(val, p);
+}
+
+#endif /* _LINUX_UNALIGNED_GENERIC_LE_H_ */
diff --git a/include/linux/unaligned/little_endian.h b/include/linux/unaligned/little_endian.h
new file mode 100644
index 0000000..38de5c1
--- /dev/null
+++ b/include/linux/unaligned/little_endian.h
@@ -0,0 +1,84 @@
+#ifndef _LINUX_UNALIGNED_LITTLE_ENDIAN_H_
+#define _LINUX_UNALIGNED_LITTLE_ENDIAN_H_
+
+#include <linux/kernel.h>
+
+static inline u16 __get_unaligned_le16(const u8 *p)
+{
+	return (u16)(p[0] | p[1] << 8);
+}
+
+static inline u32 __get_unaligned_le32(const u8 *p)
+{
+	return (u32)(p[0] | p[1] << 8 | p[2] << 16 | p[3] << 24);
+}
+
+static inline u64 __get_unaligned_le64(const u8 *p)
+{
+	return ((u64)__get_unaligned_le32(p + 4) << 32) |
+	       __get_unaligned_le32(p);
+}
+
+#define __get_unaligned_le(ptr) ({			\
+	const void *__gu_p = (ptr);			\
+	typeof(*(ptr)) __val;				\
+	switch (sizeof(*(ptr))) {			\
+	case 1:						\
+		__val = *(const u8 *)__gu_p;		\
+		break;					\
+	case 2:						\
+		__val = __get_unaligned_le16(__gu_p);	\
+		break;					\
+	case 4:						\
+		__val = __get_unaligned_le32(__gu_p);	\
+		break;					\
+	case 8:						\
+		__val = __get_unaligned_le64(__gu_p);	\
+		break;					\
+	default:					\
+		BUILD_BUG_ON(1);			\
+		break;					\
+	};						\
+	__val; })
+
+static inline void __put_unaligned_le16(u16 val, u8 *p)
+{
+	*p++ = val;
+	*p++ = val >> 8;
+}
+
+static inline void __put_unaligned_le32(u32 val, u8 *p)
+{
+	__put_unaligned_le16(val >> 16, p + 2);
+	__put_unaligned_le16(val, p);
+}
+
+static inline void __put_unaligned_le64(u64 val, u8 *p)
+{
+	__put_unaligned_le32(val >> 32, p + 4);
+	__put_unaligned_le32(val, p);
+}
+
+#define __put_unaligned_le(val, ptr) ({					\
+	(void)sizeof(*(ptr) = (val));					\
+	void *__gu_p = (ptr);						\
+	switch (sizeof(*(ptr))) {					\
+	case 1:								\
+		*(u8 *)__gu_p = (__force u8)(val);			\
+		break;							\
+	case 2:								\
+		__put_unaligned_le16((__force u16)(val), __gu_p);	\
+		break;							\
+	case 4:								\
+		__put_unaligned_le32((__force u32)(val), __gu_p);	\
+		break;							\
+	case 8:								\
+		__put_unaligned_le64((__force u64)(val), __gu_p);	\
+		break;							\
+	default:							\
+		BUILD_BUG_ON(1);					\
+		break;							\
+	}								\
+	(void)0; })
+
+#endif /* _LINUX_UNALIGNED_LITTLE_ENDIAN_H_ */
diff --git a/include/linux/unaligned/no_builtin_memcpy.h b/include/linux/unaligned/no_builtin_memcpy.h
new file mode 100644
index 0000000..9365807
--- /dev/null
+++ b/include/linux/unaligned/no_builtin_memcpy.h
@@ -0,0 +1,80 @@
+#ifndef _LINUX_UNALIGNED_NO_BUILTIN_MEMCPY_H_
+#define _LINUX_UNALIGNED_NO_BUILTIN_MEMCPY_H_
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <asm/byteorder.h>
+
+/* Use memmove here, so gcc does not insert a __builtin_memcpy. */
+
+#define get_unaligned(ptr) ({				\
+	__typeof__(*(ptr)) __tmp;			\
+	memmove(&__tmp, (ptr), sizeof(*(ptr)));		\
+	__tmp; })
+
+#define put_unaligned(val, ptr) ({			\
+	__typeof__(*(ptr)) __tmp = (val);		\
+	memmove((ptr), &__tmp, sizeof(*(ptr)));		\
+	(void)0; })
+
+static inline u16 get_unaligned_le16(const __le16 *p)
+{
+	return le16_to_cpu(get_unaligned(p));
+}
+
+static inline u32 get_unaligned_le32(const __le32 *p)
+{
+	return le32_to_cpu(get_unaligned(p));
+}
+
+static inline u64 get_unaligned_le64(const __le64 *p)
+{
+	return le64_to_cpu(get_unaligned(p));
+}
+
+static inline u16 get_unaligned_be16(const __be16 *p)
+{
+	return be16_to_cpu(get_unaligned(p));
+}
+
+static inline u32 get_unaligned_be32(const __be32 *p)
+{
+	return be32_to_cpu(get_unaligned(p));
+}
+
+static inline u64 get_unaligned_be64(const __be64 *p)
+{
+	return be64_to_cpu(get_unaligned(p));
+}
+
+static inline void put_unaligned_le16(u16 val, void *p)
+{
+	put_unaligned(cpu_to_le16(val), (__le16 *)p);
+}
+
+static inline void put_unaligned_le32(u32 val, void *p)
+{
+	put_unaligned(cpu_to_le32(val), (__le32 *)p);
+}
+
+static inline void put_unaligned_le64(u64 val, void *p)
+{
+	put_unaligned(cpu_to_le64(val), (__le64 *)p);
+}
+
+static inline void put_unaligned_be16(u16 val, void *p)
+{
+	put_unaligned(cpu_to_be16(val), (__be16 *)p);
+}
+
+static inline void put_unaligned_be32(u32 val, void *p)
+{
+	put_unaligned(cpu_to_be32(val), (__be32 *)p);
+}
+
+static inline void put_unaligned_be64(u64 val, void *p)
+{
+	put_unaligned(cpu_to_be64(val), (__be64 *)p);
+}
+
+#endif
-- 
1.5.5.144.g3e42



^ permalink raw reply related	[flat|nested] 24+ messages in thread

* Re: [PATCH 1/8] kernel: add common infrastructure for unaligned access
  2008-04-10 19:44 [PATCH 1/8] kernel: add common infrastructure for unaligned access Harvey Harrison
  2008-04-10 19:44 ` Harvey Harrison
@ 2008-04-10 21:43 ` David Howells
  2008-04-10 21:43   ` David Howells
       [not found]   ` <11527.1207863801-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
  2008-04-11  0:06 ` [PATCHv2 " Harvey Harrison
  2 siblings, 2 replies; 24+ messages in thread
From: David Howells @ 2008-04-10 21:43 UTC (permalink / raw)
  To: Harvey Harrison
  Cc: dhowells-H+wXaHxf7aLQT0dZR+AlfA, Andrew Morton, linux-arch

Harvey Harrison <harvey.harrison-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org> wrote:

> +static inline u16 __get_unaligned_le16(const u8 *p)
> +{
> +	return (u16)(p[0] | p[1] << 8);
> +}

You shouldn't need these casts.  return is going to cast it anyway.

Actually, you probably _ought_ to have casts, but it should look like this:

	return (u16)p[0] | (u16)p[1] << 8;

You are shifting an 8-bit value left by 8 bits, so the compiler may be at
liberty to construct the RHS to end up zero.

I presume the compiler is guaranteed not to merge the two memory accesses?  I
can't seem to make it do so, though I seem to remember there were cases where
it did, though I can't reproduce them.  I assume that's why you're passing in
a u8 pointer and not a u16/u32/u64 pointer.

David

^ permalink raw reply	[flat|nested] 24+ messages in thread

* Re: [PATCH 1/8] kernel: add common infrastructure for unaligned access
  2008-04-10 21:43 ` David Howells
@ 2008-04-10 21:43   ` David Howells
       [not found]   ` <11527.1207863801-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
  1 sibling, 0 replies; 24+ messages in thread
From: David Howells @ 2008-04-10 21:43 UTC (permalink / raw)
  To: Harvey Harrison; +Cc: dhowells, Andrew Morton, linux-arch

Harvey Harrison <harvey.harrison@gmail.com> wrote:

> +static inline u16 __get_unaligned_le16(const u8 *p)
> +{
> +	return (u16)(p[0] | p[1] << 8);
> +}

You shouldn't need these casts.  return is going to cast it anyway.

Actually, you probably _ought_ to have casts, but it should look like this:

	return (u16)p[0] | (u16)p[1] << 8;

You are shifting an 8-bit value left by 8 bits, so the compiler may be at
liberty to construct the RHS to end up zero.

I presume the compiler is guaranteed not to merge the two memory accesses?  I
can't seem to make it do so, though I seem to remember there were cases where
it did, though I can't reproduce them.  I assume that's why you're passing in
a u8 pointer and not a u16/u32/u64 pointer.

David

^ permalink raw reply	[flat|nested] 24+ messages in thread

* Re: [PATCH 1/8] kernel: add common infrastructure for unaligned access
       [not found]   ` <11527.1207863801-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
@ 2008-04-10 21:55     ` Harvey Harrison
  2008-04-10 21:55       ` Harvey Harrison
  2008-04-10 22:01       ` David Howells
  0 siblings, 2 replies; 24+ messages in thread
From: Harvey Harrison @ 2008-04-10 21:55 UTC (permalink / raw)
  To: David Howells; +Cc: Andrew Morton, linux-arch

On Thu, 2008-04-10 at 22:43 +0100, David Howells wrote:
> Harvey Harrison <harvey.harrison-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org> wrote:
> 
> > +static inline u16 __get_unaligned_le16(const u8 *p)
> > +{
> > +	return (u16)(p[0] | p[1] << 8);
> > +}
> 
> You shouldn't need these casts.  return is going to cast it anyway.
> 
> Actually, you probably _ought_ to have casts, but it should look like this:
> 
> 	return (u16)p[0] | (u16)p[1] << 8;

I've been looking at that thinking I needed something different, I
believe it is ok as u8 will expand to int when shifted... correct?  Or
do I actually need the cast on each p[] term...anyone?

> 
> You are shifting an 8-bit value left by 8 bits, so the compiler may be at
> liberty to instruct the RHS to end up zero.
> 
> I presume the compiler is guaranteed not to merge the two memory accesses?  It
> can't seem to make it do so, though I seem to remember there were cases where
> it did, though I can't reproduce them.  I assume that's why you're passing in
> a u8 pointer and not a u16/u32/u64 pointer.

Yes, that is the reason.  The implementation is nearly identical to the
existing arm version in-tree (minus the register keywords of course).

Harvey

^ permalink raw reply	[flat|nested] 24+ messages in thread

* Re: [PATCH 1/8] kernel: add common infrastructure for unaligned access
  2008-04-10 21:55     ` Harvey Harrison
@ 2008-04-10 21:55       ` Harvey Harrison
  2008-04-10 22:01       ` David Howells
  1 sibling, 0 replies; 24+ messages in thread
From: Harvey Harrison @ 2008-04-10 21:55 UTC (permalink / raw)
  To: David Howells; +Cc: Andrew Morton, linux-arch

On Thu, 2008-04-10 at 22:43 +0100, David Howells wrote:
> Harvey Harrison <harvey.harrison@gmail.com> wrote:
> 
> > +static inline u16 __get_unaligned_le16(const u8 *p)
> > +{
> > +	return (u16)(p[0] | p[1] << 8);
> > +}
> 
> You shouldn't need these casts.  return is going to cast it anyway.
> 
> Actually, you probably _ought_ to have casts, but it should look like this:
> 
> 	return (u16)p[0] | (u16)p[1] << 8;

I've been looking at that thinking I needed something different, I
believe it is ok as u8 will expand to int when shifted... correct?  Or
do I actually need the cast on each p[] term...anyone?

> 
> You are shifting an 8-bit value left by 8 bits, so the compiler may be at
> liberty to instruct the RHS to end up zero.
> 
> I presume the compiler is guaranteed not to merge the two memory accesses?  It
> can't seem to make it do so, though I seem to remember there were cases where
> it did, though I can't reproduce them.  I assume that's why you're passing in
> a u8 pointer and not a u16/u32/u64 pointer.

Yes, that is the reason.  The implementation is nearly identical to the
existing arm version in-tree (minus the register keywords of course).

Harvey


^ permalink raw reply	[flat|nested] 24+ messages in thread

* Re: [PATCH 1/8] kernel: add common infrastructure for unaligned access
  2008-04-10 21:55     ` Harvey Harrison
  2008-04-10 21:55       ` Harvey Harrison
@ 2008-04-10 22:01       ` David Howells
  2008-04-10 22:01         ` David Howells
       [not found]         ` <11814.1207864864-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
  1 sibling, 2 replies; 24+ messages in thread
From: David Howells @ 2008-04-10 22:01 UTC (permalink / raw)
  To: Harvey Harrison
  Cc: dhowells-H+wXaHxf7aLQT0dZR+AlfA, Andrew Morton, linux-arch

Harvey Harrison <harvey.harrison-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org> wrote:

> > Actually, you probably _ought_ to have casts, but it should look like this:
> > 
> > 	return (u16)p[0] | (u16)p[1] << 8;
> 
> I've been looking at that thinking I needed something different, I
> believe it is ok as u8 will expand to int when shifted... correct?  Or
> do I actually need the cast on each p[] term...anyone?

Hmmm... I think you may be right:

	#include <stdio.h>

	int main()
	{
		unsigned char x;
		printf("%u, %u\n", sizeof(x), sizeof(x << 8));
		return 0;
	}

Says:

	1, 4

In which case, the cast you do have is superfluous, and casting the retrievals
is unnecessary.

David

^ permalink raw reply	[flat|nested] 24+ messages in thread

* Re: [PATCH 1/8] kernel: add common infrastructure for unaligned access
  2008-04-10 22:01       ` David Howells
@ 2008-04-10 22:01         ` David Howells
       [not found]         ` <11814.1207864864-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
  1 sibling, 0 replies; 24+ messages in thread
From: David Howells @ 2008-04-10 22:01 UTC (permalink / raw)
  To: Harvey Harrison; +Cc: dhowells, Andrew Morton, linux-arch

Harvey Harrison <harvey.harrison@gmail.com> wrote:

> > Actually, you probably _ought_ to have casts, but it should look like this:
> > 
> > 	return (u16)p[0] | (u16)p[1] << 8;
> 
> I've been looking at that thinking I needed something different, I
> believe it is ok as u8 will expand to int when shifted... correct?  Or
> do I actually need the cast on each p[] term...anyone?

Hmmm... I think you may be right:

	#include <stdio.h>

	int main()
	{
		unsigned char x;
		printf("%u, %u\n", sizeof(x), sizeof(x << 8));
		return 0;
	}

Says:

	1, 4

In which case, the cast you do have is superfluous, and casting the retrievals
is unnecessary.

David

^ permalink raw reply	[flat|nested] 24+ messages in thread

* Re: [PATCH 1/8] kernel: add common infrastructure for unaligned access
       [not found]         ` <11814.1207864864-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
@ 2008-04-10 22:06           ` Harvey Harrison
  2008-04-10 22:06             ` Harvey Harrison
  2008-04-10 22:15             ` David Howells
  0 siblings, 2 replies; 24+ messages in thread
From: Harvey Harrison @ 2008-04-10 22:06 UTC (permalink / raw)
  To: David Howells; +Cc: Andrew Morton, linux-arch

On Thu, 2008-04-10 at 23:01 +0100, David Howells wrote:
> Harvey Harrison <harvey.harrison-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org> wrote:
> 
> > > Actually, you probably _ought_ to have casts, but it should look like this:
> > > 
> > > 	return (u16)p[0] | (u16)p[1] << 8;
> > 
> > I've been looking at that thinking I needed something different, I
> > believe it is ok as u8 will expand to int when shifted... correct?  Or
> > do I actually need the cast on each p[] term...anyone?
> 
> Hmmm... I think you may be right:
> 
> 	#include <stdio.h>
> 
> 	int main()
> 	{
> 		unsigned char x;
> 		printf("%u, %u\n", sizeof(x), sizeof(x << 8));
> 		return 0;
> 	}
> 
> Says:
> 
> 	1, 4
> 
> In which case, the cast you do have is superfluous, and casting the retrievals
> is unnecessary.

Expands to int, not unsigned int, I think that cast is still needed?

Harvey

^ permalink raw reply	[flat|nested] 24+ messages in thread

* Re: [PATCH 1/8] kernel: add common infrastructure for unaligned access
  2008-04-10 22:06           ` Harvey Harrison
@ 2008-04-10 22:06             ` Harvey Harrison
  2008-04-10 22:15             ` David Howells
  1 sibling, 0 replies; 24+ messages in thread
From: Harvey Harrison @ 2008-04-10 22:06 UTC (permalink / raw)
  To: David Howells; +Cc: Andrew Morton, linux-arch

On Thu, 2008-04-10 at 23:01 +0100, David Howells wrote:
> Harvey Harrison <harvey.harrison@gmail.com> wrote:
> 
> > > Actually, you probably _ought_ to have casts, but it should look like this:
> > > 
> > > 	return (u16)p[0] | (u16)p[1] << 8;
> > 
> > I've been looking at that thinking I needed something different, I
> > believe it is ok as u8 will expand to int when shifted... correct?  Or
> > do I actually need the cast on each p[] term...anyone?
> 
> Hmmm... I think you may be right:
> 
> 	#include <stdio.h>
> 
> 	int main()
> 	{
> 		unsigned char x;
> 		printf("%u, %u\n", sizeof(x), sizeof(x << 8));
> 		return 0;
> 	}
> 
> Says:
> 
> 	1, 4
> 
> In which case, the cast you do have is superfluous, and casting the retrievals
> is unnecessary.

Expands to int, not unsigned int, I think that cast is still needed?

Harvey


^ permalink raw reply	[flat|nested] 24+ messages in thread

* Re: [PATCH 1/8] kernel: add common infrastructure for unaligned access
  2008-04-10 22:06           ` Harvey Harrison
  2008-04-10 22:06             ` Harvey Harrison
@ 2008-04-10 22:15             ` David Howells
  2008-04-10 22:15               ` David Howells
       [not found]               ` <11907.1207865758-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
  1 sibling, 2 replies; 24+ messages in thread
From: David Howells @ 2008-04-10 22:15 UTC (permalink / raw)
  To: Harvey Harrison
  Cc: dhowells-H+wXaHxf7aLQT0dZR+AlfA, Andrew Morton, linux-arch

Harvey Harrison <harvey.harrison-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org> wrote:

> Expands to int, not unsigned int, I think that cast is still needed?

What for?  The return is going to do the same cast anyway because of the
function return type.

David

^ permalink raw reply	[flat|nested] 24+ messages in thread

* Re: [PATCH 1/8] kernel: add common infrastructure for unaligned access
  2008-04-10 22:15             ` David Howells
@ 2008-04-10 22:15               ` David Howells
       [not found]               ` <11907.1207865758-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
  1 sibling, 0 replies; 24+ messages in thread
From: David Howells @ 2008-04-10 22:15 UTC (permalink / raw)
  To: Harvey Harrison; +Cc: dhowells, Andrew Morton, linux-arch

Harvey Harrison <harvey.harrison@gmail.com> wrote:

> Expands to int, not unsigned int, I think that cast is still needed?

What for?  The return is going to do the same cast anyway because of the
function return type.

David

^ permalink raw reply	[flat|nested] 24+ messages in thread

* Re: [PATCH 1/8] kernel: add common infrastructure for unaligned access
       [not found]               ` <11907.1207865758-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
@ 2008-04-10 22:20                 ` Harvey Harrison
  2008-04-10 22:20                   ` Harvey Harrison
  2008-04-10 22:33                   ` David Howells
  0 siblings, 2 replies; 24+ messages in thread
From: Harvey Harrison @ 2008-04-10 22:20 UTC (permalink / raw)
  To: David Howells; +Cc: Andrew Morton, linux-arch

On Thu, 2008-04-10 at 23:15 +0100, David Howells wrote:
> Harvey Harrison <harvey.harrison-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org> wrote:
> 
> > Expands to int, not unsigned int, I think that cast is still needed?
> 
> What for?  The return is going to do the same cast anyway because of the
> function return type.
> 
> David

Well, for the u16 case, won't the compiler warn about truncating the
return if I return an int when the function returns u16?

In the u64 case, I need the cast to ensure it expands to u64 rather than
int from the shift.  Agreed in the u32 case, but then it looks different
than the u16/u64 case (cargo-cult I know, but nice for consistency).

Harvey

^ permalink raw reply	[flat|nested] 24+ messages in thread

* Re: [PATCH 1/8] kernel: add common infrastructure for unaligned access
  2008-04-10 22:20                 ` Harvey Harrison
@ 2008-04-10 22:20                   ` Harvey Harrison
  2008-04-10 22:33                   ` David Howells
  1 sibling, 0 replies; 24+ messages in thread
From: Harvey Harrison @ 2008-04-10 22:20 UTC (permalink / raw)
  To: David Howells; +Cc: Andrew Morton, linux-arch

On Thu, 2008-04-10 at 23:15 +0100, David Howells wrote:
> Harvey Harrison <harvey.harrison@gmail.com> wrote:
> 
> > Expands to int, not unsigned int, I think that cast is still needed?
> 
> What for?  The return is going to do the same cast anyway because of the
> function return type.
> 
> David

Well, for the u16 case, won't the compiler warn about truncating the
return if I return an int when the function returns u16?

In the u64 case, I need the cast to ensure it expands to u64 rather than
int from the shift.  Agreed in the u32 case, but then it looks different
than the u16/u64 case (cargo-cult I know, but nice for consistency).

Harvey


^ permalink raw reply	[flat|nested] 24+ messages in thread

* Re: [PATCH 1/8] kernel: add common infrastructure for unaligned access
  2008-04-10 22:20                 ` Harvey Harrison
  2008-04-10 22:20                   ` Harvey Harrison
@ 2008-04-10 22:33                   ` David Howells
  2008-04-10 22:33                     ` David Howells
       [not found]                     ` <11989.1207866793-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
  1 sibling, 2 replies; 24+ messages in thread
From: David Howells @ 2008-04-10 22:33 UTC (permalink / raw)
  To: Harvey Harrison
  Cc: dhowells-H+wXaHxf7aLQT0dZR+AlfA, Andrew Morton, linux-arch

Harvey Harrison <harvey.harrison-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org> wrote:

> Well, for the u16 case, won't the compiler warn about truncating the
> return if I return an int when the function returns u16?

It doesn't for me for:

	u16 test(const u8 *p)
	{
		return (u16) (p[0] | p[1] << 8);
	}

> In the u64 case, I need the cast to ensure it expands to u64 rather than
> int from the shift.  Agreed in the u32 case, but then it looks different
> than the u16/u64 case (cargo-cult I know, but nice for consistency).

Agreed, the u64 cast is necessary, but I was talking about casts generally of:

	<TYPE> function(...)
	{
		return (<TYPE>) (...);
	}

David

^ permalink raw reply	[flat|nested] 24+ messages in thread

* Re: [PATCH 1/8] kernel: add common infrastructure for unaligned access
  2008-04-10 22:33                   ` David Howells
@ 2008-04-10 22:33                     ` David Howells
       [not found]                     ` <11989.1207866793-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
  1 sibling, 0 replies; 24+ messages in thread
From: David Howells @ 2008-04-10 22:33 UTC (permalink / raw)
  To: Harvey Harrison; +Cc: dhowells, Andrew Morton, linux-arch

Harvey Harrison <harvey.harrison@gmail.com> wrote:

> Well, for the u16 case, won't the compiler warn about truncating the
> return if I return an int when the function returns u16?

It doesn't for me for:

	u16 test(const u8 *p)
	{
		return (u16) (p[0] | p[1] << 8);
	}

> In the u64 case, I need the cast to ensure it expands to u64 rather than
> int from the shift.  Agreed in the u32 case, but then it looks different
> than the u16/u64 case (cargo-cult I know, but nice for consistency).

Agreed, the u64 cast is necessary, but I was talking about casts generally of:

	<TYPE> function(...)
	{
		return (<TYPE>) (...);
	}

David

^ permalink raw reply	[flat|nested] 24+ messages in thread

* Re: [PATCH 1/8] kernel: add common infrastructure for unaligned access
       [not found]                     ` <11989.1207866793-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
@ 2008-04-10 22:37                       ` Harvey Harrison
  2008-04-10 22:37                         ` Harvey Harrison
  0 siblings, 1 reply; 24+ messages in thread
From: Harvey Harrison @ 2008-04-10 22:37 UTC (permalink / raw)
  To: David Howells; +Cc: Andrew Morton, linux-arch

On Thu, 2008-04-10 at 23:33 +0100, David Howells wrote:
> Harvey Harrison <harvey.harrison-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org> wrote:
> 
> > Well, for the u16 case, won't the compiler warn about truncating the
> > return if I return an int when the function returns u16?
> 
> It doesn't for me for:
> 
> 	u16 test(const u8 *p)
> 	{
> 		return (u16) (p[0] | p[1] << 8);
> 	}
> 
> > In the u64 case, I need the cast to ensure it expands to u64 rather than
> > int from the shift.  Agreed in the u32 case, but then it looks different
> > than the u16/u64 case (cargo-cult I know, but nice for consistency).
> 
> Agreed, the u64 cast is necessary, but I was talking about casts generally of:
> 
> 	<TYPE> function(...)
> 	{
> 		return (<TYPE>) (...);
> 	}
> 

OK, I'll see what other comments come in and keep it in mind for the
next iteration.  I'll also move frv over to the generic C code in that
submission as we discussed.

Harvey

^ permalink raw reply	[flat|nested] 24+ messages in thread

* Re: [PATCH 1/8] kernel: add common infrastructure for unaligned access
  2008-04-10 22:37                       ` Harvey Harrison
@ 2008-04-10 22:37                         ` Harvey Harrison
  0 siblings, 0 replies; 24+ messages in thread
From: Harvey Harrison @ 2008-04-10 22:37 UTC (permalink / raw)
  To: David Howells; +Cc: Andrew Morton, linux-arch

On Thu, 2008-04-10 at 23:33 +0100, David Howells wrote:
> Harvey Harrison <harvey.harrison@gmail.com> wrote:
> 
> > Well, for the u16 case, won't the compiler warn about truncating the
> > return if I return an int when the function returns u16?
> 
> It doesn't for me for:
> 
> 	u16 test(const u8 *p)
> 	{
> 		return (u16) (p[0] | p[1] << 8);
> 	}
> 
> > In the u64 case, I need the cast to ensure it expands to u64 rather than
> > int from the shift.  Agreed in the u32 case, but then it looks different
> > than the u16/u64 case (cargo-cult I know, but nice for consistency).
> 
> Agreed, the u64 cast is necessary, but I was talking about casts generally of:
> 
> 	<TYPE> function(...)
> 	{
> 		return (<TYPE>) (...);
> 	}
> 

OK, I'll see what other comments come in and keep it in mind for the
next iteration.  I'll also move frv over to the generic C code in that
submission as we discussed.

Harvey


^ permalink raw reply	[flat|nested] 24+ messages in thread

* Re: [PATCHv2 1/8] kernel: add common infrastructure for unaligned access
  2008-04-10 19:44 [PATCH 1/8] kernel: add common infrastructure for unaligned access Harvey Harrison
  2008-04-10 19:44 ` Harvey Harrison
  2008-04-10 21:43 ` David Howells
@ 2008-04-11  0:06 ` Harvey Harrison
  2008-04-11  0:06   ` Harvey Harrison
  2008-04-11 18:09   ` Russell King
  2 siblings, 2 replies; 24+ messages in thread
From: Harvey Harrison @ 2008-04-11  0:06 UTC (permalink / raw)
  To: Andrew Morton; +Cc: linux-arch

Small update changing the exposed API of get_unaligned_* take
a void * to avoid casts in every single caller.  We are already
specifying what we want to get explicitly in the function name:
get_unaligned_le16...le32...le64..etc.

This will make 8/8 look even cleaner as a bunch of casts are not
needed.

Harvey

diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
new file mode 100644
index 0000000..3668b45
--- /dev/null
+++ b/include/linux/unaligned/access_ok.h
@@ -0,0 +1,70 @@
+#ifndef _LINUX_UNALIGNED_ACCESS_OK_H_
+#define _LINUX_UNALIGNED_ACCESS_OK_H_
+
+#include <linux/kernel.h>
+#include <asm/byteorder.h>
+
+#define get_unaligned(ptr) (*(ptr))
+#define put_unaligned(val, ptr) ((void)( *(ptr) = (val) ))
+
+static inline u16 get_unaligned_le16(const void *p)
+{
+	return le16_to_cpup((__le16 *)p);
+}
+
+static inline u32 get_unaligned_le32(const void *p)
+{
+	return le32_to_cpup((__le32 *)p);
+}
+
+static inline u64 get_unaligned_le64(const void *p)
+{
+	return le64_to_cpup((__le64 *)p);
+}
+
+static inline u16 get_unaligned_be16(const void *p)
+{
+	return be16_to_cpup((__be16 *)p);
+}
+
+static inline u32 get_unaligned_be32(const void *p)
+{
+	return be32_to_cpup((__be32 *)p);
+}
+
+static inline u64 get_unaligned_be64(const void *p)
+{
+	return be64_to_cpup((__be64 *)p);
+}
+
+static inline void put_unaligned_le16(u16 val, void *p)
+{
+	*((__le16 *)p) = cpu_to_le16(val);
+}
+
+static inline void put_unaligned_le32(u32 val, void *p)
+{
+	*((__le32 *)p) = cpu_to_le32(val);
+}
+
+static inline void put_unaligned_le64(u64 val, void *p)
+{
+	*((__le64 *)p) = cpu_to_le64(val);
+}
+
+static inline void put_unaligned_be16(u16 val, void *p)
+{
+	*((__be16 *)p) = cpu_to_be16(val);
+}
+
+static inline void put_unaligned_be32(u32 val, void *p)
+{
+	*((__be32 *)p) = cpu_to_be32(val);
+}
+
+static inline void put_unaligned_be64(u64 val, void *p)
+{
+	*((__be64 *)p) = cpu_to_be64(val);
+}
+
+#endif /* _LINUX_UNALIGNED_ACCESS_OK_H_ */
diff --git a/include/linux/unaligned/big_endian.h b/include/linux/unaligned/big_endian.h
new file mode 100644
index 0000000..f3cd105
--- /dev/null
+++ b/include/linux/unaligned/big_endian.h
@@ -0,0 +1,84 @@
+#ifndef _LINUX_UNALIGNED_BIG_ENDIAN_H_
+#define _LINUX_UNALIGNED_BIG_ENDIAN_H_
+
+#include <linux/kernel.h>
+
+static inline u16 __get_unaligned_be16(const u8 *p)
+{
+	return p[0] << 8 | p[1];
+}
+
+static inline u32 __get_unaligned_be32(const u8 *p)
+{
+	return p[0] << 24 | p[1] << 16 | p[2] << 8 | p[3];
+}
+
+static inline u64 __get_unaligned_be64(const u8 *p)
+{
+	return (u64)__get_unaligned_be32(p) << 32 |
+	       __get_unaligned_be32(p + 4);
+}
+
+#define __get_unaligned_be(ptr) ({			\
+	const void *__gu_p = (ptr);			\
+	typeof(*(ptr)) __val;				\
+	switch (sizeof(*(ptr))) {			\
+	case 1:						\
+		__val = *(const u8 *)__gu_p;		\
+		break;					\
+	case 2:						\
+		__val = __get_unaligned_be16(__gu_p);	\
+		break;					\
+	case 4:						\
+		__val = __get_unaligned_be32(__gu_p);	\
+		break;					\
+	case 8:						\
+		__val = __get_unaligned_be64(__gu_p);	\
+		break;					\
+	default:					\
+		BUILD_BUG_ON(1);			\
+		break;					\
+	};						\
+	__val; })
+
+static inline void __put_unaligned_be16(u16 val, u8 *p)
+{
+	*p++ = val >> 8;
+	*p++ = val;
+}
+
+static inline void __put_unaligned_be32(u32 val, u8 *p)
+{
+	__put_unaligned_be16(val >> 16, p);
+	__put_unaligned_be16(val, p + 2);
+}
+
+static inline void __put_unaligned_be64(u64 val, u8 *p)
+{
+	__put_unaligned_be32(val >> 32, p);
+	__put_unaligned_be32(val, p + 4);
+}
+
+#define __put_unaligned_be(val, ptr) ({					\
+	(void)sizeof(*(ptr) = (val));					\
+	void *__gu_p = (ptr);						\
+	switch (sizeof(*(ptr))) {					\
+	case 1:								\
+		*(u8 *)__gu_p = (__force u8)(val);			\
+		break;							\
+	case 2:								\
+		__put_unaligned_be16((__force u16)(val), __gu_p);	\
+		break;							\
+	case 4:								\
+		__put_unaligned_be32((__force u32)(val), __gu_p);	\
+		break;							\
+	case 8:								\
+		__put_unaligned_be64((__force u64)(val), __gu_p);	\
+		break;							\
+	default:							\
+		BUILD_BUG_ON(1);					\
+		break;							\
+	}								\
+	(void)0; })
+
+#endif /* _LINUX_UNALIGNED_BIG_ENDIAN_H_ */
diff --git a/include/linux/unaligned/cpu_endian.h b/include/linux/unaligned/cpu_endian.h
new file mode 100644
index 0000000..8189286
--- /dev/null
+++ b/include/linux/unaligned/cpu_endian.h
@@ -0,0 +1,90 @@
+#ifndef _LINUX_UNALIGNED_CPU_ENDIAN_H_
+#define _LINUX_UNALIGNED_CPU_ENDIAN_H_
+
+#include <linux/kernel.h>
+
+struct __una_u16 { u16 x __attribute__((packed)); };
+struct __una_u32 { u32 x __attribute__((packed)); };
+struct __una_u64 { u64 x __attribute__((packed)); };
+
+static inline u16 __get_unaligned_cpu16(const void *p)
+{
+	const struct __una_u16 *ptr = (const struct __una_u16 *)p;
+	return ptr->x;
+}
+
+static inline u32 __get_unaligned_cpu32(const void *p)
+{
+	const struct __una_u32 *ptr = (const struct __una_u32 *)p;
+	return ptr->x;
+}
+
+static inline u64 __get_unaligned_cpu64(const void *p)
+{
+	const struct __una_u64 *ptr = (const struct __una_u64 *)p;
+	return ptr->x;
+}
+
+#define __get_unaligned_cpu(ptr) ({			\
+	const void *__gu_p = (ptr);			\
+	typeof(*(ptr)) __val;				\
+	switch (sizeof(*(ptr))) {			\
+	case 1:						\
+		__val = *(const u8 *)__gu_p;		\
+		break;					\
+	case 2:						\
+		__val = __get_unaligned_cpu16(__gu_p);	\
+		break;					\
+	case 4:						\
+		__val = __get_unaligned_cpu32(__gu_p);	\
+		break;					\
+	case 8:						\
+		__val = __get_unaligned_cpu64(__gu_p);	\
+		break;					\
+	default:					\
+		BUILD_BUG_ON(1);			\
+		break;					\
+	};						\
+	__val; })
+
+static inline void __put_unaligned_cpu16(u16 val, void *p)
+{
+	struct __una_u16 *ptr = (struct __una_u16 *)p;
+	ptr->x = val;
+}
+
+static inline void __put_unaligned_cpu32(u32 val, void *p)
+{
+	struct __una_u32 *ptr = (struct __una_u32 *)p;
+	ptr->x = val;
+}
+
+static inline void __put_unaligned_cpu64(u64 val, void *p)
+{
+	struct __una_u64 *ptr = (struct __una_u64 *)p;
+	ptr->x = val;
+}
+
+#define __put_unaligned_cpu(val, ptr) ({				\
+	(void)sizeof(*(ptr) = (val));					\
+	void *__gu_p = (ptr);						\
+	switch (sizeof(*(ptr))) {					\
+	case 1:								\
+		*(u8 *)__gu_p = (__force u8)(val);			\
+		break;							\
+	case 2:								\
+		__put_unaligned_cpu16((__force u16)(val), __gu_p);	\
+		break;							\
+	case 4:								\
+		__put_unaligned_cpu32((__force u32)(val), __gu_p);	\
+		break;							\
+	case 8:								\
+		__put_unaligned_cpu64((__force u64)(val), __gu_p);	\
+		break;							\
+	default:							\
+		BUILD_BUG_ON(1);					\
+		break;							\
+	}								\
+	(void)0; })
+
+#endif /* _LINUX_UNALIGNED_CPU_ENDIAN_H_ */
diff --git a/include/linux/unaligned/generic.h b/include/linux/unaligned/generic.h
new file mode 100644
index 0000000..50ce393
--- /dev/null
+++ b/include/linux/unaligned/generic.h
@@ -0,0 +1,67 @@
+#ifndef _LINUX_UNALIGNED_GENERIC_H_
+#define _LINUX_UNALIGNED_GENERIC_H_
+
+#include <linux/unaligned/little_endian.h>
+#include <linux/unaligned/big_endian.h>
+
+static inline u16 get_unaligned_le16(const void *p)
+{
+	return __get_unaligned_le16((const u8 *)p);
+}
+
+static inline u32 get_unaligned_le32(const void *p)
+{
+	return __get_unaligned_le32((const u8 *)p);
+}
+
+static inline u64 get_unaligned_le64(const void *p)
+{
+	return __get_unaligned_le64((const u8 *)p);
+}
+
+static inline u16 get_unaligned_be16(const void *p)
+{
+	return __get_unaligned_be16((const u8 *)p);
+}
+
+static inline u32 get_unaligned_be32(const void *p)
+{
+	return __get_unaligned_be32((const u8 *)p);
+}
+
+static inline u64 get_unaligned_be64(const void *p)
+{
+	return __get_unaligned_be64((const u8 *)p);
+}
+
+static inline void put_unaligned_le16(u16 val, void *p)
+{
+	__put_unaligned_le16(val, p);
+}
+
+static inline void put_unaligned_le32(u32 val, void *p)
+{
+	__put_unaligned_le32(val, p);
+}
+
+static inline void put_unaligned_le64(u64 val, void *p)
+{
+	__put_unaligned_le64(val, p);
+}
+
+static inline void put_unaligned_be16(u16 val, void *p)
+{
+	__put_unaligned_be16(val, p);
+}
+
+static inline void put_unaligned_be32(u32 val, void *p)
+{
+	__put_unaligned_be32(val, p);
+}
+
+static inline void put_unaligned_be64(u64 val, void *p)
+{
+	__put_unaligned_be64(val, p);
+}
+
+#endif /* _LINUX_UNALIGNED_GENERIC_H_ */
diff --git a/include/linux/unaligned/generic_be.h b/include/linux/unaligned/generic_be.h
new file mode 100644
index 0000000..a200aca
--- /dev/null
+++ b/include/linux/unaligned/generic_be.h
@@ -0,0 +1,70 @@
+#ifndef _LINUX_UNALIGNED_GENERIC_BE_H_
+#define _LINUX_UNALIGNED_GENERIC_BE_H_
+
+#include <linux/unaligned/cpu_endian.h>
+#include <linux/unaligned/little_endian.h>
+
+#define get_unaligned	__get_unaligned_cpu
+#define put_unaligned	__put_unaligned_cpu
+
+static inline u16 get_unaligned_le16(const void *p)
+{
+	return __get_unaligned_le16((const u8 *)p);
+}
+
+static inline u32 get_unaligned_le32(const void *p)
+{
+	return __get_unaligned_le32((const u8 *)p);
+}
+
+static inline u64 get_unaligned_le64(const void *p)
+{
+	return __get_unaligned_le64((const u8 *)p);
+}
+
+static inline u16 get_unaligned_be16(const void *p)
+{
+	return __get_unaligned_cpu16((const u8 *)p);
+}
+
+static inline u32 get_unaligned_be32(const void *p)
+{
+	return __get_unaligned_cpu32((const u8 *)p);
+}
+
+static inline u64 get_unaligned_be64(const void *p)
+{
+	return __get_unaligned_cpu64((const u8 *)p);
+}
+
+static inline void put_unaligned_le16(u16 val, void *p)
+{
+	__put_unaligned_le16(val, p);
+}
+
+static inline void put_unaligned_le32(u32 val, void *p)
+{
+	__put_unaligned_le32(val, p);
+}
+
+static inline void put_unaligned_le64(u64 val, void *p)
+{
+	__put_unaligned_le64(val, p);
+}
+
+static inline void put_unaligned_be16(u16 val, void *p)
+{
+	__put_unaligned_cpu16(val, p);
+}
+
+static inline void put_unaligned_be32(u32 val, void *p)
+{
+	__put_unaligned_cpu32(val, p);
+}
+
+static inline void put_unaligned_be64(u64 val, void *p)
+{
+	__put_unaligned_cpu64(val, p);
+}
+
+#endif /* _LINUX_UNALIGNED_GENERIC_BE_H_ */
diff --git a/include/linux/unaligned/generic_le.h b/include/linux/unaligned/generic_le.h
new file mode 100644
index 0000000..8a6f6ae
--- /dev/null
+++ b/include/linux/unaligned/generic_le.h
@@ -0,0 +1,70 @@
+#ifndef _LINUX_UNALIGNED_GENERIC_LE_H_
+#define _LINUX_UNALIGNED_GENERIC_LE_H_
+
+#include <linux/unaligned/cpu_endian.h>
+#include <linux/unaligned/big_endian.h>
+
+#define get_unaligned	__get_unaligned_cpu
+#define put_unaligned	__put_unaligned_cpu
+
+static inline u16 get_unaligned_le16(const void *p)
+{
+	return __get_unaligned_cpu16((const u8 *)p);
+}
+
+static inline u32 get_unaligned_le32(const void *p)
+{
+	return __get_unaligned_cpu32((const u8 *)p);
+}
+
+static inline u64 get_unaligned_le64(const void *p)
+{
+	return __get_unaligned_cpu64((const u8 *)p);
+}
+
+static inline u16 get_unaligned_be16(const void *p)
+{
+	return __get_unaligned_be16((const u8 *)p);
+}
+
+static inline u32 get_unaligned_be32(const void *p)
+{
+	return __get_unaligned_be32((const u8 *)p);
+}
+
+static inline u64 get_unaligned_be64(const void *p)
+{
+	return __get_unaligned_be64((const u8 *)p);
+}
+
+static inline void put_unaligned_le16(u16 val, void *p)
+{
+	__put_unaligned_cpu16(val, p);
+}
+
+static inline void put_unaligned_le32(u32 val, void *p)
+{
+	__put_unaligned_cpu32(val, p);
+}
+
+static inline void put_unaligned_le64(u64 val, void *p)
+{
+	__put_unaligned_cpu64(val, p);
+}
+
+static inline void put_unaligned_be16(u16 val, void *p)
+{
+	__put_unaligned_be16(val, p);
+}
+
+static inline void put_unaligned_be32(u32 val, void *p)
+{
+	__put_unaligned_be32(val, p);
+}
+
+static inline void put_unaligned_be64(u64 val, void *p)
+{
+	__put_unaligned_be64(val, p);
+}
+
+#endif /* _LINUX_UNALIGNED_GENERIC_LE_H_ */
diff --git a/include/linux/unaligned/little_endian.h b/include/linux/unaligned/little_endian.h
new file mode 100644
index 0000000..860af6b
--- /dev/null
+++ b/include/linux/unaligned/little_endian.h
@@ -0,0 +1,84 @@
+#ifndef _LINUX_UNALIGNED_LITTLE_ENDIAN_H_
+#define _LINUX_UNALIGNED_LITTLE_ENDIAN_H_
+
+#include <linux/kernel.h>
+
+static inline u16 __get_unaligned_le16(const u8 *p)
+{
+	return p[0] | p[1] << 8;
+}
+
+static inline u32 __get_unaligned_le32(const u8 *p)
+{
+	return p[0] | p[1] << 8 | p[2] << 16 | p[3] << 24;
+}
+
+static inline u64 __get_unaligned_le64(const u8 *p)
+{
+	return (u64)__get_unaligned_le32(p + 4) << 32 |
+	       __get_unaligned_le32(p);
+}
+
+#define __get_unaligned_le(ptr) ({			\
+	const void *__gu_p = (ptr);			\
+	typeof(*(ptr)) __val;				\
+	switch (sizeof(*(ptr))) {			\
+	case 1:						\
+		__val = *(const u8 *)__gu_p;		\
+		break;					\
+	case 2:						\
+		__val = __get_unaligned_le16(__gu_p);	\
+		break;					\
+	case 4:						\
+		__val = __get_unaligned_le32(__gu_p);	\
+		break;					\
+	case 8:						\
+		__val = __get_unaligned_le64(__gu_p);	\
+		break;					\
+	default:					\
+		BUILD_BUG_ON(1);			\
+		break;					\
+	};						\
+	__val; })
+
+static inline void __put_unaligned_le16(u16 val, u8 *p)
+{
+	*p++ = val;
+	*p++ = val >> 8;
+}
+
+static inline void __put_unaligned_le32(u32 val, u8 *p)
+{
+	__put_unaligned_le16(val >> 16, p + 2);
+	__put_unaligned_le16(val, p);
+}
+
+static inline void __put_unaligned_le64(u64 val, u8 *p)
+{
+	__put_unaligned_le32(val >> 32, p + 4);
+	__put_unaligned_le32(val, p);
+}
+
+#define __put_unaligned_le(val, ptr) ({					\
+	(void)sizeof(*(ptr) = (val));					\
+	void *__gu_p = (ptr);						\
+	switch (sizeof(*(ptr))) {					\
+	case 1:								\
+		*(u8 *)__gu_p = (__force u8)(val);			\
+		break;							\
+	case 2:								\
+		__put_unaligned_le16((__force u16)(val), __gu_p);	\
+		break;							\
+	case 4:								\
+		__put_unaligned_le32((__force u32)(val), __gu_p);	\
+		break;							\
+	case 8:								\
+		__put_unaligned_le64((__force u64)(val), __gu_p);	\
+		break;							\
+	default:							\
+		BUILD_BUG_ON(1);					\
+		break;							\
+	}								\
+	(void)0; })
+
+#endif /* _LINUX_UNALIGNED_LITTLE_ENDIAN_H_ */
diff --git a/include/linux/unaligned/no_builtin_memcpy.h b/include/linux/unaligned/no_builtin_memcpy.h
new file mode 100644
index 0000000..9b9e803
--- /dev/null
+++ b/include/linux/unaligned/no_builtin_memcpy.h
@@ -0,0 +1,80 @@
+#ifndef _LINUX_UNALIGNED_NO_BUILTIN_MEMCPY_H_
+#define _LINUX_UNALIGNED_NO_BUILTIN_MEMCPY_H_
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <asm/byteorder.h>
+
+/* Use memmove here, so gcc does not insert a __builtin_memcpy. */
+
+#define get_unaligned(ptr) ({				\
+	__typeof__(*(ptr)) __tmp;			\
+	memmove(&__tmp, (ptr), sizeof(*(ptr)));		\
+	__tmp; })
+
+#define put_unaligned(val, ptr) ({			\
+	__typeof__(*(ptr)) __tmp = (val);		\
+	memmove((ptr), &__tmp, sizeof(*(ptr)));		\
+	(void)0; })
+
+static inline u16 get_unaligned_le16(const void *p)
+{
+	return le16_to_cpu(get_unaligned((__le16 *)p));
+}
+
+static inline u32 get_unaligned_le32(const void *p)
+{
+	return le32_to_cpu(get_unaligned((__le32 *)p));
+}
+
+static inline u64 get_unaligned_le64(const void *p)
+{
+	return le64_to_cpu(get_unaligned((__le64 *)p));
+}
+
+static inline u16 get_unaligned_be16(const void *p)
+{
+	return be16_to_cpu(get_unaligned((__be16 *)p));
+}
+
+static inline u32 get_unaligned_be32(const void *p)
+{
+	return be32_to_cpu(get_unaligned((__be32 *)p));
+}
+
+static inline u64 get_unaligned_be64(const void *p)
+{
+	return be64_to_cpu(get_unaligned((__be64 *)p));
+}
+
+static inline void put_unaligned_le16(u16 val, void *p)
+{
+	put_unaligned(cpu_to_le16(val), (__le16 *)p);
+}
+
+static inline void put_unaligned_le32(u32 val, void *p)
+{
+	put_unaligned(cpu_to_le32(val), (__le32 *)p);
+}
+
+static inline void put_unaligned_le64(u64 val, void *p)
+{
+	put_unaligned(cpu_to_le64(val), (__le64 *)p);
+}
+
+static inline void put_unaligned_be16(u16 val, void *p)
+{
+	put_unaligned(cpu_to_be16(val), (__be16 *)p);
+}
+
+static inline void put_unaligned_be32(u32 val, void *p)
+{
+	put_unaligned(cpu_to_be32(val), (__be32 *)p);
+}
+
+static inline void put_unaligned_be64(u64 val, void *p)
+{
+	put_unaligned(cpu_to_be64(val), (__be64 *)p);
+}
+
+#endif

^ permalink raw reply related	[flat|nested] 24+ messages in thread

* Re: [PATCHv2 1/8] kernel: add common infrastructure for unaligned access
  2008-04-11  0:06 ` [PATCHv2 " Harvey Harrison
@ 2008-04-11  0:06   ` Harvey Harrison
  2008-04-11 18:09   ` Russell King
  1 sibling, 0 replies; 24+ messages in thread
From: Harvey Harrison @ 2008-04-11  0:06 UTC (permalink / raw)
  To: Andrew Morton; +Cc: linux-arch

Small update changing the exposed API of get_unaligned_* to take
a void * to avoid casts in every single caller.  We are already
specifying what we want to get explicitly in the function name:
get_unaligned_le16...le32...le64..etc.

This will make 8/8 look even cleaner as a bunch of casts are not
needed.

Harvey

diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
new file mode 100644
index 0000000..3668b45
--- /dev/null
+++ b/include/linux/unaligned/access_ok.h
@@ -0,0 +1,70 @@
+#ifndef _LINUX_UNALIGNED_ACCESS_OK_H_
+#define _LINUX_UNALIGNED_ACCESS_OK_H_
+
+#include <linux/kernel.h>
+#include <asm/byteorder.h>
+
+#define get_unaligned(ptr) (*(ptr))
+#define put_unaligned(val, ptr) ((void)( *(ptr) = (val) ))
+
+static inline u16 get_unaligned_le16(const void *p)
+{
+	return le16_to_cpup((__le16 *)p);
+}
+
+static inline u32 get_unaligned_le32(const void *p)
+{
+	return le32_to_cpup((__le32 *)p);
+}
+
+static inline u64 get_unaligned_le64(const void *p)
+{
+	return le64_to_cpup((__le64 *)p);
+}
+
+static inline u16 get_unaligned_be16(const void *p)
+{
+	return be16_to_cpup((__be16 *)p);
+}
+
+static inline u32 get_unaligned_be32(const void *p)
+{
+	return be32_to_cpup((__be32 *)p);
+}
+
+static inline u64 get_unaligned_be64(const void *p)
+{
+	return be64_to_cpup((__be64 *)p);
+}
+
+static inline void put_unaligned_le16(u16 val, void *p)
+{
+	*((__le16 *)p) = cpu_to_le16(val);
+}
+
+static inline void put_unaligned_le32(u32 val, void *p)
+{
+	*((__le32 *)p) = cpu_to_le32(val);
+}
+
+static inline void put_unaligned_le64(u64 val, void *p)
+{
+	*((__le64 *)p) = cpu_to_le64(val);
+}
+
+static inline void put_unaligned_be16(u16 val, void *p)
+{
+	*((__be16 *)p) = cpu_to_be16(val);
+}
+
+static inline void put_unaligned_be32(u32 val, void *p)
+{
+	*((__be32 *)p) = cpu_to_be32(val);
+}
+
+static inline void put_unaligned_be64(u64 val, void *p)
+{
+	*((__be64 *)p) = cpu_to_be64(val);
+}
+
+#endif /* _LINUX_UNALIGNED_ACCESS_OK_H_ */
diff --git a/include/linux/unaligned/big_endian.h b/include/linux/unaligned/big_endian.h
new file mode 100644
index 0000000..f3cd105
--- /dev/null
+++ b/include/linux/unaligned/big_endian.h
@@ -0,0 +1,84 @@
+#ifndef _LINUX_UNALIGNED_BIG_ENDIAN_H_
+#define _LINUX_UNALIGNED_BIG_ENDIAN_H_
+
+#include <linux/kernel.h>
+
+static inline u16 __get_unaligned_be16(const u8 *p)
+{
+	return p[0] << 8 | p[1];
+}
+
+static inline u32 __get_unaligned_be32(const u8 *p)
+{
+	return p[0] << 24 | p[1] << 16 | p[2] << 8 | p[3];
+}
+
+static inline u64 __get_unaligned_be64(const u8 *p)
+{
+	return (u64)__get_unaligned_be32(p) << 32 |
+	       __get_unaligned_be32(p + 4);
+}
+
+#define __get_unaligned_be(ptr) ({			\
+	const void *__gu_p = (ptr);			\
+	typeof(*(ptr)) __val;				\
+	switch (sizeof(*(ptr))) {			\
+	case 1:						\
+		__val = *(const u8 *)__gu_p;		\
+		break;					\
+	case 2:						\
+		__val = __get_unaligned_be16(__gu_p);	\
+		break;					\
+	case 4:						\
+		__val = __get_unaligned_be32(__gu_p);	\
+		break;					\
+	case 8:						\
+		__val = __get_unaligned_be64(__gu_p);	\
+		break;					\
+	default:					\
+		BUILD_BUG_ON(1);			\
+		break;					\
+	};						\
+	__val; })
+
+static inline void __put_unaligned_be16(u16 val, u8 *p)
+{
+	*p++ = val >> 8;
+	*p++ = val;
+}
+
+static inline void __put_unaligned_be32(u32 val, u8 *p)
+{
+	__put_unaligned_be16(val >> 16, p);
+	__put_unaligned_be16(val, p + 2);
+}
+
+static inline void __put_unaligned_be64(u64 val, u8 *p)
+{
+	__put_unaligned_be32(val >> 32, p);
+	__put_unaligned_be32(val, p + 4);
+}
+
+#define __put_unaligned_be(val, ptr) ({					\
+	(void)sizeof(*(ptr) = (val));					\
+	void *__gu_p = (ptr);						\
+	switch (sizeof(*(ptr))) {					\
+	case 1:								\
+		*(u8 *)__gu_p = (__force u8)(val);			\
+		break;							\
+	case 2:								\
+		__put_unaligned_be16((__force u16)(val), __gu_p);	\
+		break;							\
+	case 4:								\
+		__put_unaligned_be32((__force u32)(val), __gu_p);	\
+		break;							\
+	case 8:								\
+		__put_unaligned_be64((__force u64)(val), __gu_p);	\
+		break;							\
+	default:							\
+		BUILD_BUG_ON(1);					\
+		break;							\
+	}								\
+	(void)0; })
+
+#endif /* _LINUX_UNALIGNED_BIG_ENDIAN_H_ */
diff --git a/include/linux/unaligned/cpu_endian.h b/include/linux/unaligned/cpu_endian.h
new file mode 100644
index 0000000..8189286
--- /dev/null
+++ b/include/linux/unaligned/cpu_endian.h
@@ -0,0 +1,90 @@
+#ifndef _LINUX_UNALIGNED_CPU_ENDIAN_H_
+#define _LINUX_UNALIGNED_CPU_ENDIAN_H_
+
+#include <linux/kernel.h>
+
+struct __una_u16 { u16 x __attribute__((packed)); };
+struct __una_u32 { u32 x __attribute__((packed)); };
+struct __una_u64 { u64 x __attribute__((packed)); };
+
+static inline u16 __get_unaligned_cpu16(const void *p)
+{
+	const struct __una_u16 *ptr = (const struct __una_u16 *)p;
+	return ptr->x;
+}
+
+static inline u32 __get_unaligned_cpu32(const void *p)
+{
+	const struct __una_u32 *ptr = (const struct __una_u32 *)p;
+	return ptr->x;
+}
+
+static inline u64 __get_unaligned_cpu64(const void *p)
+{
+	const struct __una_u64 *ptr = (const struct __una_u64 *)p;
+	return ptr->x;
+}
+
+#define __get_unaligned_cpu(ptr) ({			\
+	const void *__gu_p = (ptr);			\
+	typeof(*(ptr)) __val;				\
+	switch (sizeof(*(ptr))) {			\
+	case 1:						\
+		__val = *(const u8 *)__gu_p;		\
+		break;					\
+	case 2:						\
+		__val = __get_unaligned_cpu16(__gu_p);	\
+		break;					\
+	case 4:						\
+		__val = __get_unaligned_cpu32(__gu_p);	\
+		break;					\
+	case 8:						\
+		__val = __get_unaligned_cpu64(__gu_p);	\
+		break;					\
+	default:					\
+		BUILD_BUG_ON(1);			\
+		break;					\
+	};						\
+	__val; })
+
+static inline void __put_unaligned_cpu16(u16 val, void *p)
+{
+	struct __una_u16 *ptr = (struct __una_u16 *)p;
+	ptr->x = val;
+}
+
+static inline void __put_unaligned_cpu32(u32 val, void *p)
+{
+	struct __una_u32 *ptr = (struct __una_u32 *)p;
+	ptr->x = val;
+}
+
+static inline void __put_unaligned_cpu64(u64 val, void *p)
+{
+	struct __una_u64 *ptr = (struct __una_u64 *)p;
+	ptr->x = val;
+}
+
+#define __put_unaligned_cpu(val, ptr) ({				\
+	(void)sizeof(*(ptr) = (val));					\
+	void *__gu_p = (ptr);						\
+	switch (sizeof(*(ptr))) {					\
+	case 1:								\
+		*(u8 *)__gu_p = (__force u8)(val);			\
+		break;							\
+	case 2:								\
+		__put_unaligned_cpu16((__force u16)(val), __gu_p);	\
+		break;							\
+	case 4:								\
+		__put_unaligned_cpu32((__force u32)(val), __gu_p);	\
+		break;							\
+	case 8:								\
+		__put_unaligned_cpu64((__force u64)(val), __gu_p);	\
+		break;							\
+	default:							\
+		BUILD_BUG_ON(1);					\
+		break;							\
+	}								\
+	(void)0; })
+
+#endif /* _LINUX_UNALIGNED_CPU_ENDIAN_H_ */
diff --git a/include/linux/unaligned/generic.h b/include/linux/unaligned/generic.h
new file mode 100644
index 0000000..50ce393
--- /dev/null
+++ b/include/linux/unaligned/generic.h
@@ -0,0 +1,67 @@
+#ifndef _LINUX_UNALIGNED_GENERIC_H_
+#define _LINUX_UNALIGNED_GENERIC_H_
+
+#include <linux/unaligned/little_endian.h>
+#include <linux/unaligned/big_endian.h>
+
+static inline u16 get_unaligned_le16(const void *p)
+{
+	return __get_unaligned_le16((const u8 *)p);
+}
+
+static inline u32 get_unaligned_le32(const void *p)
+{
+	return __get_unaligned_le32((const u8 *)p);
+}
+
+static inline u64 get_unaligned_le64(const void *p)
+{
+	return __get_unaligned_le64((const u8 *)p);
+}
+
+static inline u16 get_unaligned_be16(const void *p)
+{
+	return __get_unaligned_be16((const u8 *)p);
+}
+
+static inline u32 get_unaligned_be32(const void *p)
+{
+	return __get_unaligned_be32((const u8 *)p);
+}
+
+static inline u64 get_unaligned_be64(const void *p)
+{
+	return __get_unaligned_be64((const u8 *)p);
+}
+
+static inline void put_unaligned_le16(u16 val, void *p)
+{
+	__put_unaligned_le16(val, p);
+}
+
+static inline void put_unaligned_le32(u32 val, void *p)
+{
+	__put_unaligned_le32(val, p);
+}
+
+static inline void put_unaligned_le64(u64 val, void *p)
+{
+	__put_unaligned_le64(val, p);
+}
+
+static inline void put_unaligned_be16(u16 val, void *p)
+{
+	__put_unaligned_be16(val, p);
+}
+
+static inline void put_unaligned_be32(u32 val, void *p)
+{
+	__put_unaligned_be32(val, p);
+}
+
+static inline void put_unaligned_be64(u64 val, void *p)
+{
+	__put_unaligned_be64(val, p);
+}
+
+#endif /* _LINUX_UNALIGNED_GENERIC_H_ */
diff --git a/include/linux/unaligned/generic_be.h b/include/linux/unaligned/generic_be.h
new file mode 100644
index 0000000..a200aca
--- /dev/null
+++ b/include/linux/unaligned/generic_be.h
@@ -0,0 +1,70 @@
+#ifndef _LINUX_UNALIGNED_GENERIC_BE_H_
+#define _LINUX_UNALIGNED_GENERIC_BE_H_
+
+#include <linux/unaligned/cpu_endian.h>
+#include <linux/unaligned/little_endian.h>
+
+#define get_unaligned	__get_unaligned_cpu
+#define put_unaligned	__put_unaligned_cpu
+
+static inline u16 get_unaligned_le16(const void *p)
+{
+	return __get_unaligned_le16((const u8 *)p);
+}
+
+static inline u32 get_unaligned_le32(const void *p)
+{
+	return __get_unaligned_le32((const u8 *)p);
+}
+
+static inline u64 get_unaligned_le64(const void *p)
+{
+	return __get_unaligned_le64((const u8 *)p);
+}
+
+static inline u16 get_unaligned_be16(const void *p)
+{
+	return __get_unaligned_cpu16((const u8 *)p);
+}
+
+static inline u32 get_unaligned_be32(const void *p)
+{
+	return __get_unaligned_cpu32((const u8 *)p);
+}
+
+static inline u64 get_unaligned_be64(const void *p)
+{
+	return __get_unaligned_cpu64((const u8 *)p);
+}
+
+static inline void put_unaligned_le16(u16 val, void *p)
+{
+	__put_unaligned_le16(val, p);
+}
+
+static inline void put_unaligned_le32(u32 val, void *p)
+{
+	__put_unaligned_le32(val, p);
+}
+
+static inline void put_unaligned_le64(u64 val, void *p)
+{
+	__put_unaligned_le64(val, p);
+}
+
+static inline void put_unaligned_be16(u16 val, void *p)
+{
+	__put_unaligned_cpu16(val, p);
+}
+
+static inline void put_unaligned_be32(u32 val, void *p)
+{
+	__put_unaligned_cpu32(val, p);
+}
+
+static inline void put_unaligned_be64(u64 val, void *p)
+{
+	__put_unaligned_cpu64(val, p);
+}
+
+#endif /* _LINUX_UNALIGNED_GENERIC_BE_H_ */
diff --git a/include/linux/unaligned/generic_le.h b/include/linux/unaligned/generic_le.h
new file mode 100644
index 0000000..8a6f6ae
--- /dev/null
+++ b/include/linux/unaligned/generic_le.h
@@ -0,0 +1,70 @@
+#ifndef _LINUX_UNALIGNED_GENERIC_LE_H_
+#define _LINUX_UNALIGNED_GENERIC_LE_H_
+
+#include <linux/unaligned/cpu_endian.h>
+#include <linux/unaligned/big_endian.h>
+
+#define get_unaligned	__get_unaligned_cpu
+#define put_unaligned	__put_unaligned_cpu
+
+static inline u16 get_unaligned_le16(const void *p)
+{
+	return __get_unaligned_cpu16((const u8 *)p);
+}
+
+static inline u32 get_unaligned_le32(const void *p)
+{
+	return __get_unaligned_cpu32((const u8 *)p);
+}
+
+static inline u64 get_unaligned_le64(const void *p)
+{
+	return __get_unaligned_cpu64((const u8 *)p);
+}
+
+static inline u16 get_unaligned_be16(const void *p)
+{
+	return __get_unaligned_be16((const u8 *)p);
+}
+
+static inline u32 get_unaligned_be32(const void *p)
+{
+	return __get_unaligned_be32((const u8 *)p);
+}
+
+static inline u64 get_unaligned_be64(const void *p)
+{
+	return __get_unaligned_be64((const u8 *)p);
+}
+
+static inline void put_unaligned_le16(u16 val, void *p)
+{
+	__put_unaligned_cpu16(val, p);
+}
+
+static inline void put_unaligned_le32(u32 val, void *p)
+{
+	__put_unaligned_cpu32(val, p);
+}
+
+static inline void put_unaligned_le64(u64 val, void *p)
+{
+	__put_unaligned_cpu64(val, p);
+}
+
+static inline void put_unaligned_be16(u16 val, void *p)
+{
+	__put_unaligned_be16(val, p);
+}
+
+static inline void put_unaligned_be32(u32 val, void *p)
+{
+	__put_unaligned_be32(val, p);
+}
+
+static inline void put_unaligned_be64(u64 val, void *p)
+{
+	__put_unaligned_be64(val, p);
+}
+
+#endif /* _LINUX_UNALIGNED_GENERIC_LE_H_ */
diff --git a/include/linux/unaligned/little_endian.h b/include/linux/unaligned/little_endian.h
new file mode 100644
index 0000000..860af6b
--- /dev/null
+++ b/include/linux/unaligned/little_endian.h
@@ -0,0 +1,84 @@
+#ifndef _LINUX_UNALIGNED_LITTLE_ENDIAN_H_
+#define _LINUX_UNALIGNED_LITTLE_ENDIAN_H_
+
+#include <linux/kernel.h>
+
+static inline u16 __get_unaligned_le16(const u8 *p)
+{
+	return p[0] | p[1] << 8;
+}
+
+static inline u32 __get_unaligned_le32(const u8 *p)
+{
+	return p[0] | p[1] << 8 | p[2] << 16 | p[3] << 24;
+}
+
+static inline u64 __get_unaligned_le64(const u8 *p)
+{
+	return (u64)__get_unaligned_le32(p + 4) << 32 |
+	       __get_unaligned_le32(p);
+}
+
+#define __get_unaligned_le(ptr) ({			\
+	const void *__gu_p = (ptr);			\
+	typeof(*(ptr)) __val;				\
+	switch (sizeof(*(ptr))) {			\
+	case 1:						\
+		__val = *(const u8 *)__gu_p;		\
+		break;					\
+	case 2:						\
+		__val = __get_unaligned_le16(__gu_p);	\
+		break;					\
+	case 4:						\
+		__val = __get_unaligned_le32(__gu_p);	\
+		break;					\
+	case 8:						\
+		__val = __get_unaligned_le64(__gu_p);	\
+		break;					\
+	default:					\
+		BUILD_BUG_ON(1);			\
+		break;					\
+	};						\
+	__val; })
+
+static inline void __put_unaligned_le16(u16 val, u8 *p)
+{
+	*p++ = val;
+	*p++ = val >> 8;
+}
+
+static inline void __put_unaligned_le32(u32 val, u8 *p)
+{
+	__put_unaligned_le16(val >> 16, p + 2);
+	__put_unaligned_le16(val, p);
+}
+
+static inline void __put_unaligned_le64(u64 val, u8 *p)
+{
+	__put_unaligned_le32(val >> 32, p + 4);
+	__put_unaligned_le32(val, p);
+}
+
+#define __put_unaligned_le(val, ptr) ({					\
+	(void)sizeof(*(ptr) = (val));					\
+	void *__gu_p = (ptr);						\
+	switch (sizeof(*(ptr))) {					\
+	case 1:								\
+		*(u8 *)__gu_p = (__force u8)(val);			\
+		break;							\
+	case 2:								\
+		__put_unaligned_le16((__force u16)(val), __gu_p);	\
+		break;							\
+	case 4:								\
+		__put_unaligned_le32((__force u32)(val), __gu_p);	\
+		break;							\
+	case 8:								\
+		__put_unaligned_le64((__force u64)(val), __gu_p);	\
+		break;							\
+	default:							\
+		BUILD_BUG_ON(1);					\
+		break;							\
+	}								\
+	(void)0; })
+
+#endif /* _LINUX_UNALIGNED_LITTLE_ENDIAN_H_ */
diff --git a/include/linux/unaligned/no_builtin_memcpy.h b/include/linux/unaligned/no_builtin_memcpy.h
new file mode 100644
index 0000000..9b9e803
--- /dev/null
+++ b/include/linux/unaligned/no_builtin_memcpy.h
@@ -0,0 +1,80 @@
+#ifndef _LINUX_UNALIGNED_NO_BUILTIN_MEMCPY_H_
+#define _LINUX_UNALIGNED_NO_BUILTIN_MEMCPY_H_
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <asm/byteorder.h>
+
+/* Use memmove here, so gcc does not insert a __builtin_memcpy. */
+
+#define get_unaligned(ptr) ({				\
+	__typeof__(*(ptr)) __tmp;			\
+	memmove(&__tmp, (ptr), sizeof(*(ptr)));		\
+	__tmp; })
+
+#define put_unaligned(val, ptr) ({			\
+	__typeof__(*(ptr)) __tmp = (val);		\
+	memmove((ptr), &__tmp, sizeof(*(ptr)));		\
+	(void)0; })
+
+static inline u16 get_unaligned_le16(const void *p)
+{
+	return le16_to_cpu(get_unaligned((__le16 *)p));
+}
+
+static inline u32 get_unaligned_le32(const void *p)
+{
+	return le32_to_cpu(get_unaligned((__le32 *)p));
+}
+
+static inline u64 get_unaligned_le64(const void *p)
+{
+	return le64_to_cpu(get_unaligned((__le64 *)p));
+}
+
+static inline u16 get_unaligned_be16(const void *p)
+{
+	return be16_to_cpu(get_unaligned((__be16 *)p));
+}
+
+static inline u32 get_unaligned_be32(const void *p)
+{
+	return be32_to_cpu(get_unaligned((__be32 *)p));
+}
+
+static inline u64 get_unaligned_be64(const void *p)
+{
+	return be64_to_cpu(get_unaligned((__be64 *)p));
+}
+
+static inline void put_unaligned_le16(u16 val, void *p)
+{
+	put_unaligned(cpu_to_le16(val), (__le16 *)p);
+}
+
+static inline void put_unaligned_le32(u32 val, void *p)
+{
+	put_unaligned(cpu_to_le32(val), (__le32 *)p);
+}
+
+static inline void put_unaligned_le64(u64 val, void *p)
+{
+	put_unaligned(cpu_to_le64(val), (__le64 *)p);
+}
+
+static inline void put_unaligned_be16(u16 val, void *p)
+{
+	put_unaligned(cpu_to_be16(val), (__be16 *)p);
+}
+
+static inline void put_unaligned_be32(u32 val, void *p)
+{
+	put_unaligned(cpu_to_be32(val), (__be32 *)p);
+}
+
+static inline void put_unaligned_be64(u64 val, void *p)
+{
+	put_unaligned(cpu_to_be64(val), (__be64 *)p);
+}
+
+#endif



^ permalink raw reply related	[flat|nested] 24+ messages in thread

* Re: [PATCHv2 1/8] kernel: add common infrastructure for unaligned access
  2008-04-11  0:06 ` [PATCHv2 " Harvey Harrison
  2008-04-11  0:06   ` Harvey Harrison
@ 2008-04-11 18:09   ` Russell King
  2008-04-11 18:09     ` Russell King
       [not found]     ` <20080411180928.GA9137-f404yB8NqCZvn6HldHNs0ANdhmdF6hFW@public.gmane.org>
  1 sibling, 2 replies; 24+ messages in thread
From: Russell King @ 2008-04-11 18:09 UTC (permalink / raw)
  To: Harvey Harrison; +Cc: Andrew Morton, linux-arch

On Thu, Apr 10, 2008 at 05:06:34PM -0700, Harvey Harrison wrote:
> +#define __get_unaligned_cpu(ptr) ({			\
> +	const void *__gu_p = (ptr);			\
> +	typeof(*(ptr)) __val;				\
> +	switch (sizeof(*(ptr))) {			\
> +	case 1:						\
> +		__val = *(const u8 *)__gu_p;		\
> +		break;					\
> +	case 2:						\
> +		__val = __get_unaligned_cpu16(__gu_p);	\
> +		break;					\
> +	case 4:						\
> +		__val = __get_unaligned_cpu32(__gu_p);	\
> +		break;					\
> +	case 8:						\
> +		__val = __get_unaligned_cpu64(__gu_p);	\
> +		break;					\
> +	default:					\
> +		BUILD_BUG_ON(1);			\
> +		break;					\
> +	};						\
> +	__val; })

This won't work - on ARM we used to use this style, but it fails in
some corner case, so we ended up switching to using GCC's
__builtin_choose_expr() instead.

Such a corner case:

static unsigned long foo(const unsigned long *ptr)
{
	return __get_unaligned_cpu(ptr);
}

This results in '__val' being declared as const, and therefore the
compiler errors out in the switch statement since the code tries to
assign to a const '__val'.

See 17b602b1c1a38f3f0a4461bb1f571346e751b36b.

So, for the present set of patches, NAK for changing ARM.

-- 
Russell King
 Linux kernel    2.6 ARM Linux   - http://www.arm.linux.org.uk/
 maintainer of:

^ permalink raw reply	[flat|nested] 24+ messages in thread

* Re: [PATCHv2 1/8] kernel: add common infrastructure for unaligned access
  2008-04-11 18:09   ` Russell King
@ 2008-04-11 18:09     ` Russell King
       [not found]     ` <20080411180928.GA9137-f404yB8NqCZvn6HldHNs0ANdhmdF6hFW@public.gmane.org>
  1 sibling, 0 replies; 24+ messages in thread
From: Russell King @ 2008-04-11 18:09 UTC (permalink / raw)
  To: Harvey Harrison; +Cc: Andrew Morton, linux-arch

On Thu, Apr 10, 2008 at 05:06:34PM -0700, Harvey Harrison wrote:
> +#define __get_unaligned_cpu(ptr) ({			\
> +	const void *__gu_p = (ptr);			\
> +	typeof(*(ptr)) __val;				\
> +	switch (sizeof(*(ptr))) {			\
> +	case 1:						\
> +		__val = *(const u8 *)__gu_p;		\
> +		break;					\
> +	case 2:						\
> +		__val = __get_unaligned_cpu16(__gu_p);	\
> +		break;					\
> +	case 4:						\
> +		__val = __get_unaligned_cpu32(__gu_p);	\
> +		break;					\
> +	case 8:						\
> +		__val = __get_unaligned_cpu64(__gu_p);	\
> +		break;					\
> +	default:					\
> +		BUILD_BUG_ON(1);			\
> +		break;					\
> +	};						\
> +	__val; })

This won't work - on ARM we used to use this style, but it fails in
some corner case, so we ended up switching to using GCC's
__builtin_choose_expr() instead.

Such a corner case:

static unsigned long foo(const unsigned long *ptr)
{
	return __get_unaligned_cpu(ptr);
}

This results in '__val' being declared as const, and therefore the
compiler errors out in the switch statement since the code tries to
assign to a const '__val'.

See 17b602b1c1a38f3f0a4461bb1f571346e751b36b.

So, for the present set of patches, NAK for changing ARM.

-- 
Russell King
 Linux kernel    2.6 ARM Linux   - http://www.arm.linux.org.uk/
 maintainer of:

^ permalink raw reply	[flat|nested] 24+ messages in thread

* Re: [PATCHv2 1/8] kernel: add common infrastructure for unaligned access
       [not found]     ` <20080411180928.GA9137-f404yB8NqCZvn6HldHNs0ANdhmdF6hFW@public.gmane.org>
@ 2008-04-11 19:01       ` Harvey Harrison
  2008-04-11 19:01         ` Harvey Harrison
  0 siblings, 1 reply; 24+ messages in thread
From: Harvey Harrison @ 2008-04-11 19:01 UTC (permalink / raw)
  To: Russell King; +Cc: Andrew Morton, linux-arch

On Fri, 2008-04-11 at 19:09 +0100, Russell King wrote:
> On Thu, Apr 10, 2008 at 05:06:34PM -0700, Harvey Harrison wrote:
> > +#define __get_unaligned_cpu(ptr) ({			\
> > +	const void *__gu_p = (ptr);			\
> > +	typeof(*(ptr)) __val;				\
> > +	switch (sizeof(*(ptr))) {			\
> > +	case 1:						\
> > +		__val = *(const u8 *)__gu_p;		\
> > +		break;					\
> > +	case 2:						\
> > +		__val = __get_unaligned_cpu16(__gu_p);	\
> > +		break;					\
> > +	case 4:						\
> > +		__val = __get_unaligned_cpu32(__gu_p);	\
> > +		break;					\
> > +	case 8:						\
> > +		__val = __get_unaligned_cpu64(__gu_p);	\
> > +		break;					\
> > +	default:					\
> > +		BUILD_BUG_ON(1);			\
> > +		break;					\
> > +	};						\
> > +	__val; })
> 
> This won't work - on ARM we used to use this style, but it fails in
> some corner case, so we ended up switching to using GCC's
> __builtin_choose_expr() instead.
> 
> Such a corner case:
> 
> static unsigned long foo(const unsigned long *ptr)
> {
> 	return __get_unaligned_cpu(ptr);
> }
> 
> This results in '__val' being declared as const, and therefore the
> compiler errors out in the switch statement since the code tries to
> assign to a const '__val'.

OK, will respin the set, I think I have a better way of doing it anyway.

Not that it matters, but wouldn't that be a gcc bug, for it to preserve
const on __val, wouldn't that have to be:

const unsigned long * const ptr

Just curious.

Thanks.

Harvey

^ permalink raw reply	[flat|nested] 24+ messages in thread

* Re: [PATCHv2 1/8] kernel: add common infrastructure for unaligned access
  2008-04-11 19:01       ` Harvey Harrison
@ 2008-04-11 19:01         ` Harvey Harrison
  0 siblings, 0 replies; 24+ messages in thread
From: Harvey Harrison @ 2008-04-11 19:01 UTC (permalink / raw)
  To: Russell King; +Cc: Andrew Morton, linux-arch

On Fri, 2008-04-11 at 19:09 +0100, Russell King wrote:
> On Thu, Apr 10, 2008 at 05:06:34PM -0700, Harvey Harrison wrote:
> > +#define __get_unaligned_cpu(ptr) ({			\
> > +	const void *__gu_p = (ptr);			\
> > +	typeof(*(ptr)) __val;				\
> > +	switch (sizeof(*(ptr))) {			\
> > +	case 1:						\
> > +		__val = *(const u8 *)__gu_p;		\
> > +		break;					\
> > +	case 2:						\
> > +		__val = __get_unaligned_cpu16(__gu_p);	\
> > +		break;					\
> > +	case 4:						\
> > +		__val = __get_unaligned_cpu32(__gu_p);	\
> > +		break;					\
> > +	case 8:						\
> > +		__val = __get_unaligned_cpu64(__gu_p);	\
> > +		break;					\
> > +	default:					\
> > +		BUILD_BUG_ON(1);			\
> > +		break;					\
> > +	};						\
> > +	__val; })
> 
> This won't work - on ARM we used to use this style, but it fails in
> some corner case, so we ended up switching to using GCC's
> __builtin_choose_expr() instead.
> 
> Such a corner case:
> 
> static unsigned long foo(const unsigned long *ptr)
> {
> 	return __get_unaligned_cpu(ptr);
> }
> 
> This results in '__val' being declared as const, and therefore the
> compiler errors out in the switch statement since the code tries to
> assign to a const '__val'.

OK, will respin the set, I think I have a better way of doing it anyway.

Not that it matters, but wouldn't that be a gcc bug, for it to preserve
const on __val, wouldn't that have to be:

const unsigned long * const ptr

Just curious.

Thanks.

Harvey



^ permalink raw reply	[flat|nested] 24+ messages in thread

end of thread, other threads:[~2008-04-11 19:01 UTC | newest]

Thread overview: 24+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2008-04-10 19:44 [PATCH 1/8] kernel: add common infrastructure for unaligned access Harvey Harrison
2008-04-10 19:44 ` Harvey Harrison
2008-04-10 21:43 ` David Howells
2008-04-10 21:43   ` David Howells
     [not found]   ` <11527.1207863801-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2008-04-10 21:55     ` Harvey Harrison
2008-04-10 21:55       ` Harvey Harrison
2008-04-10 22:01       ` David Howells
2008-04-10 22:01         ` David Howells
     [not found]         ` <11814.1207864864-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2008-04-10 22:06           ` Harvey Harrison
2008-04-10 22:06             ` Harvey Harrison
2008-04-10 22:15             ` David Howells
2008-04-10 22:15               ` David Howells
     [not found]               ` <11907.1207865758-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2008-04-10 22:20                 ` Harvey Harrison
2008-04-10 22:20                   ` Harvey Harrison
2008-04-10 22:33                   ` David Howells
2008-04-10 22:33                     ` David Howells
     [not found]                     ` <11989.1207866793-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2008-04-10 22:37                       ` Harvey Harrison
2008-04-10 22:37                         ` Harvey Harrison
2008-04-11  0:06 ` [PATCHv2 " Harvey Harrison
2008-04-11  0:06   ` Harvey Harrison
2008-04-11 18:09   ` Russell King
2008-04-11 18:09     ` Russell King
     [not found]     ` <20080411180928.GA9137-f404yB8NqCZvn6HldHNs0ANdhmdF6hFW@public.gmane.org>
2008-04-11 19:01       ` Harvey Harrison
2008-04-11 19:01         ` Harvey Harrison

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox