linux-arch.vger.kernel.org archive mirror
From: Vineet Gupta <Vineet.Gupta1@synopsys.com>
To: linux-arch@vger.kernel.org, linux-kernel@vger.kernel.org
Cc: tglx@linutronix.de, arnd@arndb.de,
	Vineet Gupta <Vineet.Gupta1@synopsys.com>
Subject: [RFC PATCH v1 05/31] ARC: uaccess friends
Date: Wed,  7 Nov 2012 10:47:28 +0100
Message-ID: <1352281674-2186-6-git-send-email-vgupta@synopsys.com>
In-Reply-To: <1352281674-2186-1-git-send-email-vgupta@synopsys.com>
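
Add the user-space access primitives for ARC:

- segment.h: mm_segment_t plus the KERNEL_DS/USER_DS descriptors and
  segment_eq(), used by the get_fs()-based address checks below.
- uaccess.h: __access_ok() and inline __arc_copy_{from,to}_user(),
  __arc_clear_user(), __arc_strncpy_from_user() and
  __arc_strnlen_user(), built around the ARC zero-overhead-loop
  instructions (lp/lpnz) with .fixup/__ex_table entries to recover
  from faulting user accesses.  Everything else is picked up from
  asm-generic/uaccess.h.

For compile-time constant sizes the word-aligned copy path is open
coded as a 16x/8/4/2/1 byte ladder so the unused rungs are optimized
away (the "hand-crafted constant propagation" noted in the file
header).  As a made-up example (get_tag() is not part of this patch,
it only illustrates the constant-size path):

	static int get_tag(char *dst, const char __user *uptr)
	{
		/*
		 * Constant size 6: after constant folding only the
		 * 4-byte and 2-byte rungs of the ladder are emitted
		 * on the aligned path.
		 */
		if (copy_from_user(dst, uptr, 6))
			return -EFAULT;
		return 0;
	}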

Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
---
 arch/arc/include/asm/segment.h |   24 ++
 arch/arc/include/asm/uaccess.h |  605 ++++++++++++++++++++++++++++++++++++++++
 2 files changed, 629 insertions(+), 0 deletions(-)
 create mode 100644 arch/arc/include/asm/segment.h
 create mode 100644 arch/arc/include/asm/uaccess.h
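
Note on the .fixup/__ex_table sections used throughout: each ".word"
pair records (address of a potentially faulting user access, address
of its fixup label).  The consumer, fixup_exception(), only arrives
with the page fault handling patch later in this series; roughly (a
sketch assuming the generic exception_table_entry layout and ARC's
pt_regs->ret return-address field, not code from this patch) it is
expected to look like:

	int fixup_exception(struct pt_regs *regs)
	{
		const struct exception_table_entry *fixup;

		/* find the faulting instruction in __ex_table */
		fixup = search_exception_tables(instruction_pointer(regs));
		if (fixup) {
			/* resume at the fixup label instead of killing the task */
			regs->ret = fixup->fixup;
			return 1;
		}
		return 0;
	}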

diff --git a/arch/arc/include/asm/segment.h b/arch/arc/include/asm/segment.h
new file mode 100644
index 0000000..da2c459
--- /dev/null
+++ b/arch/arc/include/asm/segment.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASMARC_SEGMENT_H
+#define __ASMARC_SEGMENT_H
+
+#ifndef __ASSEMBLY__
+
+typedef unsigned long mm_segment_t;
+
+#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })
+
+#define KERNEL_DS		MAKE_MM_SEG(0)
+#define USER_DS			MAKE_MM_SEG(TASK_SIZE)
+
+#define segment_eq(a, b)	((a) == (b))
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASMARC_SEGMENT_H */
diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h
new file mode 100644
index 0000000..503c98d
--- /dev/null
+++ b/arch/arc/include/asm/uaccess.h
@@ -0,0 +1,605 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: June 2010
+ *    -__clear_user( ), called multiple times during ELF load, was converted
+ *     from a byte loop to do word-sized clears wherever possible.
+ *
+ * vineetg: Dec 2009
+ *    -Hand-crafted constant propagation for "constant" copy sizes
+ *    -stock kernel shrunk by 33K at -O3
+ *
+ * vineetg: Sept 2009
+ *    -Added option to (UN)inline copy_(to|from)_user to reduce code sz
+ *    -kernel shrunk by 200K even at -O3 (gcc 4.2.1)
+ *    -Enabled when doing -Os
+ *
+ * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+ */
+
+#ifndef _ASM_ARC_UACCESS_H
+#define _ASM_ARC_UACCESS_H
+
+#include <linux/sched.h>
+#include <asm/errno.h>
+#include <linux/string.h>	/* for generic string functions */
+
+
+#define __kernel_ok		(segment_eq(get_fs(), KERNEL_DS))
+#define __user_ok(addr, sz)	(((sz) <= TASK_SIZE) && \
+				 (((addr)+(sz)) <= get_fs()))
+#define __access_ok(addr, sz)	(unlikely(__kernel_ok) || \
+				 likely(__user_ok((addr), (sz))))
+
+static inline unsigned long
+__arc_copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+	long res = 0;
+	char val;
+	unsigned long tmp1, tmp2, tmp3, tmp4;
+	unsigned long orig_n = n;
+
+	if (n == 0)
+		return 0;
+
+	/* unaligned */
+	if (((unsigned long)to & 0x3) || ((unsigned long)from & 0x3)) {
+
+		unsigned char tmp;
+
+		__asm__ __volatile__ (
+		"	mov.f   lp_count, %0		\n"
+		"	lpnz 2f				\n"
+		"1:	ldb.ab  %1, [%3, 1]		\n"
+		"	stb.ab  %1, [%2, 1]		\n"
+		"	sub     %0,%0,1			\n"
+		"2:	;nop				\n"
+		"	.section .fixup, \"ax\"		\n"
+		"	.align 4			\n"
+		"3:	j   2b				\n"
+		"	.previous			\n"
+		"	.section __ex_table, \"a\"	\n"
+		"	.align 4			\n"
+		"	.word   1b, 3b			\n"
+		"	.previous			\n"
+
+		: "+r" (n),
+		/*
+		 * Note the '&' earlyclobber constraint: it ensures that the
+		 * temporary register used inside the loop is not the same
+		 * register as FROM or TO.
+		 */
+		  "=&r" (tmp), "+r" (to), "+r" (from)
+		:
+		: "lp_count", "lp_start", "lp_end", "memory");
+
+		return n;
+	}
+
+	/*
+	 * Hand-crafted constant propagation to reduce the code size of
+	 * the laddered copy of 16x, 8, 4, 2, 1 bytes
+	 */
+	if (__builtin_constant_p(orig_n)) {
+		res = orig_n;
+
+		if (orig_n / 16) {
+			orig_n = orig_n % 16;
+
+			__asm__ __volatile__(
+			"	lsr   lp_count, %7,4		\n"
+			"	lp    3f			\n"
+			"1:	ld.ab   %3, [%2, 4]		\n"
+			"11:	ld.ab   %4, [%2, 4]		\n"
+			"12:	ld.ab   %5, [%2, 4]		\n"
+			"13:	ld.ab   %6, [%2, 4]		\n"
+			"	st.ab   %3, [%1, 4]		\n"
+			"	st.ab   %4, [%1, 4]		\n"
+			"	st.ab   %5, [%1, 4]		\n"
+			"	st.ab   %6, [%1, 4]		\n"
+			"	sub     %0,%0,16		\n"
+			"3:	;nop				\n"
+			"	.section .fixup, \"ax\"		\n"
+			"	.align 4			\n"
+			"4:	j   3b				\n"
+			"	.previous			\n"
+			"	.section __ex_table, \"a\"	\n"
+			"	.align 4			\n"
+			"	.word   1b, 4b			\n"
+			"	.word   11b,4b			\n"
+			"	.word   12b,4b			\n"
+			"	.word   13b,4b			\n"
+			"	.previous			\n"
+			: "+r" (res), "+r"(to), "+r"(from),
+			  "=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4)
+			: "ir"(n)
+			: "lp_count", "memory");
+		}
+		if (orig_n / 8) {
+			orig_n = orig_n % 8;
+
+			__asm__ __volatile__(
+			"14:	ld.ab   %3, [%2,4]		\n"
+			"15:	ld.ab   %4, [%2,4]		\n"
+			"	st.ab   %3, [%1,4]		\n"
+			"	st.ab   %4, [%1,4]		\n"
+			"	sub     %0,%0,8			\n"
+			"31:	;nop				\n"
+			"	.section .fixup, \"ax\"		\n"
+			"	.align 4			\n"
+			"4:	j   31b				\n"
+			"	.previous			\n"
+			"	.section __ex_table, \"a\"	\n"
+			"	.align 4			\n"
+			"	.word   14b,4b			\n"
+			"	.word   15b,4b			\n"
+			"	.previous			\n"
+			: "+r" (res), "+r"(to), "+r"(from),
+			  "=r"(tmp1), "=r"(tmp2)
+			:
+			: "memory");
+		}
+		if (orig_n / 4) {
+			orig_n = orig_n % 4;
+
+			__asm__ __volatile__(
+			"16:	ld.ab   %3, [%2,4]		\n"
+			"	st.ab   %3, [%1,4]		\n"
+			"	sub     %0,%0,4			\n"
+			"32:	;nop				\n"
+			"	.section .fixup, \"ax\"		\n"
+			"	.align 4			\n"
+			"4:	j   32b				\n"
+			"	.previous			\n"
+			"	.section __ex_table, \"a\"	\n"
+			"	.align 4			\n"
+			"	.word   16b,4b			\n"
+			"	.previous			\n"
+			: "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
+			:
+			: "memory");
+		}
+		if (orig_n / 2) {
+			orig_n = orig_n % 2;
+
+			__asm__ __volatile__(
+			"17:	ldw.ab   %3, [%2,2]		\n"
+			"	stw.ab   %3, [%1,2]		\n"
+			"	sub      %0,%0,2		\n"
+			"33:	;nop				\n"
+			"	.section .fixup, \"ax\"		\n"
+			"	.align 4			\n"
+			"4:	j   33b				\n"
+			"	.previous			\n"
+			"	.section __ex_table, \"a\"	\n"
+			"	.align 4			\n"
+			"	.word   17b,4b			\n"
+			"	.previous			\n"
+			: "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
+			:
+			: "memory");
+		}
+		if (orig_n & 1) {
+			__asm__ __volatile__(
+			"18:	ldb.ab   %3, [%2,2]		\n"
+			"	stb.ab   %3, [%1,2]		\n"
+			"	sub      %0,%0,1		\n"
+			"34:	; nop				\n"
+			"	.section .fixup, \"ax\"		\n"
+			"	.align 4			\n"
+			"4:	j   34b				\n"
+			"	.previous			\n"
+			"	.section __ex_table, \"a\"	\n"
+			"	.align 4			\n"
+			"	.word   18b,4b			\n"
+			"	.previous			\n"
+			: "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
+			:
+			: "memory");
+		}
+	} else {  /* n is NOT constant, so laddered copy of 16x,8,4,2,1  */
+
+		__asm__ __volatile__(
+		"	mov %0,%3			\n"
+		"	lsr.f   lp_count, %3,4		\n"  /* 16x bytes */
+		"	lpnz    3f			\n"
+		"1:	ld.ab   %5, [%2, 4]		\n"
+		"11:	ld.ab   %6, [%2, 4]		\n"
+		"12:	ld.ab   %7, [%2, 4]		\n"
+		"13:	ld.ab   %8, [%2, 4]		\n"
+		"	st.ab   %5, [%1, 4]		\n"
+		"	st.ab   %6, [%1, 4]		\n"
+		"	st.ab   %7, [%1, 4]		\n"
+		"	st.ab   %8, [%1, 4]		\n"
+		"	sub     %0,%0,16		\n"
+		"3:	and.f   %3,%3,0xf		\n"  /* stragglers */
+		"	bz      34f			\n"
+		"	bbit0   %3,3,31f		\n"  /* 8 bytes left */
+		"14:	ld.ab   %5, [%2,4]		\n"
+		"15:	ld.ab   %6, [%2,4]		\n"
+		"	st.ab   %5, [%1,4]		\n"
+		"	st.ab   %6, [%1,4]		\n"
+		"	sub.f   %0,%0,8			\n"
+		"31:	bbit0   %3,2,32f		\n"  /* 4 bytes left */
+		"16:	ld.ab   %5, [%2,4]		\n"
+		"	st.ab   %5, [%1,4]		\n"
+		"	sub.f   %0,%0,4			\n"
+		"32:	bbit0   %3,1,33f		\n"  /* 2 bytes left */
+		"17:	ldw.ab  %5, [%2,2]		\n"
+		"	stw.ab  %5, [%1,2]		\n"
+		"	sub.f   %0,%0,2			\n"
+		"33:	bbit0   %3,0,34f		\n"
+		"18:	ldb.ab  %5, [%2,1]		\n"  /* 1 byte left */
+		"	stb.ab  %5, [%1,1]		\n"
+		"	sub.f   %0,%0,1			\n"
+		"34:	;nop				\n"
+		"	.section .fixup, \"ax\"		\n"
+		"	.align 4			\n"
+		"4:	j   34b				\n"
+		"	.previous			\n"
+		"	.section __ex_table, \"a\"	\n"
+		"	.align 4			\n"
+		"	.word   1b, 4b			\n"
+		"	.word   11b,4b			\n"
+		"	.word   12b,4b			\n"
+		"	.word   13b,4b			\n"
+		"	.word   14b,4b			\n"
+		"	.word   15b,4b			\n"
+		"	.word   16b,4b			\n"
+		"	.word   17b,4b			\n"
+		"	.word   18b,4b			\n"
+		"	.previous			\n"
+		: "=r" (res), "+r"(to), "+r"(from), "+r"(n), "=r"(val),
+		  "=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4)
+		:
+		: "lp_count", "memory");
+	}
+
+	return res;
+}
+
+extern unsigned long slowpath_copy_to_user(void __user *to, const void *from,
+					   unsigned long n);
+
+static inline unsigned long
+__arc_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+	long res = 0;
+	char val;
+	unsigned long tmp1, tmp2, tmp3, tmp4;
+	unsigned long orig_n = n;
+
+	if (n == 0)
+		return 0;
+
+	/* unaligned */
+	if (((unsigned long)to & 0x3) || ((unsigned long)from & 0x3)) {
+
+		unsigned char tmp;
+
+		__asm__ __volatile__(
+		"	mov.f   lp_count, %0		\n"
+		"	lpnz 3f				\n"
+		"	ldb.ab  %1, [%3, 1]		\n"
+		"1:	stb.ab  %1, [%2, 1]		\n"
+		"	sub     %0, %0, 1		\n"
+		"3:	;nop				\n"
+		"	.section .fixup, \"ax\"		\n"
+		"	.align 4			\n"
+		"4:	j   3b				\n"
+		"	.previous			\n"
+		"	.section __ex_table, \"a\"	\n"
+		"	.align 4			\n"
+		"	.word   1b, 4b			\n"
+		"	.previous			\n"
+
+		: "+r" (n),
+		/*
+		 * Note the '&' earlyclobber constraint: it ensures that the
+		 * temporary register used inside the loop is not FROM or TO.
+		 */
+		  "=&r" (tmp), "+r" (to), "+r" (from)
+		:
+		: "lp_count", "lp_start", "lp_end", "memory");
+
+		return n;
+	}
+
+	if (__builtin_constant_p(orig_n)) {
+		res = orig_n;
+
+		if (orig_n / 16) {
+			orig_n = orig_n % 16;
+
+			__asm__ __volatile__(
+			"	lsr lp_count, %7,4		\n"
+			"	lp  3f				\n"
+			"	ld.ab %3, [%2, 4]		\n"
+			"	ld.ab %4, [%2, 4]		\n"
+			"	ld.ab %5, [%2, 4]		\n"
+			"	ld.ab %6, [%2, 4]		\n"
+			"1:	st.ab %3, [%1, 4]		\n"
+			"11:	st.ab %4, [%1, 4]		\n"
+			"12:	st.ab %5, [%1, 4]		\n"
+			"13:	st.ab %6, [%1, 4]		\n"
+			"	sub   %0, %0, 16		\n"
+			"3:;nop					\n"
+			"	.section .fixup, \"ax\"		\n"
+			"	.align 4			\n"
+			"4:	j   3b				\n"
+			"	.previous			\n"
+			"	.section __ex_table, \"a\"	\n"
+			"	.align 4			\n"
+			"	.word   1b, 4b			\n"
+			"	.word   11b,4b			\n"
+			"	.word   12b,4b			\n"
+			"	.word   13b,4b			\n"
+			"	.previous			\n"
+			: "+r" (res), "+r"(to), "+r"(from),
+			  "=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4)
+			: "ir"(n)
+			: "lp_count", "memory");
+		}
+		if (orig_n / 8) {
+			orig_n = orig_n % 8;
+
+			__asm__ __volatile__(
+			"	ld.ab   %3, [%2,4]		\n"
+			"	ld.ab   %4, [%2,4]		\n"
+			"14:	st.ab   %3, [%1,4]		\n"
+			"15:	st.ab   %4, [%1,4]		\n"
+			"	sub     %0, %0, 8		\n"
+			"31:;nop				\n"
+			"	.section .fixup, \"ax\"		\n"
+			"	.align 4			\n"
+			"4:	j   31b				\n"
+			"	.previous			\n"
+			"	.section __ex_table, \"a\"	\n"
+			"	.align 4			\n"
+			"	.word   14b,4b			\n"
+			"	.word   15b,4b			\n"
+			"	.previous			\n"
+			: "+r" (res), "+r"(to), "+r"(from),
+			  "=r"(tmp1), "=r"(tmp2)
+			:
+			: "memory");
+		}
+		if (orig_n / 4) {
+			orig_n = orig_n % 4;
+
+			__asm__ __volatile__(
+			"	ld.ab   %3, [%2,4]		\n"
+			"16:	st.ab   %3, [%1,4]		\n"
+			"	sub     %0, %0, 4		\n"
+			"32:;nop				\n"
+			"	.section .fixup, \"ax\"		\n"
+			"	.align 4			\n"
+			"4:	j   32b				\n"
+			"	.previous			\n"
+			"	.section __ex_table, \"a\"	\n"
+			"	.align 4			\n"
+			"	.word   16b,4b			\n"
+			"	.previous			\n"
+			: "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
+			:
+			: "memory");
+		}
+		if (orig_n / 2) {
+			orig_n = orig_n % 2;
+
+			__asm__ __volatile__(
+			"	ldw.ab    %3, [%2,2]		\n"
+			"17:	stw.ab    %3, [%1,2]		\n"
+			"	sub       %0, %0, 2		\n"
+			"33:;nop				\n"
+			"	.section .fixup, \"ax\"		\n"
+			"	.align 4			\n"
+			"4:	j   33b				\n"
+			"	.previous			\n"
+			"	.section __ex_table, \"a\"	\n"
+			"	.align 4			\n"
+			"	.word   17b,4b			\n"
+			"	.previous			\n"
+			: "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
+			:
+			: "memory");
+		}
+		if (orig_n & 1) {
+			__asm__ __volatile__(
+			"	ldb.ab  %3, [%2,1]		\n"
+			"18:	stb.ab  %3, [%1,1]		\n"
+			"	sub     %0, %0, 1		\n"
+			"34:	;nop				\n"
+			"	.section .fixup, \"ax\"		\n"
+			"	.align 4			\n"
+			"4:	j   34b				\n"
+			"	.previous			\n"
+			"	.section __ex_table, \"a\"	\n"
+			"	.align 4			\n"
+			"	.word   18b,4b			\n"
+			"	.previous			\n"
+			: "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
+			:
+			: "memory");
+		}
+	} else {  /* n is NOT constant, so laddered copy of 16x,8,4,2,1  */
+
+		__asm__ __volatile__(
+		"	mov   %0,%3			\n"
+		"	lsr.f lp_count, %3,4		\n"  /* 16x bytes */
+		"	lpnz  3f			\n"
+		"	ld.ab %5, [%2, 4]		\n"
+		"	ld.ab %6, [%2, 4]		\n"
+		"	ld.ab %7, [%2, 4]		\n"
+		"	ld.ab %8, [%2, 4]		\n"
+		"1:	st.ab %5, [%1, 4]		\n"
+		"11:	st.ab %6, [%1, 4]		\n"
+		"12:	st.ab %7, [%1, 4]		\n"
+		"13:	st.ab %8, [%1, 4]		\n"
+		"	sub   %0, %0, 16		\n"
+		"3:	and.f %3,%3,0xf			\n" /* stragglers */
+		"	bz 34f				\n"
+		"	bbit0   %3,3,31f		\n" /* 8 bytes left */
+		"	ld.ab   %5, [%2,4]		\n"
+		"	ld.ab   %6, [%2,4]		\n"
+		"14:	st.ab   %5, [%1,4]		\n"
+		"15:	st.ab   %6, [%1,4]		\n"
+		"	sub.f   %0, %0, 8		\n"
+		"31:	bbit0   %3,2,32f		\n"  /* 4 bytes left */
+		"	ld.ab   %5, [%2,4]		\n"
+		"16:	st.ab   %5, [%1,4]		\n"
+		"	sub.f   %0, %0, 4		\n"
+		"32:	bbit0 %3,1,33f			\n"  /* 2 bytes left */
+		"	ldw.ab    %5, [%2,2]		\n"
+		"17:	stw.ab    %5, [%1,2]		\n"
+		"	sub.f %0, %0, 2			\n"
+		"33:	bbit0 %3,0,34f			\n"
+		"	ldb.ab    %5, [%2,1]		\n"  /* 1 byte left */
+		"18:	stb.ab  %5, [%1,1]		\n"
+		"	sub.f %0, %0, 1			\n"
+		"34:	;nop				\n"
+		"	.section .fixup, \"ax\"		\n"
+		"	.align 4			\n"
+		"4:	j   34b				\n"
+		"	.previous			\n"
+		"	.section __ex_table, \"a\"	\n"
+		"	.align 4			\n"
+		"	.word   1b, 4b			\n"
+		"	.word   11b,4b			\n"
+		"	.word   12b,4b			\n"
+		"	.word   13b,4b			\n"
+		"	.word   14b,4b			\n"
+		"	.word   15b,4b			\n"
+		"	.word   16b,4b			\n"
+		"	.word   17b,4b			\n"
+		"	.word   18b,4b			\n"
+		"	.previous			\n"
+		: "=r" (res), "+r"(to), "+r"(from), "+r"(n), "=r"(val),
+		  "=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4)
+		:
+		: "lp_count", "memory");
+	}
+
+	return res;
+}
+
+static inline unsigned long __arc_clear_user(void __user *to, unsigned long n)
+{
+	long res = n;
+	unsigned char *d_char = to;
+
+	__asm__ __volatile__(
+	"	bbit0   %0, 0, 1f		\n"
+	"75:	stb.ab  %2, [%0,1]		\n"
+	"	sub %1, %1, 1			\n"
+	"1:	bbit0   %0, 1, 2f		\n"
+	"76:	stw.ab  %2, [%0,2]		\n"
+	"	sub %1, %1, 2			\n"
+	"2:	asr.f   lp_count, %1, 2		\n"
+	"	lpnz    3f			\n"
+	"77:	st.ab   %2, [%0,4]		\n"
+	"	sub %1, %1, 4			\n"
+	"3:	bbit0   %1, 1, 4f		\n"
+	"78:	stw.ab  %2, [%0,2]		\n"
+	"	sub %1, %1, 2			\n"
+	"4:	bbit0   %1, 0, 5f		\n"
+	"79:	stb.ab  %2, [%0,1]		\n"
+	"	sub %1, %1, 1			\n"
+	"5:					\n"
+	"	.section .fixup, \"ax\"		\n"
+	"	.align 4			\n"
+	"3:	j   5b				\n"
+	"	.previous			\n"
+	"	.section __ex_table, \"a\"	\n"
+	"	.align 4			\n"
+	"	.word   75b, 3b			\n"
+	"	.word   76b, 3b			\n"
+	"	.word   77b, 3b			\n"
+	"	.word   78b, 3b			\n"
+	"	.word   79b, 3b			\n"
+	"	.previous			\n"
+	: "+r"(d_char), "+r"(res)
+	: "i"(0)
+	: "lp_count", "lp_start", "lp_end", "memory");
+
+	return res;
+}
+
+static inline long
+__arc_strncpy_from_user(char *dst, const char __user *src, long count)
+{
+	long res = count;
+	char val;
+	unsigned int hw_count;
+
+	if (count == 0)
+		return 0;
+
+	__asm__ __volatile__(
+	"	lp 2f		\n"
+	"1:	ldb.ab  %3, [%2, 1]		\n"
+	"	breq.d  %3, 0, 2f		\n"
+	"	stb.ab  %3, [%1, 1]		\n"
+	"2:	sub %0, %6, %4			\n"
+	"3:	;nop				\n"
+	"	.section .fixup, \"ax\"		\n"
+	"	.align 4			\n"
+	"4:	mov %0, %5			\n"
+	"	j   3b				\n"
+	"	.previous			\n"
+	"	.section __ex_table, \"a\"	\n"
+	"	.align 4			\n"
+	"	.word   1b, 4b			\n"
+	"	.previous			\n"
+	: "=r"(res), "+r"(dst), "+r"(src), "=&r"(val), "=l"(hw_count)
+	: "g"(-EFAULT), "ir"(count), "4"(count)	/* this "4" seeds lp_count */
+	: "memory");
+
+	return res;
+}
+
+static inline long __arc_strnlen_user(const char __user *s, long n)
+{
+	long res, tmp1, cnt;
+	char val;
+
+	__asm__ __volatile__(
+	"	mov %2, %1			\n"
+	"1:	ldb.ab  %3, [%0, 1]		\n"
+	"	breq.d  %3, 0, 2f		\n"
+	"	sub.f   %2, %2, 1		\n"
+	"	bnz 1b				\n"
+	"	sub %2, %2, 1			\n"
+	"2:	sub %0, %1, %2			\n"
+	"3:	;nop				\n"
+	"	.section .fixup, \"ax\"		\n"
+	"	.align 4			\n"
+	"4:	mov %0, 0			\n"
+	"	j   3b				\n"
+	"	.previous			\n"
+	"	.section __ex_table, \"a\"	\n"
+	"	.align 4			\n"
+	"	.word 1b, 4b			\n"
+	"	.previous			\n"
+	: "=r"(res), "=r"(tmp1), "=r"(cnt), "=r"(val)
+	: "0"(s), "1"(n)
+	: "memory");
+
+	return res;
+}
+
+#define __copy_from_user(t, f, n)	__arc_copy_from_user(t, f, n)
+#define __copy_to_user(t, f, n)		__arc_copy_to_user(t, f, n)
+#define __clear_user(d, n)		__arc_clear_user(d, n)
+#define __strncpy_from_user(d, s, n)	__arc_strncpy_from_user(d, s, n)
+#define __strnlen_user(s, n)		__arc_strnlen_user(s, n)
+
+#include <asm-generic/uaccess.h>
+
+extern int fixup_exception(struct pt_regs *regs);
+
+#endif
-- 
1.7.4.1
