linux-kernel.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [patch] x86: some lock annotations for user copy paths
@ 2008-09-10 11:37 Nick Piggin
  2008-09-10 11:41 ` Peter Zijlstra
  2008-09-10 14:31 ` Andi Kleen
  0 siblings, 2 replies; 44+ messages in thread
From: Nick Piggin @ 2008-09-10 11:37 UTC (permalink / raw)
  To: Ingo Molnar, Peter Zijlstra, Linux Kernel Mailing List


copy_to/from_user and all its variants (except the atomic ones) can take a
page fault and perform non-trivial work like taking mmap_sem and entering
the filesystem/pagecache.

Unfortunately, this often escapes lockdep because a common pattern is to
use it to read in some arguments just set up from userspace, or write data
back to a hot buffer. In those cases, it will be unlikely for page reclaim
to get a window in to cause copy_*_user to fault.

With the new might_lock primitives, add some annotations to x86. I don't
know if I caught all possible faulting points (it's a bit of a maze, and I
didn't really look at 32-bit). But this is a starting point.

Boots and runs OK so far.

Signed-off-by: Nick Piggin <npiggin@suse.de>
---

Index: linux-2.6/include/asm-x86/uaccess_64.h
===================================================================
--- linux-2.6.orig/include/asm-x86/uaccess_64.h
+++ linux-2.6/include/asm-x86/uaccess_64.h
@@ -28,6 +28,10 @@ static __always_inline __must_check
 int __copy_from_user(void *dst, const void __user *src, unsigned size)
 {
 	int ret = 0;
+
+	might_sleep();
+	if (current->mm)
+		might_lock_read(&current->mm->mmap_sem);
 	if (!__builtin_constant_p(size))
 		return copy_user_generic(dst, (__force void *)src, size);
 	switch (size) {
@@ -70,6 +74,10 @@ static __always_inline __must_check
 int __copy_to_user(void __user *dst, const void *src, unsigned size)
 {
 	int ret = 0;
+
+	might_sleep();
+	if (current->mm)
+		might_lock_read(&current->mm->mmap_sem);
 	if (!__builtin_constant_p(size))
 		return copy_user_generic((__force void *)dst, src, size);
 	switch (size) {
@@ -112,6 +120,10 @@ static __always_inline __must_check
 int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
 {
 	int ret = 0;
+
+	might_sleep();
+	if (current->mm)
+		might_lock_read(&current->mm->mmap_sem);
 	if (!__builtin_constant_p(size))
 		return copy_user_generic((__force void *)dst,
 					 (__force void *)src, size);
Index: linux-2.6/include/asm-x86/uaccess.h
===================================================================
--- linux-2.6.orig/include/asm-x86/uaccess.h
+++ linux-2.6/include/asm-x86/uaccess.h
@@ -8,6 +8,8 @@
 #include <linux/thread_info.h>
 #include <linux/prefetch.h>
 #include <linux/string.h>
+#include <linux/lockdep.h>
+#include <linux/sched.h>
 #include <asm/asm.h>
 #include <asm/page.h>
 
@@ -157,6 +159,9 @@ extern int __get_user_bad(void);
 	int __ret_gu;							\
 	unsigned long __val_gu;						\
 	__chk_user_ptr(ptr);						\
+	might_sleep();							\
+	if (current->mm)						\
+		might_lock_read(&current->mm->mmap_sem);		\
 	switch (sizeof(*(ptr))) {					\
 	case 1:								\
 		__get_user_x(1, __ret_gu, __val_gu, ptr);		\
@@ -241,6 +246,9 @@ extern void __put_user_8(void);
 	int __ret_pu;						\
 	__typeof__(*(ptr)) __pu_val;				\
 	__chk_user_ptr(ptr);					\
+	might_sleep();						\
+	if (current->mm)					\
+		might_lock_read(&current->mm->mmap_sem);	\
 	__pu_val = x;						\
 	switch (sizeof(*(ptr))) {				\
 	case 1:							\
@@ -265,6 +273,9 @@ extern void __put_user_8(void);
 #define __put_user_size(x, ptr, size, retval, errret)			\
 do {									\
 	retval = 0;							\
+	might_sleep();							\
+	if (current->mm)						\
+		might_lock_read(&current->mm->mmap_sem);		\
 	__chk_user_ptr(ptr);						\
 	switch (size) {							\
 	case 1:								\
@@ -317,6 +328,9 @@ do {									\
 #define __get_user_size(x, ptr, size, retval, errret)			\
 do {									\
 	retval = 0;							\
+	might_sleep();							\
+	if (current->mm)						\
+		might_lock_read(&current->mm->mmap_sem);		\
 	__chk_user_ptr(ptr);						\
 	switch (size) {							\
 	case 1:								\
Index: linux-2.6/arch/x86/lib/usercopy_32.c
===================================================================
--- linux-2.6.orig/arch/x86/lib/usercopy_32.c
+++ linux-2.6/arch/x86/lib/usercopy_32.c
@@ -33,6 +33,8 @@ static inline int __movsl_is_ok(unsigned
 do {									   \
 	int __d0, __d1, __d2;						   \
 	might_sleep();							   \
+	if (current->mm)						   \
+		might_lock_read(&current->mm->mmap_sem);		   \
 	__asm__ __volatile__(						   \
 		"	testl %1,%1\n"					   \
 		"	jz 2f\n"					   \
@@ -120,6 +122,8 @@ EXPORT_SYMBOL(strncpy_from_user);
 do {									\
 	int __d0;							\
 	might_sleep();							\
+	if (current->mm)						\
+		might_lock_read(&current->mm->mmap_sem);		\
 	__asm__ __volatile__(						\
 		"0:	rep; stosl\n"					\
 		"	movl %2,%0\n"					\
@@ -148,7 +152,6 @@ do {									\
 unsigned long
 clear_user(void __user *to, unsigned long n)
 {
-	might_sleep();
 	if (access_ok(VERIFY_WRITE, to, n))
 		__do_clear_user(to, n);
 	return n;
@@ -191,6 +194,8 @@ long strnlen_user(const char __user *s, 
 	unsigned long res, tmp;
 
 	might_sleep();
+	if (current->mm)
+		might_lock_read(&current->mm->mmap_sem);
 
 	__asm__ __volatile__(
 		"	testl %0, %0\n"
Index: linux-2.6/arch/x86/lib/usercopy_64.c
===================================================================
--- linux-2.6.orig/arch/x86/lib/usercopy_64.c
+++ linux-2.6/arch/x86/lib/usercopy_64.c
@@ -16,6 +16,8 @@
 do {									   \
 	long __d0, __d1, __d2;						   \
 	might_sleep();							   \
+	if (current->mm)						   \
+		might_lock_read(&current->mm->mmap_sem);		   \
 	__asm__ __volatile__(						   \
 		"	testq %1,%1\n"					   \
 		"	jz 2f\n"					   \
@@ -65,6 +67,8 @@ unsigned long __clear_user(void __user *
 {
 	long __d0;
 	might_sleep();
+	if (current->mm)
+		might_lock_read(&current->mm->mmap_sem);
 	/* no memory constraint because it doesn't change any memory gcc knows
 	   about */
 	asm volatile(
Index: linux-2.6/include/asm-x86/uaccess_32.h
===================================================================
--- linux-2.6.orig/include/asm-x86/uaccess_32.h
+++ linux-2.6/include/asm-x86/uaccess_32.h
@@ -82,8 +82,10 @@ __copy_to_user_inatomic(void __user *to,
 static __always_inline unsigned long __must_check
 __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
-       might_sleep();
-       return __copy_to_user_inatomic(to, from, n);
+	might_sleep();
+	if (current->mm)
+		might_lock_read(&current->mm->mmap_sem);
+	return __copy_to_user_inatomic(to, from, n);
 }
 
 static __always_inline unsigned long
@@ -138,6 +140,8 @@ static __always_inline unsigned long
 __copy_from_user(void *to, const void __user *from, unsigned long n)
 {
 	might_sleep();
+	if (current->mm)
+		might_lock_read(&current->mm->mmap_sem);
 	if (__builtin_constant_p(n)) {
 		unsigned long ret;
 
@@ -160,6 +164,8 @@ static __always_inline unsigned long __c
 				const void __user *from, unsigned long n)
 {
 	might_sleep();
+	if (current->mm)
+		might_lock_read(&current->mm->mmap_sem);
 	if (__builtin_constant_p(n)) {
 		unsigned long ret;
 

^ permalink raw reply	[flat|nested] 44+ messages in thread

end of thread, other threads:[~2008-09-24 23:45 UTC | newest]

Thread overview: 44+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2008-09-10 11:37 [patch] x86: some lock annotations for user copy paths Nick Piggin
2008-09-10 11:41 ` Peter Zijlstra
2008-09-10 11:47   ` Ingo Molnar
2008-09-10 11:50     ` Ingo Molnar
2008-09-10 12:12     ` Ingo Molnar
2008-09-10 12:32       ` Ingo Molnar
2008-09-10 15:12         ` Nick Piggin
2008-09-10 14:48       ` Nick Piggin
2008-09-10 15:01         ` Peter Zijlstra
2008-09-10 15:17           ` Nick Piggin
2008-09-10 15:26           ` Nick Piggin
2008-09-11  8:27             ` Ingo Molnar
2008-09-11 10:43               ` Nick Piggin
2008-09-12  9:24                 ` [PATCH] sysfs: fix deadlock Ingo Molnar
2008-09-14 22:02                   ` Nick Piggin
2008-09-15  9:15                     ` Peter Zijlstra
2008-09-14  7:39               ` [lockdep] possible circular locking, between &mm->mmap_sem and &dev->ev_mutex Ingo Molnar
2008-09-14  7:44                 ` Andrew Morton
2008-09-14  8:06                   ` [patch] mm: fix locking, inotify_read's ev_mutex vs do_page_fault's mmap_sem Ingo Molnar
2008-09-14 22:12                     ` [patch] mm: tiny-shmem fix lor, mmap_sem vs i_mutex Nick Piggin
2008-09-17 20:14                       ` Andrew Morton
2008-09-17 20:46                         ` Matt Mackall
2008-09-18 11:12                         ` Ingo Molnar
2008-09-18 19:29                           ` Jeremy Fitzhardinge
2008-09-18 21:11                             ` Matt Mackall
2008-09-20  2:18                               ` Dave Hansen
2008-09-20 16:12                                 ` Hugh Dickins
2008-09-22 15:14                                   ` Dave Hansen
2008-09-22 14:54                                 ` David Howells
2008-09-23  5:32                                   ` Nick Piggin
2008-09-24 19:29                                     ` Hugh Dickins
2008-09-24 19:47                                       ` Andrew Morton
2008-09-24 18:18                                   ` David Howells
2008-09-24 18:29                                     ` Matt Mackall
2008-09-24 19:41                                       ` Hugh Dickins
2008-09-24 19:59                                       ` David Howells
2008-09-24 23:43                                         ` Hugh Dickins
2008-09-24 18:56                                     ` David Howells
2008-09-24 19:11                                       ` Matt Mackall
2008-09-24 19:26                                       ` David Howells
2008-09-19  8:40                             ` Ingo Molnar
2008-09-14 21:36                   ` [lockdep] possible circular locking, between &mm->mmap_sem and &dev->ev_mutex Nick Piggin
2008-09-10 14:30     ` [patch] x86: some lock annotations for user copy paths Nick Piggin
2008-09-10 14:31 ` Andi Kleen

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).