* [PATCH v3-resend 01/11] asm-generic: uaccess s/might_sleep/might_fault/
2013-05-26 14:21 [PATCH v3-resend 00/11] uaccess: better might_sleep/might_fault behavior Michael S. Tsirkin
@ 2013-05-26 14:30 ` Michael S. Tsirkin
2013-05-28 13:14 ` [tip:sched/mm] " tip-bot for Michael S. Tsirkin
2013-05-26 14:30 ` [PATCH v3-resend 02/11] arm64: " Michael S. Tsirkin
` (10 subsequent siblings)
11 siblings, 1 reply; 25+ messages in thread
From: Michael S. Tsirkin @ 2013-05-26 14:30 UTC (permalink / raw)
To: linux-kernel; +Cc: Ingo Molnar, Peter Zijlstra, Arnd Bergmann, linux-arch
The only reason uaccess routines might sleep
is if they fault. Make this explicit.
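For context, a rough sketch of what might_fault() amounts to before this
series (reconstructed from patches 10 and 11 below; an approximation for
illustration, not a quote of the current tree):

/*
 * Sketch only - approximate shape of might_fault() in mm/memory.c when
 * built with CONFIG_PROVE_LOCKING.  It performs the may-sleep check and
 * additionally documents that a fault may need to take mmap_sem for
 * reading, which is exactly what the uaccess annotations want to say.
 */
void might_fault(void)
{
	/* a KERNEL_DS segment means no user page fault can happen */
	if (segment_eq(get_fs(), KERNEL_DS))
		return;

	might_sleep();

	if (!in_atomic() && current->mm)
		might_lock_read(&current->mm->mmap_sem);
}

So s/might_sleep/might_fault/ keeps the sleep diagnostic while making its
reason - a possible page fault - explicit.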
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
---
include/asm-generic/uaccess.h | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
index c184aa8..dc1269c 100644
--- a/include/asm-generic/uaccess.h
+++ b/include/asm-generic/uaccess.h
@@ -163,7 +163,7 @@ static inline __must_check long __copy_to_user(void __user *to,
#define put_user(x, ptr) \
({ \
- might_sleep(); \
+ might_fault(); \
access_ok(VERIFY_WRITE, ptr, sizeof(*ptr)) ? \
__put_user(x, ptr) : \
-EFAULT; \
@@ -225,7 +225,7 @@ extern int __put_user_bad(void) __attribute__((noreturn));
#define get_user(x, ptr) \
({ \
- might_sleep(); \
+ might_fault(); \
access_ok(VERIFY_READ, ptr, sizeof(*ptr)) ? \
__get_user(x, ptr) : \
-EFAULT; \
@@ -255,7 +255,7 @@ extern int __get_user_bad(void) __attribute__((noreturn));
static inline long copy_from_user(void *to,
const void __user * from, unsigned long n)
{
- might_sleep();
+ might_fault();
if (access_ok(VERIFY_READ, from, n))
return __copy_from_user(to, from, n);
else
@@ -265,7 +265,7 @@ static inline long copy_from_user(void *to,
static inline long copy_to_user(void __user *to,
const void *from, unsigned long n)
{
- might_sleep();
+ might_fault();
if (access_ok(VERIFY_WRITE, to, n))
return __copy_to_user(to, from, n);
else
@@ -336,7 +336,7 @@ __clear_user(void __user *to, unsigned long n)
static inline __must_check unsigned long
clear_user(void __user *to, unsigned long n)
{
- might_sleep();
+ might_fault();
if (!access_ok(VERIFY_WRITE, to, n))
return n;
--
MST
* [tip:sched/mm] asm-generic: uaccess s/might_sleep/might_fault/
2013-05-26 14:30 ` [PATCH v3-resend 01/11] asm-generic: uaccess s/might_sleep/might_fault/ Michael S. Tsirkin
@ 2013-05-28 13:14 ` tip-bot for Michael S. Tsirkin
0 siblings, 0 replies; 25+ messages in thread
From: tip-bot for Michael S. Tsirkin @ 2013-05-28 13:14 UTC (permalink / raw)
To: linux-tip-commits
Cc: linux-kernel, hpa, mingo, a.p.zijlstra, torvalds, mst, peterz,
akpm, tglx
Commit-ID: e0acd0bd0594161be44c054bb6b984972f444beb
Gitweb: http://git.kernel.org/tip/e0acd0bd0594161be44c054bb6b984972f444beb
Author: Michael S. Tsirkin <mst@redhat.com>
AuthorDate: Sun, 26 May 2013 17:30:36 +0300
Committer: Ingo Molnar <mingo@kernel.org>
CommitDate: Tue, 28 May 2013 09:41:05 +0200
asm-generic: uaccess s/might_sleep/might_fault/
The only reason uaccess routines might sleep
is if they fault. Make this explicit.
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1369577426-26721-1-git-send-email-mst@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
---
include/asm-generic/uaccess.h | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
index c184aa8..dc1269c 100644
--- a/include/asm-generic/uaccess.h
+++ b/include/asm-generic/uaccess.h
@@ -163,7 +163,7 @@ static inline __must_check long __copy_to_user(void __user *to,
#define put_user(x, ptr) \
({ \
- might_sleep(); \
+ might_fault(); \
access_ok(VERIFY_WRITE, ptr, sizeof(*ptr)) ? \
__put_user(x, ptr) : \
-EFAULT; \
@@ -225,7 +225,7 @@ extern int __put_user_bad(void) __attribute__((noreturn));
#define get_user(x, ptr) \
({ \
- might_sleep(); \
+ might_fault(); \
access_ok(VERIFY_READ, ptr, sizeof(*ptr)) ? \
__get_user(x, ptr) : \
-EFAULT; \
@@ -255,7 +255,7 @@ extern int __get_user_bad(void) __attribute__((noreturn));
static inline long copy_from_user(void *to,
const void __user * from, unsigned long n)
{
- might_sleep();
+ might_fault();
if (access_ok(VERIFY_READ, from, n))
return __copy_from_user(to, from, n);
else
@@ -265,7 +265,7 @@ static inline long copy_from_user(void *to,
static inline long copy_to_user(void __user *to,
const void *from, unsigned long n)
{
- might_sleep();
+ might_fault();
if (access_ok(VERIFY_WRITE, to, n))
return __copy_to_user(to, from, n);
else
@@ -336,7 +336,7 @@ __clear_user(void __user *to, unsigned long n)
static inline __must_check unsigned long
clear_user(void __user *to, unsigned long n)
{
- might_sleep();
+ might_fault();
if (!access_ok(VERIFY_WRITE, to, n))
return n;
* [PATCH v3-resend 02/11] arm64: uaccess s/might_sleep/might_fault/
2013-05-26 14:21 [PATCH v3-resend 00/11] uaccess: better might_sleep/might_fault behavior Michael S. Tsirkin
2013-05-26 14:30 ` [PATCH v3-resend 01/11] asm-generic: uaccess s/might_sleep/might_fault/ Michael S. Tsirkin
@ 2013-05-26 14:30 ` Michael S. Tsirkin
2013-05-28 13:15 ` [tip:sched/mm] " tip-bot for Michael S. Tsirkin
2013-05-26 14:30 ` [PATCH v3-resend 03/11] frv: " Michael S. Tsirkin
` (9 subsequent siblings)
11 siblings, 1 reply; 25+ messages in thread
From: Michael S. Tsirkin @ 2013-05-26 14:30 UTC (permalink / raw)
To: linux-kernel
Cc: Ingo Molnar, Peter Zijlstra, Arnd Bergmann, Catalin Marinas,
Will Deacon, linux-arm-kernel
The only reason uaccess routines might sleep
is if they fault. Make this explicit.
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
---
arch/arm64/include/asm/uaccess.h | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 008f848..edb3d5c 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -166,7 +166,7 @@ do { \
#define get_user(x, ptr) \
({ \
- might_sleep(); \
+ might_fault(); \
access_ok(VERIFY_READ, (ptr), sizeof(*(ptr))) ? \
__get_user((x), (ptr)) : \
((x) = 0, -EFAULT); \
@@ -227,7 +227,7 @@ do { \
#define put_user(x, ptr) \
({ \
- might_sleep(); \
+ might_fault(); \
access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) ? \
__put_user((x), (ptr)) : \
-EFAULT; \
--
MST
* [tip:sched/mm] arm64: uaccess s/might_sleep/might_fault/
2013-05-26 14:30 ` [PATCH v3-resend 02/11] arm64: " Michael S. Tsirkin
@ 2013-05-28 13:15 ` tip-bot for Michael S. Tsirkin
0 siblings, 0 replies; 25+ messages in thread
From: tip-bot for Michael S. Tsirkin @ 2013-05-28 13:15 UTC (permalink / raw)
To: linux-tip-commits
Cc: linux-kernel, hpa, mingo, a.p.zijlstra, torvalds, catalin.marinas,
mst, peterz, akpm, tglx
Commit-ID: 56d2ef789f7c424918abdf6b95d84a64c1473220
Gitweb: http://git.kernel.org/tip/56d2ef789f7c424918abdf6b95d84a64c1473220
Author: Michael S. Tsirkin <mst@redhat.com>
AuthorDate: Sun, 26 May 2013 17:30:42 +0300
Committer: Ingo Molnar <mingo@kernel.org>
CommitDate: Tue, 28 May 2013 09:41:06 +0200
arm64: uaccess s/might_sleep/might_fault/
The only reason uaccess routines might sleep
is if they fault. Make this explicit.
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1369577426-26721-2-git-send-email-mst@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
---
arch/arm64/include/asm/uaccess.h | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 008f848..edb3d5c 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -166,7 +166,7 @@ do { \
#define get_user(x, ptr) \
({ \
- might_sleep(); \
+ might_fault(); \
access_ok(VERIFY_READ, (ptr), sizeof(*(ptr))) ? \
__get_user((x), (ptr)) : \
((x) = 0, -EFAULT); \
@@ -227,7 +227,7 @@ do { \
#define put_user(x, ptr) \
({ \
- might_sleep(); \
+ might_fault(); \
access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) ? \
__put_user((x), (ptr)) : \
-EFAULT; \
* [PATCH v3-resend 03/11] frv: uaccess s/might_sleep/might_fault/
2013-05-26 14:21 [PATCH v3-resend 00/11] uaccess: better might_sleep/might_fault behavior Michael S. Tsirkin
2013-05-26 14:30 ` [PATCH v3-resend 01/11] asm-generic: uaccess s/might_sleep/might_fault/ Michael S. Tsirkin
2013-05-26 14:30 ` [PATCH v3-resend 02/11] arm64: " Michael S. Tsirkin
@ 2013-05-26 14:30 ` Michael S. Tsirkin
2013-05-28 13:16 ` [tip:sched/mm] " tip-bot for Michael S. Tsirkin
2013-05-26 14:30 ` [PATCH v3-resend 04/11] m32r: " Michael S. Tsirkin
` (8 subsequent siblings)
11 siblings, 1 reply; 25+ messages in thread
From: Michael S. Tsirkin @ 2013-05-26 14:30 UTC (permalink / raw)
To: linux-kernel; +Cc: Ingo Molnar, Peter Zijlstra, Arnd Bergmann, David Howells
The only reason uaccess routines might sleep
is if they fault. Make this explicit.
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
---
arch/frv/include/asm/uaccess.h | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/arch/frv/include/asm/uaccess.h b/arch/frv/include/asm/uaccess.h
index 0b67ec5..3ac9a59 100644
--- a/arch/frv/include/asm/uaccess.h
+++ b/arch/frv/include/asm/uaccess.h
@@ -280,14 +280,14 @@ extern long __memcpy_user(void *dst, const void *src, unsigned long count);
static inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
- might_sleep();
+ might_fault();
return __copy_to_user_inatomic(to, from, n);
}
static inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
- might_sleep();
+ might_fault();
return __copy_from_user_inatomic(to, from, n);
}
--
MST
* [tip:sched/mm] frv: uaccess s/might_sleep/might_fault/
2013-05-26 14:30 ` [PATCH v3-resend 03/11] frv: " Michael S. Tsirkin
@ 2013-05-28 13:16 ` tip-bot for Michael S. Tsirkin
0 siblings, 0 replies; 25+ messages in thread
From: tip-bot for Michael S. Tsirkin @ 2013-05-28 13:16 UTC (permalink / raw)
To: linux-tip-commits
Cc: linux-kernel, hpa, mingo, a.p.zijlstra, torvalds, mst, peterz,
akpm, tglx
Commit-ID: b607ae78ac8a78f8e5e36817500e7c311519f032
Gitweb: http://git.kernel.org/tip/b607ae78ac8a78f8e5e36817500e7c311519f032
Author: Michael S. Tsirkin <mst@redhat.com>
AuthorDate: Sun, 26 May 2013 17:30:47 +0300
Committer: Ingo Molnar <mingo@kernel.org>
CommitDate: Tue, 28 May 2013 09:41:07 +0200
frv: uaccess s/might_sleep/might_fault/
The only reason uaccess routines might sleep
is if they fault. Make this explicit.
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1369577426-26721-3-git-send-email-mst@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
---
arch/frv/include/asm/uaccess.h | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/arch/frv/include/asm/uaccess.h b/arch/frv/include/asm/uaccess.h
index 0b67ec5..3ac9a59 100644
--- a/arch/frv/include/asm/uaccess.h
+++ b/arch/frv/include/asm/uaccess.h
@@ -280,14 +280,14 @@ extern long __memcpy_user(void *dst, const void *src, unsigned long count);
static inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
- might_sleep();
+ might_fault();
return __copy_to_user_inatomic(to, from, n);
}
static inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
- might_sleep();
+ might_fault();
return __copy_from_user_inatomic(to, from, n);
}
* [PATCH v3-resend 04/11] m32r: uaccess s/might_sleep/might_fault/
2013-05-26 14:21 [PATCH v3-resend 00/11] uaccess: better might_sleep/might_fault behavior Michael S. Tsirkin
` (2 preceding siblings ...)
2013-05-26 14:30 ` [PATCH v3-resend 03/11] frv: " Michael S. Tsirkin
@ 2013-05-26 14:30 ` Michael S. Tsirkin
2013-05-28 13:18 ` [tip:sched/mm] " tip-bot for Michael S. Tsirkin
2013-05-26 14:30 ` [PATCH v3-resend 05/11] microblaze: " Michael S. Tsirkin
` (7 subsequent siblings)
11 siblings, 1 reply; 25+ messages in thread
From: Michael S. Tsirkin @ 2013-05-26 14:30 UTC (permalink / raw)
To: linux-kernel
Cc: Ingo Molnar, Peter Zijlstra, Arnd Bergmann, Hirokazu Takata,
linux-m32r, linux-m32r-ja
The only reason uaccess routines might sleep
is if they fault. Make this explicit.
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
---
arch/m32r/include/asm/uaccess.h | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/arch/m32r/include/asm/uaccess.h b/arch/m32r/include/asm/uaccess.h
index 1c7047b..84fe7ba 100644
--- a/arch/m32r/include/asm/uaccess.h
+++ b/arch/m32r/include/asm/uaccess.h
@@ -216,7 +216,7 @@ extern int fixup_exception(struct pt_regs *regs);
({ \
long __gu_err = 0; \
unsigned long __gu_val; \
- might_sleep(); \
+ might_fault(); \
__get_user_size(__gu_val,(ptr),(size),__gu_err); \
(x) = (__typeof__(*(ptr)))__gu_val; \
__gu_err; \
@@ -227,7 +227,7 @@ extern int fixup_exception(struct pt_regs *regs);
long __gu_err = -EFAULT; \
unsigned long __gu_val = 0; \
const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
- might_sleep(); \
+ might_fault(); \
if (access_ok(VERIFY_READ,__gu_addr,size)) \
__get_user_size(__gu_val,__gu_addr,(size),__gu_err); \
(x) = (__typeof__(*(ptr)))__gu_val; \
@@ -295,7 +295,7 @@ do { \
#define __put_user_nocheck(x,ptr,size) \
({ \
long __pu_err; \
- might_sleep(); \
+ might_fault(); \
__put_user_size((x),(ptr),(size),__pu_err); \
__pu_err; \
})
@@ -305,7 +305,7 @@ do { \
({ \
long __pu_err = -EFAULT; \
__typeof__(*(ptr)) __user *__pu_addr = (ptr); \
- might_sleep(); \
+ might_fault(); \
if (access_ok(VERIFY_WRITE,__pu_addr,size)) \
__put_user_size((x),__pu_addr,(size),__pu_err); \
__pu_err; \
@@ -597,7 +597,7 @@ unsigned long __generic_copy_from_user(void *, const void __user *, unsigned lon
*/
#define copy_to_user(to,from,n) \
({ \
- might_sleep(); \
+ might_fault(); \
__generic_copy_to_user((to),(from),(n)); \
})
@@ -638,7 +638,7 @@ unsigned long __generic_copy_from_user(void *, const void __user *, unsigned lon
*/
#define copy_from_user(to,from,n) \
({ \
- might_sleep(); \
+ might_fault(); \
__generic_copy_from_user((to),(from),(n)); \
})
--
MST
* [tip:sched/mm] m32r: uaccess s/might_sleep/might_fault/
2013-05-26 14:30 ` [PATCH v3-resend 04/11] m32r: " Michael S. Tsirkin
@ 2013-05-28 13:18 ` tip-bot for Michael S. Tsirkin
0 siblings, 0 replies; 25+ messages in thread
From: tip-bot for Michael S. Tsirkin @ 2013-05-28 13:18 UTC (permalink / raw)
To: linux-tip-commits
Cc: linux-kernel, hpa, mingo, a.p.zijlstra, torvalds, mst, peterz,
akpm, tglx
Commit-ID: 01682576d5fd1c92b96d79560b17208a6567c331
Gitweb: http://git.kernel.org/tip/01682576d5fd1c92b96d79560b17208a6567c331
Author: Michael S. Tsirkin <mst@redhat.com>
AuthorDate: Sun, 26 May 2013 17:30:51 +0300
Committer: Ingo Molnar <mingo@kernel.org>
CommitDate: Tue, 28 May 2013 09:41:07 +0200
m32r: uaccess s/might_sleep/might_fault/
The only reason uaccess routines might sleep
is if they fault. Make this explicit.
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1369577426-26721-4-git-send-email-mst@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
---
arch/m32r/include/asm/uaccess.h | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/arch/m32r/include/asm/uaccess.h b/arch/m32r/include/asm/uaccess.h
index 1c7047b..84fe7ba 100644
--- a/arch/m32r/include/asm/uaccess.h
+++ b/arch/m32r/include/asm/uaccess.h
@@ -216,7 +216,7 @@ extern int fixup_exception(struct pt_regs *regs);
({ \
long __gu_err = 0; \
unsigned long __gu_val; \
- might_sleep(); \
+ might_fault(); \
__get_user_size(__gu_val,(ptr),(size),__gu_err); \
(x) = (__typeof__(*(ptr)))__gu_val; \
__gu_err; \
@@ -227,7 +227,7 @@ extern int fixup_exception(struct pt_regs *regs);
long __gu_err = -EFAULT; \
unsigned long __gu_val = 0; \
const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
- might_sleep(); \
+ might_fault(); \
if (access_ok(VERIFY_READ,__gu_addr,size)) \
__get_user_size(__gu_val,__gu_addr,(size),__gu_err); \
(x) = (__typeof__(*(ptr)))__gu_val; \
@@ -295,7 +295,7 @@ do { \
#define __put_user_nocheck(x,ptr,size) \
({ \
long __pu_err; \
- might_sleep(); \
+ might_fault(); \
__put_user_size((x),(ptr),(size),__pu_err); \
__pu_err; \
})
@@ -305,7 +305,7 @@ do { \
({ \
long __pu_err = -EFAULT; \
__typeof__(*(ptr)) __user *__pu_addr = (ptr); \
- might_sleep(); \
+ might_fault(); \
if (access_ok(VERIFY_WRITE,__pu_addr,size)) \
__put_user_size((x),__pu_addr,(size),__pu_err); \
__pu_err; \
@@ -597,7 +597,7 @@ unsigned long __generic_copy_from_user(void *, const void __user *, unsigned lon
*/
#define copy_to_user(to,from,n) \
({ \
- might_sleep(); \
+ might_fault(); \
__generic_copy_to_user((to),(from),(n)); \
})
@@ -638,7 +638,7 @@ unsigned long __generic_copy_from_user(void *, const void __user *, unsigned lon
*/
#define copy_from_user(to,from,n) \
({ \
- might_sleep(); \
+ might_fault(); \
__generic_copy_from_user((to),(from),(n)); \
})
* [PATCH v3-resend 05/11] microblaze: uaccess s/might_sleep/might_fault/
2013-05-26 14:21 [PATCH v3-resend 00/11] uaccess: better might_sleep/might_fault behavior Michael S. Tsirkin
` (3 preceding siblings ...)
2013-05-26 14:30 ` [PATCH v3-resend 04/11] m32r: " Michael S. Tsirkin
@ 2013-05-26 14:30 ` Michael S. Tsirkin
2013-05-28 13:19 ` [tip:sched/mm] " tip-bot for Michael S. Tsirkin
2013-05-26 14:31 ` [PATCH v3-resend 06/11] mn10300: " Michael S. Tsirkin
` (6 subsequent siblings)
11 siblings, 1 reply; 25+ messages in thread
From: Michael S. Tsirkin @ 2013-05-26 14:30 UTC (permalink / raw)
To: linux-kernel
Cc: Ingo Molnar, Peter Zijlstra, Arnd Bergmann, Michal Simek,
microblaze-uclinux
The only reason uaccess routines might sleep
is if they fault. Make this explicit.
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
---
arch/microblaze/include/asm/uaccess.h | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h
index efe59d8..2fc8bf7 100644
--- a/arch/microblaze/include/asm/uaccess.h
+++ b/arch/microblaze/include/asm/uaccess.h
@@ -145,7 +145,7 @@ static inline unsigned long __must_check __clear_user(void __user *to,
static inline unsigned long __must_check clear_user(void __user *to,
unsigned long n)
{
- might_sleep();
+ might_fault();
if (unlikely(!access_ok(VERIFY_WRITE, to, n)))
return n;
@@ -371,7 +371,7 @@ extern long __user_bad(void);
static inline long copy_from_user(void *to,
const void __user *from, unsigned long n)
{
- might_sleep();
+ might_fault();
if (access_ok(VERIFY_READ, from, n))
return __copy_from_user(to, from, n);
return n;
@@ -385,7 +385,7 @@ static inline long copy_from_user(void *to,
static inline long copy_to_user(void __user *to,
const void *from, unsigned long n)
{
- might_sleep();
+ might_fault();
if (access_ok(VERIFY_WRITE, to, n))
return __copy_to_user(to, from, n);
return n;
--
MST
* [tip:sched/mm] microblaze: uaccess s/might_sleep/might_fault/
2013-05-26 14:30 ` [PATCH v3-resend 05/11] microblaze: " Michael S. Tsirkin
@ 2013-05-28 13:19 ` tip-bot for Michael S. Tsirkin
0 siblings, 0 replies; 25+ messages in thread
From: tip-bot for Michael S. Tsirkin @ 2013-05-28 13:19 UTC (permalink / raw)
To: linux-tip-commits
Cc: linux-kernel, hpa, mingo, a.p.zijlstra, torvalds, mst, peterz,
akpm, tglx
Commit-ID: ac093f8d5e76be1f2654acfd7a59d339ba037654
Gitweb: http://git.kernel.org/tip/ac093f8d5e76be1f2654acfd7a59d339ba037654
Author: Michael S. Tsirkin <mst@redhat.com>
AuthorDate: Sun, 26 May 2013 17:30:56 +0300
Committer: Ingo Molnar <mingo@kernel.org>
CommitDate: Tue, 28 May 2013 09:41:08 +0200
microblaze: uaccess s/might_sleep/might_fault/
The only reason uaccess routines might sleep
is if they fault. Make this explicit.
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1369577426-26721-5-git-send-email-mst@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
---
arch/microblaze/include/asm/uaccess.h | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h
index efe59d8..2fc8bf7 100644
--- a/arch/microblaze/include/asm/uaccess.h
+++ b/arch/microblaze/include/asm/uaccess.h
@@ -145,7 +145,7 @@ static inline unsigned long __must_check __clear_user(void __user *to,
static inline unsigned long __must_check clear_user(void __user *to,
unsigned long n)
{
- might_sleep();
+ might_fault();
if (unlikely(!access_ok(VERIFY_WRITE, to, n)))
return n;
@@ -371,7 +371,7 @@ extern long __user_bad(void);
static inline long copy_from_user(void *to,
const void __user *from, unsigned long n)
{
- might_sleep();
+ might_fault();
if (access_ok(VERIFY_READ, from, n))
return __copy_from_user(to, from, n);
return n;
@@ -385,7 +385,7 @@ static inline long copy_from_user(void *to,
static inline long copy_to_user(void __user *to,
const void *from, unsigned long n)
{
- might_sleep();
+ might_fault();
if (access_ok(VERIFY_WRITE, to, n))
return __copy_to_user(to, from, n);
return n;
* [PATCH v3-resend 06/11] mn10300: uaccess s/might_sleep/might_fault/
2013-05-26 14:21 [PATCH v3-resend 00/11] uaccess: better might_sleep/might_fault behavior Michael S. Tsirkin
` (4 preceding siblings ...)
2013-05-26 14:30 ` [PATCH v3-resend 05/11] microblaze: " Michael S. Tsirkin
@ 2013-05-26 14:31 ` Michael S. Tsirkin
2013-05-28 13:20 ` [tip:sched/mm] " tip-bot for Michael S. Tsirkin
2013-05-26 14:31 ` [PATCH v3-resend 07/11] powerpc: " Michael S. Tsirkin
` (5 subsequent siblings)
11 siblings, 1 reply; 25+ messages in thread
From: Michael S. Tsirkin @ 2013-05-26 14:31 UTC (permalink / raw)
To: linux-kernel
Cc: Ingo Molnar, Peter Zijlstra, Arnd Bergmann, David Howells,
Koichi Yasutake, linux-am33-list
The only reason uaccess routines might sleep
is if they fault. Make this explicit.
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
---
arch/mn10300/include/asm/uaccess.h | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/arch/mn10300/include/asm/uaccess.h b/arch/mn10300/include/asm/uaccess.h
index 780560b..107508a 100644
--- a/arch/mn10300/include/asm/uaccess.h
+++ b/arch/mn10300/include/asm/uaccess.h
@@ -471,13 +471,13 @@ extern unsigned long __generic_copy_from_user(void *, const void __user *,
#define __copy_to_user(to, from, n) \
({ \
- might_sleep(); \
+ might_fault(); \
__copy_to_user_inatomic((to), (from), (n)); \
})
#define __copy_from_user(to, from, n) \
({ \
- might_sleep(); \
+ might_fault(); \
__copy_from_user_inatomic((to), (from), (n)); \
})
--
MST
* [tip:sched/mm] mn10300: uaccess s/might_sleep/might_fault/
2013-05-26 14:31 ` [PATCH v3-resend 06/11] mn10300: " Michael S. Tsirkin
@ 2013-05-28 13:20 ` tip-bot for Michael S. Tsirkin
0 siblings, 0 replies; 25+ messages in thread
From: tip-bot for Michael S. Tsirkin @ 2013-05-28 13:20 UTC (permalink / raw)
To: linux-tip-commits
Cc: linux-kernel, hpa, mingo, a.p.zijlstra, torvalds, mst, peterz,
akpm, tglx
Commit-ID: 3837a3cfe4a27836e0e9f207eb2d4f00b5a8fcba
Gitweb: http://git.kernel.org/tip/3837a3cfe4a27836e0e9f207eb2d4f00b5a8fcba
Author: Michael S. Tsirkin <mst@redhat.com>
AuthorDate: Sun, 26 May 2013 17:31:05 +0300
Committer: Ingo Molnar <mingo@kernel.org>
CommitDate: Tue, 28 May 2013 09:41:08 +0200
mn10300: uaccess s/might_sleep/might_fault/
The only reason uaccess routines might sleep
is if they fault. Make this explicit.
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1369577426-26721-6-git-send-email-mst@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
---
arch/mn10300/include/asm/uaccess.h | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/arch/mn10300/include/asm/uaccess.h b/arch/mn10300/include/asm/uaccess.h
index 780560b..107508a 100644
--- a/arch/mn10300/include/asm/uaccess.h
+++ b/arch/mn10300/include/asm/uaccess.h
@@ -471,13 +471,13 @@ extern unsigned long __generic_copy_from_user(void *, const void __user *,
#define __copy_to_user(to, from, n) \
({ \
- might_sleep(); \
+ might_fault(); \
__copy_to_user_inatomic((to), (from), (n)); \
})
#define __copy_from_user(to, from, n) \
({ \
- might_sleep(); \
+ might_fault(); \
__copy_from_user_inatomic((to), (from), (n)); \
})
* [PATCH v3-resend 07/11] powerpc: uaccess s/might_sleep/might_fault/
2013-05-26 14:21 [PATCH v3-resend 00/11] uaccess: better might_sleep/might_fault behavior Michael S. Tsirkin
` (5 preceding siblings ...)
2013-05-26 14:31 ` [PATCH v3-resend 06/11] mn10300: " Michael S. Tsirkin
@ 2013-05-26 14:31 ` Michael S. Tsirkin
2013-05-27 9:36 ` Benjamin Herrenschmidt
2013-05-28 13:22 ` [tip:sched/mm] " tip-bot for Michael S. Tsirkin
2013-05-26 14:31 ` [PATCH v3-resend 08/11] tile: " Michael S. Tsirkin
` (4 subsequent siblings)
11 siblings, 2 replies; 25+ messages in thread
From: Michael S. Tsirkin @ 2013-05-26 14:31 UTC (permalink / raw)
To: linux-kernel
Cc: Ingo Molnar, Peter Zijlstra, Arnd Bergmann,
Benjamin Herrenschmidt, Paul Mackerras, linuxppc-dev
The only reason uaccess routines might sleep
is if they fault. Make this explicit.
Arnd Bergmann suggested that the following code
if (!is_kernel_addr((unsigned long)__pu_addr))
might_fault();
can be further simplified by adding a version of might_fault
that includes the kernel addr check.
This will be considered as a further optimization in the future.
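For illustration only, a sketch of what such a combined helper could look
like (the name and exact form are assumptions, not part of this patch):

/*
 * Hypothetical helper: fold the powerpc kernel-address check into the
 * annotation itself, so callers need not open-code is_kernel_addr().
 */
static inline void might_fault_addr(unsigned long addr)
{
	if (!is_kernel_addr(addr))
		might_fault();
}

With something along these lines, the put_user/get_user paths that
currently test is_kernel_addr() by hand could simply call
might_fault_addr((unsigned long)__pu_addr).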
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
---
arch/powerpc/include/asm/uaccess.h | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index 4db4959..9485b43 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -178,7 +178,7 @@ do { \
long __pu_err; \
__typeof__(*(ptr)) __user *__pu_addr = (ptr); \
if (!is_kernel_addr((unsigned long)__pu_addr)) \
- might_sleep(); \
+ might_fault(); \
__chk_user_ptr(ptr); \
__put_user_size((x), __pu_addr, (size), __pu_err); \
__pu_err; \
@@ -188,7 +188,7 @@ do { \
({ \
long __pu_err = -EFAULT; \
__typeof__(*(ptr)) __user *__pu_addr = (ptr); \
- might_sleep(); \
+ might_fault(); \
if (access_ok(VERIFY_WRITE, __pu_addr, size)) \
__put_user_size((x), __pu_addr, (size), __pu_err); \
__pu_err; \
@@ -268,7 +268,7 @@ do { \
const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
__chk_user_ptr(ptr); \
if (!is_kernel_addr((unsigned long)__gu_addr)) \
- might_sleep(); \
+ might_fault(); \
__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
(x) = (__typeof__(*(ptr)))__gu_val; \
__gu_err; \
@@ -282,7 +282,7 @@ do { \
const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
__chk_user_ptr(ptr); \
if (!is_kernel_addr((unsigned long)__gu_addr)) \
- might_sleep(); \
+ might_fault(); \
__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
(x) = (__typeof__(*(ptr)))__gu_val; \
__gu_err; \
@@ -294,7 +294,7 @@ do { \
long __gu_err = -EFAULT; \
unsigned long __gu_val = 0; \
const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
- might_sleep(); \
+ might_fault(); \
if (access_ok(VERIFY_READ, __gu_addr, (size))) \
__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
(x) = (__typeof__(*(ptr)))__gu_val; \
@@ -419,14 +419,14 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
static inline unsigned long __copy_from_user(void *to,
const void __user *from, unsigned long size)
{
- might_sleep();
+ might_fault();
return __copy_from_user_inatomic(to, from, size);
}
static inline unsigned long __copy_to_user(void __user *to,
const void *from, unsigned long size)
{
- might_sleep();
+ might_fault();
return __copy_to_user_inatomic(to, from, size);
}
@@ -434,7 +434,7 @@ extern unsigned long __clear_user(void __user *addr, unsigned long size);
static inline unsigned long clear_user(void __user *addr, unsigned long size)
{
- might_sleep();
+ might_fault();
if (likely(access_ok(VERIFY_WRITE, addr, size)))
return __clear_user(addr, size);
if ((unsigned long)addr < TASK_SIZE) {
--
MST
* Re: [PATCH v3-resend 07/11] powerpc: uaccess s/might_sleep/might_fault/
2013-05-26 14:31 ` [PATCH v3-resend 07/11] powerpc: " Michael S. Tsirkin
@ 2013-05-27 9:36 ` Benjamin Herrenschmidt
2013-05-28 13:22 ` [tip:sched/mm] " tip-bot for Michael S. Tsirkin
1 sibling, 0 replies; 25+ messages in thread
From: Benjamin Herrenschmidt @ 2013-05-27 9:36 UTC (permalink / raw)
To: Michael S. Tsirkin
Cc: linux-kernel, Ingo Molnar, Peter Zijlstra, Arnd Bergmann,
Paul Mackerras, linuxppc-dev
On Sun, 2013-05-26 at 17:31 +0300, Michael S. Tsirkin wrote:
> The only reason uaccess routines might sleep
> is if they fault. Make this explicit.
>
> Arnd Bergmann suggested that the following code
> if (!is_kernel_addr((unsigned long)__pu_addr))
> might_fault();
> can be further simplified by adding a version of might_fault
> that includes the kernel addr check.
>
> Will be considered as a further optimization in future.
>
> Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
As long as Peter is happy with the general semantics of might_fault()
and you have at least build-tested it, then I'm happy.
Acked-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
> ---
> arch/powerpc/include/asm/uaccess.h | 16 ++++++++--------
> 1 file changed, 8 insertions(+), 8 deletions(-)
>
> diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
> index 4db4959..9485b43 100644
> --- a/arch/powerpc/include/asm/uaccess.h
> +++ b/arch/powerpc/include/asm/uaccess.h
> @@ -178,7 +178,7 @@ do { \
> long __pu_err; \
> __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
> if (!is_kernel_addr((unsigned long)__pu_addr)) \
> - might_sleep(); \
> + might_fault(); \
> __chk_user_ptr(ptr); \
> __put_user_size((x), __pu_addr, (size), __pu_err); \
> __pu_err; \
> @@ -188,7 +188,7 @@ do { \
> ({ \
> long __pu_err = -EFAULT; \
> __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
> - might_sleep(); \
> + might_fault(); \
> if (access_ok(VERIFY_WRITE, __pu_addr, size)) \
> __put_user_size((x), __pu_addr, (size), __pu_err); \
> __pu_err; \
> @@ -268,7 +268,7 @@ do { \
> const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
> __chk_user_ptr(ptr); \
> if (!is_kernel_addr((unsigned long)__gu_addr)) \
> - might_sleep(); \
> + might_fault(); \
> __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
> (x) = (__typeof__(*(ptr)))__gu_val; \
> __gu_err; \
> @@ -282,7 +282,7 @@ do { \
> const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
> __chk_user_ptr(ptr); \
> if (!is_kernel_addr((unsigned long)__gu_addr)) \
> - might_sleep(); \
> + might_fault(); \
> __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
> (x) = (__typeof__(*(ptr)))__gu_val; \
> __gu_err; \
> @@ -294,7 +294,7 @@ do { \
> long __gu_err = -EFAULT; \
> unsigned long __gu_val = 0; \
> const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
> - might_sleep(); \
> + might_fault(); \
> if (access_ok(VERIFY_READ, __gu_addr, (size))) \
> __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
> (x) = (__typeof__(*(ptr)))__gu_val; \
> @@ -419,14 +419,14 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
> static inline unsigned long __copy_from_user(void *to,
> const void __user *from, unsigned long size)
> {
> - might_sleep();
> + might_fault();
> return __copy_from_user_inatomic(to, from, size);
> }
>
> static inline unsigned long __copy_to_user(void __user *to,
> const void *from, unsigned long size)
> {
> - might_sleep();
> + might_fault();
> return __copy_to_user_inatomic(to, from, size);
> }
>
> @@ -434,7 +434,7 @@ extern unsigned long __clear_user(void __user *addr, unsigned long size);
>
> static inline unsigned long clear_user(void __user *addr, unsigned long size)
> {
> - might_sleep();
> + might_fault();
> if (likely(access_ok(VERIFY_WRITE, addr, size)))
> return __clear_user(addr, size);
> if ((unsigned long)addr < TASK_SIZE) {
* [tip:sched/mm] powerpc: uaccess s/might_sleep/might_fault/
2013-05-26 14:31 ` [PATCH v3-resend 07/11] powerpc: " Michael S. Tsirkin
2013-05-27 9:36 ` Benjamin Herrenschmidt
@ 2013-05-28 13:22 ` tip-bot for Michael S. Tsirkin
1 sibling, 0 replies; 25+ messages in thread
From: tip-bot for Michael S. Tsirkin @ 2013-05-28 13:22 UTC (permalink / raw)
To: linux-tip-commits
Cc: linux-kernel, hpa, mingo, a.p.zijlstra, torvalds, mst, peterz,
benh, akpm, tglx
Commit-ID: 1af1717dbf96eba8a74a2d6a99e75a7795075a02
Gitweb: http://git.kernel.org/tip/1af1717dbf96eba8a74a2d6a99e75a7795075a02
Author: Michael S. Tsirkin <mst@redhat.com>
AuthorDate: Sun, 26 May 2013 17:31:38 +0300
Committer: Ingo Molnar <mingo@kernel.org>
CommitDate: Tue, 28 May 2013 09:41:09 +0200
powerpc: uaccess s/might_sleep/might_fault/
The only reason uaccess routines might sleep
is if they fault. Make this explicit.
Arnd Bergmann suggested that the following code
if (!is_kernel_addr((unsigned long)__pu_addr))
might_fault();
can be further simplified by adding a version of might_fault
that includes the kernel addr check.
This will be considered as a further optimization in the future.
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Acked-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1369577426-26721-7-git-send-email-mst@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
---
arch/powerpc/include/asm/uaccess.h | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index 4db4959..9485b43 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -178,7 +178,7 @@ do { \
long __pu_err; \
__typeof__(*(ptr)) __user *__pu_addr = (ptr); \
if (!is_kernel_addr((unsigned long)__pu_addr)) \
- might_sleep(); \
+ might_fault(); \
__chk_user_ptr(ptr); \
__put_user_size((x), __pu_addr, (size), __pu_err); \
__pu_err; \
@@ -188,7 +188,7 @@ do { \
({ \
long __pu_err = -EFAULT; \
__typeof__(*(ptr)) __user *__pu_addr = (ptr); \
- might_sleep(); \
+ might_fault(); \
if (access_ok(VERIFY_WRITE, __pu_addr, size)) \
__put_user_size((x), __pu_addr, (size), __pu_err); \
__pu_err; \
@@ -268,7 +268,7 @@ do { \
const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
__chk_user_ptr(ptr); \
if (!is_kernel_addr((unsigned long)__gu_addr)) \
- might_sleep(); \
+ might_fault(); \
__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
(x) = (__typeof__(*(ptr)))__gu_val; \
__gu_err; \
@@ -282,7 +282,7 @@ do { \
const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
__chk_user_ptr(ptr); \
if (!is_kernel_addr((unsigned long)__gu_addr)) \
- might_sleep(); \
+ might_fault(); \
__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
(x) = (__typeof__(*(ptr)))__gu_val; \
__gu_err; \
@@ -294,7 +294,7 @@ do { \
long __gu_err = -EFAULT; \
unsigned long __gu_val = 0; \
const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
- might_sleep(); \
+ might_fault(); \
if (access_ok(VERIFY_READ, __gu_addr, (size))) \
__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
(x) = (__typeof__(*(ptr)))__gu_val; \
@@ -419,14 +419,14 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
static inline unsigned long __copy_from_user(void *to,
const void __user *from, unsigned long size)
{
- might_sleep();
+ might_fault();
return __copy_from_user_inatomic(to, from, size);
}
static inline unsigned long __copy_to_user(void __user *to,
const void *from, unsigned long size)
{
- might_sleep();
+ might_fault();
return __copy_to_user_inatomic(to, from, size);
}
@@ -434,7 +434,7 @@ extern unsigned long __clear_user(void __user *addr, unsigned long size);
static inline unsigned long clear_user(void __user *addr, unsigned long size)
{
- might_sleep();
+ might_fault();
if (likely(access_ok(VERIFY_WRITE, addr, size)))
return __clear_user(addr, size);
if ((unsigned long)addr < TASK_SIZE) {
* [PATCH v3-resend 08/11] tile: uaccess s/might_sleep/might_fault/
2013-05-26 14:21 [PATCH v3-resend 00/11] uaccess: better might_sleep/might_fault behavior Michael S. Tsirkin
` (6 preceding siblings ...)
2013-05-26 14:31 ` [PATCH v3-resend 07/11] powerpc: " Michael S. Tsirkin
@ 2013-05-26 14:31 ` Michael S. Tsirkin
2013-05-28 13:23 ` [tip:sched/mm] " tip-bot for Michael S. Tsirkin
2013-05-26 14:31 ` [PATCH v3-resend 09/11] x86: " Michael S. Tsirkin
` (3 subsequent siblings)
11 siblings, 1 reply; 25+ messages in thread
From: Michael S. Tsirkin @ 2013-05-26 14:31 UTC (permalink / raw)
To: linux-kernel; +Cc: Ingo Molnar, Peter Zijlstra, Arnd Bergmann, Chris Metcalf
The only reason uaccess routines might sleep
is if they fault. Make this explicit.
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Acked-by: Chris Metcalf <cmetcalf@tilera.com>
---
arch/tile/include/asm/uaccess.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
index 8a082bc..e4d44bd 100644
--- a/arch/tile/include/asm/uaccess.h
+++ b/arch/tile/include/asm/uaccess.h
@@ -442,7 +442,7 @@ extern unsigned long __copy_in_user_inatomic(
static inline unsigned long __must_check
__copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
- might_sleep();
+ might_fault();
return __copy_in_user_inatomic(to, from, n);
}
--
MST
* [tip:sched/mm] tile: uaccess s/might_sleep/might_fault/
2013-05-26 14:31 ` [PATCH v3-resend 08/11] tile: " Michael S. Tsirkin
@ 2013-05-28 13:23 ` tip-bot for Michael S. Tsirkin
0 siblings, 0 replies; 25+ messages in thread
From: tip-bot for Michael S. Tsirkin @ 2013-05-28 13:23 UTC (permalink / raw)
To: linux-tip-commits
Cc: linux-kernel, hpa, mingo, a.p.zijlstra, torvalds, mst, peterz,
cmetcalf, akpm, tglx
Commit-ID: f8abe86cc4fbd4ba083fd151b88e02fb3ce88b9c
Gitweb: http://git.kernel.org/tip/f8abe86cc4fbd4ba083fd151b88e02fb3ce88b9c
Author: Michael S. Tsirkin <mst@redhat.com>
AuthorDate: Sun, 26 May 2013 17:31:48 +0300
Committer: Ingo Molnar <mingo@kernel.org>
CommitDate: Tue, 28 May 2013 09:41:09 +0200
tile: uaccess s/might_sleep/might_fault/
The only reason uaccess routines might sleep
is if they fault. Make this explicit.
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Acked-by: Chris Metcalf <cmetcalf@tilera.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1369577426-26721-8-git-send-email-mst@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
---
arch/tile/include/asm/uaccess.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
index 8a082bc..e4d44bd 100644
--- a/arch/tile/include/asm/uaccess.h
+++ b/arch/tile/include/asm/uaccess.h
@@ -442,7 +442,7 @@ extern unsigned long __copy_in_user_inatomic(
static inline unsigned long __must_check
__copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
- might_sleep();
+ might_fault();
return __copy_in_user_inatomic(to, from, n);
}
* [PATCH v3-resend 09/11] x86: uaccess s/might_sleep/might_fault/
2013-05-26 14:21 [PATCH v3-resend 00/11] uaccess: better might_sleep/might_fault behavior Michael S. Tsirkin
` (7 preceding siblings ...)
2013-05-26 14:31 ` [PATCH v3-resend 08/11] tile: " Michael S. Tsirkin
@ 2013-05-26 14:31 ` Michael S. Tsirkin
2013-05-28 13:24 ` [tip:sched/mm] " tip-bot for Michael S. Tsirkin
2013-05-26 14:32 ` [PATCH v3-resend 10/11] kernel: drop voluntary schedule from might_fault Michael S. Tsirkin
` (2 subsequent siblings)
11 siblings, 1 reply; 25+ messages in thread
From: Michael S. Tsirkin @ 2013-05-26 14:31 UTC (permalink / raw)
To: linux-kernel
Cc: Ingo Molnar, Peter Zijlstra, Arnd Bergmann, Thomas Gleixner,
H. Peter Anvin, x86
The only reason uaccess routines might sleep
is if they fault. Make this explicit.
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Acked-by: Ingo Molnar <mingo@kernel.org>
---
arch/x86/include/asm/uaccess_64.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 142810c..4f7923d 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -235,7 +235,7 @@ extern long __copy_user_nocache(void *dst, const void __user *src,
static inline int
__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
{
- might_sleep();
+ might_fault();
return __copy_user_nocache(dst, src, size, 1);
}
--
MST
* [tip:sched/mm] x86: uaccess s/might_sleep/might_fault/
2013-05-26 14:31 ` [PATCH v3-resend 09/11] x86: " Michael S. Tsirkin
@ 2013-05-28 13:24 ` tip-bot for Michael S. Tsirkin
0 siblings, 0 replies; 25+ messages in thread
From: tip-bot for Michael S. Tsirkin @ 2013-05-28 13:24 UTC (permalink / raw)
To: linux-tip-commits
Cc: linux-kernel, hpa, mingo, a.p.zijlstra, torvalds, mst, peterz,
akpm, tglx
Commit-ID: 016be2e55d98aee0b97b94b200d6e0e110c8392a
Gitweb: http://git.kernel.org/tip/016be2e55d98aee0b97b94b200d6e0e110c8392a
Author: Michael S. Tsirkin <mst@redhat.com>
AuthorDate: Sun, 26 May 2013 17:31:55 +0300
Committer: Ingo Molnar <mingo@kernel.org>
CommitDate: Tue, 28 May 2013 09:41:10 +0200
x86: uaccess s/might_sleep/might_fault/
The only reason uaccess routines might sleep
is if they fault. Make this explicit.
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1369577426-26721-9-git-send-email-mst@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
---
arch/x86/include/asm/uaccess_64.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 142810c..4f7923d 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -235,7 +235,7 @@ extern long __copy_user_nocache(void *dst, const void __user *src,
static inline int
__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
{
- might_sleep();
+ might_fault();
return __copy_user_nocache(dst, src, size, 1);
}
* [PATCH v3-resend 10/11] kernel: drop voluntary schedule from might_fault
2013-05-26 14:21 [PATCH v3-resend 00/11] uaccess: better might_sleep/might_fault behavior Michael S. Tsirkin
` (8 preceding siblings ...)
2013-05-26 14:31 ` [PATCH v3-resend 09/11] x86: " Michael S. Tsirkin
@ 2013-05-26 14:32 ` Michael S. Tsirkin
2013-05-28 13:25 ` [tip:sched/mm] mm, sched: Drop voluntary schedule from might_fault() tip-bot for Michael S. Tsirkin
2013-05-26 14:32 ` [PATCH v3-resend 11/11] kernel: uaccess in atomic with pagefault_disable Michael S. Tsirkin
2013-05-27 16:35 ` [PATCH v3-resend 00/11] uaccess: better might_sleep/might_fault behavior Peter Zijlstra
11 siblings, 1 reply; 25+ messages in thread
From: Michael S. Tsirkin @ 2013-05-26 14:32 UTC (permalink / raw)
To: linux-kernel; +Cc: Ingo Molnar, Peter Zijlstra, Arnd Bergmann, linux-mm
might_fault is called from functions like copy_to_user
which most callers expect to be very fast, like
a couple of instructions. So functions like memcpy_toiovec call them
many times in a loop.
But might_fault calls might_sleep() and with CONFIG_PREEMPT_VOLUNTARY
this results in a function call.
Let's not do this - just call __might_sleep that produces
a diagnostic for sleep within atomic, but drop
might_preempt().
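For reference, the definitions being contrasted look roughly like this
(an approximation of the generic kernel.h macros of the time; the
voluntary-preemption step referred to as might_preempt() above shows up
as might_resched() here):

/* sketch only - approximate shape of the generic definitions */
#ifdef CONFIG_PREEMPT_VOLUNTARY
extern int _cond_resched(void);
# define might_resched() _cond_resched()	/* the call this patch avoids */
#else
# define might_resched() do { } while (0)
#endif

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
# define might_sleep() \
	do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
#else
# define might_sleep() do { might_resched(); } while (0)
#endif

Calling __might_sleep() directly keeps the sleep-in-atomic diagnostic but
drops the preemption point from the copy_to_user() fast path.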
Here's a test sending traffic between the VM and the host,
host is built with CONFIG_PREEMPT_VOLUNTARY:
Before:
incoming: 7122.77 Mb/s
outgoing: 8480.37 Mb/s
after:
incoming: 8619.24 Mb/s
outgoing: 9455.42 Mb/s
As a side effect, this fixes an issue pointed
out by Ingo: might_fault might schedule differently
depending on PROVE_LOCKING. Now there's no
preemption point in either case, so it's consistent.
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
---
include/linux/kernel.h | 2 +-
mm/memory.c | 3 ++-
2 files changed, 3 insertions(+), 2 deletions(-)
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index e96329c..c514c06 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -198,7 +198,7 @@ void might_fault(void);
#else
static inline void might_fault(void)
{
- might_sleep();
+ __might_sleep(__FILE__, __LINE__, 0);
}
#endif
diff --git a/mm/memory.c b/mm/memory.c
index 6dc1882..c1f190f 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4222,7 +4222,8 @@ void might_fault(void)
if (segment_eq(get_fs(), KERNEL_DS))
return;
- might_sleep();
+ __might_sleep(__FILE__, __LINE__, 0);
+
/*
* it would be nicer only to annotate paths which are not under
* pagefault_disable, however that requires a larger audit and
--
MST
* [tip:sched/mm] mm, sched: Drop voluntary schedule from might_fault()
2013-05-26 14:32 ` [PATCH v3-resend 10/11] kernel: drop voluntary schedule from might_fault Michael S. Tsirkin
@ 2013-05-28 13:25 ` tip-bot for Michael S. Tsirkin
0 siblings, 0 replies; 25+ messages in thread
From: tip-bot for Michael S. Tsirkin @ 2013-05-28 13:25 UTC (permalink / raw)
To: linux-tip-commits
Cc: linux-kernel, hpa, mingo, a.p.zijlstra, torvalds, mst, peterz,
akpm, tglx
Commit-ID: 114276ac0a3beb9c391a410349bd770653e185ce
Gitweb: http://git.kernel.org/tip/114276ac0a3beb9c391a410349bd770653e185ce
Author: Michael S. Tsirkin <mst@redhat.com>
AuthorDate: Sun, 26 May 2013 17:32:13 +0300
Committer: Ingo Molnar <mingo@kernel.org>
CommitDate: Tue, 28 May 2013 09:41:11 +0200
mm, sched: Drop voluntary schedule from might_fault()
might_fault() is called from functions like copy_to_user()
which most callers expect to be very fast, like a couple of
instructions.
So functions like memcpy_toiovec() call them many times in a loop.
But might_fault() calls might_sleep() and with CONFIG_PREEMPT_VOLUNTARY
this results in a function call.
Let's not do this - just call __might_sleep() that produces
a diagnostic for sleep within atomic, but drop
might_preempt().
Here's a test sending traffic between the VM and the host,
host is built with CONFIG_PREEMPT_VOLUNTARY:
before:
incoming: 7122.77 Mb/s
outgoing: 8480.37 Mb/s
after:
incoming: 8619.24 Mb/s
outgoing: 9455.42 Mb/s
As a side effect, this fixes an issue pointed
out by Ingo: might_fault might schedule differently
depending on PROVE_LOCKING. Now there's no
preemption point in either case, so it's consistent.
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1369577426-26721-10-git-send-email-mst@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
---
include/linux/kernel.h | 2 +-
mm/memory.c | 3 ++-
2 files changed, 3 insertions(+), 2 deletions(-)
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index e9ef6d6..24719ea 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -198,7 +198,7 @@ void might_fault(void);
#else
static inline void might_fault(void)
{
- might_sleep();
+ __might_sleep(__FILE__, __LINE__, 0);
}
#endif
diff --git a/mm/memory.c b/mm/memory.c
index 6dc1882..c1f190f 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4222,7 +4222,8 @@ void might_fault(void)
if (segment_eq(get_fs(), KERNEL_DS))
return;
- might_sleep();
+ __might_sleep(__FILE__, __LINE__, 0);
+
/*
* it would be nicer only to annotate paths which are not under
* pagefault_disable, however that requires a larger audit and
* [PATCH v3-resend 11/11] kernel: uaccess in atomic with pagefault_disable
2013-05-26 14:21 [PATCH v3-resend 00/11] uaccess: better might_sleep/might_fault behavior Michael S. Tsirkin
` (9 preceding siblings ...)
2013-05-26 14:32 ` [PATCH v3-resend 10/11] kernel: drop voluntary schedule from might_fault Michael S. Tsirkin
@ 2013-05-26 14:32 ` Michael S. Tsirkin
2013-05-28 13:27 ` [tip:sched/mm] mm, sched: Allow uaccess in atomic with pagefault_disable() tip-bot for Michael S. Tsirkin
2013-05-27 16:35 ` [PATCH v3-resend 00/11] uaccess: better might_sleep/might_fault behavior Peter Zijlstra
11 siblings, 1 reply; 25+ messages in thread
From: Michael S. Tsirkin @ 2013-05-26 14:32 UTC (permalink / raw)
To: linux-kernel; +Cc: Ingo Molnar, Peter Zijlstra, Arnd Bergmann, linux-mm
This changes might_fault so that it does not
trigger a false positive diagnostic for e.g. the following
sequence:
spin_lock_irqsave
pagefault_disable
copy_to_user
pagefault_enable
spin_unlock_irqrestore
In particular vhost wants to do this, to call
socket ops from under a lock.
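For illustration, a minimal sketch of that calling pattern (hypothetical
code, not taken from vhost; the structure and field names are made up):

/*
 * Hypothetical example: uaccess from atomic context, made safe by
 * pagefault_disable(), so the copy may fail with -EFAULT but can never
 * fault in pages or sleep.
 */
static int push_stats_locked(struct foo *f, void __user *dst)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&f->lock, flags);
	pagefault_disable();
	ret = copy_to_user(dst, &f->stats, sizeof(f->stats)) ? -EFAULT : 0;
	pagefault_enable();
	spin_unlock_irqrestore(&f->lock, flags);

	return ret;
}

Without this change, the might_fault() inside copy_to_user() can emit a
false-positive sleep-in-atomic warning here even though the sequence is
safe.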
There are 3 cases to consider:
CONFIG_PROVE_LOCKING - might_fault is non-inline
so it's easy to move the in_atomic test to fix
up the false positive warning.
CONFIG_DEBUG_ATOMIC_SLEEP - might_fault
is currently inline, but we are calling a
non-inline __might_sleep anyway,
so let's use the non-inline version of might_fault
that does the right thing.
!CONFIG_DEBUG_ATOMIC_SLEEP && !CONFIG_PROVE_LOCKING
__might_sleep is a nop so might_fault is a nop.
Make this explicit.
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
---
include/linux/kernel.h | 7 ++-----
mm/memory.c | 11 +++++++----
2 files changed, 9 insertions(+), 9 deletions(-)
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index c514c06..0153be1 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -193,13 +193,10 @@ extern int _cond_resched(void);
(__x < 0) ? -__x : __x; \
})
-#ifdef CONFIG_PROVE_LOCKING
+#if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)
void might_fault(void);
#else
-static inline void might_fault(void)
-{
- __might_sleep(__FILE__, __LINE__, 0);
-}
+static inline void might_fault(void) { }
#endif
extern struct atomic_notifier_head panic_notifier_list;
diff --git a/mm/memory.c b/mm/memory.c
index c1f190f..d7d54a1 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4210,7 +4210,7 @@ void print_vma_addr(char *prefix, unsigned long ip)
up_read(&mm->mmap_sem);
}
-#ifdef CONFIG_PROVE_LOCKING
+#if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)
void might_fault(void)
{
/*
@@ -4222,14 +4222,17 @@ void might_fault(void)
if (segment_eq(get_fs(), KERNEL_DS))
return;
- __might_sleep(__FILE__, __LINE__, 0);
-
/*
* it would be nicer only to annotate paths which are not under
* pagefault_disable, however that requires a larger audit and
* providing helpers like get_user_atomic.
*/
- if (!in_atomic() && current->mm)
+ if (in_atomic())
+ return;
+
+ __might_sleep(__FILE__, __LINE__, 0);
+
+ if (current->mm)
might_lock_read(&current->mm->mmap_sem);
}
EXPORT_SYMBOL(might_fault);
--
MST
* [tip:sched/mm] mm, sched: Allow uaccess in atomic with pagefault_disable()
2013-05-26 14:32 ` [PATCH v3-resend 11/11] kernel: uaccess in atomic with pagefault_disable Michael S. Tsirkin
@ 2013-05-28 13:27 ` tip-bot for Michael S. Tsirkin
0 siblings, 0 replies; 25+ messages in thread
From: tip-bot for Michael S. Tsirkin @ 2013-05-28 13:27 UTC (permalink / raw)
To: linux-tip-commits
Cc: linux-kernel, hpa, mingo, a.p.zijlstra, torvalds, mst, peterz,
akpm, tglx
Commit-ID: 662bbcb2747c2422cf98d3d97619509379eee466
Gitweb: http://git.kernel.org/tip/662bbcb2747c2422cf98d3d97619509379eee466
Author: Michael S. Tsirkin <mst@redhat.com>
AuthorDate: Sun, 26 May 2013 17:32:23 +0300
Committer: Ingo Molnar <mingo@kernel.org>
CommitDate: Tue, 28 May 2013 09:41:11 +0200
mm, sched: Allow uaccess in atomic with pagefault_disable()
This changes might_fault() so that it does not
trigger a false positive diagnostic for e.g. the following
sequence:
spin_lock_irqsave()
pagefault_disable()
copy_to_user()
pagefault_enable()
spin_unlock_irqrestore()
In particular vhost wants to do this, to call
socket ops from under a lock.
There are 3 cases to consider:
- CONFIG_PROVE_LOCKING - might_fault is non-inline
so it's easy to move the in_atomic test to fix
up the false positive warning.
- CONFIG_DEBUG_ATOMIC_SLEEP - might_fault
is currently inline, but we are calling a
non-inline __might_sleep anyway,
so let's use the non-inline version of might_fault
that does the right thing.
- !CONFIG_DEBUG_ATOMIC_SLEEP && !CONFIG_PROVE_LOCKING
__might_sleep is a nop so might_fault is a nop.
Make this explicit.
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1369577426-26721-11-git-send-email-mst@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
---
include/linux/kernel.h | 7 ++-----
mm/memory.c | 11 +++++++----
2 files changed, 9 insertions(+), 9 deletions(-)
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 24719ea..4c7e2e5 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -193,13 +193,10 @@ extern int _cond_resched(void);
(__x < 0) ? -__x : __x; \
})
-#ifdef CONFIG_PROVE_LOCKING
+#if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)
void might_fault(void);
#else
-static inline void might_fault(void)
-{
- __might_sleep(__FILE__, __LINE__, 0);
-}
+static inline void might_fault(void) { }
#endif
extern struct atomic_notifier_head panic_notifier_list;
diff --git a/mm/memory.c b/mm/memory.c
index c1f190f..d7d54a1 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4210,7 +4210,7 @@ void print_vma_addr(char *prefix, unsigned long ip)
up_read(&mm->mmap_sem);
}
-#ifdef CONFIG_PROVE_LOCKING
+#if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)
void might_fault(void)
{
/*
@@ -4222,14 +4222,17 @@ void might_fault(void)
if (segment_eq(get_fs(), KERNEL_DS))
return;
- __might_sleep(__FILE__, __LINE__, 0);
-
/*
* it would be nicer only to annotate paths which are not under
* pagefault_disable, however that requires a larger audit and
* providing helpers like get_user_atomic.
*/
- if (!in_atomic() && current->mm)
+ if (in_atomic())
+ return;
+
+ __might_sleep(__FILE__, __LINE__, 0);
+
+ if (current->mm)
might_lock_read(&current->mm->mmap_sem);
}
EXPORT_SYMBOL(might_fault);
* Re: [PATCH v3-resend 00/11] uaccess: better might_sleep/might_fault behavior
2013-05-26 14:21 [PATCH v3-resend 00/11] uaccess: better might_sleep/might_fault behavior Michael S. Tsirkin
` (10 preceding siblings ...)
2013-05-26 14:32 ` [PATCH v3-resend 11/11] kernel: uaccess in atomic with pagefault_disable Michael S. Tsirkin
@ 2013-05-27 16:35 ` Peter Zijlstra
11 siblings, 0 replies; 25+ messages in thread
From: Peter Zijlstra @ 2013-05-27 16:35 UTC (permalink / raw)
To: Michael S. Tsirkin
Cc: linux-kernel, Ingo Molnar, Arnd Bergmann, linux-arch, linux-mm,
kvm
On Sun, May 26, 2013 at 05:21:30PM +0300, Michael S. Tsirkin wrote:
> If the changes look good, would sched maintainers
> please consider merging them through sched/core because of the
> interaction with the scheduler?
>
> Please review, and consider for 3.11.
I'll stick them in my queue, we'll see if anything falls over ;-)