* [PATCH v5 06/10] x86: Temporarily disable SMAP to legally access user pages in kernel mode
@ 2014-05-06 10:08 Feng Wu
2014-05-06 12:04 ` Jan Beulich
From: Feng Wu @ 2014-05-06 10:08 UTC
To: xen-devel
Cc: kevin.tian, Feng Wu, JBeulich, andrew.cooper3, eddie.dong,
jun.nakajima, ian.campbell
Use STAC/CLAC to temporarily disable SMAP to allow legal accesses to
user pages in kernel mode.

STAC/CLAC is not needed for compat_create_bounce_frame, since that code
only accesses the PV guest's kernel stack, which is in ring 1 for 32-bit
PV guests.
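For reference, a minimal sketch of what the stac()/clac() helpers used
below might look like, assuming a boot_cpu_has(X86_FEATURE_SMAP)
predicate; this is illustrative only, the real helpers are introduced
earlier in this series:

    /*
     * Illustrative sketch only -- not part of this patch.  STAC/CLAC
     * toggle EFLAGS.AC, which on SMAP-capable CPUs gates supervisor
     * access to user-mode pages.
     */
    static inline void stac(void)
    {
        if ( boot_cpu_has(X86_FEATURE_SMAP) )
            asm volatile ( "stac" ::: "memory" );
    }

    static inline void clac(void)
    {
        if ( boot_cpu_has(X86_FEATURE_SMAP) )
            asm volatile ( "clac" ::: "memory" );
    }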
Signed-off-by: Feng Wu <feng.wu@intel.com>
---
xen/arch/x86/traps.c | 12 ++++++++++++
xen/arch/x86/usercopy.c | 6 ++++++
xen/arch/x86/x86_64/entry.S | 2 ++
xen/include/asm-x86/uaccess.h | 8 ++++++--
xen/include/asm-x86/x86_64/system.h | 4 +++-
5 files changed, 29 insertions(+), 3 deletions(-)
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index 45070bb..19c96d5 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -3811,6 +3811,18 @@ unsigned long do_get_debugreg(int reg)
void asm_domain_crash_synchronous(unsigned long addr)
{
+ /*
+ * We need to clear the AC bit here because in entry.S AC is set
+ * by ASM_STAC to temporarily allow accesses to user pages, which
+ * SMAP prevents by default.
+ *
+ * On some of the paths leading here clac() is not strictly needed,
+ * but clearing AC once in this function, rather than at every
+ * asm_domain_crash_synchronous() call site, reduces code
+ * duplication and is harmless besides.
+ */
+ clac();
+
if ( addr == 0 )
addr = this_cpu(last_extable_addr);
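AC is simply bit 18 of EFLAGS; a hypothetical helper (not part of the
patch) to inspect it from C would be:

    /* Hypothetical debug helper, not in this patch: report whether
     * EFLAGS.AC (bit 18) is currently set. */
    #define X86_EFLAGS_AC (1UL << 18)

    static inline bool ac_is_set(void)
    {
        unsigned long flags;

        asm volatile ( "pushf; pop %0" : "=r" (flags) );
        return (flags & X86_EFLAGS_AC) != 0;
    }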
diff --git a/xen/arch/x86/usercopy.c b/xen/arch/x86/usercopy.c
index b79202b..4cc78f5 100644
--- a/xen/arch/x86/usercopy.c
+++ b/xen/arch/x86/usercopy.c
@@ -14,6 +14,7 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from, unsigned n)
{
unsigned long __d0, __d1, __d2, __n = n;
+ stac();
asm volatile (
" cmp $"STR(2*BYTES_PER_LONG-1)",%0\n"
" jbe 1f\n"
@@ -42,6 +43,7 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from, unsigned n)
: "=&c" (__n), "=&D" (__d0), "=&S" (__d1), "=&r" (__d2)
: "0" (__n), "1" (to), "2" (from), "3" (__n)
: "memory" );
+ clac();
return __n;
}
@@ -51,6 +53,7 @@ __copy_from_user_ll(void *to, const void __user *from, unsigned n)
{
unsigned long __d0, __d1, __d2, __n = n;
+ stac();
asm volatile (
" cmp $"STR(2*BYTES_PER_LONG-1)",%0\n"
" jbe 1f\n"
@@ -85,6 +88,7 @@ __copy_from_user_ll(void *to, const void __user *from, unsigned n)
: "=&c" (__n), "=&D" (__d0), "=&S" (__d1), "=&r" (__d2)
: "0" (__n), "1" (to), "2" (from), "3" (__n)
: "memory" );
+ clac();
return __n;
}
@@ -113,6 +117,7 @@ copy_to_user(void __user *to, const void *from, unsigned n)
#define __do_clear_user(addr,size) \
do { \
long __d0; \
+ stac(); \
__asm__ __volatile__( \
"0: rep; stosl\n" \
" movl %2,%0\n" \
@@ -126,6 +131,7 @@ do { \
_ASM_EXTABLE(1b,2b) \
: "=&c"(size), "=&D" (__d0) \
: "r"(size & 3), "0"(size / 4), "1"((long)addr), "a"(0)); \
+ clac(); \
} while (0)
/**
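With the stac()/clac() bracketing inside these primitives, callers stay
SMAP-agnostic. A hypothetical caller (illustrative, not from this
patch), relying on the usual return convention of bytes left
unprocessed:

    /* Hypothetical caller, illustrative only: copy_from_user() and
     * clear_user() return the number of bytes NOT processed, so a
     * non-zero result means the guest buffer faulted part-way. */
    static int fetch_then_wipe(void *dst, void __user *src, unsigned int len)
    {
        if ( copy_from_user(dst, src, len) )
            return -EFAULT;
        return clear_user(src, len) ? -EFAULT : 0;
    }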
diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S
index 205251d..dce6eb5 100644
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -379,6 +379,7 @@ __UNLIKELY_END(create_bounce_frame_bad_sp)
movb TRAPBOUNCE_flags(%rdx),%cl
subq $40,%rsi
movq UREGS_ss+8(%rsp),%rax
+ ASM_STAC
.Lft2: movq %rax,32(%rsi) # SS
movq UREGS_rsp+8(%rsp),%rax
.Lft3: movq %rax,24(%rsi) # RSP
@@ -423,6 +424,7 @@ UNLIKELY_END(bounce_failsafe)
.Lft12: movq %rax,8(%rsi) # R11
movq UREGS_rcx+8(%rsp),%rax
.Lft13: movq %rax,(%rsi) # RCX
+ ASM_CLAC
/* Rewrite our stack frame and return to guest-OS mode. */
/* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
/* Also clear AC: alignment checks shouldn't trigger in kernel mode. */
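A C-level analogue of the bracketing added above, illustrative only --
the real code is the assembly itself, with a .Lft* exception-table
fixup attached to every guest-stack store:

    /* Illustrative C analogue of ASM_STAC/ASM_CLAC around the bounce-
     * frame stores; the fault handling of the real code is omitted. */
    static unsigned long *push_bounce_word(unsigned long *guest_rsp,
                                           unsigned long val)
    {
        stac();             /* permit the supervisor write to a user page */
        *--guest_rsp = val; /* one frame word, e.g. saved SS or RSP */
        clac();             /* restore SMAP enforcement */
        return guest_rsp;   /* new guest stack top */
    }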
diff --git a/xen/include/asm-x86/uaccess.h b/xen/include/asm-x86/uaccess.h
index 88b4ba2..92bc322 100644
--- a/xen/include/asm-x86/uaccess.h
+++ b/xen/include/asm-x86/uaccess.h
@@ -146,6 +146,7 @@ struct __large_struct { unsigned long buf[100]; };
* aliasing issues.
*/
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
+ stac(); \
__asm__ __volatile__( \
"1: mov"itype" %"rtype"1,%2\n" \
"2:\n" \
@@ -155,9 +156,11 @@ struct __large_struct { unsigned long buf[100]; };
".previous\n" \
_ASM_EXTABLE(1b, 3b) \
: "=r"(err) \
- : ltype (x), "m"(__m(addr)), "i"(errret), "0"(err))
+ : ltype (x), "m"(__m(addr)), "i"(errret), "0"(err)); \
+ clac();
#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
+ stac(); \
__asm__ __volatile__( \
"1: mov"itype" %2,%"rtype"1\n" \
"2:\n" \
@@ -168,7 +171,8 @@ struct __large_struct { unsigned long buf[100]; };
".previous\n" \
_ASM_EXTABLE(1b, 3b) \
: "=r"(err), ltype (x) \
- : "m"(__m(addr)), "i"(errret), "0"(err))
+ : "m"(__m(addr)), "i"(errret), "0"(err)); \
+ clac();
/**
* __copy_to_user: - Copy a block of data into user space, with less checking
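Usage of the accessors is unchanged by this patch; a hedged example of
a caller (hypothetical names):

    /* Hypothetical caller: __get_user()/__put_user() still return 0
     * on success and -EFAULT on a faulting access; the SMAP toggling
     * is now entirely internal to the accessors. */
    static int bump_guest_counter(unsigned long __user *ctr)
    {
        unsigned long v;

        if ( __get_user(v, ctr) )
            return -EFAULT;
        return __put_user(v + 1, ctr);
    }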
diff --git a/xen/include/asm-x86/x86_64/system.h b/xen/include/asm-x86/x86_64/system.h
index 20f038b..b6341ad1 100644
--- a/xen/include/asm-x86/x86_64/system.h
+++ b/xen/include/asm-x86/x86_64/system.h
@@ -12,6 +12,7 @@
* is the same as the initial value of _o then _n is written to location _p.
*/
#define __cmpxchg_user(_p,_o,_n,_isuff,_oppre,_regtype) \
+ stac(); \
asm volatile ( \
"1: lock; cmpxchg"_isuff" %"_oppre"2,%3\n" \
"2:\n" \
@@ -22,7 +23,8 @@
_ASM_EXTABLE(1b, 3b) \
: "=a" (_o), "=r" (_rc) \
: _regtype (_n), "m" (*__xg((volatile void *)_p)), "0" (_o), "1" (0) \
- : "memory");
+ : "memory"); \
+ clac();
#define cmpxchg_user(_p,_o,_n) \
({ \
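Finally, a hedged sketch of a cmpxchg_user() caller (hypothetical, for
illustration only):

    /* Hypothetical caller: cmpxchg_user() yields -EFAULT if the guest
     * word is unmapped; on return 'seen' holds the value actually
     * observed at *p, so a mismatch means the CAS lost a race. */
    static int try_update_guest_word(unsigned long __user *p,
                                     unsigned long expect, unsigned long new)
    {
        unsigned long seen = expect;

        if ( cmpxchg_user(p, seen, new) )
            return -EFAULT;
        return seen == expect ? 0 : -EBUSY;
    }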
--
1.8.3.1
* Re: [PATCH v5 06/10] x86: Temporarily disable SMAP to legally access user pages in kernel mode
2014-05-06 10:08 [PATCH v5 06/10] x86: Temporarily disable SMAP to legally access user pages in kernel mode Feng Wu
@ 2014-05-06 12:04 ` Jan Beulich
From: Jan Beulich @ 2014-05-06 12:04 UTC
To: Feng Wu
Cc: kevin.tian, ian.campbell, andrew.cooper3, eddie.dong, xen-devel,
jun.nakajima
>>> On 06.05.14 at 12:08, <feng.wu@intel.com> wrote:
> Use STAC/CLAC to temporarily disable SMAP to allow legal accesses to
> user pages in kernel mode.
>
> STAC/CLAC is not needed for compat_create_bounce_frame, since that code
> only accesses the PV guest's kernel stack, which is in ring 1 for 32-bit
> PV guests.
>
> Signed-off-by: Feng Wu <feng.wu@intel.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
> ---
> xen/arch/x86/traps.c | 12 ++++++++++++
> xen/arch/x86/usercopy.c | 6 ++++++
> xen/arch/x86/x86_64/entry.S | 2 ++
> xen/include/asm-x86/uaccess.h | 8 ++++++--
> xen/include/asm-x86/x86_64/system.h | 4 +++-
> 5 files changed, 29 insertions(+), 3 deletions(-)
>
> diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
> index 45070bb..19c96d5 100644
> --- a/xen/arch/x86/traps.c
> +++ b/xen/arch/x86/traps.c
> @@ -3811,6 +3811,18 @@ unsigned long do_get_debugreg(int reg)
>
> void asm_domain_crash_synchronous(unsigned long addr)
> {
> + /*
> + * We need to clear the AC bit here because in entry.S AC is set
> + * by ASM_STAC to temporarily allow accesses to user pages, which
> + * SMAP prevents by default.
> + *
> + * On some of the paths leading here clac() is not strictly needed,
> + * but clearing AC once in this function, rather than at every
> + * asm_domain_crash_synchronous() call site, reduces code
> + * duplication and is harmless besides.
> + */
> + clac();
> +
> if ( addr == 0 )
> addr = this_cpu(last_extable_addr);
>
> diff --git a/xen/arch/x86/usercopy.c b/xen/arch/x86/usercopy.c
> index b79202b..4cc78f5 100644
> --- a/xen/arch/x86/usercopy.c
> +++ b/xen/arch/x86/usercopy.c
> @@ -14,6 +14,7 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from, unsigned n)
> {
> unsigned long __d0, __d1, __d2, __n = n;
>
> + stac();
> asm volatile (
> " cmp $"STR(2*BYTES_PER_LONG-1)",%0\n"
> " jbe 1f\n"
> @@ -42,6 +43,7 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from, unsigned n)
> : "=&c" (__n), "=&D" (__d0), "=&S" (__d1), "=&r" (__d2)
> : "0" (__n), "1" (to), "2" (from), "3" (__n)
> : "memory" );
> + clac();
>
> return __n;
> }
> @@ -51,6 +53,7 @@ __copy_from_user_ll(void *to, const void __user *from, unsigned n)
> {
> unsigned long __d0, __d1, __d2, __n = n;
>
> + stac();
> asm volatile (
> " cmp $"STR(2*BYTES_PER_LONG-1)",%0\n"
> " jbe 1f\n"
> @@ -85,6 +88,7 @@ __copy_from_user_ll(void *to, const void __user *from, unsigned n)
> : "=&c" (__n), "=&D" (__d0), "=&S" (__d1), "=&r" (__d2)
> : "0" (__n), "1" (to), "2" (from), "3" (__n)
> : "memory" );
> + clac();
>
> return __n;
> }
> @@ -113,6 +117,7 @@ copy_to_user(void __user *to, const void *from, unsigned n)
> #define __do_clear_user(addr,size) \
> do { \
> long __d0; \
> + stac(); \
> __asm__ __volatile__( \
> "0: rep; stosl\n" \
> " movl %2,%0\n" \
> @@ -126,6 +131,7 @@ do { \
> _ASM_EXTABLE(1b,2b) \
> : "=&c"(size), "=&D" (__d0) \
> : "r"(size & 3), "0"(size / 4), "1"((long)addr), "a"(0)); \
> + clac(); \
> } while (0)
>
> /**
> diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S
> index 205251d..dce6eb5 100644
> --- a/xen/arch/x86/x86_64/entry.S
> +++ b/xen/arch/x86/x86_64/entry.S
> @@ -379,6 +379,7 @@ __UNLIKELY_END(create_bounce_frame_bad_sp)
> movb TRAPBOUNCE_flags(%rdx),%cl
> subq $40,%rsi
> movq UREGS_ss+8(%rsp),%rax
> + ASM_STAC
> .Lft2: movq %rax,32(%rsi) # SS
> movq UREGS_rsp+8(%rsp),%rax
> .Lft3: movq %rax,24(%rsi) # RSP
> @@ -423,6 +424,7 @@ UNLIKELY_END(bounce_failsafe)
> .Lft12: movq %rax,8(%rsi) # R11
> movq UREGS_rcx+8(%rsp),%rax
> .Lft13: movq %rax,(%rsi) # RCX
> + ASM_CLAC
> /* Rewrite our stack frame and return to guest-OS mode. */
> /* IA32 Ref. Vol. 3: TF, VM, RF and NT flags are cleared on trap. */
> /* Also clear AC: alignment checks shouldn't trigger in kernel mode. */
> diff --git a/xen/include/asm-x86/uaccess.h b/xen/include/asm-x86/uaccess.h
> index 88b4ba2..92bc322 100644
> --- a/xen/include/asm-x86/uaccess.h
> +++ b/xen/include/asm-x86/uaccess.h
> @@ -146,6 +146,7 @@ struct __large_struct { unsigned long buf[100]; };
> * aliasing issues.
> */
> #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
> + stac(); \
> __asm__ __volatile__( \
> "1: mov"itype" %"rtype"1,%2\n" \
> "2:\n" \
> @@ -155,9 +156,11 @@ struct __large_struct { unsigned long buf[100]; };
> ".previous\n" \
> _ASM_EXTABLE(1b, 3b) \
> : "=r"(err) \
> - : ltype (x), "m"(__m(addr)), "i"(errret), "0"(err))
> + : ltype (x), "m"(__m(addr)), "i"(errret), "0"(err)); \
> + clac();
>
> #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
> + stac(); \
> __asm__ __volatile__( \
> "1: mov"itype" %2,%"rtype"1\n" \
> "2:\n" \
> @@ -168,7 +171,8 @@ struct __large_struct { unsigned long buf[100]; };
> ".previous\n" \
> _ASM_EXTABLE(1b, 3b) \
> : "=r"(err), ltype (x) \
> - : "m"(__m(addr)), "i"(errret), "0"(err))
> + : "m"(__m(addr)), "i"(errret), "0"(err)); \
> + clac();
>
> /**
> * __copy_to_user: - Copy a block of data into user space, with less checking
> diff --git a/xen/include/asm-x86/x86_64/system.h b/xen/include/asm-x86/x86_64/system.h
> index 20f038b..b6341ad1 100644
> --- a/xen/include/asm-x86/x86_64/system.h
> +++ b/xen/include/asm-x86/x86_64/system.h
> @@ -12,6 +12,7 @@
> * is the same as the initial value of _o then _n is written to location _p.
> */
> #define __cmpxchg_user(_p,_o,_n,_isuff,_oppre,_regtype) \
> + stac(); \
> asm volatile ( \
> "1: lock; cmpxchg"_isuff" %"_oppre"2,%3\n" \
> "2:\n" \
> @@ -22,7 +23,8 @@
> _ASM_EXTABLE(1b, 3b) \
> : "=a" (_o), "=r" (_rc) \
> : _regtype (_n), "m" (*__xg((volatile void *)_p)), "0" (_o), "1" (0) \
> - : "memory");
> + : "memory"); \
> + clac();
>
> #define cmpxchg_user(_p,_o,_n) \
> ({ \
> --
> 1.8.3.1