[PATCH] x86/hvm: Fix mapping corner case during task switching.
@ 2018-08-28 18:17 Andrew Cooper
From: Andrew Cooper @ 2018-08-28 18:17 UTC (permalink / raw)
To: Xen-devel; +Cc: Andrew Cooper, Wei Liu, Jan Beulich, Roger Pau Monné
hvm_map_entry() can fail for a number of reasons, including a misaligned
LDT/GDT access which crosses a 4K boundary. Architecturally speaking, this
should be fixed, but Long Mode doesn't support task switches, and no 32-bit
OS is going to misalign its LDT/GDT base, which is why this task isn't very
high on the TODO list.
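As an illustrative sketch (a hypothetical helper, not part of this patch),
the 8-byte descriptor read straddles a 4K page boundary exactly when the
entry's page offset exceeds 0xff8:

    /* Hypothetical helper, for illustration only. */
    static bool_t desc_crosses_page(uint64_t base, uint16_t sel)
    {
        /* Same address computation as the hvm_map_entry() call site below. */
        uint64_t addr = base + (sel & 0xfff8);

        /* An 8-byte entry straddles a 4K boundary iff its offset is above 0xff8. */
        return (addr & 0xfff) > 0xff8;
    }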
However, the hvm_map_fail error label returns failure without raising an
exception, which interferes with hvm_task_switch()'s exception tracking, and
can cause it to finish and return to guest context as if the task switch had
completed successfully.
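In simplified form (a sketch abbreviated from the hunk further down), the
caller treats any non-zero return as "a fault has already been injected":

    exn_raised = 0;
    if ( task_switch_load_seg(x86_seg_es, tss.es, new_cpl, tss.eflags) ||
         /* ... cs, ss, ds, fs ... */
         task_switch_load_seg(x86_seg_gs, tss.gs, new_cpl, tss.eflags) )
        exn_raised = 1; /* assumes the callee injected an exception */

A non-zero return from the hvm_map_fail path satisfied this check without
any exception actually being pending in the guest.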
Resolve this corner case by folding all the failure paths together, which
causes an hvm_map_entry() failure to result in #TS[SEL]. hvm_unmap_entry()
copes fine with a NULL pointer, so it can be called unconditionally.
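The single failure path therefore becomes (matching the hunk below):

    fault:
        hvm_unmap_entry(pdesc); /* pdesc may still be NULL; this is safe */
        hvm_inject_hw_exception(fault_type, sel & 0xfffc);

        return 1;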
In practice, this is just a latent corner case, as all hvm_map_entry()
failures crash the domain, but it should be fixed nevertheless.
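For reference, a simplified sketch of why the case is latent: every failure
path inside hvm_map_entry() ends in a domain crash before returning NULL.

    /* Simplified sketch of hvm_map_entry()'s failure handling. */
    static void *hvm_map_entry(unsigned long va, bool_t *writable)
    {
        /* ... mapping attempts, each jumping to fail on error ... */
     fail:
        domain_crash(current->domain);
        return NULL;
    }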
Finally, rename hvm_load_segment_selector() to task_switch_load_seg() to avoid
giving the impression that it is usable for general segment loading.
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Jan Beulich <JBeulich@suse.com>
CC: Wei Liu <wei.liu2@citrix.com>
CC: Roger Pau Monné <roger.pau@citrix.com>
---
xen/arch/x86/hvm/hvm.c | 51 +++++++++++++++++++++++++-------------------------
1 file changed, 25 insertions(+), 26 deletions(-)
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 72c51fa..2eaabf1 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2704,11 +2704,11 @@ static void hvm_unmap_entry(void *p)
hvm_unmap_guest_frame(p, 0);
}
-static int hvm_load_segment_selector(
+static int task_switch_load_seg(
enum x86_segment seg, uint16_t sel, unsigned int cpl, unsigned int eflags)
{
struct segment_register desctab, segr;
- struct desc_struct *pdesc, desc;
+ struct desc_struct *pdesc = NULL, desc;
u8 dpl, rpl;
bool_t writable;
int fault_type = TRAP_invalid_tss;
@@ -2728,7 +2728,7 @@ static int hvm_load_segment_selector(
if ( (sel & 0xfffc) == 0 )
{
if ( (seg == x86_seg_cs) || (seg == x86_seg_ss) )
- goto fail;
+ goto fault;
memset(&segr, 0, sizeof(segr));
segr.sel = sel;
hvm_set_segment_register(v, seg, &segr);
@@ -2737,29 +2737,29 @@ static int hvm_load_segment_selector(
/* LDT descriptor must be in the GDT. */
if ( (seg == x86_seg_ldtr) && (sel & 4) )
- goto fail;
+ goto fault;
hvm_get_segment_register(
v, (sel & 4) ? x86_seg_ldtr : x86_seg_gdtr, &desctab);
/* Segment not valid for use (cooked meaning of .p)? */
if ( !desctab.p )
- goto fail;
+ goto fault;
/* Check against descriptor table limit. */
if ( ((sel & 0xfff8) + 7) > desctab.limit )
- goto fail;
+ goto fault;
pdesc = hvm_map_entry(desctab.base + (sel & 0xfff8), &writable);
if ( pdesc == NULL )
- goto hvm_map_fail;
+ goto fault;
do {
desc = *pdesc;
/* LDT descriptor is a system segment. All others are code/data. */
if ( (desc.b & (1u<<12)) == ((seg == x86_seg_ldtr) << 12) )
- goto unmap_and_fail;
+ goto fault;
dpl = (desc.b >> 13) & 3;
rpl = sel & 3;
@@ -2769,27 +2769,27 @@ static int hvm_load_segment_selector(
case x86_seg_cs:
/* Code segment? */
if ( !(desc.b & _SEGMENT_CODE) )
- goto unmap_and_fail;
+ goto fault;
/* Non-conforming segment: check DPL against RPL. */
if ( !(desc.b & _SEGMENT_EC) && (dpl != rpl) )
- goto unmap_and_fail;
+ goto fault;
break;
case x86_seg_ss:
/* Writable data segment? */
if ( (desc.b & (_SEGMENT_CODE|_SEGMENT_WR)) != _SEGMENT_WR )
- goto unmap_and_fail;
+ goto fault;
if ( (dpl != cpl) || (dpl != rpl) )
- goto unmap_and_fail;
+ goto fault;
break;
case x86_seg_ldtr:
/* LDT system segment? */
if ( (desc.b & _SEGMENT_TYPE) != (2u<<8) )
- goto unmap_and_fail;
+ goto fault;
goto skip_accessed_flag;
default:
/* Readable code or data segment? */
if ( (desc.b & (_SEGMENT_CODE|_SEGMENT_WR)) == _SEGMENT_CODE )
- goto unmap_and_fail;
+ goto fault;
/*
* Data or non-conforming code segment:
* check DPL against RPL and CPL.
@@ -2797,7 +2797,7 @@ static int hvm_load_segment_selector(
if ( ((desc.b & (_SEGMENT_EC|_SEGMENT_CODE)) !=
(_SEGMENT_EC|_SEGMENT_CODE))
&& ((dpl < cpl) || (dpl < rpl)) )
- goto unmap_and_fail;
+ goto fault;
break;
}
@@ -2806,7 +2806,7 @@ static int hvm_load_segment_selector(
{
fault_type = (seg != x86_seg_ss) ? TRAP_no_segment
: TRAP_stack_error;
- goto unmap_and_fail;
+ goto fault;
}
} while ( !(desc.b & 0x100) && /* Ensure Accessed flag is set */
writable && /* except if we are to discard writes */
@@ -2831,11 +2831,10 @@ static int hvm_load_segment_selector(
return 0;
- unmap_and_fail:
+ fault:
hvm_unmap_entry(pdesc);
- fail:
hvm_inject_hw_exception(fault_type, sel & 0xfffc);
- hvm_map_fail:
+
return 1;
}
@@ -3008,7 +3007,7 @@ void hvm_task_switch(
new_cpl = tss.eflags & X86_EFLAGS_VM ? 3 : tss.cs & 3;
- if ( hvm_load_segment_selector(x86_seg_ldtr, tss.ldt, new_cpl, 0) )
+ if ( task_switch_load_seg(x86_seg_ldtr, tss.ldt, new_cpl, 0) )
goto out;
rc = hvm_set_cr3(tss.cr3, 1);
@@ -3029,12 +3028,12 @@ void hvm_task_switch(
regs->rdi = tss.edi;
exn_raised = 0;
- if ( hvm_load_segment_selector(x86_seg_es, tss.es, new_cpl, tss.eflags) ||
- hvm_load_segment_selector(x86_seg_cs, tss.cs, new_cpl, tss.eflags) ||
- hvm_load_segment_selector(x86_seg_ss, tss.ss, new_cpl, tss.eflags) ||
- hvm_load_segment_selector(x86_seg_ds, tss.ds, new_cpl, tss.eflags) ||
- hvm_load_segment_selector(x86_seg_fs, tss.fs, new_cpl, tss.eflags) ||
- hvm_load_segment_selector(x86_seg_gs, tss.gs, new_cpl, tss.eflags) )
+ if ( task_switch_load_seg(x86_seg_es, tss.es, new_cpl, tss.eflags) ||
+ task_switch_load_seg(x86_seg_cs, tss.cs, new_cpl, tss.eflags) ||
+ task_switch_load_seg(x86_seg_ss, tss.ss, new_cpl, tss.eflags) ||
+ task_switch_load_seg(x86_seg_ds, tss.ds, new_cpl, tss.eflags) ||
+ task_switch_load_seg(x86_seg_fs, tss.fs, new_cpl, tss.eflags) ||
+ task_switch_load_seg(x86_seg_gs, tss.gs, new_cpl, tss.eflags) )
exn_raised = 1;
if ( taskswitch_reason == TSW_call_or_int )
--
2.1.4
Re: [PATCH] x86/hvm: Fix mapping corner case during task switching.
@ 2018-08-30 15:18 Jan Beulich
From: Jan Beulich @ 2018-08-30 15:18 UTC (permalink / raw)
To: Andrew Cooper; +Cc: Xen-devel, Wei Liu, Roger Pau Monné
>>> On 28.08.18 at 20:17, <andrew.cooper3@citrix.com> wrote:
> hvm_map_entry() can fail for a number of reasons, including a misaligned
> LDT/GDT access which crosses a 4K boundary. Architecturally speaking, this
> should be fixed, but Long Mode doesn't support task switches, and no 32-bit
> OS is going to misalign its LDT/GDT base, which is why this task isn't very
> high on the TODO list.
>
> However, the hvm_map_fail error label returns failure without raising an
> exception, which interferes with hvm_task_switch()'s exception tracking, and
> can cause it to finish and return to guest context as if the task switch had
> completed successfully.
>
> Resolve this corner case by folding all the failure paths together, which
> causes an hvm_map_entry() failure to result in #TS[SEL]. hvm_unmap_entry()
> copes fine with a NULL pointer, so it can be called unconditionally.
>
> In practice, this is just a latent corner case, as all hvm_map_entry()
> failures crash the domain, but it should be fixed nevertheless.
>
> Finally, rename hvm_load_segment_selector() to task_switch_load_seg() to avoid
> giving the impression that it is usable for general segment loading.
>
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: Jan Beulich <jbeulich@suse.com>