From: Andrew Cooper <andrew.cooper3@citrix.com>
To: Xen-devel <xen-devel@lists.xen.org>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>,
Jan Beulich <JBeulich@suse.com>
Subject: [PATCH v3 24/24] x86/emul: Use system-segment relative memory accesses
Date: Wed, 30 Nov 2016 13:50:41 +0000
Message-ID: <1480513841-7565-25-git-send-email-andrew.cooper3@citrix.com>
In-Reply-To: <1480513841-7565-1-git-send-email-andrew.cooper3@citrix.com>
With hvm_virtual_to_linear_addr() now capable of performing proper
system-segment relative memory accesses, avoid open-coding the address and
limit calculations locally.

When a table spans the 4GB boundary (32bit) or the non-canonical boundary
(64bit), segmentation errors are now raised.  Previously, the use of
x86_seg_none caused segmentation to be skipped entirely: the linear address
was truncated through the pagewalk, and could come out valid on the far side
of the boundary.
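
As a concrete illustration of the wrap described above (hypothetical
values, for illustration only; not taken from this patch):

    /* Hypothetical values for illustration; not part of this patch. */
    uint32_t gdt_base = 0xfffffff8;  /* GDT base just below the 4GB boundary */
    uint16_t sel      = 0x0010;      /* selector; descriptor at offset 0x10 */

    /*
     * Old behaviour: an x86_seg_none access open-coded the address, the
     * 32-bit sum wrapped, and the truncated result fed the pagewalk.
     */
    uint32_t addr = gdt_base + (sel & 0xfff8);  /* wraps to 0x00000008 */

    /*
     * New behaviour: the access goes through x86_seg_gdtr, so
     * hvm_virtual_to_linear_addr() applies the limit and wrap checks and
     * a segmentation error (#GP) is raised instead.
     */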
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Signed-off-by: Jan Beulich <JBeulich@suse.com>
Reviewed-by: George Dunlap <george.dunlap@citrix.com>
---
v2:
* Shorten exception handling
* Replace ->cmpxchg() assertion with proper exception handling
---
xen/arch/x86/hvm/hvm.c | 8 +++
xen/arch/x86/x86_emulate/x86_emulate.c | 123 +++++++++++++++++++++------------
2 files changed, 85 insertions(+), 46 deletions(-)
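
Not part of the patch: the recurring conversion in the hunks below,
condensed here from the first ioport_access_check() change rather than
quoted verbatim.  Accesses through x86_seg_none with open-coded base/limit
handling become segment-relative accesses, whose X86EMUL_EXCEPTION results
are surfaced as #GP when no event has been latched:

    /* Before: manual limit check plus an unsegmented, open-coded address. */
    generate_exception_if(tr.limit < 0x67, EXC_GP, 0);
    if ( (rc = read_ulong(x86_seg_none, tr.base + 0x66, &iobmp, 2, ctxt, ops)) )
        return rc;

    /* After: segment-relative access; segmentation performs the checks. */
    switch ( rc = read_ulong(x86_seg_tr, 0x66, &iobmp, 2, ctxt, ops) )
    {
    case X86EMUL_OKAY:
        break;

    case X86EMUL_EXCEPTION:
        /* Raise #GP ourselves if segmentation failed without latching
         * an event; otherwise propagate the pending event via rc. */
        generate_exception_if(!ctxt->event_pending, EXC_GP, 0);
        /* fallthrough */

    default:
        return rc;
    }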
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 426edee..599363b 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2470,6 +2470,14 @@ bool_t hvm_virtual_to_linear_addr(
unsigned long addr = offset, last_byte;
bool_t okay = 0;
+ /*
+ * These checks are for a memory access through an active segment.
+ *
+ * It is expected that the access rights of reg are suitable for seg (and
+ * that this is enforced at the point that seg is loaded).
+ */
+ ASSERT(seg < x86_seg_none);
+
if ( !(current->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE) )
{
/*
diff --git a/xen/arch/x86/x86_emulate/x86_emulate.c b/xen/arch/x86/x86_emulate/x86_emulate.c
index 0fb2c09..c18adbe 100644
--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -1181,20 +1181,36 @@ static int ioport_access_check(
return rc;
/* Ensure the TSS has an io-bitmap-offset field. */
- generate_exception_if(tr.attr.fields.type != 0xb ||
- tr.limit < 0x67, EXC_GP, 0);
+ generate_exception_if(tr.attr.fields.type != 0xb, EXC_GP, 0);
- if ( (rc = read_ulong(x86_seg_none, tr.base + 0x66,
- &iobmp, 2, ctxt, ops)) )
+ switch ( rc = read_ulong(x86_seg_tr, 0x66, &iobmp, 2, ctxt, ops) )
+ {
+ case X86EMUL_OKAY:
+ break;
+
+ case X86EMUL_EXCEPTION:
+ generate_exception_if(!ctxt->event_pending, EXC_GP, 0);
+ /* fallthrough */
+
+ default:
return rc;
+ }
- /* Ensure TSS includes two bytes including byte containing first port. */
- iobmp += first_port / 8;
- generate_exception_if(tr.limit <= iobmp, EXC_GP, 0);
+ /* Read two bytes including byte containing first port. */
+ switch ( rc = read_ulong(x86_seg_tr, iobmp + first_port / 8,
+ &iobmp, 2, ctxt, ops) )
+ {
+ case X86EMUL_OKAY:
+ break;
+
+ case X86EMUL_EXCEPTION:
+ generate_exception_if(!ctxt->event_pending, EXC_GP, 0);
+ /* fallthrough */
- if ( (rc = read_ulong(x86_seg_none, tr.base + iobmp,
- &iobmp, 2, ctxt, ops)) )
+ default:
return rc;
+ }
+
generate_exception_if(iobmp & (((1 << bytes) - 1) << (first_port & 7)),
EXC_GP, 0);
@@ -1317,9 +1333,12 @@ realmode_load_seg(
struct x86_emulate_ctxt *ctxt,
const struct x86_emulate_ops *ops)
{
- int rc = ops->read_segment(seg, sreg, ctxt);
+ int rc;
+
+ if ( !ops->read_segment )
+ return X86EMUL_UNHANDLEABLE;
- if ( !rc )
+ if ( (rc = ops->read_segment(seg, sreg, ctxt)) == X86EMUL_OKAY )
{
sreg->sel = sel;
sreg->base = (uint32_t)sel << 4;
@@ -1336,7 +1355,7 @@ protmode_load_seg(
struct x86_emulate_ctxt *ctxt,
const struct x86_emulate_ops *ops)
{
- struct segment_register desctab;
+ enum x86_segment sel_seg = (sel & 4) ? x86_seg_ldtr : x86_seg_gdtr;
struct { uint32_t a, b; } desc;
uint8_t dpl, rpl;
int cpl = get_cpl(ctxt, ops);
@@ -1369,21 +1388,19 @@ protmode_load_seg(
if ( !is_x86_user_segment(seg) && (sel & 4) )
goto raise_exn;
- if ( (rc = ops->read_segment((sel & 4) ? x86_seg_ldtr : x86_seg_gdtr,
- &desctab, ctxt)) )
- return rc;
-
- /* Segment not valid for use (cooked meaning of .p)? */
- if ( !desctab.attr.fields.p )
- goto raise_exn;
+ switch ( rc = ops->read(sel_seg, sel & 0xfff8, &desc, sizeof(desc), ctxt) )
+ {
+ case X86EMUL_OKAY:
+ break;
- /* Check against descriptor table limit. */
- if ( ((sel & 0xfff8) + 7) > desctab.limit )
- goto raise_exn;
+ case X86EMUL_EXCEPTION:
+ if ( !ctxt->event_pending )
+ goto raise_exn;
+ /* fallthrough */
- if ( (rc = ops->read(x86_seg_none, desctab.base + (sel & 0xfff8),
- &desc, sizeof(desc), ctxt)) )
+ default:
return rc;
+ }
if ( !is_x86_user_segment(seg) )
{
@@ -1471,9 +1488,20 @@ protmode_load_seg(
{
uint32_t new_desc_b = desc.b | a_flag;
- if ( (rc = ops->cmpxchg(x86_seg_none, desctab.base + (sel & 0xfff8) + 4,
- &desc.b, &new_desc_b, 4, ctxt)) != 0 )
+ switch ( (rc = ops->cmpxchg(sel_seg, (sel & 0xfff8) + 4, &desc.b,
+ &new_desc_b, sizeof(desc.b), ctxt)) )
+ {
+ case X86EMUL_OKAY:
+ break;
+
+ case X86EMUL_EXCEPTION:
+ if ( !ctxt->event_pending )
+ goto raise_exn;
+ /* fallthrough */
+
+ default:
return rc;
+ }
/* Force the Accessed flag in our local copy. */
desc.b = new_desc_b;
@@ -1507,8 +1535,7 @@ load_seg(
struct segment_register reg;
int rc;
- if ( (ops->read_segment == NULL) ||
- (ops->write_segment == NULL) )
+ if ( !ops->write_segment )
return X86EMUL_UNHANDLEABLE;
if ( !sreg )
@@ -1636,8 +1663,7 @@ static int inject_swint(enum x86_swint_type type,
if ( !in_realmode(ctxt, ops) )
{
unsigned int idte_size, idte_offset;
- struct segment_register idtr;
- uint32_t idte_ctl;
+ struct { uint32_t a, b, c, d; } idte;
int lm = in_longmode(ctxt, ops);
if ( lm < 0 )
@@ -1660,24 +1686,30 @@ static int inject_swint(enum x86_swint_type type,
((ctxt->regs->eflags & EFLG_IOPL) != EFLG_IOPL) )
goto raise_exn;
- fail_if(ops->read_segment == NULL);
fail_if(ops->read == NULL);
- if ( (rc = ops->read_segment(x86_seg_idtr, &idtr, ctxt)) )
- goto done;
-
- if ( (idte_offset + idte_size - 1) > idtr.limit )
- goto raise_exn;
/*
- * Should strictly speaking read all 8/16 bytes of an entry,
- * but we currently only care about the dpl and present bits.
+ * Read all 8/16 bytes so the idtr limit check is applied properly
+ * to this entry, even though we only end up looking at the 2nd
+ * word.
*/
- if ( (rc = ops->read(x86_seg_none, idtr.base + idte_offset + 4,
- &idte_ctl, sizeof(idte_ctl), ctxt)) )
- goto done;
+ switch ( rc = ops->read(x86_seg_idtr, idte_offset,
+ &idte, idte_size, ctxt) )
+ {
+ case X86EMUL_OKAY:
+ break;
+
+ case X86EMUL_EXCEPTION:
+ if ( !ctxt->event_pending )
+ goto raise_exn;
+ /* fallthrough */
+
+ default:
+ return rc;
+ }
/* Is this entry present? */
- if ( !(idte_ctl & (1u << 15)) )
+ if ( !(idte.b & (1u << 15)) )
{
fault_type = EXC_NP;
goto raise_exn;
@@ -1686,12 +1718,11 @@ static int inject_swint(enum x86_swint_type type,
/* icebp counts as a hardware event, and bypasses the dpl check. */
if ( type != x86_swint_icebp )
{
- struct segment_register ss;
+ int cpl = get_cpl(ctxt, ops);
- if ( (rc = ops->read_segment(x86_seg_ss, &ss, ctxt)) )
- goto done;
+ fail_if(cpl < 0);
- if ( ss.attr.fields.dpl > ((idte_ctl >> 13) & 3) )
+ if ( cpl > ((idte.b >> 13) & 3) )
goto raise_exn;
}
}
--
2.1.4