* [PATCH v2] x86: defer not-present segment checks
From: Jan Beulich @ 2016-10-10 10:04 UTC
  To: xen-devel; +Cc: Andrew Cooper

Following on from commits 5602e74c60 ("x86emul: correct loading of
%ss") and bdb860d01c ("x86/HVM: correct segment register loading during
task switch") the point of the non-.present checks needs to be refined:
#NP (and its #SS companion), other than suggested by the various
instruction pages in Intel's SDM, gets checked for only after all type
and permission checks. The only checks getting done even later are the
long mode specific ones for system descriptors (which we don't support
yet) and 64-bit code segments (i.e. anything touching other than the
attribute byte).

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
v2: Defer L/D bits check on 64-bit code segments until after the P bit
    one.
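
To illustrate the resulting ordering, below is a minimal standalone C
sketch of the checks performed when loading a protected-mode segment
descriptor. This is not the Xen code: the function, enum, and parameter
names are made up for illustration; only the descriptor bit positions
(S, P, L, D in the high dword) match those used in the patch.

#include <stdbool.h>
#include <stdint.h>

#define DESC_S (1u << 12)   /* 0 = system segment, 1 = code/data */
#define DESC_P (1u << 15)   /* segment present */
#define DESC_L (1u << 21)   /* 64-bit code segment */
#define DESC_D (1u << 22)   /* default operand size */

enum seg_fault { SEG_OK, SEG_GP, SEG_NP, SEG_SS };

/* 'hi' is the high dword of the descriptor (desc.b in the patch). */
enum seg_fault check_seg(uint32_t hi, bool is_cs, bool is_ss, bool long_mode)
{
    /* 1. Type checks come first: code/data registers need S = 1. */
    if ( !(hi & DESC_S) )
        return SEG_GP;

    /* 2. DPL/RPL/CPL permission checks would go here. */

    /* 3. Only now the P bit: #NP, or #SS for %ss loads. */
    if ( !(hi & DESC_P) )
        return is_ss ? SEG_SS : SEG_NP;

    /* 4. Later still (v2): %cs in long mode must not have L and D both set. */
    if ( is_cs && long_mode && (hi & DESC_L) && (hi & DESC_D) )
        return SEG_GP;

    return SEG_OK;
}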

--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2754,14 +2754,6 @@ static int hvm_load_segment_selector(
     do {
         desc = *pdesc;
 
-        /* Segment present in memory? */
-        if ( !(desc.b & _SEGMENT_P) )
-        {
-            fault_type = (seg != x86_seg_ss) ? TRAP_no_segment
-                                             : TRAP_stack_error;
-            goto unmap_and_fail;
-        }
-
         /* LDT descriptor is a system segment. All others are code/data. */
         if ( (desc.b & (1u<<12)) == ((seg == x86_seg_ldtr) << 12) )
             goto unmap_and_fail;
@@ -2806,6 +2798,14 @@ static int hvm_load_segment_selector(
                 goto unmap_and_fail;
             break;
         }
+
+        /* Segment present in memory? */
+        if ( !(desc.b & _SEGMENT_P) )
+        {
+            fault_type = (seg != x86_seg_ss) ? TRAP_no_segment
+                                             : TRAP_stack_error;
+            goto unmap_and_fail;
+        }
     } while ( !(desc.b & 0x100) && /* Ensure Accessed flag is set */
               writable && /* except if we are to discard writes */
               (cmpxchg(&pdesc->b, desc.b, desc.b | 0x100) != desc.b) );
@@ -2892,12 +2892,6 @@ void hvm_task_switch(
     if ( tr.attr.fields.g )
         tr.limit = (tr.limit << 12) | 0xfffu;
 
-    if ( !tr.attr.fields.p )
-    {
-        hvm_inject_hw_exception(TRAP_no_segment, tss_sel & 0xfff8);
-        goto out;
-    }
-
     if ( tr.attr.fields.type != ((taskswitch_reason == TSW_iret) ? 0xb : 0x9) )
     {
         hvm_inject_hw_exception(
@@ -2906,6 +2900,12 @@ void hvm_task_switch(
         goto out;
     }
 
+    if ( !tr.attr.fields.p )
+    {
+        hvm_inject_hw_exception(TRAP_no_segment, tss_sel & 0xfff8);
+        goto out;
+    }
+
     if ( tr.limit < (sizeof(tss)-1) )
     {
         hvm_inject_hw_exception(TRAP_invalid_tss, tss_sel & 0xfff8);
--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -1311,7 +1311,7 @@ protmode_load_seg(
     struct { uint32_t a, b; } desc;
     uint8_t dpl, rpl;
     int cpl = get_cpl(ctxt, ops);
-    uint32_t new_desc_b, a_flag = 0x100;
+    uint32_t a_flag = 0x100;
     int rc, fault_type = EXC_GP;
 
     if ( cpl < 0 )
@@ -1352,13 +1352,6 @@ protmode_load_seg(
                          &desc, sizeof(desc), ctxt)) )
         return rc;
 
-    /* Segment present in memory? */
-    if ( !(desc.b & (1u<<15)) )
-    {
-        fault_type = seg != x86_seg_ss ? EXC_NP : EXC_SS;
-        goto raise_exn;
-    }
-
     if ( !is_x86_user_segment(seg) )
     {
         /* System segments must have S flag == 0. */
@@ -1393,10 +1386,6 @@ protmode_load_seg(
                /* Non-conforming segment: check RPL and DPL against CPL. */
                : rpl > cpl || dpl != cpl )
             goto raise_exn;
-        /* 64-bit code segments (L bit set) must have D bit clear. */
-        if ( in_longmode(ctxt, ops) &&
-             (desc.b & (1 << 21)) && (desc.b & (1 << 22)) )
-            goto raise_exn;
         sel = (sel ^ rpl) | cpl;
         break;
     case x86_seg_ss:
@@ -1410,7 +1399,8 @@ protmode_load_seg(
         /* LDT system segment? */
         if ( (desc.b & (15u<<8)) != (2u<<8) )
             goto raise_exn;
-        goto skip_accessed_flag;
+        a_flag = 0;
+        break;
     case x86_seg_tr:
         /* Available TSS system segment? */
         if ( (desc.b & (15u<<8)) != (9u<<8) )
@@ -1428,18 +1418,31 @@ protmode_load_seg(
         break;
     }
 
+    /* Segment present in memory? */
+    if ( !(desc.b & (1 << 15)) )
+    {
+        fault_type = seg != x86_seg_ss ? EXC_NP : EXC_SS;
+        goto raise_exn;
+    }
+
+    /* 64-bit code segments (L bit set) must have D bit clear. */
+    if ( seg == x86_seg_cs && in_longmode(ctxt, ops) &&
+         (desc.b & (1 << 21)) && (desc.b & (1 << 22)) )
+        goto raise_exn;
+
     /* Ensure Accessed flag is set. */
-    new_desc_b = desc.b | a_flag;
-    if ( !(desc.b & a_flag) &&
-         ((rc = ops->cmpxchg(
-             x86_seg_none, desctab.base + (sel & 0xfff8) + 4,
-             &desc.b, &new_desc_b, 4, ctxt)) != 0) )
-        return rc;
+    if ( a_flag && !(desc.b & a_flag) )
+    {
+        uint32_t new_desc_b = desc.b | a_flag;
 
-    /* Force the Accessed flag in our local copy. */
-    desc.b |= a_flag;
+        if ( (rc = ops->cmpxchg(x86_seg_none, desctab.base + (sel & 0xfff8) + 4,
+                                &desc.b, &new_desc_b, 4, ctxt)) != 0 )
+            return rc;
+
+        /* Force the Accessed flag in our local copy. */
+        desc.b = new_desc_b;
+    }
 
- skip_accessed_flag:
     sreg->base = (((desc.b <<  0) & 0xff000000u) |
                   ((desc.b << 16) & 0x00ff0000u) |
                   ((desc.a >> 16) & 0x0000ffffu));




* Re: [PATCH v2] x86: defer not-present segment checks
From: Andrew Cooper @ 2016-10-10 10:05 UTC
  To: Jan Beulich, xen-devel

On 10/10/16 11:04, Jan Beulich wrote:
> Following on from commits 5602e74c60 ("x86emul: correct loading of
> %ss") and bdb860d01c ("x86/HVM: correct segment register loading during
> task switch") the point of the non-.present checks needs to be refined:
> #NP (and its #SS companion), other than suggested by the various
> instruction pages in Intel's SDM, gets checked for only after all type
> and permission checks. The only checks getting done even later are the
> long mode specific ones for system descriptors (which we don't support
> yet) and 64-bit code segments (i.e. anything touching other than the
> attribute byte).
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
