xen-devel.lists.xenproject.org archive mirror
From: George Dunlap <george.dunlap@eu.citrix.com>
To: xen-devel@lists.xen.org
Cc: George Dunlap <george.dunlap@eu.citrix.com>,
	Keir Fraser <keir@xen.org>, Tim Deegan <tim@xen.org>,
	Jan Beulich <jan.beulich@suse.com>
Subject: [PATCH RFC v13 11/20] pvh: Support read_segment_register for PVH
Date: Mon, 23 Sep 2013 17:49:51 +0100	[thread overview]
Message-ID: <1379955000-11050-12-git-send-email-george.dunlap@eu.citrix.com> (raw)
In-Reply-To: <1379955000-11050-1-git-send-email-george.dunlap@eu.citrix.com>

This will be necessary to do PV-style emulated operations for PVH guests.
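
After this change, callers pass the vcpu and register frame explicitly; for
example (illustrative, matching the macro introduced below):

    regs->ds = read_segment_register(v, regs, ds);

For a PVH vcpu running in guest mode this fetches the selector from the VMCS
via hvm_read_selector(); otherwise it falls back to reading the physical
segment register, as before.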

Signed-off-by: George Dunlap <george.dunlap@eu.citrix.com>
Signed-off-by: Mukesh Rathor <mukesh.rathor@oracle.com>
---
v13:
 - Put read_selector next to other segment-related calls
 - Rename pvh_get_selector to hvm_read_selector, to make the naming consistent
CC: Jan Beulich <jan.beulich@suse.com>
CC: Tim Deegan <tim@xen.org>
CC: Keir Fraser <keir@xen.org>
---
 xen/arch/x86/domain.c         |    8 ++++----
 xen/arch/x86/hvm/vmx/vmx.c    |   40 ++++++++++++++++++++++++++++++++++++++++
 xen/arch/x86/traps.c          |   26 ++++++++++++--------------
 xen/arch/x86/x86_64/traps.c   |   16 ++++++++--------
 xen/include/asm-x86/hvm/hvm.h |    6 ++++++
 xen/include/asm-x86/system.h  |   19 +++++++++++++++----
 6 files changed, 85 insertions(+), 30 deletions(-)

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 5b7e1b2..4b4c66d 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -1217,10 +1217,10 @@ static void save_segments(struct vcpu *v)
     struct cpu_user_regs *regs = &v->arch.user_regs;
     unsigned int dirty_segment_mask = 0;
 
-    regs->ds = read_segment_register(ds);
-    regs->es = read_segment_register(es);
-    regs->fs = read_segment_register(fs);
-    regs->gs = read_segment_register(gs);
+    regs->ds = read_segment_register(v, regs, ds);
+    regs->es = read_segment_register(v, regs, es);
+    regs->fs = read_segment_register(v, regs, fs);
+    regs->gs = read_segment_register(v, regs, gs);
 
     if ( regs->ds )
         dirty_segment_mask |= DIRTY_DS;
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index f9b589b..5392223 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -664,6 +664,45 @@ static void vmx_ctxt_switch_to(struct vcpu *v)
         .fields = { .type = 0xb, .s = 0, .dpl = 0, .p = 1, .avl = 0,    \
                     .l = 0, .db = 0, .g = 0, .pad = 0 } }).bytes)
 
+u16 vmx_read_selector(struct vcpu *v, enum x86_segment seg)
+{
+    u16 sel = 0;
+
+    vmx_vmcs_enter(v);
+    switch ( seg )
+    {
+    case x86_seg_cs:
+        sel = __vmread(GUEST_CS_SELECTOR);
+        break;
+
+    case x86_seg_ss:
+        sel = __vmread(GUEST_SS_SELECTOR);
+        break;
+
+    case x86_seg_es:
+        sel = __vmread(GUEST_ES_SELECTOR);
+        break;
+
+    case x86_seg_ds:
+        sel = __vmread(GUEST_DS_SELECTOR);
+        break;
+
+    case x86_seg_fs:
+        sel = __vmread(GUEST_FS_SELECTOR);
+        break;
+
+    case x86_seg_gs:
+        sel = __vmread(GUEST_GS_SELECTOR);
+        break;
+
+    default:
+        BUG();
+    }
+    vmx_vmcs_exit(v);
+
+    return sel;
+}
+
 void vmx_get_segment_register(struct vcpu *v, enum x86_segment seg,
                               struct segment_register *reg)
 {
@@ -1526,6 +1565,7 @@ static struct hvm_function_table __initdata vmx_function_table = {
     .guest_x86_mode       = vmx_guest_x86_mode,
     .get_segment_register = vmx_get_segment_register,
     .set_segment_register = vmx_set_segment_register,
+    .read_selector        = vmx_read_selector,
     .get_shadow_gs_base   = vmx_get_shadow_gs_base,
     .update_host_cr3      = vmx_update_host_cr3,
     .update_guest_cr      = vmx_update_guest_cr,
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index 58a92a5..1eac9ff 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -1835,8 +1835,6 @@ static inline uint64_t guest_misc_enable(uint64_t val)
     }                                                                       \
     (eip) += sizeof(_x); _x; })
 
-#define read_sreg(regs, sr) read_segment_register(sr)
-
 static int is_cpufreq_controller(struct domain *d)
 {
     return ((cpufreq_controller == FREQCTL_dom0_kernel) &&
@@ -1881,7 +1879,7 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
         goto fail;
 
     /* emulating only opcodes not allowing SS to be default */
-    data_sel = read_sreg(regs, ds);
+    data_sel = read_segment_register(v, regs, ds);
 
     /* Legacy prefixes. */
     for ( i = 0; i < 8; i++, rex == opcode || (rex = 0) )
@@ -1899,17 +1897,17 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
             data_sel = regs->cs;
             continue;
         case 0x3e: /* DS override */
-            data_sel = read_sreg(regs, ds);
+            data_sel = read_segment_register(v, regs, ds);
             continue;
         case 0x26: /* ES override */
-            data_sel = read_sreg(regs, es);
+            data_sel = read_segment_register(v, regs, es);
             continue;
         case 0x64: /* FS override */
-            data_sel = read_sreg(regs, fs);
+            data_sel = read_segment_register(v, regs, fs);
             lm_ovr = lm_seg_fs;
             continue;
         case 0x65: /* GS override */
-            data_sel = read_sreg(regs, gs);
+            data_sel = read_segment_register(v, regs, gs);
             lm_ovr = lm_seg_gs;
             continue;
         case 0x36: /* SS override */
@@ -1956,7 +1954,7 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
 
         if ( !(opcode & 2) )
         {
-            data_sel = read_sreg(regs, es);
+            data_sel = read_segment_register(v, regs, es);
             lm_ovr = lm_seg_none;
         }
 
@@ -2689,22 +2687,22 @@ static void emulate_gate_op(struct cpu_user_regs *regs)
             ASSERT(opnd_sel);
             continue;
         case 0x3e: /* DS override */
-            opnd_sel = read_sreg(regs, ds);
+            opnd_sel = read_segment_register(v, regs, ds);
             if ( !opnd_sel )
                 opnd_sel = dpl;
             continue;
         case 0x26: /* ES override */
-            opnd_sel = read_sreg(regs, es);
+            opnd_sel = read_segment_register(v, regs, es);
             if ( !opnd_sel )
                 opnd_sel = dpl;
             continue;
         case 0x64: /* FS override */
-            opnd_sel = read_sreg(regs, fs);
+            opnd_sel = read_segment_register(v, regs, fs);
             if ( !opnd_sel )
                 opnd_sel = dpl;
             continue;
         case 0x65: /* GS override */
-            opnd_sel = read_sreg(regs, gs);
+            opnd_sel = read_segment_register(v, regs, gs);
             if ( !opnd_sel )
                 opnd_sel = dpl;
             continue;
@@ -2757,7 +2755,7 @@ static void emulate_gate_op(struct cpu_user_regs *regs)
                             switch ( modrm & 7 )
                             {
                             default:
-                                opnd_sel = read_sreg(regs, ds);
+                                opnd_sel = read_segment_register(v, regs, ds);
                                 break;
                             case 4: case 5:
                                 opnd_sel = regs->ss;
@@ -2785,7 +2783,7 @@ static void emulate_gate_op(struct cpu_user_regs *regs)
                             break;
                         }
                         if ( !opnd_sel )
-                            opnd_sel = read_sreg(regs, ds);
+                            opnd_sel = read_segment_register(v, regs, ds);
                         switch ( modrm & 7 )
                         {
                         case 0: case 2: case 4:
diff --git a/xen/arch/x86/x86_64/traps.c b/xen/arch/x86/x86_64/traps.c
index 8644aaf..3dfb309 100644
--- a/xen/arch/x86/x86_64/traps.c
+++ b/xen/arch/x86/x86_64/traps.c
@@ -123,10 +123,10 @@ void show_registers(struct cpu_user_regs *regs)
         fault_crs[0] = read_cr0();
         fault_crs[3] = read_cr3();
         fault_crs[4] = read_cr4();
-        fault_regs.ds = read_segment_register(ds);
-        fault_regs.es = read_segment_register(es);
-        fault_regs.fs = read_segment_register(fs);
-        fault_regs.gs = read_segment_register(gs);
+        fault_regs.ds = read_segment_register(v, regs, ds);
+        fault_regs.es = read_segment_register(v, regs, es);
+        fault_regs.fs = read_segment_register(v, regs, fs);
+        fault_regs.gs = read_segment_register(v, regs, gs);
     }
 
     print_xen_info();
@@ -239,10 +239,10 @@ void do_double_fault(struct cpu_user_regs *regs)
     crs[2] = read_cr2();
     crs[3] = read_cr3();
     crs[4] = read_cr4();
-    regs->ds = read_segment_register(ds);
-    regs->es = read_segment_register(es);
-    regs->fs = read_segment_register(fs);
-    regs->gs = read_segment_register(gs);
+    regs->ds = read_segment_register(current, regs, ds);
+    regs->es = read_segment_register(current, regs, es);
+    regs->fs = read_segment_register(current, regs, fs);
+    regs->gs = read_segment_register(current, regs, gs);
 
     printk("CPU:    %d\n", cpu);
     _show_registers(regs, crs, CTXT_hypervisor, NULL);
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 3376418..9437ff7 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -114,6 +114,7 @@ struct hvm_function_table {
                                  struct segment_register *reg);
     void (*set_segment_register)(struct vcpu *v, enum x86_segment seg,
                                  struct segment_register *reg);
+    u16 (*read_selector)(struct vcpu *v, enum x86_segment seg);
     unsigned long (*get_shadow_gs_base)(struct vcpu *v);
 
     /* 
@@ -321,6 +322,11 @@ hvm_set_segment_register(struct vcpu *v, enum x86_segment seg,
     hvm_funcs.set_segment_register(v, seg, reg);
 }
 
+static inline u16 hvm_read_selector(struct vcpu *v, enum x86_segment seg)
+{
+    return hvm_funcs.read_selector(v, seg);
+}
+
 static inline unsigned long hvm_get_shadow_gs_base(struct vcpu *v)
 {
     return hvm_funcs.get_shadow_gs_base(v);
diff --git a/xen/include/asm-x86/system.h b/xen/include/asm-x86/system.h
index 6ab7d56..34f7bbd 100644
--- a/xen/include/asm-x86/system.h
+++ b/xen/include/asm-x86/system.h
@@ -4,10 +4,21 @@
 #include <xen/lib.h>
 #include <xen/bitops.h>
 
-#define read_segment_register(name)                             \
-({  u16 __sel;                                                  \
-    asm volatile ( "movw %%" STR(name) ",%0" : "=r" (__sel) );  \
-    __sel;                                                      \
+/*
+ * We need the vcpu argument: during a context switch from PV to PVH,
+ * save_segments() runs after current has already been updated to point to
+ * the next (PVH) vcpu, while the intent is to read selectors for the
+ * outgoing PV vcpu; checking is_pvh_vcpu(current) would be wrong there.
+ */
+#define read_segment_register(vcpu, regs, name)                   \
+({  u16 __sel;                                                    \
+    struct cpu_user_regs *_regs = (regs);                         \
+                                                                  \
+    if ( is_pvh_vcpu(vcpu) && guest_mode(_regs) )                 \
+        __sel = hvm_read_selector(vcpu, x86_seg_##name);          \
+    else                                                          \
+        asm volatile ( "movw %%" #name ",%0" : "=r" (__sel) );    \
+    __sel;                                                        \
 })
 
 #define wbinvd() \
-- 
1.7.9.5


Thread overview: 58+ messages
2013-09-23 16:49 [PATCH RFC v13 00/20] Introduce PVH domU support George Dunlap
2013-09-23 16:49 ` [PATCH RFC v13 01/20] Allow vmx_update_debug_state to be called when v!=current George Dunlap
2013-09-23 16:49 ` [PATCH RFC v13 02/20] pvh prep: code motion George Dunlap
2013-09-26  9:20   ` Tim Deegan
2013-10-04 15:29   ` Roger Pau Monné
2013-09-23 16:49 ` [PATCH RFC v13 03/20] Introduce pv guest type and has_hvm_container macros George Dunlap
2013-09-26 11:53   ` Tim Deegan
2013-09-26 12:54     ` Ian Campbell
2013-09-26 13:46     ` George Dunlap
2013-09-26 15:31       ` Konrad Rzeszutek Wilk
2013-09-26 16:24       ` Tim Deegan
2013-09-23 16:49 ` [PATCH RFC v13 04/20] pvh: Introduce PVH guest type George Dunlap
2013-09-23 16:49 ` [PATCH RFC v13 05/20] pvh: Disable unneeded features of HVM containers George Dunlap
2013-09-26 15:22   ` Jan Beulich
2013-11-04 12:31     ` George Dunlap
2013-09-23 16:49 ` [PATCH RFC v13 06/20] pvh: vmx-specific changes George Dunlap
2013-09-26 15:29   ` Jan Beulich
2013-11-07 14:14     ` George Dunlap
2013-11-07 14:29       ` Jan Beulich
2013-10-07 15:55   ` Roger Pau Monné
2013-10-07 16:06     ` George Dunlap
2013-10-07 16:12       ` Tim Deegan
2013-10-07 16:20         ` George Dunlap
2013-10-07 17:08           ` Tim Deegan
2013-10-08  8:45         ` Jan Beulich
2013-11-07 12:02           ` George Dunlap
2013-11-07 13:12             ` Jan Beulich
2013-09-23 16:49 ` [PATCH RFC v13 07/20] pvh: Do not allow PVH guests to change paging modes George Dunlap
2013-09-26 15:30   ` Jan Beulich
2013-09-23 16:49 ` [PATCH RFC v13 08/20] pvh: PVH access to hypercalls George Dunlap
2013-09-26 15:33   ` Jan Beulich
2013-09-27 21:15     ` Mukesh Rathor
2013-09-30  6:38       ` Jan Beulich
2013-09-23 16:49 ` [PATCH RFC v13 09/20] pvh: Use PV e820 George Dunlap
2013-09-27 17:57   ` Konrad Rzeszutek Wilk
2013-09-23 16:49 ` [PATCH RFC v13 10/20] pvh: Support guest_kernel_mode for PVH George Dunlap
2013-09-23 16:49 ` George Dunlap [this message]
2013-09-26 15:36   ` [PATCH RFC v13 11/20] pvh: Support read_segment_register " Jan Beulich
2013-09-23 16:49 ` [PATCH RFC v13 12/20] pvh: read_descriptor for PVH guests George Dunlap
2013-09-27 18:34   ` Konrad Rzeszutek Wilk
2013-09-23 16:49 ` [PATCH RFC v13 13/20] pvh: Set up more PV stuff in set_info_guest George Dunlap
2013-09-26 15:43   ` Jan Beulich
2013-11-07 15:57     ` George Dunlap
2013-09-23 16:49 ` [PATCH RFC v13 14/20] pvh: Use PV handlers for emulated forced invalid ops, cpuid, and IO George Dunlap
2013-09-26 15:52   ` Jan Beulich
2013-09-23 16:49 ` [PATCH RFC v13 15/20] pvh: Disable 32-bit guest support for now George Dunlap
2013-09-23 16:49 ` [PATCH RFC v13 16/20] pvh: Restrict tsc_mode to NEVER_EMULATE " George Dunlap
2013-09-23 16:49 ` [PATCH RFC v13 17/20] pvh: Disable debug traps when doing pv emulation for PVH domains George Dunlap
2013-09-26 15:55   ` Jan Beulich
2013-09-23 16:49 ` [PATCH RFC v13 18/20] pvh: Documentation George Dunlap
2013-09-23 16:49 ` [PATCH RFC v13 19/20] PVH xen tools: libxc changes to build a PVH guest George Dunlap
2013-09-27 18:37   ` Konrad Rzeszutek Wilk
2013-10-18 16:45   ` Roger Pau Monné
2013-11-04 11:56     ` George Dunlap
2013-11-04 13:18       ` Roger Pau Monné
2013-09-23 16:50 ` [PATCH RFC v13 20/20] PVH xen tools: libxl changes to create " George Dunlap
2013-09-27 18:38   ` Konrad Rzeszutek Wilk
2013-09-27 13:08 ` [PATCH RFC v13 00/20] Introduce PVH domU support Konrad Rzeszutek Wilk
