xen-devel.lists.xenproject.org archive mirror
 help / color / mirror / Atom feed
* [PATCH] x86/svm: Fixes to OS Visible Workaround handling
@ 2018-08-15 17:07 Andrew Cooper
  2018-08-21 15:09 ` Boris Ostrovsky
  0 siblings, 1 reply; 2+ messages in thread
From: Andrew Cooper @ 2018-08-15 17:07 UTC (permalink / raw)
  To: Xen-devel
  Cc: Andrew Cooper, Boris Ostrovsky, Brian Woods, Jan Beulich,
	Suravee Suthikulpanit

OSVW data is technically per-cpu, but it is the firmware's responsibility to
make it equivalent on each cpu.  A guest's OSVW data is sourced from global
data in Xen, clearly making it per-domain data rather than per-vcpu data.

Move the data from struct arch_svm_struct to struct svm_domain, and call
svm_guest_osvw_init() from svm_domain_initialise() instead of
svm_vcpu_initialise().

In svm_guest_osvw_init(), reading osvw_length and osvw_status must be done
under the osvw_lock to avoid observing mismatched values.  The guest's view of
osvw_length also needs clipping at 64 as we only offer one status register (To
date, 5 is the maximum index defined AFAICT).  Avoid opencoding max().

Drop svm_handle_osvw() as it's shorter and simpler to implement the
functionality inline in svm_msr_{read,write}_intercept().  As the OSVW MSRs
are a contiguous block, we can access them as an array for simplicity.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Boris Ostrovsky <boris.ostrovsky@oracle.com>
CC: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
CC: Brian Woods <brian.woods@amd.com>
CC: Jan Beulich <JBeulich@suse.com>
---
 xen/arch/x86/hvm/svm/svm.c         | 47 +++++++++++++++-----------------------
 xen/include/asm-x86/hvm/svm/vmcb.h | 14 +++++++-----
 2 files changed, 26 insertions(+), 35 deletions(-)

diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 37f782b..a16f372 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -1195,17 +1195,18 @@ void svm_vmenter_helper(const struct cpu_user_regs *regs)
     vmcb->rflags = regs->rflags | X86_EFLAGS_MBS;
 }
 
-static void svm_guest_osvw_init(struct vcpu *vcpu)
+static void svm_guest_osvw_init(struct domain *d)
 {
-    if ( boot_cpu_data.x86_vendor != X86_VENDOR_AMD )
-        return;
+    struct svm_domain *svm = &d->arch.hvm_domain.svm;
+
+    spin_lock(&osvw_lock);
 
     /*
      * Guests should see errata 400 and 415 as fixed (assuming that
      * HLT and IO instructions are intercepted).
      */
-    vcpu->arch.hvm_svm.osvw.length = (osvw_length >= 3) ? osvw_length : 3;
-    vcpu->arch.hvm_svm.osvw.status = osvw_status & ~(6ULL);
+    svm->osvw.length = min(max(3ul, osvw_length), 64ul);
+    svm->osvw.status = osvw_status & ~6;
 
     /*
      * By increasing VCPU's osvw.length to 3 we are telling the guest that
@@ -1216,7 +1217,9 @@ static void svm_guest_osvw_init(struct vcpu *vcpu)
      * is present (because we really don't know).
      */
     if ( osvw_length == 0 && boot_cpu_data.x86 == 0x10 )
-        vcpu->arch.hvm_svm.osvw.status |= 1;
+        svm->osvw.status |= 1;
+
+    spin_unlock(&osvw_lock);
 }
 
 void svm_host_osvw_reset()
@@ -1268,6 +1271,8 @@ static int svm_domain_initialise(struct domain *d)
 
     d->arch.ctxt_switch = &csw;
 
+    svm_guest_osvw_init(d);
+
     return 0;
 }
 
@@ -1289,8 +1294,6 @@ static int svm_vcpu_initialise(struct vcpu *v)
         return rc;
     }
 
-    svm_guest_osvw_init(v);
-
     return 0;
 }
 
@@ -1627,23 +1630,6 @@ static void svm_init_erratum_383(const struct cpuinfo_x86 *c)
     }
 }
 
-static int svm_handle_osvw(struct vcpu *v, uint32_t msr, uint64_t *val, bool_t read)
-{
-    if ( !v->domain->arch.cpuid->extd.osvw )
-        return -1;
-
-    if ( read )
-    {
-        if (msr == MSR_AMD_OSVW_ID_LENGTH)
-            *val = v->arch.hvm_svm.osvw.length;
-        else
-            *val = v->arch.hvm_svm.osvw.status;
-    }
-    /* Writes are ignored */
-
-    return 0;
-}
-
 static int _svm_cpu_up(bool bsp)
 {
     uint64_t msr_content;
@@ -1875,6 +1861,7 @@ static int svm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
 {
     int ret;
     struct vcpu *v = current;
+    const struct domain *d = v->domain;
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
 
     switch ( msr )
@@ -2017,9 +2004,10 @@ static int svm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
 
     case MSR_AMD_OSVW_ID_LENGTH:
     case MSR_AMD_OSVW_STATUS:
-        ret = svm_handle_osvw(v, msr, msr_content, 1);
-        if ( ret < 0 )
+        if ( !d->arch.cpuid->extd.osvw )
             goto gpf;
+        *msr_content =
+            d->arch.hvm_domain.svm.osvw.raw[msr - MSR_AMD_OSVW_ID_LENGTH];
         break;
 
     default:
@@ -2063,6 +2051,7 @@ static int svm_msr_write_intercept(unsigned int msr, uint64_t msr_content)
 {
     int ret, result = X86EMUL_OKAY;
     struct vcpu *v = current;
+    struct domain *d = v->domain;
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
 
     switch ( msr )
@@ -2218,9 +2207,9 @@ static int svm_msr_write_intercept(unsigned int msr, uint64_t msr_content)
 
     case MSR_AMD_OSVW_ID_LENGTH:
     case MSR_AMD_OSVW_STATUS:
-        ret = svm_handle_osvw(v, msr, &msr_content, 0);
-        if ( ret < 0 )
+        if ( !d->arch.cpuid->extd.osvw )
             goto gpf;
+        /* Write-discard */
         break;
 
     default:
diff --git a/xen/include/asm-x86/hvm/svm/vmcb.h b/xen/include/asm-x86/hvm/svm/vmcb.h
index 6add818..f7974da 100644
--- a/xen/include/asm-x86/hvm/svm/vmcb.h
+++ b/xen/include/asm-x86/hvm/svm/vmcb.h
@@ -493,6 +493,14 @@ struct vmcb_struct {
 };
 
 struct svm_domain {
+    /* OSVW MSRs */
+    union {
+        uint64_t raw[2];
+        struct {
+            uint64_t length;
+            uint64_t status;
+        };
+    } osvw;
 };
 
 /*
@@ -536,12 +544,6 @@ struct arch_svm_struct {
 
     /* data breakpoint extension MSRs */
     uint32_t dr_mask[4];
-
-    /* OSVW MSRs */
-    struct {
-        u64 length;
-        u64 status;
-    } osvw;
 };
 
 struct vmcb_struct *alloc_vmcb(void);
-- 
2.1.4


_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply related	[flat|nested] 2+ messages in thread

* Re: [PATCH] x86/svm: Fixes to OS Visible Workaround handling
  2018-08-15 17:07 [PATCH] x86/svm: Fixes to OS Visible Workaround handling Andrew Cooper
@ 2018-08-21 15:09 ` Boris Ostrovsky
  0 siblings, 0 replies; 2+ messages in thread
From: Boris Ostrovsky @ 2018-08-21 15:09 UTC (permalink / raw)
  To: Andrew Cooper, Xen-devel; +Cc: Brian Woods, Jan Beulich, Suravee Suthikulpanit

On 08/15/2018 01:07 PM, Andrew Cooper wrote:
> OSVW data is technically per-cpu, but it is the firmware's responsibility to
> make it equivalent on each cpu.  A guest's OSVW data is sourced from global
> data in Xen, clearly making it per-domain data rather than per-vcpu data.
>
> Move the data from struct arch_svm_struct to struct svm_domain, and call
> svm_guest_osvw_init() from svm_domain_initialise() instead of
> svm_vcpu_initialise().
>
> In svm_guest_osvw_init(), reading osvw_length and osvw_status must be done
> under the osvw_lock to avoid observing mismatched values.  The guest's view of
> osvw_length also needs clipping at 64 as we only offer one status register (To
> date, 5 is the maximum index defined AFAICT).  Avoid opencoding max().


Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>

We should probably emit a warning when MSR_AMD_OSVW_ID_LENGTH reports
more than 64 bits in svm_host_osvw_init().


-boris

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xenproject.org
https://lists.xenproject.org/mailman/listinfo/xen-devel

^ permalink raw reply	[flat|nested] 2+ messages in thread

end of thread, other threads:[~2018-08-21 15:09 UTC | newest]

Thread overview: 2+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2018-08-15 17:07 [PATCH] x86/svm: Fixes to OS Visible Workaround handling Andrew Cooper
2018-08-21 15:09 ` Boris Ostrovsky

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).