All of lore.kernel.org
 help / color / mirror / Atom feed
From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
To: speck@linutronix.de
Subject: [MODERATED] Re: [patch V11 05/16] SSB 5
Date: Thu, 10 May 2018 19:50:56 -0400	[thread overview]
Message-ID: <20180510235056.GA27882@char.us.oracle.com> (raw)
In-Reply-To: <20180510222533.GH13616@tassilo.jf.intel.com>

On Thu, May 10, 2018 at 03:25:33PM -0700, speck for Andi Kleen wrote:
> > It is actually slower. I tried doing it last time with the spectre/meltdown
> > and the performance was way slower than doing it this way. I can dig up the patches
> >  - as I think we did the tests on Broadwell but hadn't tried Skylake or such
> > (or maybe it was the other way around).
> 
> Was this with MSR lists unconditionally, or with MSR list combined with 
> the "wait for the first write" approach?

It was the unconditional MSR list. The patch went through a bunch of iterations and this
is what we ended up testing - but it showed around 2-3% performance degradation
when doing TPCC-like workloads. I am trying to track down exactly what hardware
that was done against.

It won't apply to 'master' as this was against our heavily backported 4.1 kernel
but you get the idea. Also ignore some of the commit description as we did
eventually figure out the 'very fragile' case.


From: Daniel Kiper <daniel.kiper@oracle.com>

x86/kvm: Initialize guest MSR_IA32_SPEC_CTRL from VMCS VM-entry MSR load area

Current solution, wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl) before VMLAUNCH,
is very fragile and it may not restore MSR_IA32_SPEC_CTRL guest value in some
cases, e.g. during process rescheduling (it looks like that is a separate bug which
has to be fixed; anyway...). So, let's initialize guest MSR_IA32_SPEC_CTRL from
VMCS VM-entry MSR load area which is designed to do such things. Additionally,
save guest MSR_IA32_SPEC_CTRL into VM-exit MSR store area before VM-exit instead
of rdmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl) after VM-exit. The latter may not be
reliable either.

This patch replaces commits bc5d49f (x86/spec: Always set IBRS to guest value on
VMENTER and host on VMEXIT) and 51b5f53 (x86/spec: Always set IBRS to guest value
on VMENTER and host on VMEXIT (redux)).

Signed-off-by: Daniel Kiper <daniel.kiper@oracle.com>

---
 arch/x86/kvm/vmx.c |   56 ++++++++++++++++++++++++++++++++++++++++++++--------
 1 file changed, 48 insertions(+), 8 deletions(-)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index aa9bc4f..e7c0f8b 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -162,8 +162,10 @@ module_param(ple_window_max, int, S_IRUGO);
 
 extern const ulong vmx_return;
 
-#define NR_AUTOLOAD_MSRS 8
-#define VMCS02_POOL_SIZE 1
+#define NR_AUTOLOAD_MSRS	8
+#define NR_AUTOSTORE_MSRS	NR_AUTOLOAD_MSRS
+
+#define VMCS02_POOL_SIZE	1
 
 struct vmcs {
 	u32 revision_id;
@@ -504,6 +506,10 @@ struct vcpu_vmx {
 		struct vmx_msr_entry guest[NR_AUTOLOAD_MSRS];
 		struct vmx_msr_entry host[NR_AUTOLOAD_MSRS];
 	} msr_autoload;
+	struct msr_autostore {
+		unsigned nr;
+		struct vmx_msr_entry guest[NR_AUTOSTORE_MSRS];
+	} msr_autostore;
 	struct {
 		int           loaded;
 		u16           fs_sel, gs_sel, ldt_sel;
@@ -1704,6 +1710,37 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
 	m->host[i].value = host_val;
 }
 
+static void add_atomic_store_msr(struct vcpu_vmx *vmx, unsigned msr)
+{
+	unsigned i;
+	struct msr_autostore *m = &vmx->msr_autostore;
+
+	for (i = 0; i < m->nr; ++i)
+		if (m->guest[i].index == msr)
+			return;
+
+	if (i == NR_AUTOSTORE_MSRS) {
+		pr_err("Not enough msr store entries. Can't add msr %x\n", msr);
+		BUG();
+	}
+
+	m->guest[i].index = msr;
+	vmcs_write32(VM_EXIT_MSR_STORE_COUNT, ++m->nr);
+}
+
+static u64 get_msr_vmcs_store(struct vcpu_vmx *vmx, unsigned msr)
+{
+	unsigned i;
+	struct msr_autostore *m = &vmx->msr_autostore;
+
+	for (i = 0; i < m->nr; ++i)
+		if (m->guest[i].index == msr)
+			return m->guest[i].value;
+
+	pr_err("Can't find msr %x in VMCS store\n", msr);
+	BUG();
+}
+
 static void reload_tss(void)
 {
 	/*
@@ -4716,6 +4753,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 #endif
 
 	vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
+	vmcs_write64(VM_EXIT_MSR_STORE_ADDR, __pa(vmx->msr_autostore.guest));
 	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
 	vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
 	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
@@ -8192,8 +8230,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 
 	vmx->__launched = vmx->loaded_vmcs->launched;
 
-	if (ibrs_inuse)
-		wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
+	if (ibrs_inuse) {
+		add_atomic_switch_msr(vmx, MSR_IA32_SPEC_CTRL, vmx->spec_ctrl,
+				      SPEC_CTRL_FEATURE_ENABLE_IBRS);
+		add_atomic_store_msr(vmx, MSR_IA32_SPEC_CTRL);
+	}
 
 	asm(
 		/* Store host registers */
@@ -8317,12 +8358,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 #endif
 	      );
 
-	if (ibrs_inuse) {
-		rdmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
-		wrmsrl(MSR_IA32_SPEC_CTRL, SPEC_CTRL_FEATURE_ENABLE_IBRS);
-	}
 	stuff_RSB();
 
+	if (ibrs_inuse)
+		vmx->spec_ctrl = get_msr_vmcs_store(vmx, MSR_IA32_SPEC_CTRL);
+
 	/* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */
 	if (debugctlmsr)
 		update_debugctlmsr(debugctlmsr);
-- 
1.7.10.4

  reply	other threads:[~2018-05-10 23:51 UTC|newest]

Thread overview: 35+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2018-05-02 21:51 [patch V11 00/16] SSB 0 Thomas Gleixner
2018-05-02 21:51 ` [patch V11 01/16] SSB 1 Thomas Gleixner
2018-05-02 21:51 ` [patch V11 02/16] SSB 2 Thomas Gleixner
2018-05-02 21:51 ` [patch V11 03/16] SSB 3 Thomas Gleixner
2018-05-02 21:51 ` [patch V11 04/16] SSB 4 Thomas Gleixner
2018-05-02 21:51 ` [patch V11 05/16] SSB 5 Thomas Gleixner
2018-05-10 17:52   ` [MODERATED] " Andi Kleen
2018-05-10 18:30     ` Konrad Rzeszutek Wilk
2018-05-10 19:08       ` Andi Kleen
2018-05-10 21:22         ` Konrad Rzeszutek Wilk
2018-05-10 22:25           ` Andi Kleen
2018-05-10 23:50             ` Konrad Rzeszutek Wilk [this message]
2018-05-11 16:11               ` Andi Kleen
2018-05-16  7:55               ` Paolo Bonzini
2018-05-16 13:52                 ` Konrad Rzeszutek Wilk
2018-05-02 21:51 ` [patch V11 06/16] SSB 6 Thomas Gleixner
2018-05-02 21:51 ` [patch V11 07/16] SSB 7 Thomas Gleixner
2018-05-02 21:51 ` [patch V11 08/16] SSB 8 Thomas Gleixner
2018-05-02 21:51 ` [patch V11 09/16] SSB 9 Thomas Gleixner
2018-05-02 21:51 ` [patch V11 10/16] SSB 10 Thomas Gleixner
2018-05-02 21:51 ` [patch V11 11/16] SSB 11 Thomas Gleixner
2018-05-04 20:58   ` [MODERATED] " Konrad Rzeszutek Wilk
2018-05-02 21:51 ` [patch V11 12/16] SSB 12 Thomas Gleixner
2018-05-02 21:51 ` [patch V11 13/16] SSB 13 Thomas Gleixner
2018-05-02 21:51 ` [patch V11 14/16] SSB 14 Thomas Gleixner
2018-05-03  7:19   ` [MODERATED] " Konrad Rzeszutek Wilk
2018-05-03  7:31     ` Thomas Gleixner
2018-05-03  7:22   ` [MODERATED] " Konrad Rzeszutek Wilk
2018-05-02 21:51 ` [patch V11 15/16] SSB 15 Thomas Gleixner
2018-05-03  7:21   ` [MODERATED] " Konrad Rzeszutek Wilk
2018-05-02 21:51 ` [patch V11 16/16] SSB 16 Thomas Gleixner
2018-05-02 23:21 ` [patch V11 00/16] SSB 0 Thomas Gleixner
2018-05-03  4:27 ` [MODERATED] Encrypted Message Tim Chen
2018-05-03  6:10   ` [MODERATED] Re: [patch V11 00/16] SSB 0 Ingo Molnar
2018-05-03  6:30   ` Thomas Gleixner

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20180510235056.GA27882@char.us.oracle.com \
    --to=konrad.wilk@oracle.com \
    --cc=speck@linutronix.de \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.