From mboxrd@z Thu Jan 1 00:00:00 1970
Return-Path:
From: Greg Kroah-Hartman
To: linux-kernel@vger.kernel.org
Cc: Greg Kroah-Hartman, stable@vger.kernel.org,
	Konrad Rzeszutek Wilk, Thomas Gleixner
Subject: [PATCH 4.14 063/104] x86/KVM/VMX: Separate the VMX AUTOLOAD guest/host number accounting
Date: Tue, 14 Aug 2018 19:17:17 +0200
Message-Id: <20180814171519.409768594@linuxfoundation.org>
In-Reply-To: <20180814171515.270692185@linuxfoundation.org>
References: <20180814171515.270692185@linuxfoundation.org>
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Sender: linux-kernel-owner@vger.kernel.org
List-ID:

4.14-stable review patch.  If anyone has any objections, please let me know.

------------------

From: Konrad Rzeszutek Wilk

commit 3190709335dd31fe1aeeebfe4ffb6c7624ef971f upstream

This allows to load a different number of MSRs depending on the context:
VMEXIT or VMENTER.

Signed-off-by: Konrad Rzeszutek Wilk
Signed-off-by: Thomas Gleixner
Signed-off-by: Greg Kroah-Hartman
---
 arch/x86/kvm/vmx.c | 29 +++++++++++++++++++----------
 1 file changed, 19 insertions(+), 10 deletions(-)

--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2058,12 +2058,18 @@ static void clear_atomic_switch_msr(stru
 	}
 	i = find_msr(&m->guest, msr);
 	if (i < 0)
-		return;
+		goto skip_guest;
 	--m->guest.nr;
-	--m->host.nr;
 	m->guest.val[i] = m->guest.val[m->guest.nr];
-	m->host.val[i] = m->host.val[m->host.nr];
 	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
+
+skip_guest:
+	i = find_msr(&m->host, msr);
+	if (i < 0)
+		return;
+
+	--m->host.nr;
+	m->host.val[i] = m->host.val[m->host.nr];
 	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
 }
 
@@ -2081,7 +2087,7 @@ static void add_atomic_switch_msr_specia
 static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
 				  u64 guest_val, u64 host_val)
 {
-	int i;
+	int i, j;
 	struct msr_autoload *m = &vmx->msr_autoload;
 
 	switch (msr) {
@@ -2117,21 +2123,24 @@ static void add_atomic_switch_msr(struct
 	}
 
 	i = find_msr(&m->guest, msr);
-	if (i == NR_AUTOLOAD_MSRS) {
+	j = find_msr(&m->host, msr);
+	if (i == NR_AUTOLOAD_MSRS || j == NR_AUTOLOAD_MSRS) {
 		printk_once(KERN_WARNING "Not enough msr switch entries. "
 				"Can't add msr %x\n", msr);
 		return;
-	} else if (i < 0) {
+	}
+	if (i < 0) {
 		i = m->guest.nr++;
-		++m->host.nr;
 		vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
+	}
+	if (j < 0) {
+		j = m->host.nr++;
 		vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
 	}
-
 	m->guest.val[i].index = msr;
 	m->guest.val[i].value = guest_val;
-	m->host.val[i].index = msr;
-	m->host.val[i].value = host_val;
+	m->host.val[j].index = msr;
+	m->host.val[j].value = host_val;
}
 
 static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
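
For anyone reviewing the accounting change itself, here is a minimal, self-contained
user-space sketch of the idea (not kernel code: struct names msr_list/msr_entry, the
NR_AUTOLOAD_MSRS value of 8, and the example MSR indices are simplified stand-ins for
the vmx.c definitions). It shows how giving each autoload list its own nr lets the
VMENTER (guest) and VMEXIT (host) counts diverge, which is what the patch enables.

/*
 * Simplified model of the separated guest/host autoload accounting.
 * Each list keeps an independent entry count, so an MSR may occupy a
 * guest (VMENTER) slot without also occupying a host (VMEXIT) slot.
 */
#include <stdint.h>
#include <stdio.h>

#define NR_AUTOLOAD_MSRS 8		/* stand-in for the real limit */

struct msr_entry {
	uint32_t index;
	uint64_t value;
};

struct msr_list {
	unsigned int nr;		/* per-list count, no longer kept in lockstep */
	struct msr_entry val[NR_AUTOLOAD_MSRS];
};

static int find_msr(struct msr_list *m, uint32_t msr)
{
	unsigned int i;

	for (i = 0; i < m->nr; i++)
		if (m->val[i].index == msr)
			return (int)i;
	return -1;			/* not present in this list */
}

/* Add or update one list; it grows only if the MSR is not already there. */
static void add_msr(struct msr_list *m, uint32_t msr, uint64_t value)
{
	int i = find_msr(m, msr);

	if (i < 0) {
		if (m->nr == NR_AUTOLOAD_MSRS)
			return;		/* list full */
		i = (int)m->nr++;
	}
	m->val[i].index = msr;
	m->val[i].value = value;
}

/* Remove by swapping in the last entry, mirroring clear_atomic_switch_msr(). */
static void clear_msr(struct msr_list *m, uint32_t msr)
{
	int i = find_msr(m, msr);

	if (i < 0)
		return;
	--m->nr;
	m->val[i] = m->val[m->nr];
}

int main(void)
{
	struct msr_list guest = { 0 }, host = { 0 };

	add_msr(&guest, 0x48, 1);	/* guest entry (0x48 used only as an example index) */
	add_msr(&host, 0x48, 0);	/* matching host entry */
	add_msr(&guest, 0xe7, 5);	/* guest-only entry: host list stays shorter */

	clear_msr(&host, 0x48);		/* host count drops, guest count does not */
	printf("guest.nr=%u host.nr=%u\n", guest.nr, host.nr);	/* prints 2 and 0 */
	return 0;
}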