From: Tom Lendacky <thomas.lendacky@amd.com>
To: Aaron Lewis <aaronlewis@google.com>, kvm@vger.kernel.org
Cc: pbonzini@redhat.com, jmattson@google.com, seanjc@google.com
Subject: Re: [PATCH 09/15] KVM: SVM: Drop "always" flag from list of possible passthrough MSRs
Date: Tue, 3 Dec 2024 15:26:44 -0600
Message-ID: <5da02caa-1bf8-87fb-785d-f5db41ef249a@amd.com>
In-Reply-To: <20241127201929.4005605-10-aaronlewis@google.com>
On 11/27/24 14:19, Aaron Lewis wrote:
> From: Sean Christopherson <seanjc@google.com>
>
> Signed-off-by: Sean Christopherson <seanjc@google.com>
> ---
> arch/x86/kvm/svm/svm.c | 134 ++++++++++++++++++++---------------------
> 1 file changed, 67 insertions(+), 67 deletions(-)
>
> diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
> index 25d41709a0eaa..3813258497e49 100644
> --- a/arch/x86/kvm/svm/svm.c
> +++ b/arch/x86/kvm/svm/svm.c
> @@ -81,51 +81,48 @@ static DEFINE_PER_CPU(u64, current_tsc_ratio);
>
> #define X2APIC_MSR(x) (APIC_BASE_MSR + (x >> 4))
>
> -static const struct svm_direct_access_msrs {
> - u32 index; /* Index of the MSR */
> - bool always; /* True if intercept is initially cleared */
> -} direct_access_msrs[MAX_DIRECT_ACCESS_MSRS] = {
> - { .index = MSR_STAR, .always = true },
> - { .index = MSR_IA32_SYSENTER_CS, .always = true },
> - { .index = MSR_IA32_SYSENTER_EIP, .always = false },
> - { .index = MSR_IA32_SYSENTER_ESP, .always = false },
> +static const u32 direct_access_msrs[MAX_DIRECT_ACCESS_MSRS] = {
> + MSR_STAR,
> + MSR_IA32_SYSENTER_CS,
> + MSR_IA32_SYSENTER_EIP,
> + MSR_IA32_SYSENTER_ESP,
> #ifdef CONFIG_X86_64
> - { .index = MSR_GS_BASE, .always = true },
> - { .index = MSR_FS_BASE, .always = true },
> - { .index = MSR_KERNEL_GS_BASE, .always = true },
> - { .index = MSR_LSTAR, .always = true },
> - { .index = MSR_CSTAR, .always = true },
> - { .index = MSR_SYSCALL_MASK, .always = true },
> + MSR_GS_BASE,
> + MSR_FS_BASE,
> + MSR_KERNEL_GS_BASE,
> + MSR_LSTAR,
> + MSR_CSTAR,
> + MSR_SYSCALL_MASK,
> #endif
> - { .index = MSR_IA32_SPEC_CTRL, .always = false },
> - { .index = MSR_IA32_PRED_CMD, .always = false },
> - { .index = MSR_IA32_FLUSH_CMD, .always = false },
> - { .index = MSR_IA32_DEBUGCTLMSR, .always = false },
> - { .index = MSR_IA32_LASTBRANCHFROMIP, .always = false },
> - { .index = MSR_IA32_LASTBRANCHTOIP, .always = false },
> - { .index = MSR_IA32_LASTINTFROMIP, .always = false },
> - { .index = MSR_IA32_LASTINTTOIP, .always = false },
> - { .index = MSR_IA32_XSS, .always = false },
> - { .index = MSR_EFER, .always = false },
> - { .index = MSR_IA32_CR_PAT, .always = false },
> - { .index = MSR_AMD64_SEV_ES_GHCB, .always = false },
> - { .index = MSR_TSC_AUX, .always = false },
> - { .index = X2APIC_MSR(APIC_ID), .always = false },
> - { .index = X2APIC_MSR(APIC_LVR), .always = false },
> - { .index = X2APIC_MSR(APIC_TASKPRI), .always = false },
> - { .index = X2APIC_MSR(APIC_ARBPRI), .always = false },
> - { .index = X2APIC_MSR(APIC_PROCPRI), .always = false },
> - { .index = X2APIC_MSR(APIC_EOI), .always = false },
> - { .index = X2APIC_MSR(APIC_RRR), .always = false },
> - { .index = X2APIC_MSR(APIC_LDR), .always = false },
> - { .index = X2APIC_MSR(APIC_DFR), .always = false },
> - { .index = X2APIC_MSR(APIC_SPIV), .always = false },
> - { .index = X2APIC_MSR(APIC_ISR), .always = false },
> - { .index = X2APIC_MSR(APIC_TMR), .always = false },
> - { .index = X2APIC_MSR(APIC_IRR), .always = false },
> - { .index = X2APIC_MSR(APIC_ESR), .always = false },
> - { .index = X2APIC_MSR(APIC_ICR), .always = false },
> - { .index = X2APIC_MSR(APIC_ICR2), .always = false },
> + MSR_IA32_SPEC_CTRL,
> + MSR_IA32_PRED_CMD,
> + MSR_IA32_FLUSH_CMD,
> + MSR_IA32_DEBUGCTLMSR,
> + MSR_IA32_LASTBRANCHFROMIP,
> + MSR_IA32_LASTBRANCHTOIP,
> + MSR_IA32_LASTINTFROMIP,
> + MSR_IA32_LASTINTTOIP,
> + MSR_IA32_XSS,
> + MSR_EFER,
> + MSR_IA32_CR_PAT,
> + MSR_AMD64_SEV_ES_GHCB,
> + MSR_TSC_AUX,
> + X2APIC_MSR(APIC_ID),
> + X2APIC_MSR(APIC_LVR),
> + X2APIC_MSR(APIC_TASKPRI),
> + X2APIC_MSR(APIC_ARBPRI),
> + X2APIC_MSR(APIC_PROCPRI),
> + X2APIC_MSR(APIC_EOI),
> + X2APIC_MSR(APIC_RRR),
> + X2APIC_MSR(APIC_LDR),
> + X2APIC_MSR(APIC_DFR),
> + X2APIC_MSR(APIC_SPIV),
> + X2APIC_MSR(APIC_ISR),
> + X2APIC_MSR(APIC_TMR),
> + X2APIC_MSR(APIC_IRR),
> + X2APIC_MSR(APIC_ESR),
> + X2APIC_MSR(APIC_ICR),
> + X2APIC_MSR(APIC_ICR2),
>
> /*
> * Note:
> @@ -134,15 +131,15 @@ static const struct svm_direct_access_msrs {
> * the AVIC hardware would generate GP fault. Therefore, always
> * intercept the MSR 0x832, and do not setup direct_access_msr.
> */
> - { .index = X2APIC_MSR(APIC_LVTTHMR), .always = false },
> - { .index = X2APIC_MSR(APIC_LVTPC), .always = false },
> - { .index = X2APIC_MSR(APIC_LVT0), .always = false },
> - { .index = X2APIC_MSR(APIC_LVT1), .always = false },
> - { .index = X2APIC_MSR(APIC_LVTERR), .always = false },
> - { .index = X2APIC_MSR(APIC_TMICT), .always = false },
> - { .index = X2APIC_MSR(APIC_TMCCT), .always = false },
> - { .index = X2APIC_MSR(APIC_TDCR), .always = false },
> - { .index = MSR_INVALID, .always = false },
> + X2APIC_MSR(APIC_LVTTHMR),
> + X2APIC_MSR(APIC_LVTPC),
> + X2APIC_MSR(APIC_LVT0),
> + X2APIC_MSR(APIC_LVT1),
> + X2APIC_MSR(APIC_LVTERR),
> + X2APIC_MSR(APIC_TMICT),
> + X2APIC_MSR(APIC_TMCCT),
> + X2APIC_MSR(APIC_TDCR),
> + MSR_INVALID,
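For anyone else following along: if I'm reading apicdef.h right, the
register the comment above is referring to is APIC_LVTT, i.e.
X2APIC_MSR(APIC_LVTT) = 0x800 + (0x320 >> 4) = 0x832, which is why the
LVT timer register is deliberately absent from this list.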
By adding this, there are two things being done in this patch. I think it
would be easier to see the changes specifically related to removing the
"always" flag if the MSR_INVALID addition were a separate patch.
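As an aside, and not something this patch needs to do: once the entries
are plain u32 values, the MSR_INVALID-terminated loops could presumably
become ARRAY_SIZE()-bounded, assuming the array is also re-declared to be
sized by its initializer rather than MAX_DIRECT_ACCESS_MSRS. A rough,
untested sketch of what I mean:

	static const u32 direct_access_msrs[] = {
		MSR_STAR,
		MSR_IA32_SYSENTER_CS,
		/* ... the rest of the entries, no MSR_INVALID terminator ... */
	};

	static int direct_access_msr_slot(u32 msr)
	{
		u32 i;

		for (i = 0; i < ARRAY_SIZE(direct_access_msrs); i++) {
			if (direct_access_msrs[i] == msr)
				return i;
		}

		return -ENOENT;
	}

Anything else that keys off MAX_DIRECT_ACCESS_MSRS would need a matching
look, of course.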
Thanks,
Tom
> };
>
> /*
> @@ -763,9 +760,10 @@ static int direct_access_msr_slot(u32 msr)
> {
> u32 i;
>
> - for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++)
> - if (direct_access_msrs[i].index == msr)
> + for (i = 0; direct_access_msrs[i] != MSR_INVALID; i++) {
> + if (direct_access_msrs[i] == msr)
> return i;
> + }
>
> return -ENOENT;
> }
> @@ -911,15 +909,17 @@ unsigned long *svm_vcpu_alloc_msrpm(void)
>
> void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, unsigned long *msrpm)
> {
> - int i;
> -
> - for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
> - if (!direct_access_msrs[i].always)
> - continue;
> - svm_disable_intercept_for_msr(vcpu, direct_access_msrs[i].index,
> - MSR_TYPE_RW);
> - }
> + svm_disable_intercept_for_msr(vcpu, MSR_STAR, MSR_TYPE_RW);
> + svm_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_CS, MSR_TYPE_RW);
>
> +#ifdef CONFIG_X86_64
> + svm_disable_intercept_for_msr(vcpu, MSR_GS_BASE, MSR_TYPE_RW);
> + svm_disable_intercept_for_msr(vcpu, MSR_FS_BASE, MSR_TYPE_RW);
> + svm_disable_intercept_for_msr(vcpu, MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
> + svm_disable_intercept_for_msr(vcpu, MSR_LSTAR, MSR_TYPE_RW);
> + svm_disable_intercept_for_msr(vcpu, MSR_CSTAR, MSR_TYPE_RW);
> + svm_disable_intercept_for_msr(vcpu, MSR_SYSCALL_MASK, MSR_TYPE_RW);
> +#endif
> if (sev_es_guest(vcpu->kvm))
> svm_disable_intercept_for_msr(vcpu, MSR_AMD64_SEV_ES_GHCB, MSR_TYPE_RW);
> }
> @@ -935,7 +935,7 @@ void svm_set_x2apic_msr_interception(struct vcpu_svm *svm, bool intercept)
> return;
>
> for (i = 0; i < MAX_DIRECT_ACCESS_MSRS; i++) {
> - int index = direct_access_msrs[i].index;
> + int index = direct_access_msrs[i];
>
> if ((index < APIC_BASE_MSR) ||
> (index > APIC_BASE_MSR + 0xff))
> @@ -965,8 +965,8 @@ static void svm_msr_filter_changed(struct kvm_vcpu *vcpu)
> * refreshed since KVM is going to intercept them regardless of what
> * userspace wants.
> */
> - for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
> - u32 msr = direct_access_msrs[i].index;
> + for (i = 0; direct_access_msrs[i] != MSR_INVALID; i++) {
> + u32 msr = direct_access_msrs[i];
>
> if (!test_bit(i, svm->shadow_msr_intercept.read))
> svm_disable_intercept_for_msr(vcpu, msr, MSR_TYPE_R);
> @@ -1009,10 +1009,10 @@ static void init_msrpm_offsets(void)
>
> memset(msrpm_offsets, 0xff, sizeof(msrpm_offsets));
>
> - for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
> + for (i = 0; direct_access_msrs[i] != MSR_INVALID; i++) {
> u32 offset;
>
> - offset = svm_msrpm_offset(direct_access_msrs[i].index);
> + offset = svm_msrpm_offset(direct_access_msrs[i]);
> BUG_ON(offset == MSR_INVALID);
>
> add_msr_offset(offset);