From: Aaron Lewis <aaronlewis@google.com>
To: kvm@vger.kernel.org
Cc: pbonzini@redhat.com, jmattson@google.com, seanjc@google.com,
Aaron Lewis <aaronlewis@google.com>
Subject: [PATCH 14/15] KVM: x86: Hoist SVM MSR intercepts to common x86 code
Date: Wed, 27 Nov 2024 20:19:28 +0000
Message-ID: <20241127201929.4005605-15-aaronlewis@google.com>
In-Reply-To: <20241127201929.4005605-1-aaronlewis@google.com>

Now that the SVM and VMX implementations for MSR intercepts are the
same, hoist the SVM implementation to common x86 code.

Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Aaron Lewis <aaronlewis@google.com>
---
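Note for reviewers (not part of the patch): the hoisted
BUILD_KVM_MSR_BITMAP_HELPER relies on token pasting to stamp out the
four read/write set/clear helpers from a single body. Below is a
minimal, self-contained userspace sketch of that technique; struct
vcpu, the bit macros, and the modulo-based bitmap lookup are simplified
stand-ins for the kernel's kvm_vcpu, bitops, and the static_call() into
.get_msr_bitmap_entries, not the real implementation.

#include <stdio.h>

struct vcpu {
    unsigned long read_bitmap;   /* stand-in for the MSR read bitmap  */
    unsigned long write_bitmap;  /* stand-in for the MSR write bitmap */
};

/* Simplified stand-in for the kvm_x86_get_msr_bitmap_entries hook. */
static void get_msr_bitmap_entries(struct vcpu *v, unsigned int msr,
                                   unsigned long **read_map,
                                   unsigned int *read_bit,
                                   unsigned long **write_map,
                                   unsigned int *write_bit)
{
    *read_map = &v->read_bitmap;
    *read_bit = msr % (8 * sizeof(unsigned long));
    *write_map = &v->write_bitmap;
    *write_bit = *read_bit;
}

#define __set_bit(bit, map)   (*(map) |=  (1UL << (bit)))
#define __clear_bit(bit, map) (*(map) &= ~(1UL << (bit)))

/* Same token-pasting trick as BUILD_KVM_MSR_BITMAP_HELPER in the patch. */
#define BUILD_MSR_BITMAP_HELPER(fn, bitop, access)                    \
static void fn(struct vcpu *v, unsigned int msr)                      \
{                                                                     \
    unsigned long *read_map, *write_map;                              \
    unsigned int read_bit, write_bit;                                 \
                                                                      \
    get_msr_bitmap_entries(v, msr, &read_map, &read_bit,              \
                           &write_map, &write_bit);                   \
    bitop(access##_bit, access##_map);                                \
}

BUILD_MSR_BITMAP_HELPER(set_msr_bitmap_read,    __set_bit,   read)
BUILD_MSR_BITMAP_HELPER(set_msr_bitmap_write,   __set_bit,   write)
BUILD_MSR_BITMAP_HELPER(clear_msr_bitmap_read,  __clear_bit, read)
BUILD_MSR_BITMAP_HELPER(clear_msr_bitmap_write, __clear_bit, write)

int main(void)
{
    struct vcpu v = { 0, 0 };

    set_msr_bitmap_read(&v, 3);    /* intercept reads of "MSR 3" */
    set_msr_bitmap_write(&v, 3);   /* intercept writes too */
    clear_msr_bitmap_write(&v, 3); /* let writes through again */
    printf("read=%#lx write=%#lx\n", v.read_bitmap, v.write_bitmap);
    return 0;
}

Running it prints "read=0x8 write=0": only the read-intercept bit for
the toy "MSR 3" remains set, matching how the kernel helpers flip one
bit per access type.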
 arch/x86/include/asm/kvm-x86-ops.h |  1 +
 arch/x86/include/asm/kvm_host.h    |  3 ++
 arch/x86/kvm/svm/svm.c             | 73 ++---------------------------
 arch/x86/kvm/x86.c                 | 75 ++++++++++++++++++++++++++++++
 arch/x86/kvm/x86.h                 |  2 +
 5 files changed, 86 insertions(+), 68 deletions(-)

diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
index 124c2e1e42026..3f10ce4957f74 100644
--- a/arch/x86/include/asm/kvm-x86-ops.h
+++ b/arch/x86/include/asm/kvm-x86-ops.h
@@ -132,6 +132,7 @@ KVM_X86_OP(apic_init_signal_blocked)
KVM_X86_OP_OPTIONAL(enable_l2_tlb_flush)
KVM_X86_OP_OPTIONAL(migrate_timers)
KVM_X86_OP_OPTIONAL(msr_filter_changed)
+KVM_X86_OP_OPTIONAL(get_msr_bitmap_entries)
KVM_X86_OP(disable_intercept_for_msr)
KVM_X86_OP(complete_emulated_msr)
KVM_X86_OP(vcpu_deliver_sipi_vector)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 808b5365e4bd2..763fc054a2c56 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1830,6 +1830,9 @@ struct kvm_x86_ops {
const u32 * const possible_passthrough_msrs;
const u32 nr_possible_passthrough_msrs;
 
+ void (*get_msr_bitmap_entries)(struct kvm_vcpu *vcpu, u32 msr,
+ unsigned long **read_map, u8 *read_bit,
+ unsigned long **write_map, u8 *write_bit);
void (*disable_intercept_for_msr)(struct kvm_vcpu *vcpu, u32 msr, int type);
void (*msr_filter_changed)(struct kvm_vcpu *vcpu);
int (*complete_emulated_msr)(struct kvm_vcpu *vcpu, int err);
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 31ed6c68e8194..aaf244e233b90 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -799,84 +799,20 @@ static void svm_get_msr_bitmap_entries(struct kvm_vcpu *vcpu, u32 msr,
*write_map = &svm->msrpm[offset];
}
 
-#define BUILD_SVM_MSR_BITMAP_HELPER(fn, bitop, access) \
-static inline void fn(struct kvm_vcpu *vcpu, u32 msr) \
-{ \
- unsigned long *read_map, *write_map; \
- u8 read_bit, write_bit; \
- \
- svm_get_msr_bitmap_entries(vcpu, msr, &read_map, &read_bit, \
- &write_map, &write_bit); \
- bitop(access##_bit, access##_map); \
-}
-
-BUILD_SVM_MSR_BITMAP_HELPER(svm_set_msr_bitmap_read, __set_bit, read)
-BUILD_SVM_MSR_BITMAP_HELPER(svm_set_msr_bitmap_write, __set_bit, write)
-BUILD_SVM_MSR_BITMAP_HELPER(svm_clear_msr_bitmap_read, __clear_bit, read)
-BUILD_SVM_MSR_BITMAP_HELPER(svm_clear_msr_bitmap_write, __clear_bit, write)
-
void svm_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
{
- struct vcpu_svm *svm = to_svm(vcpu);
- int slot;
-
- slot = kvm_passthrough_msr_slot(msr);
- WARN_ON(slot == -ENOENT);
- if (slot >= 0) {
- /* Set the shadow bitmaps to the desired intercept states */
- if (type & MSR_TYPE_R)
- __clear_bit(slot, vcpu->arch.shadow_msr_intercept.read);
- if (type & MSR_TYPE_W)
- __clear_bit(slot, vcpu->arch.shadow_msr_intercept.write);
- }
-
- /*
- * Don't disabled interception for the MSR if userspace wants to
- * handle it.
- */
- if ((type & MSR_TYPE_R) && !kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_READ)) {
- svm_set_msr_bitmap_read(vcpu, msr);
- type &= ~MSR_TYPE_R;
- }
-
- if ((type & MSR_TYPE_W) && !kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_WRITE)) {
- svm_set_msr_bitmap_write(vcpu, msr);
- type &= ~MSR_TYPE_W;
- }
-
- if (type & MSR_TYPE_R)
- svm_clear_msr_bitmap_read(vcpu, msr);
-
- if (type & MSR_TYPE_W)
- svm_clear_msr_bitmap_write(vcpu, msr);
+ kvm_disable_intercept_for_msr(vcpu, msr, type);
 
svm_hv_vmcb_dirty_nested_enlightenments(vcpu);
- svm->nested.force_msr_bitmap_recalc = true;
+ to_svm(vcpu)->nested.force_msr_bitmap_recalc = true;
}
 
void svm_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
{
- struct vcpu_svm *svm = to_svm(vcpu);
- int slot;
-
- slot = kvm_passthrough_msr_slot(msr);
- WARN_ON(slot == -ENOENT);
- if (slot >= 0) {
- /* Set the shadow bitmaps to the desired intercept states */
- if (type & MSR_TYPE_R)
- __set_bit(slot, vcpu->arch.shadow_msr_intercept.read);
- if (type & MSR_TYPE_W)
- __set_bit(slot, vcpu->arch.shadow_msr_intercept.write);
- }
-
- if (type & MSR_TYPE_R)
- svm_set_msr_bitmap_read(vcpu, msr);
-
- if (type & MSR_TYPE_W)
- svm_set_msr_bitmap_write(vcpu, msr);
+ kvm_enable_intercept_for_msr(vcpu, msr, type);
 
svm_hv_vmcb_dirty_nested_enlightenments(vcpu);
- svm->nested.force_msr_bitmap_recalc = true;
+ to_svm(vcpu)->nested.force_msr_bitmap_recalc = true;
}
 
unsigned long *svm_vcpu_alloc_msrpm(void)
@@ -5127,6 +5063,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
.possible_passthrough_msrs = direct_access_msrs,
.nr_possible_passthrough_msrs = ARRAY_SIZE(direct_access_msrs),
 
+ .get_msr_bitmap_entries = svm_get_msr_bitmap_entries,
.disable_intercept_for_msr = svm_disable_intercept_for_msr,
 
.complete_emulated_msr = svm_complete_emulated_msr,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 2082ae8dc5db1..1e607a0eb58a0 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1819,6 +1819,81 @@ int kvm_passthrough_msr_slot(u32 msr)
}
EXPORT_SYMBOL_GPL(kvm_passthrough_msr_slot);
 
+#define BUILD_KVM_MSR_BITMAP_HELPER(fn, bitop, access) \
+static inline void fn(struct kvm_vcpu *vcpu, u32 msr) \
+{ \
+ unsigned long *read_map, *write_map; \
+ u8 read_bit, write_bit; \
+ \
+ static_call(kvm_x86_get_msr_bitmap_entries)(vcpu, msr, \
+ &read_map, &read_bit, \
+ &write_map, &write_bit); \
+ bitop(access##_bit, access##_map); \
+}
+
+BUILD_KVM_MSR_BITMAP_HELPER(kvm_set_msr_bitmap_read, __set_bit, read)
+BUILD_KVM_MSR_BITMAP_HELPER(kvm_set_msr_bitmap_write, __set_bit, write)
+BUILD_KVM_MSR_BITMAP_HELPER(kvm_clear_msr_bitmap_read, __clear_bit, read)
+BUILD_KVM_MSR_BITMAP_HELPER(kvm_clear_msr_bitmap_write, __clear_bit, write)
+
+void kvm_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
+{
+ int slot;
+
+ slot = kvm_passthrough_msr_slot(msr);
+ WARN_ON(slot == -ENOENT);
+ if (slot >= 0) {
+ /* Set the shadow bitmaps to the desired intercept states */
+ if (type & MSR_TYPE_R)
+ __clear_bit(slot, vcpu->arch.shadow_msr_intercept.read);
+ if (type & MSR_TYPE_W)
+ __clear_bit(slot, vcpu->arch.shadow_msr_intercept.write);
+ }
+
+ /*
+ * Don't disable interception for the MSR if userspace wants to
+ * handle it.
+ */
+ if ((type & MSR_TYPE_R) && !kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_READ)) {
+ kvm_set_msr_bitmap_read(vcpu, msr);
+ type &= ~MSR_TYPE_R;
+ }
+
+ if ((type & MSR_TYPE_W) && !kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_WRITE)) {
+ kvm_set_msr_bitmap_write(vcpu, msr);
+ type &= ~MSR_TYPE_W;
+ }
+
+ if (type & MSR_TYPE_R)
+ kvm_clear_msr_bitmap_read(vcpu, msr);
+
+ if (type & MSR_TYPE_W)
+ kvm_clear_msr_bitmap_write(vcpu, msr);
+}
+EXPORT_SYMBOL_GPL(kvm_disable_intercept_for_msr);
+
+void kvm_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
+{
+ int slot;
+
+ slot = kvm_passthrough_msr_slot(msr);
+ WARN_ON(slot == -ENOENT);
+ if (slot >= 0) {
+ /* Set the shadow bitmaps to the desired intercept states */
+ if (type & MSR_TYPE_R)
+ __set_bit(slot, vcpu->arch.shadow_msr_intercept.read);
+ if (type & MSR_TYPE_W)
+ __set_bit(slot, vcpu->arch.shadow_msr_intercept.write);
+ }
+
+ if (type & MSR_TYPE_R)
+ kvm_set_msr_bitmap_read(vcpu, msr);
+
+ if (type & MSR_TYPE_W)
+ kvm_set_msr_bitmap_write(vcpu, msr);
+}
+EXPORT_SYMBOL_GPL(kvm_enable_intercept_for_msr);
+
static void kvm_msr_filter_changed(struct kvm_vcpu *vcpu)
{
u32 msr, i;
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 208f0698c64e2..239cc4de49c58 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -556,6 +556,8 @@ int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva);
bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type);
int kvm_passthrough_msr_slot(u32 msr);
+void kvm_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);
+void kvm_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);
 
enum kvm_msr_access {
MSR_TYPE_R = BIT(0),
--
2.47.0.338.g60cca15819-goog