From: Jinrong Liang <ljr.kernel@gmail.com>
To: Sean Christopherson <seanjc@google.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>, Like Xu <likexu@tencent.com>,
	Jim Mattson <jmattson@google.com>,
	Aaron Lewis <aaronlewis@google.com>,
	Wanpeng Li <wanpengli@tencent.com>,
	Jinrong Liang <cloudliang@tencent.com>,
	Jinrong Liang <ljr.kernel@gmail.com>,
	kvm@vger.kernel.org, linux-kernel@vger.kernel.org
Subject: [PATCH 2/9] KVM: selftests: Test gp counters overflow interrupt handling
Date: Tue, 21 Nov 2023 19:54:50 +0800
Message-ID: <20231121115457.76269-3-cloudliang@tencent.com>
In-Reply-To: <20231121115457.76269-1-cloudliang@tencent.com>

From: Jinrong Liang <cloudliang@tencent.com>

Add a test to verify that general purpose (gp) counter overflow
interrupts are handled as expected: the guest arms a gp counter just
below its overflow boundary, enables it with PMI delivery, runs a small
workload, and asserts that the interrupt handler was invoked.
Opportunistically clean up by moving the counter base MSR selection into
a get_pmc_msr() helper and splitting the Intel-specific VM setup into
intel_pmu_vm_create().

Signed-off-by: Jinrong Liang <cloudliang@tencent.com>
---
 .../selftests/kvm/x86_64/pmu_counters_test.c  | 121 ++++++++++++++----
 1 file changed, 98 insertions(+), 23 deletions(-)
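
A note on the "- 2" pre-load used in the new guest code: arming a gp
counter at two below its overflow boundary means just two counted events
wrap it to zero, which is what raises the PMI, so the test fires
deterministically instead of waiting out a full counter period. Below is
a standalone sketch of that arithmetic; the 48-bit width is purely
illustrative, the test itself queries
X86_PROPERTY_PMU_GP_COUNTERS_BIT_WIDTH:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/*
	 * Illustrative width only; the selftest reads the real value from
	 * CPUID via X86_PROPERTY_PMU_GP_COUNTERS_BIT_WIDTH.
	 */
	const unsigned int bit_width = 48;
	const uint64_t mask = (1ULL << bit_width) - 1;
	uint64_t counter = (1ULL << bit_width) - 2;	/* the patch's pre-load */
	int i;

	/* Two counted events are enough to wrap the counter to zero. */
	for (i = 0; i < 2; i++)
		counter = (counter + 1) & mask;

	printf("counter after two events: %llu\n",
	       (unsigned long long)counter);
	return 0;
}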

diff --git a/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c b/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c
index 7d8094a27209..1b108e6718fc 100644
--- a/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c
+++ b/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c
@@ -6,6 +6,7 @@
 #define _GNU_SOURCE /* for program_invocation_short_name */
 #include <x86intrin.h>
 
+#include "apic.h"
 #include "pmu.h"
 #include "processor.h"
 
@@ -19,14 +20,15 @@
 #define NUM_EXTRA_INSNS		7
 #define NUM_INSNS_RETIRED	(NUM_BRANCHES + NUM_EXTRA_INSNS)
 
+#define PMI_VECTOR		0x20
+
 static uint8_t kvm_pmu_version;
 static bool kvm_has_perf_caps;
 static bool is_forced_emulation_enabled;
+static bool pmi_irq_called;
 
 static struct kvm_vm *pmu_vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
-						  void *guest_code,
-						  uint8_t pmu_version,
-						  uint64_t perf_capabilities)
+						  void *guest_code)
 {
 	struct kvm_vm *vm;
 
@@ -34,6 +36,17 @@ static struct kvm_vm *pmu_vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
 	vm_init_descriptor_tables(vm);
 	vcpu_init_descriptor_tables(*vcpu);
 
+	return vm;
+}
+
+static struct kvm_vm *intel_pmu_vm_create(struct kvm_vcpu **vcpu,
+					  void *guest_code, uint8_t pmu_version,
+					  uint64_t perf_capabilities)
+{
+	struct kvm_vm *vm;
+
+	vm = pmu_vm_create_with_one_vcpu(vcpu, guest_code);
+
 	sync_global_to_guest(vm, kvm_pmu_version);
 	sync_global_to_guest(vm, is_forced_emulation_enabled);
 
@@ -45,6 +58,7 @@ static struct kvm_vm *pmu_vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
 		vcpu_set_msr(*vcpu, MSR_IA32_PERF_CAPABILITIES, perf_capabilities);
 
 	vcpu_set_cpuid_property(*vcpu, X86_PROPERTY_PMU_VERSION, pmu_version);
+
 	return vm;
 }
 
@@ -198,6 +212,15 @@ static bool pmu_is_null_feature(struct kvm_x86_pmu_feature event)
 	return !(*(u64 *)&event);
 }
 
+static uint32_t get_pmc_msr(void)
+{
+	if (this_cpu_has(X86_FEATURE_PDCM) &&
+	    rdmsr(MSR_IA32_PERF_CAPABILITIES) & PMU_CAP_FW_WRITES)
+		return MSR_IA32_PMC0;
+	else
+		return MSR_IA32_PERFCTR0;
+}
+
 static void guest_test_arch_event(uint8_t idx)
 {
 	const struct {
@@ -226,18 +249,12 @@ static void guest_test_arch_event(uint8_t idx)
 	/* PERF_GLOBAL_CTRL exists only for Architectural PMU Version 2+. */
 	bool guest_has_perf_global_ctrl = pmu_version >= 2;
 	struct kvm_x86_pmu_feature gp_event, fixed_event;
-	uint32_t base_pmc_msr;
+	uint32_t base_pmc_msr = get_pmc_msr();
 	unsigned int i;
 
 	/* The host side shouldn't invoke this without a guest PMU. */
 	GUEST_ASSERT(pmu_version);
 
-	if (this_cpu_has(X86_FEATURE_PDCM) &&
-	    rdmsr(MSR_IA32_PERF_CAPABILITIES) & PMU_CAP_FW_WRITES)
-		base_pmc_msr = MSR_IA32_PMC0;
-	else
-		base_pmc_msr = MSR_IA32_PERFCTR0;
-
 	gp_event = intel_event_to_feature[idx].gp_event;
 	GUEST_ASSERT_EQ(idx, gp_event.f.bit);
 
@@ -293,8 +310,8 @@ static void test_arch_events(uint8_t pmu_version, uint64_t perf_capabilities,
 	if (!pmu_version)
 		return;
 
-	vm = pmu_vm_create_with_one_vcpu(&vcpu, guest_test_arch_events,
-					 pmu_version, perf_capabilities);
+	vm = intel_pmu_vm_create(&vcpu, guest_test_arch_events, pmu_version,
+				 perf_capabilities);
 
 	vcpu_set_cpuid_property(vcpu, X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH,
 				length);
@@ -414,18 +431,12 @@ static void guest_rd_wr_counters(uint32_t base_msr, uint8_t nr_possible_counters
 
 static void guest_test_gp_counters(void)
 {
+	uint32_t base_msr = get_pmc_msr();
 	uint8_t nr_gp_counters = 0;
-	uint32_t base_msr;
 
 	if (guest_get_pmu_version())
 		nr_gp_counters = this_cpu_property(X86_PROPERTY_PMU_NR_GP_COUNTERS);
 
-	if (this_cpu_has(X86_FEATURE_PDCM) &&
-	    rdmsr(MSR_IA32_PERF_CAPABILITIES) & PMU_CAP_FW_WRITES)
-		base_msr = MSR_IA32_PMC0;
-	else
-		base_msr = MSR_IA32_PERFCTR0;
-
 	guest_rd_wr_counters(base_msr, MAX_NR_GP_COUNTERS, nr_gp_counters, 0);
 	GUEST_DONE();
 }
@@ -436,8 +447,8 @@ static void test_gp_counters(uint8_t pmu_version, uint64_t perf_capabilities,
 	struct kvm_vcpu *vcpu;
 	struct kvm_vm *vm;
 
-	vm = pmu_vm_create_with_one_vcpu(&vcpu, guest_test_gp_counters,
-					 pmu_version, perf_capabilities);
+	vm = intel_pmu_vm_create(&vcpu, guest_test_gp_counters, pmu_version,
+				 perf_capabilities);
 
 	vcpu_set_cpuid_property(vcpu, X86_PROPERTY_PMU_NR_GP_COUNTERS,
 				nr_gp_counters);
@@ -503,8 +514,8 @@ static void test_fixed_counters(uint8_t pmu_version, uint64_t perf_capabilities,
 	struct kvm_vcpu *vcpu;
 	struct kvm_vm *vm;
 
-	vm = pmu_vm_create_with_one_vcpu(&vcpu, guest_test_fixed_counters,
-					 pmu_version, perf_capabilities);
+	vm = intel_pmu_vm_create(&vcpu, guest_test_fixed_counters, pmu_version,
+				 perf_capabilities);
 
 	vcpu_set_cpuid_property(vcpu, X86_PROPERTY_PMU_FIXED_COUNTERS_BITMASK,
 				supported_bitmask);
@@ -516,6 +527,68 @@ static void test_fixed_counters(uint8_t pmu_version, uint64_t perf_capabilities,
 	kvm_vm_free(vm);
 }
 
+static void pmi_irq_handler(struct ex_regs *regs)
+{
+	pmi_irq_called = true;
+	x2apic_write_reg(APIC_EOI, 0);
+}
+
+static void guest_test_counters_pmi_workload(void)
+{
+	__asm__ __volatile__
+	("sti\n"
+	 "loop .\n"
+	 "cli\n"
+	 : "+c"((int){NUM_BRANCHES})
+	);
+}
+
+static void test_pmi_init_x2apic(void)
+{
+	x2apic_enable();
+	x2apic_write_reg(APIC_LVTPC,
+			 APIC_DM_FIXED | PMI_VECTOR);
+	pmi_irq_called = false;
+}
+
+static void guest_test_gp_counter_pmi(void)
+{
+	uint8_t guest_pmu_version = guest_get_pmu_version();
+	uint32_t base_msr = get_pmc_msr();
+
+	test_pmi_init_x2apic();
+
+	wrmsr(base_msr,
+	      (1ULL << this_cpu_property(X86_PROPERTY_PMU_GP_COUNTERS_BIT_WIDTH)) - 2);
+	wrmsr(MSR_P6_EVNTSEL0, ARCH_PERFMON_EVENTSEL_OS |
+	      ARCH_PERFMON_EVENTSEL_ENABLE | ARCH_PERFMON_EVENTSEL_INT |
+	      INTEL_ARCH_CPU_CYCLES);
+
+	if (guest_pmu_version >= 2)
+		wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, BIT_ULL(0));
+	guest_test_counters_pmi_workload();
+
+	GUEST_ASSERT(pmi_irq_called);
+	GUEST_DONE();
+}
+
+static void test_intel_ovf_pmi(uint8_t pmu_version, uint64_t perf_capabilities)
+{
+	struct kvm_vcpu *vcpu;
+	struct kvm_vm *vm;
+
+	if (!pmu_version)
+		return;
+
+	vm = intel_pmu_vm_create(&vcpu, guest_test_gp_counter_pmi, pmu_version,
+				 perf_capabilities);
+
+	vm_install_exception_handler(vm, PMI_VECTOR, pmi_irq_handler);
+	run_vcpu(vcpu);
+
+	kvm_vm_free(vm);
+}
+
 static void test_intel_counters(void)
 {
 	uint8_t nr_arch_events = kvm_cpu_property(X86_PROPERTY_PMU_EBX_BIT_VECTOR_LENGTH);
@@ -596,6 +669,8 @@ static void test_intel_counters(void)
 				for (k = 0; k <= (BIT(nr_fixed_counters) - 1); k++)
 					test_fixed_counters(v, perf_caps[i], j, k);
 			}
+
+			test_intel_ovf_pmi(v, perf_caps[i]);
 		}
 	}
 }
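
A side note on the x2APIC accesses above: in x2APIC mode the APIC
registers are addressed as MSRs at 0x800 plus the xAPIC MMIO offset
shifted right by four, which is what helpers like x2apic_write_reg() in
the selftests' apic.h encapsulate. A small standalone illustration of
the mapping (register offsets per the kernel's apicdef.h):

#include <stdio.h>
#include <stdint.h>

/* xAPIC MMIO offsets for the registers the test touches. */
#define APIC_EOI	0xB0
#define APIC_LVTPC	0x340

/* In x2APIC mode, the register at MMIO offset 'reg' is MSR 0x800 + (reg >> 4). */
static uint32_t x2apic_msr(uint32_t reg)
{
	return 0x800 + (reg >> 4);
}

int main(void)
{
	printf("APIC_EOI   -> MSR 0x%x\n", x2apic_msr(APIC_EOI));	/* 0x80b */
	printf("APIC_LVTPC -> MSR 0x%x\n", x2apic_msr(APIC_LVTPC));	/* 0x834 */
	return 0;
}
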
-- 
2.39.3



Thread overview: 12+ messages
2023-11-21 11:54 [PATCH 0/9] Test the consistency of AMD PMU counters and their features Jinrong Liang
2023-11-21 11:54 ` [PATCH 1/9] KVM: selftests: Add forced emulation check to fix #UD Jinrong Liang
2023-11-21 11:54 ` [PATCH 2/9] KVM: selftests: Test gp counters overflow interrupt handling Jinrong Liang [this message]
2023-11-21 11:54 ` [PATCH 3/9] KVM: selftests: Test fixed counters overflow interrupt handling Jinrong Liang
2023-11-21 11:54 ` [PATCH 4/9] KVM: selftests: Add x86 feature and properties for AMD PMU in processor.h Jinrong Liang
2023-11-21 11:54 ` [PATCH 5/9] KVM: selftests: Test AMD PMU performance counters basic functions Jinrong Liang
2023-11-21 11:54 ` [PATCH 6/9] KVM: selftests: Test consistency of AMD PMU counters num Jinrong Liang
2023-11-21 11:54 ` [PATCH 7/9] KVM: selftests: Test consistency of PMU MSRs with AMD PMU version Jinrong Liang
2023-11-21 11:54 ` [PATCH 8/9] KVM: selftests: Test AMD Guest PerfCtrExtCore Jinrong Liang
2023-11-21 11:54 ` [PATCH 9/9] KVM: selftests: Test AMD Guest PerfMonV2 Jinrong Liang
2024-06-10 23:36 ` [PATCH 0/9] Test the consistency of AMD PMU counters and their features Colton Lewis
2024-06-11  3:48   ` Jinrong Liang
