From: Raghavendra Rao Ananta <rananta@google.com>
To: Oliver Upton <oupton@google.com>,
Reiji Watanabe <reijiw@google.com>, Marc Zyngier <maz@kernel.org>,
Ricardo Koller <ricarkol@google.com>,
James Morse <james.morse@arm.com>,
Suzuki K Poulose <suzuki.poulose@arm.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>,
Jing Zhang <jingzhangos@google.com>,
Colton Lewis <coltonlewis@google.com>,
Raghavendra Rao Ananta <rananta@google.com>,
linux-arm-kernel@lists.infradead.org, kvmarm@lists.linux.dev,
linux-kernel@vger.kernel.org, kvm@vger.kernel.org
Subject: [REPOST PATCH 04/16] KVM: selftests: aarch64: vPMU register test for unimplemented counters
Date: Wed, 15 Feb 2023 01:07:05 +0000
Message-ID: <20230215010717.3612794-5-rananta@google.com>
In-Reply-To: <20230215010717.3612794-1-rananta@google.com>
From: Reiji Watanabe <reijiw@google.com>
Add a new test case to the vpmu_counter_access test to check that
PMU registers for unimplemented counters are not accessible, and that
their corresponding bits in the PMU bitmap registers are RAZ, as expected.
Signed-off-by: Reiji Watanabe <reijiw@google.com>
Signed-off-by: Raghavendra Rao Ananta <rananta@google.com>
---
.../kvm/aarch64/vpmu_counter_access.c | 111 ++++++++++++++++--
.../selftests/kvm/include/aarch64/processor.h | 1 +
2 files changed, 102 insertions(+), 10 deletions(-)
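For reference, below is a minimal standalone sketch of the mask arithmetic
behind the new check_bitmap_pmu_regs() call added to guest_code() in the diff
that follows. It is illustrative only: the GENMASK_ULL definition is copied
locally for the sketch, pmcr_n == 4 is an arbitrary example value, and the
value 31 for ARMV8_PMU_MAX_GENERAL_COUNTERS assumes ARMV8_PMU_MAX_COUNTERS is
32 as in the imported perf_event.h.

/*
 * Illustrative sketch only: with PMCR_EL0.N == pmcr_n general-purpose
 * counters implemented, bits [30:pmcr_n] of the {PMCNTEN,PMOVS}{SET,CLR}
 * bitmaps correspond to unimplemented counters and are expected to be RAZ.
 */
#include <stdint.h>
#include <stdio.h>

/* Local copy for this sketch; the selftest uses the kernel's helper. */
#define GENMASK_ULL(h, l) \
	((~0ULL << (l)) & (~0ULL >> (63 - (h))))

#define ARMV8_PMU_MAX_GENERAL_COUNTERS	31	/* index 31 is the cycle counter */

int main(void)
{
	uint64_t pmcr_n = 4;	/* example: 4 implemented event counters */
	uint64_t unimp_mask;

	/* Bits for counters pmcr_n..30, i.e. the unimplemented ones. */
	unimp_mask = GENMASK_ULL(ARMV8_PMU_MAX_GENERAL_COUNTERS - 1, pmcr_n);

	/* With pmcr_n == 4 this prints unimp_mask = 0x000000007ffffff0. */
	printf("unimp_mask = %#018llx\n", (unsigned long long)unimp_mask);
	return 0;
}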
diff --git a/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c b/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c
index b6593eee2be3d..453f0dd240f44 100644
--- a/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c
+++ b/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c
@@ -5,8 +5,8 @@
* Copyright (c) 2022 Google LLC.
*
* This test checks if the guest can see the same number of the PMU event
- * counters (PMCR_EL0.N) that userspace sets, and if the guest can access
- * those counters.
+ * counters (PMCR_EL0.N) that userspace sets, if the guest can access
+ * those counters, and if the guest cannot access any other counters.
* This test runs only when KVM_CAP_ARM_PMU_V3 is supported on the host.
*/
#include <kvm_util.h>
@@ -20,7 +20,7 @@
#define ARMV8_PMU_MAX_GENERAL_COUNTERS (ARMV8_PMU_MAX_COUNTERS - 1)
/*
- * The macros and functions below for reading/writing PMEVT{CNTR,TYPER}<n>_EL0
+ * The macros and functions below for reading/writing PMEV{CNTR,TYPER}<n>_EL0
* were basically copied from arch/arm64/kernel/perf_event.c.
*/
#define PMEVN_CASE(n, case_macro) \
@@ -148,9 +148,9 @@ static inline void disable_counter(int idx)
}
/*
- * The pmc_accessor structure has pointers to PMEVT{CNTR,TYPER}<n>_EL0
+ * The pmc_accessor structure has pointers to PMEV{CNTR,TYPER}<n>_EL0
* accessors that test cases will use. Each of the accessors will
- * either directly reads/writes PMEVT{CNTR,TYPER}<n>_EL0
+ * either directly reads/writes PMEV{CNTR,TYPER}<n>_EL0
* (i.e. {read,write}_pmev{cnt,type}rn()), or reads/writes them through
* PMXEV{CNTR,TYPER}_EL0 (i.e. {read,write}_sel_ev{cnt,type}r()).
*
@@ -179,6 +179,51 @@ struct pmc_accessor pmc_accessors[] = {
{ read_sel_evcntr, write_pmevcntrn, read_sel_evtyper, write_pmevtypern },
};
+#define INVALID_EC (-1ul)
+uint64_t expected_ec = INVALID_EC;
+uint64_t op_end_addr;
+
+static void guest_sync_handler(struct ex_regs *regs)
+{
+ uint64_t esr, ec;
+
+ esr = read_sysreg(esr_el1);
+ ec = (esr >> ESR_EC_SHIFT) & ESR_EC_MASK;
+ GUEST_ASSERT_4(op_end_addr && (expected_ec == ec),
+ regs->pc, esr, ec, expected_ec);
+
+ /* Will go back to op_end_addr after the handler exits */
+ regs->pc = op_end_addr;
+
+ /*
+ * Clear op_end_addr, and set expected_ec to INVALID_EC,
+ * as a sign that an exception has occurred.
+ */
+ op_end_addr = 0;
+ expected_ec = INVALID_EC;
+}
+
+/*
+ * Run the given operation that should trigger an exception with the
+ * given exception class. The exception handler (guest_sync_handler)
+ * will reset op_end_addr to 0 and expected_ec to INVALID_EC, and
+ * will resume execution at the instruction at @done_label.
+ * The @done_label must be a unique label in this test program.
+ */
+#define TEST_EXCEPTION(ec, ops, done_label) \
+{ \
+ extern int done_label; \
+ \
+ WRITE_ONCE(op_end_addr, (uint64_t)&done_label); \
+ GUEST_ASSERT(ec != INVALID_EC); \
+ WRITE_ONCE(expected_ec, ec); \
+ dsb(ish); \
+ ops; \
+ asm volatile(#done_label":"); \
+ GUEST_ASSERT(!op_end_addr); \
+ GUEST_ASSERT(expected_ec == INVALID_EC); \
+}
+
static void pmu_disable_reset(void)
{
uint64_t pmcr = read_sysreg(pmcr_el0);
@@ -350,16 +395,38 @@ static void test_access_pmc_regs(struct pmc_accessor *acc, int pmc_idx)
pmc_idx, acc, read_data, read_data_prev);
}
+/*
+ * Tests for reading/writing registers for the unimplemented event counter
+ * specified by @pmc_idx (>= PMCR_EL0.N).
+ */
+static void test_access_invalid_pmc_regs(struct pmc_accessor *acc, int pmc_idx)
+{
+ /*
+ * Reading/writing the event count/type registers should cause
+ * an UNDEFINED exception.
+ */
+ TEST_EXCEPTION(ESR_EC_UNKNOWN, acc->read_cntr(pmc_idx), inv_rd_cntr);
+ TEST_EXCEPTION(ESR_EC_UNKNOWN, acc->write_cntr(pmc_idx, 0), inv_wr_cntr);
+ TEST_EXCEPTION(ESR_EC_UNKNOWN, acc->read_typer(pmc_idx), inv_rd_typer);
+ TEST_EXCEPTION(ESR_EC_UNKNOWN, acc->write_typer(pmc_idx, 0), inv_wr_typer);
+ /*
+ * The bit corresponding to the (unimplemented) counter in
+ * {PMCNTEN,PMOVS}{SET,CLR}_EL1 registers should be RAZ.
+ */
+ test_bitmap_pmu_regs(pmc_idx, 1);
+ test_bitmap_pmu_regs(pmc_idx, 0);
+}
+
/*
* The guest is configured with PMUv3 with @expected_pmcr_n number of
* event counters.
* Check if @expected_pmcr_n is consistent with PMCR_EL0.N, and
- * if reading/writing PMU registers for implemented counters can work
- * as expected.
+ * if reading/writing PMU registers for implemented or unimplemented
+ * counters can work as expected.
*/
static void guest_code(uint64_t expected_pmcr_n)
{
- uint64_t pmcr, pmcr_n;
+ uint64_t pmcr, pmcr_n, unimp_mask;
int i, pmc;
GUEST_ASSERT(expected_pmcr_n <= ARMV8_PMU_MAX_GENERAL_COUNTERS);
@@ -370,15 +437,31 @@ static void guest_code(uint64_t expected_pmcr_n)
/* Make sure that PMCR_EL0.N indicates the value userspace set */
GUEST_ASSERT_2(pmcr_n == expected_pmcr_n, pmcr_n, expected_pmcr_n);
+ /*
+ * Make sure that (RAZ) bits corresponding to unimplemented event
+ * counters in {PMCNTEN,PMOVS}{SET,CLR}_EL1 registers are reset to zero.
+ * (NOTE: bits for implemented event counters are reset to UNKNOWN)
+ */
+ unimp_mask = GENMASK_ULL(ARMV8_PMU_MAX_GENERAL_COUNTERS - 1, pmcr_n);
+ check_bitmap_pmu_regs(unimp_mask, false);
+
/*
* Tests for reading/writing PMU registers for implemented counters.
- * Use each combination of PMEVT{CNTR,TYPER}<n>_EL0 accessor functions.
+ * Use each combination of PMEV{CNTR,TYPER}<n>_EL0 accessor functions.
*/
for (i = 0; i < ARRAY_SIZE(pmc_accessors); i++) {
for (pmc = 0; pmc < pmcr_n; pmc++)
test_access_pmc_regs(&pmc_accessors[i], pmc);
}
+ /*
+ * Tests for reading/writing PMU registers for unimplemented counters.
+ * Use each combination of PMEV{CNTR,TYPER}<n>_EL0 accessor functions.
+ */
+ for (i = 0; i < ARRAY_SIZE(pmc_accessors); i++) {
+ for (pmc = pmcr_n; pmc < ARMV8_PMU_MAX_GENERAL_COUNTERS; pmc++)
+ test_access_invalid_pmc_regs(&pmc_accessors[i], pmc);
+ }
GUEST_DONE();
}
@@ -392,7 +475,7 @@ static struct kvm_vm *create_vpmu_vm(void *guest_code, struct kvm_vcpu **vcpup,
struct kvm_vm *vm;
struct kvm_vcpu *vcpu;
struct kvm_vcpu_init init;
- uint8_t pmuver;
+ uint8_t pmuver, ec;
uint64_t dfr0, irq = 23;
struct kvm_device_attr irq_attr = {
.group = KVM_ARM_VCPU_PMU_V3_CTRL,
@@ -405,11 +488,18 @@ static struct kvm_vm *create_vpmu_vm(void *guest_code, struct kvm_vcpu **vcpup,
};
vm = vm_create(1);
+ vm_init_descriptor_tables(vm);
+ /* Catch exceptions for easier debugging */
+ for (ec = 0; ec < ESR_EC_NUM; ec++) {
+ vm_install_sync_handler(vm, VECTOR_SYNC_CURRENT, ec,
+ guest_sync_handler);
+ }
/* Create vCPU with PMUv3 */
vm_ioctl(vm, KVM_ARM_PREFERRED_TARGET, &init);
init.features[0] |= (1 << KVM_ARM_VCPU_PMU_V3);
vcpu = aarch64_vcpu_add(vm, 0, &init, guest_code);
+ vcpu_init_descriptor_tables(vcpu);
*gic_fd = vgic_v3_setup(vm, 1, 64, GICD_BASE_GPA, GICR_BASE_GPA);
/* Make sure that PMUv3 support is indicated in the ID register */
@@ -478,6 +568,7 @@ static void run_test(uint64_t pmcr_n)
vm_ioctl(vm, KVM_ARM_PREFERRED_TARGET, &init);
init.features[0] |= (1 << KVM_ARM_VCPU_PMU_V3);
aarch64_vcpu_setup(vcpu, &init);
+ vcpu_init_descriptor_tables(vcpu);
vcpu_set_reg(vcpu, ARM64_CORE_REG(sp_el1), sp);
vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.pc), (uint64_t)guest_code);
diff --git a/tools/testing/selftests/kvm/include/aarch64/processor.h b/tools/testing/selftests/kvm/include/aarch64/processor.h
index 5f977528e09c0..52d87809356c8 100644
--- a/tools/testing/selftests/kvm/include/aarch64/processor.h
+++ b/tools/testing/selftests/kvm/include/aarch64/processor.h
@@ -104,6 +104,7 @@ enum {
#define ESR_EC_SHIFT 26
#define ESR_EC_MASK (ESR_EC_NUM - 1)
+#define ESR_EC_UNKNOWN 0x0
#define ESR_EC_SVC64 0x15
#define ESR_EC_IABT 0x21
#define ESR_EC_DABT 0x25
--
2.39.1.581.gbfd45094c4-goog