* [kvm-unit-tests PATCH 0/4] riscv: sbi: Add support to test PMU extension
@ 2025-12-13 15:08 James Raphael Tiovalen
2025-12-13 15:08 ` [kvm-unit-tests PATCH 1/4] lib: riscv: Add SBI PMU CSRs and enums James Raphael Tiovalen
` (3 more replies)
0 siblings, 4 replies; 5+ messages in thread
From: James Raphael Tiovalen @ 2025-12-13 15:08 UTC (permalink / raw)
To: kvm, kvm-riscv; +Cc: andrew.jones, atishp, James Raphael Tiovalen
This patch series adds support for testing most of the SBI PMU
extension functions. The two functions that use shared memory,
SBI_EXT_PMU_SNAPSHOT_SET_SHMEM (FID #7) and SBI_EXT_PMU_EVENT_GET_INFO
(FID #8), are not tested yet.

The first three patches add the required SBI PMU definitions and helper
functions, while the last patch adds the tests themselves.
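Roughly, the tests exercise the usual configure/start/stop counter
lifecycle. A minimal sketch using the wrappers and definitions added by
this series (the function name is only for illustration; error handling
is omitted):

static void pmu_count_cycles_sketch(void)
{
	unsigned long nctrs = sbi_pmu_num_counters().value;
	/* select any counter; assumes nctrs < BITS_PER_LONG */
	unsigned long cmask = (1UL << nctrs) - 1;
	struct sbiret ret;

	/* HW_GENERAL is type 0, so the event code alone is the event_idx */
	ret = sbi_pmu_counter_config_matching(0, cmask,
					      SBI_EXT_PMU_CFG_FLAG_CLEAR_VALUE,
					      SBI_EXT_PMU_HW_CPU_CYCLES, 0);
	sbi_pmu_counter_start(ret.value, 1, 0, 0);	/* ret.value: matched counter */
	/* ... code under measurement ... */
	sbi_pmu_counter_stop(ret.value, 1, 0);
	/* a hardware counter is then read via the CSR from COUNTER_GET_INFO */
}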
James Raphael Tiovalen (4):
lib: riscv: Add SBI PMU CSRs and enums
lib: riscv: Add SBI PMU support
lib: riscv: Add SBI PMU helper functions
riscv: sbi: Add tests for PMU extension
riscv/Makefile | 2 +
lib/riscv/asm/csr.h | 31 +++
lib/riscv/asm/pmu.h | 167 ++++++++++++++++
lib/riscv/asm/sbi.h | 104 ++++++++++
lib/riscv/pmu.c | 169 ++++++++++++++++
lib/riscv/sbi.c | 73 +++++++
riscv/sbi-tests.h | 1 +
riscv/sbi-pmu.c | 461 ++++++++++++++++++++++++++++++++++++++++++++
riscv/sbi.c | 2 +
9 files changed, 1010 insertions(+)
create mode 100644 lib/riscv/asm/pmu.h
create mode 100644 lib/riscv/pmu.c
create mode 100644 riscv/sbi-pmu.c
--
2.43.0
* [kvm-unit-tests PATCH 1/4] lib: riscv: Add SBI PMU CSRs and enums
2025-12-13 15:08 [kvm-unit-tests PATCH 0/4] riscv: sbi: Add support to test PMU extension James Raphael Tiovalen
@ 2025-12-13 15:08 ` James Raphael Tiovalen
2025-12-13 15:08 ` [kvm-unit-tests PATCH 2/4] lib: riscv: Add SBI PMU support James Raphael Tiovalen
` (2 subsequent siblings)
3 siblings, 0 replies; 5+ messages in thread
From: James Raphael Tiovalen @ 2025-12-13 15:08 UTC (permalink / raw)
To: kvm, kvm-riscv; +Cc: andrew.jones, atishp, James Raphael Tiovalen
Add the CSRs and enum values used by the RISC-V SBI PMU extension.
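For reference, these enums combine into the event_idx values passed to
the SBI PMU calls: bits [19:16] select the event type and bits [15:0]
the event code, and for hardware cache events the code itself packs
cache_id[15:3], op_id[2:1] and result_id[0]. An illustrative example:

/* L1D read-miss hardware cache event, composed from the enums added here */
unsigned long l1d_read_miss_event_idx =
	((unsigned long)SBI_EXT_PMU_EVENT_HW_CACHE << 16) |
	(SBI_EXT_PMU_HW_CACHE_L1D << 3) |
	(SBI_EXT_PMU_HW_CACHE_OP_READ << 1) |
	SBI_EXT_PMU_HW_CACHE_RESULT_MISS;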
Signed-off-by: James Raphael Tiovalen <jamestiotio@gmail.com>
---
lib/riscv/asm/csr.h | 31 +++++++++++++++++
lib/riscv/asm/sbi.h | 82 +++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 113 insertions(+)
diff --git a/lib/riscv/asm/csr.h b/lib/riscv/asm/csr.h
index 6a8e0578..d5b1c7cc 100644
--- a/lib/riscv/asm/csr.h
+++ b/lib/riscv/asm/csr.h
@@ -14,7 +14,38 @@
#define CSR_STIMECMP 0x14d
#define CSR_STIMECMPH 0x15d
#define CSR_SATP 0x180
+#define CSR_CYCLE 0xc00
#define CSR_TIME 0xc01
+#define CSR_INSTRET 0xc02
+#define CSR_HPMCOUNTER3 0xc03
+#define CSR_HPMCOUNTER4 0xc04
+#define CSR_HPMCOUNTER5 0xc05
+#define CSR_HPMCOUNTER6 0xc06
+#define CSR_HPMCOUNTER7 0xc07
+#define CSR_HPMCOUNTER8 0xc08
+#define CSR_HPMCOUNTER9 0xc09
+#define CSR_HPMCOUNTER10 0xc0a
+#define CSR_HPMCOUNTER11 0xc0b
+#define CSR_HPMCOUNTER12 0xc0c
+#define CSR_HPMCOUNTER13 0xc0d
+#define CSR_HPMCOUNTER14 0xc0e
+#define CSR_HPMCOUNTER15 0xc0f
+#define CSR_HPMCOUNTER16 0xc10
+#define CSR_HPMCOUNTER17 0xc11
+#define CSR_HPMCOUNTER18 0xc12
+#define CSR_HPMCOUNTER19 0xc13
+#define CSR_HPMCOUNTER20 0xc14
+#define CSR_HPMCOUNTER21 0xc15
+#define CSR_HPMCOUNTER22 0xc16
+#define CSR_HPMCOUNTER23 0xc17
+#define CSR_HPMCOUNTER24 0xc18
+#define CSR_HPMCOUNTER25 0xc19
+#define CSR_HPMCOUNTER26 0xc1a
+#define CSR_HPMCOUNTER27 0xc1b
+#define CSR_HPMCOUNTER28 0xc1c
+#define CSR_HPMCOUNTER29 0xc1d
+#define CSR_HPMCOUNTER30 0xc1e
+#define CSR_HPMCOUNTER31 0xc1f
#define SR_SIE _AC(0x00000002, UL)
#define SR_SPP _AC(0x00000100, UL)
diff --git a/lib/riscv/asm/sbi.h b/lib/riscv/asm/sbi.h
index 289a6a24..35dbf508 100644
--- a/lib/riscv/asm/sbi.h
+++ b/lib/riscv/asm/sbi.h
@@ -47,6 +47,7 @@ enum sbi_ext_id {
SBI_EXT_IPI = 0x735049,
SBI_EXT_HSM = 0x48534d,
SBI_EXT_SRST = 0x53525354,
+ SBI_EXT_PMU = 0x504d55,
SBI_EXT_DBCN = 0x4442434E,
SBI_EXT_SUSP = 0x53555350,
SBI_EXT_FWFT = 0x46574654,
@@ -94,6 +95,87 @@ enum sbi_ext_hsm_hart_suspend_type {
SBI_EXT_HSM_HART_SUSPEND_NON_RETENTIVE = 0x80000000,
};
+enum sbi_ext_pmu_fid {
+ SBI_EXT_PMU_NUM_COUNTERS = 0,
+ SBI_EXT_PMU_COUNTER_GET_INFO,
+ SBI_EXT_PMU_COUNTER_CONFIG_MATCHING,
+ SBI_EXT_PMU_COUNTER_START,
+ SBI_EXT_PMU_COUNTER_STOP,
+ SBI_EXT_PMU_COUNTER_FW_READ,
+ SBI_EXT_PMU_COUNTER_FW_READ_HI,
+ SBI_EXT_PMU_SNAPSHOT_SET_SHMEM,
+ SBI_EXT_PMU_EVENT_GET_INFO,
+};
+
+enum sbi_ext_pmu_eid_type {
+ SBI_EXT_PMU_EVENT_HW_GENERAL = 0,
+ SBI_EXT_PMU_EVENT_HW_CACHE,
+ SBI_EXT_PMU_EVENT_HW_RAW,
+ SBI_EXT_PMU_EVENT_HW_RAW_V2,
+ SBI_EXT_PMU_EVENT_FW = 15,
+};
+
+enum sbi_ext_pmu_hw_generic_event_code_id {
+ SBI_EXT_PMU_HW_NO_EVENT = 0,
+ SBI_EXT_PMU_HW_CPU_CYCLES,
+ SBI_EXT_PMU_HW_INSTRUCTIONS,
+ SBI_EXT_PMU_HW_CACHE_REFERENCES,
+ SBI_EXT_PMU_HW_CACHE_MISSES,
+ SBI_EXT_PMU_HW_BRANCH_INSTRUCTIONS,
+ SBI_EXT_PMU_HW_BRANCH_MISSES,
+ SBI_EXT_PMU_HW_BUS_CYCLES,
+ SBI_EXT_PMU_HW_STALLED_CYCLES_FRONTEND,
+ SBI_EXT_PMU_HW_STALLED_CYCLES_BACKEND,
+ SBI_EXT_PMU_HW_REF_CPU_CYCLES,
+};
+
+enum sbi_ext_pmu_hw_cache_id {
+ SBI_EXT_PMU_HW_CACHE_L1D = 0,
+ SBI_EXT_PMU_HW_CACHE_L1I,
+ SBI_EXT_PMU_HW_CACHE_LL,
+ SBI_EXT_PMU_HW_CACHE_DTLB,
+ SBI_EXT_PMU_HW_CACHE_ITLB,
+ SBI_EXT_PMU_HW_CACHE_BPU,
+ SBI_EXT_PMU_HW_CACHE_NODE,
+};
+
+enum sbi_ext_pmu_hw_cache_op_id {
+ SBI_EXT_PMU_HW_CACHE_OP_READ = 0,
+ SBI_EXT_PMU_HW_CACHE_OP_WRITE,
+ SBI_EXT_PMU_HW_CACHE_OP_PREFETCH
+};
+
+enum sbi_ext_pmu_hw_cache_op_result_id {
+ SBI_EXT_PMU_HW_CACHE_RESULT_ACCESS = 0,
+ SBI_EXT_PMU_HW_CACHE_RESULT_MISS,
+};
+
+enum sbi_ext_pmu_fw_event_code_id {
+ SBI_EXT_PMU_FW_MISALIGNED_LOAD = 0,
+ SBI_EXT_PMU_FW_MISALIGNED_STORE,
+ SBI_EXT_PMU_FW_ACCESS_LOAD,
+ SBI_EXT_PMU_FW_ACCESS_STORE,
+ SBI_EXT_PMU_FW_ILLEGAL_INSN,
+ SBI_EXT_PMU_FW_SET_TIMER,
+ SBI_EXT_PMU_FW_IPI_SENT,
+ SBI_EXT_PMU_FW_IPI_RECEIVED,
+ SBI_EXT_PMU_FW_FENCE_I_SENT,
+ SBI_EXT_PMU_FW_FENCE_I_RECEIVED,
+ SBI_EXT_PMU_FW_SFENCE_VMA_SENT,
+ SBI_EXT_PMU_FW_SFENCE_VMA_RECEIVED,
+ SBI_EXT_PMU_FW_SFENCE_VMA_ASID_SENT,
+ SBI_EXT_PMU_FW_SFENCE_VMA_ASID_RECEIVED,
+ SBI_EXT_PMU_FW_HFENCE_GVMA_SENT,
+ SBI_EXT_PMU_FW_HFENCE_GVMA_RECEIVED,
+ SBI_EXT_PMU_FW_HFENCE_GVMA_VMID_SENT,
+ SBI_EXT_PMU_FW_HFENCE_GVMA_VMID_RECEIVED,
+ SBI_EXT_PMU_FW_HFENCE_VVMA_SENT,
+ SBI_EXT_PMU_FW_HFENCE_VVMA_RECEIVED,
+ SBI_EXT_PMU_FW_HFENCE_VVMA_ASID_SENT,
+ SBI_EXT_PMU_FW_HFENCE_VVMA_ASID_RECEIVED,
+ SBI_EXT_PMU_FW_PLATFORM = 65535,
+};
+
enum sbi_ext_dbcn_fid {
SBI_EXT_DBCN_CONSOLE_WRITE = 0,
SBI_EXT_DBCN_CONSOLE_READ,
--
2.43.0
* [kvm-unit-tests PATCH 2/4] lib: riscv: Add SBI PMU support
2025-12-13 15:08 [kvm-unit-tests PATCH 0/4] riscv: sbi: Add support to test PMU extension James Raphael Tiovalen
2025-12-13 15:08 ` [kvm-unit-tests PATCH 1/4] lib: riscv: Add SBI PMU CSRs and enums James Raphael Tiovalen
@ 2025-12-13 15:08 ` James Raphael Tiovalen
2025-12-13 15:08 ` [kvm-unit-tests PATCH 3/4] lib: riscv: Add SBI PMU helper functions James Raphael Tiovalen
2025-12-13 15:08 ` [kvm-unit-tests PATCH 4/4] riscv: sbi: Add tests for PMU extension James Raphael Tiovalen
3 siblings, 0 replies; 5+ messages in thread
From: James Raphael Tiovalen @ 2025-12-13 15:08 UTC (permalink / raw)
To: kvm, kvm-riscv; +Cc: andrew.jones, atishp, James Raphael Tiovalen
Add ecall wrappers for all of the SBI PMU extension functions. These
will be used by the SBI tests.
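Each wrapper forwards its arguments through sbi_ecall() and returns the
resulting struct sbiret unchanged, so callers check ret.error and then
consume ret.value. An illustrative sketch (the function name is made up
for the example and assumes libcflat.h and asm/sbi.h are included):

static void pmu_dump_counter_info(void)
{
	struct sbiret ret = sbi_pmu_num_counters();

	for (long i = 0; !ret.error && i < ret.value; i++) {
		struct sbiret info = sbi_pmu_counter_get_info(i);

		/* info.value encodes the counter's CSR number, width and type */
		if (!info.error)
			report_info("counter %ld: raw info 0x%lx", i, (unsigned long)info.value);
	}
}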
Signed-off-by: James Raphael Tiovalen <jamestiotio@gmail.com>
---
lib/riscv/asm/sbi.h | 22 ++++++++++++++
lib/riscv/sbi.c | 73 +++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 95 insertions(+)
diff --git a/lib/riscv/asm/sbi.h b/lib/riscv/asm/sbi.h
index 35dbf508..8794c126 100644
--- a/lib/riscv/asm/sbi.h
+++ b/lib/riscv/asm/sbi.h
@@ -390,5 +390,27 @@ struct sbiret sbi_fwft_set(uint32_t feature, unsigned long value, unsigned long
struct sbiret sbi_fwft_get_raw(unsigned long feature);
struct sbiret sbi_fwft_get(uint32_t feature);
+struct sbiret sbi_pmu_num_counters(void);
+struct sbiret sbi_pmu_counter_get_info(unsigned long counter_idx);
+struct sbiret sbi_pmu_counter_config_matching(unsigned long counter_idx_base,
+ unsigned long counter_idx_mask,
+ unsigned long config_flags,
+ unsigned long event_idx,
+ unsigned long event_data);
+struct sbiret sbi_pmu_counter_start(unsigned long counter_idx_base, unsigned long counter_idx_mask,
+ unsigned long start_flags, unsigned long initial_value);
+struct sbiret sbi_pmu_counter_stop(unsigned long counter_idx_base, unsigned long counter_idx_mask,
+ unsigned long stop_flags);
+struct sbiret sbi_pmu_counter_fw_read(unsigned long counter_idx);
+struct sbiret sbi_pmu_counter_fw_read_hi(unsigned long counter_idx);
+struct sbiret sbi_pmu_snapshot_set_shmem_raw(unsigned long shmem_phys_lo,
+ unsigned long shmem_phys_hi,
+ unsigned long flags);
+struct sbiret sbi_pmu_snapshot_set_shmem(unsigned long *shmem, unsigned long flags);
+struct sbiret sbi_pmu_event_get_info_raw(unsigned long shmem_phys_lo, unsigned long shmem_phys_hi,
+ unsigned long num_entries, unsigned long flags);
+struct sbiret sbi_pmu_event_get_info(unsigned long *shmem, unsigned long num_entries,
+ unsigned long flags);
+
#endif /* !__ASSEMBLER__ */
#endif /* _ASMRISCV_SBI_H_ */
diff --git a/lib/riscv/sbi.c b/lib/riscv/sbi.c
index 39f6138f..ca8f3d33 100644
--- a/lib/riscv/sbi.c
+++ b/lib/riscv/sbi.c
@@ -32,6 +32,79 @@ struct sbiret sbi_ecall(int ext, int fid, unsigned long arg0,
return ret;
}
+struct sbiret sbi_pmu_num_counters(void)
+{
+ return sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_NUM_COUNTERS, 0, 0, 0, 0, 0, 0);
+}
+
+struct sbiret sbi_pmu_counter_get_info(unsigned long counter_idx)
+{
+ return sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_GET_INFO, counter_idx, 0, 0, 0, 0, 0);
+}
+
+struct sbiret sbi_pmu_counter_config_matching(unsigned long counter_idx_base,
+ unsigned long counter_idx_mask,
+ unsigned long config_flags,
+ unsigned long event_idx,
+ unsigned long event_data)
+{
+ return sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CONFIG_MATCHING, counter_idx_base,
+ counter_idx_mask, config_flags, event_idx, event_data, 0);
+}
+
+struct sbiret sbi_pmu_counter_start(unsigned long counter_idx_base, unsigned long counter_idx_mask,
+ unsigned long start_flags, unsigned long initial_value)
+{
+ return sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, counter_idx_base,
+ counter_idx_mask, start_flags, initial_value, 0, 0);
+}
+
+struct sbiret sbi_pmu_counter_stop(unsigned long counter_idx_base, unsigned long counter_idx_mask,
+ unsigned long stop_flags)
+{
+ return sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP, counter_idx_base,
+ counter_idx_mask, stop_flags, 0, 0, 0);
+}
+
+struct sbiret sbi_pmu_counter_fw_read(unsigned long counter_idx)
+{
+ return sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_FW_READ, counter_idx, 0, 0, 0, 0, 0);
+}
+
+struct sbiret sbi_pmu_counter_fw_read_hi(unsigned long counter_idx)
+{
+ return sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_FW_READ_HI, counter_idx, 0, 0, 0, 0, 0);
+}
+
+struct sbiret sbi_pmu_snapshot_set_shmem_raw(unsigned long shmem_phys_lo, unsigned long shmem_phys_hi,
+ unsigned long flags)
+{
+ return sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_SNAPSHOT_SET_SHMEM, shmem_phys_lo,
+ shmem_phys_hi, flags, 0, 0, 0);
+}
+
+struct sbiret sbi_pmu_snapshot_set_shmem(unsigned long *shmem, unsigned long flags)
+{
+ phys_addr_t p = virt_to_phys(shmem);
+
+ return sbi_pmu_snapshot_set_shmem_raw(lower_32_bits(p), upper_32_bits(p), flags);
+}
+
+struct sbiret sbi_pmu_event_get_info_raw(unsigned long shmem_phys_lo, unsigned long shmem_phys_hi,
+ unsigned long num_entries, unsigned long flags)
+{
+ return sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_EVENT_GET_INFO, shmem_phys_lo,
+ shmem_phys_hi, num_entries, flags, 0, 0);
+}
+
+struct sbiret sbi_pmu_event_get_info(unsigned long *shmem, unsigned long num_entries,
+ unsigned long flags)
+{
+ phys_addr_t p = virt_to_phys(shmem);
+
+ return sbi_pmu_event_get_info_raw(lower_32_bits(p), upper_32_bits(p), num_entries, flags);
+}
+
struct sbiret sbi_sse_read_attrs_raw(unsigned long event_id, unsigned long base_attr_id,
unsigned long attr_count, unsigned long phys_lo,
unsigned long phys_hi)
--
2.43.0
* [kvm-unit-tests PATCH 3/4] lib: riscv: Add SBI PMU helper functions
2025-12-13 15:08 [kvm-unit-tests PATCH 0/4] riscv: sbi: Add support to test PMU extension James Raphael Tiovalen
2025-12-13 15:08 ` [kvm-unit-tests PATCH 1/4] lib: riscv: Add SBI PMU CSRs and enums James Raphael Tiovalen
2025-12-13 15:08 ` [kvm-unit-tests PATCH 2/4] lib: riscv: Add SBI PMU support James Raphael Tiovalen
@ 2025-12-13 15:08 ` James Raphael Tiovalen
2025-12-13 15:08 ` [kvm-unit-tests PATCH 4/4] riscv: sbi: Add tests for PMU extension James Raphael Tiovalen
3 siblings, 0 replies; 5+ messages in thread
From: James Raphael Tiovalen @ 2025-12-13 15:08 UTC (permalink / raw)
To: kvm, kvm-riscv; +Cc: andrew.jones, atishp, James Raphael Tiovalen
Add helper macros for composing and decoding SBI PMU event indexes,
along with helper functions that parse the PMU event-to-counter
mappings from the FDT. These will be used by the SBI tests.
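As an illustration of how the pieces fit together (sketch only; the
function name is just for the example):

static int first_counter_for_l1d_read_miss(void)
{
	unsigned long eid = 0;
	int ctr;

	fdt_pmu_setup();	/* parses riscv,event-to-mhpmcounters from the DT */

	set_cidx_type(eid, SBI_EXT_PMU_EVENT_HW_CACHE);
	set_cidx_cache_id(eid, SBI_EXT_PMU_HW_CACHE_L1D);
	set_cidx_op_id(eid, SBI_EXT_PMU_HW_CACHE_OP_READ);
	set_cidx_result_id(eid, SBI_EXT_PMU_HW_CACHE_RESULT_MISS);

	ctr = sbi_ext_pmu_get_first_counter_for_hw_event(eid);	/* -1 if unsupported */

	fdt_pmu_free();
	return ctr;
}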
Signed-off-by: James Raphael Tiovalen <jamestiotio@gmail.com>
---
riscv/Makefile | 1 +
lib/riscv/asm/pmu.h | 167 +++++++++++++++++++++++++++++++++++++++++++
lib/riscv/pmu.c | 169 ++++++++++++++++++++++++++++++++++++++++++++
3 files changed, 337 insertions(+)
create mode 100644 lib/riscv/asm/pmu.h
create mode 100644 lib/riscv/pmu.c
diff --git a/riscv/Makefile b/riscv/Makefile
index 64720c38..c0dd5465 100644
--- a/riscv/Makefile
+++ b/riscv/Makefile
@@ -42,6 +42,7 @@ cflatobjs += lib/riscv/delay.o
cflatobjs += lib/riscv/io.o
cflatobjs += lib/riscv/isa.o
cflatobjs += lib/riscv/mmu.o
+cflatobjs += lib/riscv/pmu.o
cflatobjs += lib/riscv/processor.o
cflatobjs += lib/riscv/sbi.o
cflatobjs += lib/riscv/setjmp.o
diff --git a/lib/riscv/asm/pmu.h b/lib/riscv/asm/pmu.h
new file mode 100644
index 00000000..8bb5e3e9
--- /dev/null
+++ b/lib/riscv/asm/pmu.h
@@ -0,0 +1,167 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _ASMRISCV_PMU_H_
+#define _ASMRISCV_PMU_H_
+
+#include <libcflat.h>
+#include <asm/csr.h>
+
+#define SBI_PMU_HW_CTR_MAX 32
+
+#define SBI_EXT_PMU_EVENT_IDX_TYPE_OFFSET 16
+#define SBI_EXT_PMU_EVENT_IDX_TYPE_MASK (0xF << SBI_EXT_PMU_EVENT_IDX_TYPE_OFFSET)
+#define SBI_EXT_PMU_EVENT_IDX_CODE_MASK 0xFFFF
+
+#define SBI_EXT_PMU_EVENT_HW_CACHE_OPS_RESULT 0x1
+#define SBI_EXT_PMU_EVENT_HW_CACHE_OPS_ID_MASK 0x6
+#define SBI_EXT_PMU_EVENT_HW_CACHE_OPS_ID_OFFSET 1
+#define SBI_EXT_PMU_EVENT_HW_CACHE_ID_MASK 0xFFF8
+#define SBI_EXT_PMU_EVENT_HW_CACHE_ID_OFFSET 3
+
+#define SBI_EXT_PMU_CFG_FLAG_SKIP_MATCH (1 << 0)
+#define SBI_EXT_PMU_CFG_FLAG_CLEAR_VALUE (1 << 1)
+#define SBI_EXT_PMU_CFG_FLAG_AUTO_START (1 << 2)
+#define SBI_EXT_PMU_CFG_FLAG_SET_VUINH (1 << 3)
+#define SBI_EXT_PMU_CFG_FLAG_SET_VSINH (1 << 4)
+#define SBI_EXT_PMU_CFG_FLAG_SET_UINH (1 << 5)
+#define SBI_EXT_PMU_CFG_FLAG_SET_SINH (1 << 6)
+#define SBI_EXT_PMU_CFG_FLAG_SET_MINH (1 << 7)
+
+#define SBI_EXT_PMU_START_SET_INIT_VALUE (1 << 0)
+#define SBI_EXT_PMU_START_FLAG_INIT_SNAPSHOT (1 << 1)
+
+#define SBI_EXT_PMU_STOP_FLAG_RESET (1 << 0)
+#define SBI_EXT_PMU_STOP_FLAG_TAKE_SNAPSHOT (1 << 1)
+
+#define SBI_EXT_PMU_HPM_COUNTER_CASE(n) \
+ case CSR_HPMCOUNTER##n: \
+ return csr_read(CSR_HPMCOUNTER##n)
+
+enum sbi_ext_pmu_ctr_type {
+ SBI_EXT_PMU_CTR_TYPE_HW = 0,
+ SBI_EXT_PMU_CTR_TYPE_FW,
+};
+
+union sbi_ext_pmu_ctr_info {
+ unsigned long value;
+ struct {
+ unsigned long csr:12;
+ unsigned long width:6;
+#if __riscv_xlen == 32
+ unsigned long reserved:13;
+#else
+ unsigned long reserved:45;
+#endif
+ unsigned long type:1;
+ };
+};
+
+#define get_cidx_type(x) \
+ (((x) & SBI_EXT_PMU_EVENT_IDX_TYPE_MASK) >> SBI_EXT_PMU_EVENT_IDX_TYPE_OFFSET)
+
+#define get_cidx_code(x) (x & SBI_EXT_PMU_EVENT_IDX_CODE_MASK)
+
+#define get_cidx_cache_id(x) \
+ (((get_cidx_code(x)) & SBI_EXT_PMU_EVENT_HW_CACHE_ID_MASK) >> SBI_EXT_PMU_EVENT_HW_CACHE_ID_OFFSET)
+
+#define get_cidx_op_id(x) \
+ (((get_cidx_code(x)) & SBI_EXT_PMU_EVENT_HW_CACHE_OPS_ID_MASK) >> SBI_EXT_PMU_EVENT_HW_CACHE_OPS_ID_OFFSET)
+
+#define get_cidx_result_id(x) \
+ ((get_cidx_code(x)) & SBI_EXT_PMU_EVENT_HW_CACHE_OPS_RESULT)
+
+#define set_cidx_type(x, t) \
+ ((x) = ((((x) & ~SBI_EXT_PMU_EVENT_IDX_TYPE_MASK) | \
+ ((((unsigned long)(t)) << SBI_EXT_PMU_EVENT_IDX_TYPE_OFFSET) \
+ & SBI_EXT_PMU_EVENT_IDX_TYPE_MASK))))
+
+#define set_cidx_code(x, c) \
+ ((x) = ((((x) & ~SBI_EXT_PMU_EVENT_IDX_CODE_MASK) | \
+ (((unsigned long)(c)) & SBI_EXT_PMU_EVENT_IDX_CODE_MASK))))
+
+#define set_cidx_cache_id(x, id) \
+ (set_cidx_code((x), (((get_cidx_code(x)) & ~SBI_EXT_PMU_EVENT_HW_CACHE_ID_MASK) | \
+ ((((unsigned long)(id)) << SBI_EXT_PMU_EVENT_HW_CACHE_ID_OFFSET) \
+ & SBI_EXT_PMU_EVENT_HW_CACHE_ID_MASK))))
+
+#define set_cidx_op_id(x, op) \
+ (set_cidx_code((x), (((get_cidx_code(x)) & ~SBI_EXT_PMU_EVENT_HW_CACHE_OPS_ID_MASK) | \
+ ((((unsigned long)(op)) << SBI_EXT_PMU_EVENT_HW_CACHE_OPS_ID_OFFSET) \
+ & SBI_EXT_PMU_EVENT_HW_CACHE_OPS_ID_MASK))))
+
+#define set_cidx_result_id(x, res) \
+ (set_cidx_code((x), (((get_cidx_code(x)) & ~SBI_EXT_PMU_EVENT_HW_CACHE_OPS_RESULT) | \
+ (((unsigned long)(res)) & SBI_EXT_PMU_EVENT_HW_CACHE_OPS_RESULT))))
+
+static inline uint64_t pmu_get_cycles(void)
+{
+ return csr_read(CSR_CYCLE);
+}
+
+static inline uint64_t pmu_get_instret(void)
+{
+ return csr_read(CSR_INSTRET);
+}
+
+static inline uint64_t pmu_get_counter(unsigned long csr)
+{
+ switch (csr) {
+ case CSR_CYCLE:
+ return pmu_get_cycles();
+ case CSR_INSTRET:
+ return pmu_get_instret();
+
+ SBI_EXT_PMU_HPM_COUNTER_CASE(3);
+ SBI_EXT_PMU_HPM_COUNTER_CASE(4);
+ SBI_EXT_PMU_HPM_COUNTER_CASE(5);
+ SBI_EXT_PMU_HPM_COUNTER_CASE(6);
+ SBI_EXT_PMU_HPM_COUNTER_CASE(7);
+ SBI_EXT_PMU_HPM_COUNTER_CASE(8);
+ SBI_EXT_PMU_HPM_COUNTER_CASE(9);
+ SBI_EXT_PMU_HPM_COUNTER_CASE(10);
+ SBI_EXT_PMU_HPM_COUNTER_CASE(11);
+ SBI_EXT_PMU_HPM_COUNTER_CASE(12);
+ SBI_EXT_PMU_HPM_COUNTER_CASE(13);
+ SBI_EXT_PMU_HPM_COUNTER_CASE(14);
+ SBI_EXT_PMU_HPM_COUNTER_CASE(15);
+ SBI_EXT_PMU_HPM_COUNTER_CASE(16);
+ SBI_EXT_PMU_HPM_COUNTER_CASE(17);
+ SBI_EXT_PMU_HPM_COUNTER_CASE(18);
+ SBI_EXT_PMU_HPM_COUNTER_CASE(19);
+ SBI_EXT_PMU_HPM_COUNTER_CASE(20);
+ SBI_EXT_PMU_HPM_COUNTER_CASE(21);
+ SBI_EXT_PMU_HPM_COUNTER_CASE(22);
+ SBI_EXT_PMU_HPM_COUNTER_CASE(23);
+ SBI_EXT_PMU_HPM_COUNTER_CASE(24);
+ SBI_EXT_PMU_HPM_COUNTER_CASE(25);
+ SBI_EXT_PMU_HPM_COUNTER_CASE(26);
+ SBI_EXT_PMU_HPM_COUNTER_CASE(27);
+ SBI_EXT_PMU_HPM_COUNTER_CASE(28);
+ SBI_EXT_PMU_HPM_COUNTER_CASE(29);
+ SBI_EXT_PMU_HPM_COUNTER_CASE(30);
+ SBI_EXT_PMU_HPM_COUNTER_CASE(31);
+
+ default:
+ __builtin_unreachable();
+ }
+}
+
+struct sbi_ext_pmu_hw_event {
+ uint32_t counters;
+ unsigned long start_idx;
+ unsigned long end_idx;
+};
+
+struct sbi_ext_pmu_test_ctr {
+ int ctr_idx;
+ unsigned long eid;
+};
+
+int sbi_ext_pmu_get_counters_for_hw_event(unsigned long event_idx);
+int sbi_ext_pmu_get_first_counter_for_hw_event(unsigned long event_idx);
+int sbi_ext_pmu_get_first_unsupported_hw_event(int ctr_idx);
+struct sbi_ext_pmu_test_ctr sbi_ext_pmu_get_candidate_hw_counter_for_test(void);
+void sbi_ext_pmu_add_hw_event_counter_map(u32 event_idx_start, u32 event_idx_end, u32 ctr_map, int i);
+void fdt_pmu_setup(void);
+void fdt_pmu_free(void);
+
+#endif /* _ASMRISCV_PMU_H_ */
diff --git a/lib/riscv/pmu.c b/lib/riscv/pmu.c
new file mode 100644
index 00000000..7bbd8221
--- /dev/null
+++ b/lib/riscv/pmu.c
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2025, James Raphael Tiovalen <jamestiotio@gmail.com>
+ */
+#include <alloc.h>
+#include <libcflat.h>
+#include <devicetree.h>
+#include <asm/sbi.h>
+#include <asm/pmu.h>
+
+static struct sbi_ext_pmu_hw_event *hw_event_map;
+
+int sbi_ext_pmu_get_counters_for_hw_event(unsigned long event_idx)
+{
+ int i;
+
+ if (!hw_event_map)
+ return -1;
+
+ for (i = 0; i < SBI_PMU_HW_CTR_MAX; i++) {
+ if (hw_event_map[i].start_idx <= event_idx &&
+ hw_event_map[i].end_idx >= event_idx) {
+ return hw_event_map[i].counters;
+ }
+ }
+
+ return -1;
+}
+
+int sbi_ext_pmu_get_first_counter_for_hw_event(unsigned long event_idx)
+{
+ int i, counters = sbi_ext_pmu_get_counters_for_hw_event(event_idx);
+
+ if (!hw_event_map || counters < 0)
+ return -1;
+
+ for (i = CSR_HPMCOUNTER3 - CSR_CYCLE; i < SBI_PMU_HW_CTR_MAX; i++) {
+ if (counters & (1U << i))
+ return i;
+ }
+
+ return -1;
+}
+
+void sbi_ext_pmu_add_hw_event_counter_map(u32 event_idx_start, u32 event_idx_end, u32 ctr_map, int i)
+{
+ assert(event_idx_start <= event_idx_end);
+
+ hw_event_map[i].counters = ctr_map;
+ hw_event_map[i].start_idx = event_idx_start;
+ hw_event_map[i].end_idx = event_idx_end;
+
+ assert(get_cidx_type(hw_event_map[i].start_idx) == SBI_EXT_PMU_EVENT_HW_GENERAL
+ || get_cidx_type(hw_event_map[i].start_idx) == SBI_EXT_PMU_EVENT_HW_CACHE);
+ assert(get_cidx_type(hw_event_map[i].end_idx) == SBI_EXT_PMU_EVENT_HW_GENERAL
+ || get_cidx_type(hw_event_map[i].end_idx) == SBI_EXT_PMU_EVENT_HW_CACHE);
+}
+
+int sbi_ext_pmu_get_first_unsupported_hw_event(int ctr_idx)
+{
+ int i, j, k;
+ unsigned long candidate_eid = 0;
+
+ if (!hw_event_map)
+ return -1;
+
+ for (i = SBI_EXT_PMU_HW_CPU_CYCLES; i <= SBI_EXT_PMU_HW_REF_CPU_CYCLES; i++) {
+ set_cidx_type(candidate_eid, SBI_EXT_PMU_EVENT_HW_GENERAL);
+ set_cidx_code(candidate_eid, i);
+
+ if (sbi_ext_pmu_get_counters_for_hw_event(candidate_eid) < 0)
+ return candidate_eid;
+ }
+
+ for (i = SBI_EXT_PMU_HW_CACHE_L1D; i <= SBI_EXT_PMU_HW_CACHE_NODE; i++) {
+ for (j = SBI_EXT_PMU_HW_CACHE_OP_READ; j <= SBI_EXT_PMU_HW_CACHE_OP_PREFETCH; j++) {
+ for (k = SBI_EXT_PMU_HW_CACHE_RESULT_ACCESS; k <= SBI_EXT_PMU_HW_CACHE_RESULT_MISS; k++) {
+ set_cidx_type(candidate_eid, SBI_EXT_PMU_EVENT_HW_CACHE);
+ set_cidx_cache_id(candidate_eid, i);
+ set_cidx_op_id(candidate_eid, j);
+ set_cidx_result_id(candidate_eid, k);
+
+ if (sbi_ext_pmu_get_counters_for_hw_event(candidate_eid) < 0)
+ return candidate_eid;
+ }
+ }
+ }
+
+ return -1;
+}
+
+struct sbi_ext_pmu_test_ctr sbi_ext_pmu_get_candidate_hw_counter_for_test(void)
+{
+ struct sbi_ext_pmu_test_ctr test_ctr = {0};
+ int i, j, k, ctr_idx;
+
+ if (!hw_event_map)
+ return test_ctr;
+
+ unsigned long candidate_eid = 0;
+
+ for (i = SBI_EXT_PMU_HW_CPU_CYCLES; i <= SBI_EXT_PMU_HW_REF_CPU_CYCLES; i++) {
+ set_cidx_type(candidate_eid, SBI_EXT_PMU_EVENT_HW_GENERAL);
+ set_cidx_code(candidate_eid, i);
+ ctr_idx = sbi_ext_pmu_get_first_counter_for_hw_event(candidate_eid);
+
+ if (ctr_idx >= 0) {
+ test_ctr.ctr_idx = ctr_idx;
+ test_ctr.eid = candidate_eid;
+ return test_ctr;
+ }
+ }
+
+ for (i = SBI_EXT_PMU_HW_CACHE_L1D; i <= SBI_EXT_PMU_HW_CACHE_NODE; i++) {
+ for (j = SBI_EXT_PMU_HW_CACHE_OP_READ; j <= SBI_EXT_PMU_HW_CACHE_OP_PREFETCH; j++) {
+ for (k = SBI_EXT_PMU_HW_CACHE_RESULT_ACCESS; k <= SBI_EXT_PMU_HW_CACHE_RESULT_MISS; k++) {
+ set_cidx_type(candidate_eid, SBI_EXT_PMU_EVENT_HW_CACHE);
+ set_cidx_cache_id(candidate_eid, i);
+ set_cidx_op_id(candidate_eid, j);
+ set_cidx_result_id(candidate_eid, k);
+ ctr_idx = sbi_ext_pmu_get_first_counter_for_hw_event(candidate_eid);
+
+ if (ctr_idx >= 0) {
+ test_ctr.ctr_idx = ctr_idx;
+ test_ctr.eid = candidate_eid;
+ return test_ctr;
+ }
+ }
+ }
+ }
+
+ return test_ctr;
+}
+
+void fdt_pmu_setup(void)
+{
+ const void *fdt;
+ int i, pmu_offset, len;
+ const u32 *event_ctr_map;
+ u32 event_idx_start, event_idx_end, ctr_map;
+
+ assert_msg(dt_available(), "ACPI not yet supported");
+
+ fdt = dt_fdt();
+
+ pmu_offset = fdt_node_offset_by_compatible(fdt, -1, "riscv,pmu");
+ assert(pmu_offset >= 0);
+
+ event_ctr_map = fdt_getprop(fdt, pmu_offset, "riscv,event-to-mhpmcounters", &len);
+ if (event_ctr_map) {
+ len = len / (sizeof(u32) * 3);
+ hw_event_map = calloc(len, sizeof(struct sbi_ext_pmu_hw_event));
+ for (i = 0; i < len; i++) {
+ event_idx_start = fdt32_to_cpu(event_ctr_map[3 * i]);
+ event_idx_end = fdt32_to_cpu(event_ctr_map[3 * i + 1]);
+ ctr_map = fdt32_to_cpu(event_ctr_map[3 * i + 2]);
+ sbi_ext_pmu_add_hw_event_counter_map(event_idx_start, event_idx_end, ctr_map, i);
+ }
+ report_info("added %d hw event counter mappings", len);
+ }
+}
+
+void fdt_pmu_free(void)
+{
+ if (hw_event_map) {
+ free(hw_event_map);
+ hw_event_map = NULL;
+ }
+}
--
2.43.0
* [kvm-unit-tests PATCH 4/4] riscv: sbi: Add tests for PMU extension
2025-12-13 15:08 [kvm-unit-tests PATCH 0/4] riscv: sbi: Add support to test PMU extension James Raphael Tiovalen
` (2 preceding siblings ...)
2025-12-13 15:08 ` [kvm-unit-tests PATCH 3/4] lib: riscv: Add SBI PMU helper functions James Raphael Tiovalen
@ 2025-12-13 15:08 ` James Raphael Tiovalen
3 siblings, 0 replies; 5+ messages in thread
From: James Raphael Tiovalen @ 2025-12-13 15:08 UTC (permalink / raw)
To: kvm, kvm-riscv; +Cc: andrew.jones, atishp, James Raphael Tiovalen
Add the actual tests for the SBI PMU extension. Functions related to
shared memory (FID #7 and #8) are untested for now.
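A follow-up could exercise the shared-memory calls roughly as follows
(sketch only -- the function name and the single-page size are
assumptions, and the expected error codes would still need to be
checked against the SBI spec):

static void check_pmu_snapshot_shmem(void)
{
	unsigned long *snapshot = alloc_page();
	struct sbiret ret = sbi_pmu_snapshot_set_shmem(snapshot, 0);

	report(ret.error == SBI_SUCCESS || ret.error == SBI_ERR_NOT_SUPPORTED,
	       "snapshot shmem registration (error=%ld)", ret.error);

	/* a real test would also disable the shared memory before freeing it */
	free_page(snapshot);
}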
Signed-off-by: James Raphael Tiovalen <jamestiotio@gmail.com>
---
riscv/Makefile | 1 +
riscv/sbi-tests.h | 1 +
riscv/sbi-pmu.c | 461 ++++++++++++++++++++++++++++++++++++++++++++++
riscv/sbi.c | 2 +
4 files changed, 465 insertions(+)
create mode 100644 riscv/sbi-pmu.c
diff --git a/riscv/Makefile b/riscv/Makefile
index c0dd5465..75a108c1 100644
--- a/riscv/Makefile
+++ b/riscv/Makefile
@@ -21,6 +21,7 @@ all: $(tests)
sbi-deps += $(TEST_DIR)/sbi-asm.o
sbi-deps += $(TEST_DIR)/sbi-dbtr.o
sbi-deps += $(TEST_DIR)/sbi-fwft.o
+sbi-deps += $(TEST_DIR)/sbi-pmu.o
sbi-deps += $(TEST_DIR)/sbi-sse.o
all_deps += $(sbi-deps)
diff --git a/riscv/sbi-tests.h b/riscv/sbi-tests.h
index c1ebf016..509ec547 100644
--- a/riscv/sbi-tests.h
+++ b/riscv/sbi-tests.h
@@ -99,6 +99,7 @@ static inline bool env_enabled(const char *env)
void split_phys_addr(phys_addr_t paddr, unsigned long *hi, unsigned long *lo);
void sbi_bad_fid(int ext);
+void check_pmu(void);
void check_sse(void);
void check_dbtr(void);
diff --git a/riscv/sbi-pmu.c b/riscv/sbi-pmu.c
new file mode 100644
index 00000000..5d2e034a
--- /dev/null
+++ b/riscv/sbi-pmu.c
@@ -0,0 +1,461 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * SBI PMU test suite
+ *
+ * Copyright (C) 2025, James Raphael Tiovalen <jamestiotio@gmail.com>
+ */
+#include <alloc.h>
+#include <alloc_page.h>
+#include <bitops.h>
+#include <cpumask.h>
+#include <libcflat.h>
+#include <on-cpus.h>
+#include <stdlib.h>
+
+#include <asm/csr.h>
+#include <asm/io.h>
+#include <asm/page.h>
+#include <asm/sbi.h>
+#include <asm/pmu.h>
+
+#include "sbi-tests.h"
+
+#define SBI_PMU_COUNTER_TEST_INIT_VALUE 0x7FFFFFFF
+
+struct sbi_ext_pmu_ctr_csr_map {
+ bool mapped;
+ bool is_fw_ctr;
+ unsigned long ctr_idx;
+ unsigned long csr;
+};
+
+static unsigned long number_of_counters;
+static struct sbi_ext_pmu_ctr_csr_map *sbi_ext_pmu_ctr_csr_map;
+
+static unsigned long get_counter_idx_from_csr(unsigned long csr);
+static struct sbi_ext_pmu_test_ctr sbi_ext_pmu_get_candidate_fw_counter_for_test(void);
+static uint64_t sbi_ext_pmu_read_fw_counter(unsigned long ctr_idx);
+
+static unsigned long get_counter_idx_from_csr(unsigned long csr)
+{
+ for (unsigned long i = 0; i < number_of_counters; i++) {
+ if (sbi_ext_pmu_ctr_csr_map[i].mapped &&
+ sbi_ext_pmu_ctr_csr_map[i].csr == csr) {
+ return sbi_ext_pmu_ctr_csr_map[i].ctr_idx;
+ }
+ }
+
+ assert_msg(false, "CSR %lx not found in the map", csr);
+}
+
+static struct sbi_ext_pmu_test_ctr sbi_ext_pmu_get_candidate_fw_counter_for_test(void)
+{
+ struct sbi_ext_pmu_test_ctr test_ctr = {0};
+
+ if (!sbi_probe(SBI_EXT_TIME)) {
+ test_ctr.ctr_idx = -1;
+ return test_ctr;
+ }
+
+ set_cidx_type(test_ctr.eid, SBI_EXT_PMU_EVENT_FW);
+ set_cidx_code(test_ctr.eid, SBI_EXT_PMU_FW_SET_TIMER);
+
+ /* Since any firmware counter can be used for testing, return the first one found */
+ for (unsigned long i = 0; i < number_of_counters; i++) {
+ if (sbi_ext_pmu_ctr_csr_map[i].mapped && sbi_ext_pmu_ctr_csr_map[i].is_fw_ctr) {
+ test_ctr.ctr_idx = sbi_ext_pmu_ctr_csr_map[i].ctr_idx;
+ return test_ctr;
+ }
+ }
+
+ test_ctr.ctr_idx = -1;
+ return test_ctr;
+}
+
+static uint64_t sbi_ext_pmu_read_fw_counter(unsigned long ctr_idx)
+{
+ struct sbiret ret;
+ uint64_t ctr_val = 0;
+
+ ret = sbi_pmu_counter_fw_read(ctr_idx);
+ report(ret.error == SBI_SUCCESS,
+ "expected to read lower bits of firmware counter %ld successfully, got %ld", ctr_idx, ret.error);
+
+ ctr_val = ret.value;
+
+ ret = sbi_pmu_counter_fw_read_hi(ctr_idx);
+ report(ret.error == SBI_SUCCESS,
+ "expected to read upper bits of firmware counter %ld successfully, got %ld", ctr_idx, ret.error);
+
+ ctr_val += ((uint64_t)ret.value << 32);
+
+ return ctr_val;
+}
+
+void check_pmu(void)
+{
+ struct sbiret ret;
+ unsigned long valid_counter_info = 0, num_of_hw_counters = 0;
+ uint64_t cycle_count, instret_count, test_counter_value;
+ bool timer_counter_found = false;
+ union sbi_ext_pmu_ctr_info info;
+ unsigned long test_eid = 0, set_timer_count = 0;
+ int test_counter_idx;
+ struct sbi_ext_pmu_test_ctr test_ctr = {0};
+
+ report_prefix_push("pmu");
+
+ if (!sbi_probe(SBI_EXT_PMU)) {
+ report_skip("PMU extension unavailable");
+ report_prefix_pop();
+ return;
+ }
+
+ sbi_bad_fid(SBI_EXT_PMU);
+
+ report_prefix_push("pmu_num_counters");
+
+ ret = sbi_pmu_num_counters();
+ if (ret.error) {
+ report_fail("failed to get number of counters (error=%ld)", ret.error);
+ report_prefix_popn(2);
+ return;
+ }
+ number_of_counters = ret.value;
+
+ /* CSR_CYCLE, CSR_TIME, and CSR_INSTRET are mandatory counters */
+ if (number_of_counters < 3) {
+ report_fail("number of counters is %ld, expected at least 3", number_of_counters);
+ report_prefix_popn(2);
+ return;
+ }
+
+ report_info("number of counters is %ld", number_of_counters);
+
+ report_prefix_pop();
+
+ report_prefix_push("sbi_pmu_counter_get_info");
+
+ fdt_pmu_setup();
+
+ sbi_ext_pmu_ctr_csr_map = calloc(number_of_counters,
+ sizeof(struct sbi_ext_pmu_ctr_csr_map));
+
+ for (unsigned long i = 0; i < number_of_counters; i++) {
+ sbi_ext_pmu_ctr_csr_map[i].mapped = false;
+ sbi_ext_pmu_ctr_csr_map[i].is_fw_ctr = false;
+ sbi_ext_pmu_ctr_csr_map[i].ctr_idx = 0;
+ sbi_ext_pmu_ctr_csr_map[i].csr = 0;
+ }
+
+ for (unsigned long i = 0; i < number_of_counters; i++) {
+ ret = sbi_pmu_counter_get_info(i);
+
+ if (ret.error == SBI_ERR_INVALID_PARAM && !timer_counter_found) {
+ /* Assume that this is the CSR_TIME counter and skip it */
+ timer_counter_found = true;
+ sbi_ext_pmu_ctr_csr_map[i].ctr_idx = i;
+ sbi_ext_pmu_ctr_csr_map[i].csr = CSR_TIME;
+ valid_counter_info++;
+ report_info("skipping CSR_TIME counter with index %ld", i);
+ continue;
+ } else if (ret.error) {
+ free(sbi_ext_pmu_ctr_csr_map);
+ fdt_pmu_free();
+ report_fail("failed to get counter info (error=%ld)", ret.error);
+ report_prefix_popn(2);
+ return;
+ }
+
+ info = *(union sbi_ext_pmu_ctr_info *)&ret.value;
+
+ if (info.type == SBI_EXT_PMU_CTR_TYPE_HW) {
+ sbi_ext_pmu_ctr_csr_map[i].mapped = true;
+ sbi_ext_pmu_ctr_csr_map[i].ctr_idx = i;
+ sbi_ext_pmu_ctr_csr_map[i].csr = info.csr;
+
+ if ((info.csr == CSR_CYCLE) || (info.csr == CSR_INSTRET))
+ valid_counter_info += info.width == 63;
+ else
+ valid_counter_info++;
+
+ num_of_hw_counters++;
+ } else if (info.type == SBI_EXT_PMU_CTR_TYPE_FW) {
+ sbi_ext_pmu_ctr_csr_map[i].mapped = true;
+ sbi_ext_pmu_ctr_csr_map[i].is_fw_ctr = true;
+ sbi_ext_pmu_ctr_csr_map[i].ctr_idx = i;
+ valid_counter_info++;
+ } else {
+ free(sbi_ext_pmu_ctr_csr_map);
+ fdt_pmu_free();
+ report_fail("unknown counter type %d", info.type);
+ report_prefix_popn(2);
+ return;
+ }
+ }
+
+ report(valid_counter_info == number_of_counters,
+ "number of counters with valid info is %ld", valid_counter_info);
+
+ ret = sbi_pmu_counter_get_info(number_of_counters);
+ report(ret.error == SBI_ERR_INVALID_PARAM,
+ "expected %d when counter_idx == num_counters, got %ld", SBI_ERR_INVALID_PARAM, ret.error);
+
+ report_prefix_pop();
+
+ report_prefix_push("sbi_pmu_counter_config_matching");
+
+ cycle_count = pmu_get_cycles();
+ instret_count = pmu_get_instret();
+
+ set_cidx_type(test_eid, SBI_EXT_PMU_EVENT_HW_GENERAL);
+ set_cidx_code(test_eid, SBI_EXT_PMU_HW_CPU_CYCLES);
+ ret = sbi_pmu_counter_config_matching(get_counter_idx_from_csr(CSR_CYCLE),
+ 1,
+ SBI_EXT_PMU_CFG_FLAG_CLEAR_VALUE,
+ test_eid,
+ 0);
+
+ if (ret.error) {
+ free(sbi_ext_pmu_ctr_csr_map);
+ fdt_pmu_free();
+ report_fail("failed to configure counter (error=%ld)", ret.error);
+ report_prefix_popn(2);
+ return;
+ }
+
+ test_counter_value = pmu_get_cycles();
+
+ report(test_counter_value < cycle_count,
+ "expected cycle count to reset (%ld < %ld)", test_counter_value, cycle_count);
+
+ set_cidx_code(test_eid, SBI_EXT_PMU_HW_INSTRUCTIONS);
+ ret = sbi_pmu_counter_config_matching(get_counter_idx_from_csr(CSR_INSTRET),
+ 1,
+ SBI_EXT_PMU_CFG_FLAG_CLEAR_VALUE,
+ test_eid,
+ 0);
+
+ if (ret.error) {
+ free(sbi_ext_pmu_ctr_csr_map);
+ fdt_pmu_free();
+ report_fail("failed to configure counter (error=%ld)", ret.error);
+ report_prefix_popn(2);
+ return;
+ }
+
+ test_counter_value = pmu_get_instret();
+
+ report(test_counter_value < instret_count,
+ "expected instret count to reset (%ld < %ld)", test_counter_value, instret_count);
+
+ set_cidx_code(test_eid, SBI_EXT_PMU_HW_CPU_CYCLES);
+ test_counter_idx = sbi_ext_pmu_get_first_counter_for_hw_event(test_eid);
+
+ report_info("first counter for test hw event %ld is %d", test_eid, test_counter_idx);
+
+ if (test_counter_idx <= 0) {
+ report_skip("failed to get first counter for test hw event");
+ } else {
+ test_counter_value = pmu_get_cycles();
+ ret = sbi_pmu_counter_config_matching(test_counter_idx, 0, SBI_EXT_PMU_CFG_FLAG_CLEAR_VALUE,
+ test_eid, 0);
+ report(ret.error == SBI_ERR_INVALID_PARAM,
+ "expected %d when counter_idx_mask == 0, got %ld", SBI_ERR_INVALID_PARAM, ret.error);
+ report(pmu_get_cycles() > test_counter_value,
+ "expected cycle counter to be unaffected when configuring counter %d", test_counter_idx);
+ }
+
+ test_ctr = sbi_ext_pmu_get_candidate_hw_counter_for_test();
+ test_counter_idx = test_ctr.ctr_idx;
+ test_eid = test_ctr.eid;
+
+ report_info("testing hardware counter %d with event %ld", test_counter_idx, test_eid);
+
+ ret = sbi_pmu_counter_config_matching(test_counter_idx, 1,
+ SBI_EXT_PMU_CFG_FLAG_SKIP_MATCH,
+ test_eid, 0);
+ report(ret.error == SBI_ERR_INVALID_PARAM,
+ "expected %d when skipping match before configuring counter, got %ld",
+ SBI_ERR_INVALID_PARAM, ret.error);
+
+ ret = sbi_pmu_counter_config_matching(test_counter_idx, 1,
+ SBI_EXT_PMU_CFG_FLAG_CLEAR_VALUE | SBI_EXT_PMU_CFG_FLAG_AUTO_START,
+ test_eid, 0);
+ if (ret.error) {
+ free(sbi_ext_pmu_ctr_csr_map);
+ fdt_pmu_free();
+ report_fail("failed to configure counter (error=%ld)", ret.error);
+ report_prefix_popn(2);
+ return;
+ }
+ report(ret.value == test_counter_idx,
+ "expected counter %d to be configured (%ld == %d)", test_counter_idx, ret.value, test_counter_idx);
+
+ test_counter_value = pmu_get_counter(sbi_ext_pmu_ctr_csr_map[test_counter_idx].csr);
+
+ report(test_counter_value > 0,
+ "expected counter %d to auto-start (%ld > 0)",
+ test_counter_idx,
+ test_counter_value);
+
+ test_eid = sbi_ext_pmu_get_first_unsupported_hw_event(test_counter_idx);
+ ret = sbi_pmu_counter_config_matching(test_counter_idx, 1,
+ SBI_EXT_PMU_CFG_FLAG_CLEAR_VALUE | SBI_EXT_PMU_CFG_FLAG_AUTO_START,
+ test_eid, 0);
+ report(ret.error == SBI_ERR_NOT_SUPPORTED,
+ "expected counter %d to be unable to monitor event %ld, got %ld",
+ test_counter_idx, test_eid, ret.error);
+
+ test_eid = test_ctr.eid;
+
+ report_prefix_pop();
+
+ report_prefix_push("sbi_pmu_counter_start");
+
+ ret = sbi_pmu_counter_start(test_counter_idx, 0, 0, 0);
+ report(ret.error == SBI_ERR_INVALID_PARAM,
+ "expected %d when counter_idx_mask == 0, got %ld", SBI_ERR_INVALID_PARAM, ret.error);
+
+ ret = sbi_pmu_counter_start(test_counter_idx, 1, 0, 0);
+ report(ret.error == SBI_ERR_ALREADY_STARTED,
+ "expected counter %d to be already started, got %ld", test_counter_idx, ret.error);
+
+ report_prefix_pop();
+
+ report_prefix_push("sbi_pmu_counter_stop");
+
+ ret = sbi_pmu_counter_stop(test_counter_idx, 0, 0);
+ report(ret.error == SBI_ERR_INVALID_PARAM,
+ "expected %d when counter_idx_mask == 0, got %ld", SBI_ERR_INVALID_PARAM, ret.error);
+
+ ret = sbi_pmu_counter_stop(test_counter_idx, 1, 0);
+ report(ret.error == SBI_SUCCESS,
+ "expected counter %d to be stopped, got %ld", test_counter_idx, ret.error);
+
+ ret = sbi_pmu_counter_stop(test_counter_idx, 1, 0);
+ report(ret.error == SBI_ERR_ALREADY_STOPPED,
+ "expected counter %d to be already stopped, got %ld", test_counter_idx, ret.error);
+
+ report_prefix_pop();
+
+ report_prefix_push("sbi_pmu_counter_start");
+
+ ret = sbi_pmu_counter_start(test_counter_idx, 1, SBI_EXT_PMU_START_SET_INIT_VALUE,
+ SBI_PMU_COUNTER_TEST_INIT_VALUE);
+ report(ret.error == SBI_SUCCESS,
+ "expected counter %d to be started with initial value, got %ld", test_counter_idx, ret.error);
+
+ test_counter_value = pmu_get_counter(sbi_ext_pmu_ctr_csr_map[test_counter_idx].csr);
+ report(test_counter_value > SBI_PMU_COUNTER_TEST_INIT_VALUE,
+ "expected counter %d to start with initial value (%ld > %d)",
+ test_counter_idx, test_counter_value, SBI_PMU_COUNTER_TEST_INIT_VALUE);
+
+ report_prefix_pop();
+
+ report_prefix_push("sbi_pmu_counter_stop");
+
+ ret = sbi_pmu_counter_stop(test_counter_idx, 1, 0);
+ report(ret.error == SBI_SUCCESS,
+ "expected counter %d to be stopped, got %ld", test_counter_idx, ret.error);
+
+ report_prefix_pop();
+
+ report_prefix_push("sbi_pmu_counter_fw_read");
+
+ ret = sbi_pmu_counter_fw_read(number_of_counters);
+ report(ret.error == SBI_ERR_INVALID_PARAM,
+ "expected %d when counter_idx == num_counters, got %ld", SBI_ERR_INVALID_PARAM, ret.error);
+
+ ret = sbi_pmu_counter_fw_read_hi(number_of_counters);
+ report(ret.error == SBI_ERR_INVALID_PARAM,
+ "expected %d when counter_idx == num_counters, got %ld", SBI_ERR_INVALID_PARAM, ret.error);
+
+ test_ctr = sbi_ext_pmu_get_candidate_fw_counter_for_test();
+ test_counter_idx = test_ctr.ctr_idx;
+ test_eid = test_ctr.eid;
+
+ if (test_counter_idx < 0) {
+ free(sbi_ext_pmu_ctr_csr_map);
+ fdt_pmu_free();
+ report_skip("no firmware counters available for testing");
+ report_prefix_popn(2);
+ return;
+ }
+
+ report_info("testing firmware counter %d with event 0x%lx", test_counter_idx, test_eid);
+
+ ret = sbi_pmu_counter_config_matching(test_counter_idx, 1,
+ SBI_EXT_PMU_CFG_FLAG_CLEAR_VALUE | SBI_EXT_PMU_CFG_FLAG_AUTO_START,
+ test_eid, 0);
+ if (ret.error) {
+ free(sbi_ext_pmu_ctr_csr_map);
+ fdt_pmu_free();
+ report_fail("failed to configure counter (error=%ld)", ret.error);
+ report_prefix_popn(2);
+ return;
+ }
+ report(ret.value == test_counter_idx,
+ "expected counter %d to be configured (%ld == %d)", test_counter_idx, ret.value, test_counter_idx);
+
+ test_counter_value = sbi_ext_pmu_read_fw_counter(test_counter_idx);
+
+ report(test_counter_value == set_timer_count,
+ "expected counter %d to be cleared (%ld == %ld)",
+ test_counter_idx,
+ test_counter_value,
+ set_timer_count);
+
+ ret = sbi_pmu_counter_start(test_counter_idx, 0, 0, 0);
+ report(ret.error == SBI_ERR_INVALID_PARAM,
+ "expected %d when counter_idx_mask == 0, got %ld", SBI_ERR_INVALID_PARAM, ret.error);
+
+ ret = sbi_pmu_counter_start(test_counter_idx, 1, 0, 0);
+ report(ret.error == SBI_ERR_ALREADY_STARTED,
+ "expected counter %d to be already started, got %ld", test_counter_idx, ret.error);
+
+ sbi_set_timer(0);
+ set_timer_count++;
+ test_counter_value = sbi_ext_pmu_read_fw_counter(test_counter_idx);
+
+ report(test_counter_value == set_timer_count,
+ "expected counter %d to have incremented (%ld == %ld)",
+ test_counter_idx,
+ test_counter_value,
+ set_timer_count);
+
+ sbi_set_timer(ULONG_MAX);
+ set_timer_count++;
+ test_counter_value = sbi_ext_pmu_read_fw_counter(test_counter_idx);
+
+ report(test_counter_value == set_timer_count,
+ "expected counter %d to have incremented (%ld == %ld)",
+ test_counter_idx,
+ test_counter_value,
+ set_timer_count);
+
+ ret = sbi_pmu_counter_stop(test_counter_idx, 0, 0);
+ report(ret.error == SBI_ERR_INVALID_PARAM,
+ "expected %d when counter_idx_mask == 0, got %ld", SBI_ERR_INVALID_PARAM, ret.error);
+
+ ret = sbi_pmu_counter_stop(test_counter_idx, 1, 0);
+ report(ret.error == SBI_SUCCESS,
+ "expected counter %d to be stopped, got %ld", test_counter_idx, ret.error);
+
+ ret = sbi_pmu_counter_stop(test_counter_idx, 1, 0);
+ report(ret.error == SBI_ERR_ALREADY_STOPPED,
+ "expected counter %d to be already stopped, got %ld", test_counter_idx, ret.error);
+
+ sbi_set_timer(ULONG_MAX);
+ test_counter_value = sbi_ext_pmu_read_fw_counter(test_counter_idx);
+
+ report(test_counter_value == set_timer_count,
+ "expected counter %d to be unchanged after stop (%ld == %ld)",
+ test_counter_idx,
+ test_counter_value,
+ set_timer_count);
+
+ free(sbi_ext_pmu_ctr_csr_map);
+ fdt_pmu_free();
+ report_prefix_popn(2);
+}
diff --git a/riscv/sbi.c b/riscv/sbi.c
index 3b8aadce..fdb6a38a 100644
--- a/riscv/sbi.c
+++ b/riscv/sbi.c
@@ -32,6 +32,7 @@
#define HIGH_ADDR_BOUNDARY ((phys_addr_t)1 << 32)
+void check_pmu(void);
void check_sse(void);
void check_fwft(void);
@@ -1557,6 +1558,7 @@ int main(int argc, char **argv)
check_time();
check_ipi();
check_hsm();
+ check_pmu();
check_dbcn();
check_susp();
check_sse();
--
2.43.0