From: yunhui cui <cuiyunhui@bytedance.com>
To: Atish Patra <atishp@rivosinc.com>
Cc: Paul Walmsley <paul.walmsley@sifive.com>,
	Palmer Dabbelt <palmer@dabbelt.com>,
	 Rob Herring <robh@kernel.org>,
	Krzysztof Kozlowski <krzk+dt@kernel.org>,
	Conor Dooley <conor+dt@kernel.org>,
	 Anup Patel <anup@brainfault.org>,
	Atish Patra <atishp@atishpatra.org>,
	 Will Deacon <will@kernel.org>,
	Mark Rutland <mark.rutland@arm.com>,
	 Peter Zijlstra <peterz@infradead.org>,
	Ingo Molnar <mingo@redhat.com>,
	 Arnaldo Carvalho de Melo <acme@kernel.org>,
	Namhyung Kim <namhyung@kernel.org>,
	 Alexander Shishkin <alexander.shishkin@linux.intel.com>,
	Jiri Olsa <jolsa@kernel.org>,  Ian Rogers <irogers@google.com>,
	Adrian Hunter <adrian.hunter@intel.com>,
	weilin.wang@intel.com,  linux-riscv@lists.infradead.org,
	linux-kernel@vger.kernel.org,  Conor Dooley <conor@kernel.org>,
	devicetree@vger.kernel.org, kvm@vger.kernel.org,
	 kvm-riscv@lists.infradead.org,
	linux-arm-kernel@lists.infradead.org,
	 linux-perf-users@vger.kernel.org
Subject: Re: [External] [PATCH v5 12/21] RISC-V: perf: Modify the counter discovery mechanism
Date: Tue, 23 Sep 2025 14:12:03 +0800
Message-ID: <CAEEQ3wm-TGcRFjmb7cw5K-M13CicwgJSLZrgY1KMZA5SgUjziw@mail.gmail.com>
In-Reply-To: <20250327-counter_delegation-v5-12-1ee538468d1b@rivosinc.com>

Hi Atish,

On Fri, Mar 28, 2025 at 3:42 AM Atish Patra <atishp@rivosinc.com> wrote:
>
> If both counter delegation and SBI PMU are present, counter
> delegation will be used for hardware PMU counters while the SBI PMU
> will be used for firmware counters. Thus, the driver has to probe
> the counter info via the SBI PMU to distinguish the firmware counters.
>
> The hybrid scheme also requires improved informational logging
> messages to indicate to the user which underlying interface is
> used for each use case.
>
> Signed-off-by: Atish Patra <atishp@rivosinc.com>
> ---
>  drivers/perf/riscv_pmu_dev.c | 130 ++++++++++++++++++++++++++++++++-----------
>  1 file changed, 96 insertions(+), 34 deletions(-)
>
> diff --git a/drivers/perf/riscv_pmu_dev.c b/drivers/perf/riscv_pmu_dev.c
> index 6cebbc16bfe4..c0397bd68b91 100644
> --- a/drivers/perf/riscv_pmu_dev.c
> +++ b/drivers/perf/riscv_pmu_dev.c
> @@ -66,6 +66,20 @@ static bool sbi_v2_available;
>  static DEFINE_STATIC_KEY_FALSE(sbi_pmu_snapshot_available);
>  #define sbi_pmu_snapshot_available() \
>         static_branch_unlikely(&sbi_pmu_snapshot_available)
> +static DEFINE_STATIC_KEY_FALSE(riscv_pmu_sbi_available);
> +static DEFINE_STATIC_KEY_FALSE(riscv_pmu_cdeleg_available);
> +
> +/* Avoid unnecessary code patching in the one-time boot path */
> +#define riscv_pmu_cdeleg_available_boot() \
> +       static_key_enabled(&riscv_pmu_cdeleg_available)
> +#define riscv_pmu_sbi_available_boot() \
> +       static_key_enabled(&riscv_pmu_sbi_available)
> +
> +/* Perform a runtime code patching with static key */
> +#define riscv_pmu_cdeleg_available() \
> +       static_branch_unlikely(&riscv_pmu_cdeleg_available)
> +#define riscv_pmu_sbi_available() \
> +               static_branch_likely(&riscv_pmu_sbi_available)
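
A side note to restate my understanding of the two flavors above (this
is just the generic jump_label behavior, nothing specific to this
patch): static_key_enabled() is a plain read of the key's state, which
is fine for the one-time boot path, while static_branch_unlikely() and
static_branch_likely() emit a branch site that is patched when the key
is flipped. A rough usage sketch, with do_deleg_work() as a made-up
placeholder for illustration only:

  /* One-time boot path: plain read of the key, no code patching. */
  if (riscv_pmu_cdeleg_available_boot())
          num_deleg_counters = rvpmu_deleg_find_ctrs();

  /* Runtime path: compiled as a patchable jump, so the check is
   * essentially free once the key has been set during boot. */
  if (riscv_pmu_cdeleg_available())
          do_deleg_work();
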
>
>  static struct attribute *riscv_arch_formats_attr[] = {
>         &format_attr_event.attr,
> @@ -88,7 +102,8 @@ static int sysctl_perf_user_access __read_mostly = SYSCTL_USER_ACCESS;
>
>  /*
>   * This structure is SBI specific but counter delegation also require counter
> - * width, csr mapping. Reuse it for now.
> + * width, csr mapping. Reuse it for now as we can have firmware counters for
> + * platforms with counter delegation support.
>   * RISC-V doesn't have heterogeneous harts yet. This need to be part of
>   * per_cpu in case of harts with different pmu counters
>   */
> @@ -100,6 +115,8 @@ static unsigned int riscv_pmu_irq;
>
>  /* Cache the available counters in a bitmask */
>  static unsigned long cmask;
> +/* Cache the available firmware counters in another bitmask */
> +static unsigned long firmware_cmask;
>
>  struct sbi_pmu_event_data {
>         union {
> @@ -780,34 +797,38 @@ static int rvpmu_sbi_find_num_ctrs(void)
>                 return sbi_err_map_linux_errno(ret.error);
>  }
>
> -static int rvpmu_sbi_get_ctrinfo(int nctr, unsigned long *mask)
> +static u32 rvpmu_deleg_find_ctrs(void)
> +{
> +       /* TODO */
> +       return 0;
> +}
> +
> +static int rvpmu_sbi_get_ctrinfo(u32 nsbi_ctr, u32 *num_fw_ctr, u32 *num_hw_ctr)
>  {
>         struct sbiret ret;
> -       int i, num_hw_ctr = 0, num_fw_ctr = 0;
> +       int i;
>         union sbi_pmu_ctr_info cinfo;
>
> -       pmu_ctr_list = kcalloc(nctr, sizeof(*pmu_ctr_list), GFP_KERNEL);
> -       if (!pmu_ctr_list)
> -               return -ENOMEM;
> -
> -       for (i = 0; i < nctr; i++) {
> +       for (i = 0; i < nsbi_ctr; i++) {
>                 ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_GET_INFO, i, 0, 0, 0, 0, 0);
>                 if (ret.error)
>                         /* The logical counter ids are not expected to be contiguous */
>                         continue;
>
> -               *mask |= BIT(i);
> -
>                 cinfo.value = ret.value;
> -               if (cinfo.type == SBI_PMU_CTR_TYPE_FW)
> -                       num_fw_ctr++;
> -               else
> -                       num_hw_ctr++;
> -               pmu_ctr_list[i].value = cinfo.value;
> +               if (cinfo.type == SBI_PMU_CTR_TYPE_FW) {
> +                       /* Track firmware counters in a different mask */
> +                       firmware_cmask |= BIT(i);
> +                       pmu_ctr_list[i].value = cinfo.value;
> +                       *num_fw_ctr = *num_fw_ctr + 1;
> +               } else if (cinfo.type == SBI_PMU_CTR_TYPE_HW &&
> +                          !riscv_pmu_cdeleg_available_boot()) {
> +                       *num_hw_ctr = *num_hw_ctr + 1;
> +                       cmask |= BIT(i);
> +                       pmu_ctr_list[i].value = cinfo.value;
> +               }
>         }
>
> -       pr_info("%d firmware and %d hardware counters\n", num_fw_ctr, num_hw_ctr);
> -
>         return 0;
>  }
>
> @@ -1069,16 +1090,41 @@ static void rvpmu_ctr_stop(struct perf_event *event, unsigned long flag)
>         /* TODO: Counter delegation implementation */
>  }
>
> -static int rvpmu_find_num_ctrs(void)
> +static int rvpmu_find_ctrs(void)
>  {
> -       return rvpmu_sbi_find_num_ctrs();
> -       /* TODO: Counter delegation implementation */
> -}
> +       u32 num_sbi_counters = 0, num_deleg_counters = 0;
> +       u32 num_hw_ctr = 0, num_fw_ctr = 0, num_ctr = 0;
> +       /*
> +        * We don't know how many firmware counters are available. Just allocate
> +        * for the maximum number of counters the driver can support. The default is 64 anyway.
> +        */
> +       pmu_ctr_list = kcalloc(RISCV_MAX_COUNTERS, sizeof(*pmu_ctr_list),
> +                              GFP_KERNEL);
> +       if (!pmu_ctr_list)
> +               return -ENOMEM;
>
> -static int rvpmu_get_ctrinfo(int nctr, unsigned long *mask)
> -{
> -       return rvpmu_sbi_get_ctrinfo(nctr, mask);
> -       /* TODO: Counter delegation implementation */
> +       if (riscv_pmu_cdeleg_available_boot())
> +               num_deleg_counters = rvpmu_deleg_find_ctrs();
> +
> +       /* This is required for firmware counters even if the above is true */
> +       if (riscv_pmu_sbi_available_boot()) {
> +               num_sbi_counters = rvpmu_sbi_find_num_ctrs();
> +               /* cache all the information about counters now */
> +               rvpmu_sbi_get_ctrinfo(num_sbi_counters, &num_fw_ctr, &num_hw_ctr);
> +       }
> +
> +       if (num_sbi_counters > RISCV_MAX_COUNTERS || num_deleg_counters > RISCV_MAX_COUNTERS)
> +               return -ENOSPC;
> +
> +       if (riscv_pmu_cdeleg_available_boot()) {
> +               pr_info("%u firmware and %u hardware counters\n", num_fw_ctr, num_deleg_counters);
> +               num_ctr = num_fw_ctr + num_deleg_counters;
> +       } else {
> +               pr_info("%u firmware and %u hardware counters\n", num_fw_ctr, num_hw_ctr);
> +               num_ctr = num_sbi_counters;
> +       }
> +
> +       return num_ctr;
>  }
>
>  static int rvpmu_event_map(struct perf_event *event, u64 *econfig)
> @@ -1379,12 +1425,21 @@ static int rvpmu_device_probe(struct platform_device *pdev)
>         int ret = -ENODEV;
>         int num_counters;
>
> -       pr_info("SBI PMU extension is available\n");
> +       if (riscv_pmu_cdeleg_available_boot()) {
> +               pr_info("hpmcounters will use the counter delegation ISA extension\n");
> +               if (riscv_pmu_sbi_available_boot())
> +                       pr_info("Firmware counters will use SBI PMU extension\n");
> +               else
> +                       pr_info("Firmware counters will not be available as SBI PMU extension is not present\n");
> +       } else if (riscv_pmu_sbi_available_boot()) {
> +               pr_info("Both hpmcounters and firmware counters will use SBI PMU extension\n");
> +       }
> +
>         pmu = riscv_pmu_alloc();
>         if (!pmu)
>                 return -ENOMEM;
>
> -       num_counters = rvpmu_find_num_ctrs();
> +       num_counters = rvpmu_find_ctrs();
>         if (num_counters < 0) {
>                 pr_err("SBI PMU extension doesn't provide any counters\n");
>                 goto out_free;
> @@ -1396,9 +1451,6 @@ static int rvpmu_device_probe(struct platform_device *pdev)
>                 pr_info("SBI returned more than maximum number of counters. Limiting the number of counters to %d\n", num_counters);
>         }
>
> -       /* cache all the information about counters now */
> -       if (rvpmu_get_ctrinfo(num_counters, &cmask))
> -               goto out_free;
>
>         ret = rvpmu_setup_irqs(pmu, pdev);
>         if (ret < 0) {
> @@ -1488,13 +1540,23 @@ static int __init rvpmu_devinit(void)
>         int ret;
>         struct platform_device *pdev;
>
> -       if (sbi_spec_version < sbi_mk_version(0, 3) ||
> -           !sbi_probe_extension(SBI_EXT_PMU)) {
> -               return 0;
> -       }
> +       if (sbi_spec_version >= sbi_mk_version(0, 3) &&
> +           sbi_probe_extension(SBI_EXT_PMU))
> +               static_branch_enable(&riscv_pmu_sbi_available);
>
>         if (sbi_spec_version >= sbi_mk_version(2, 0))
>                 sbi_v2_available = true;
> +       /*
> +        * We need all three extensions to be present to access the counters
> +        * in S-mode via Supervisor Counter delegation.
> +        */
> +       if (riscv_isa_extension_available(NULL, SSCCFG) &&
> +           riscv_isa_extension_available(NULL, SMCDELEG) &&

Is there really a need to check SMCDELEG (a machine-level extension) in
the kernel, or can this be done directly via SSCCFG or
sbi_probe_extension()? If so, the #define RISCV_ISA_EXT_SMCDELEG 98
would also not need to be defined in the kernel.
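
To make the question concrete, a minimal sketch of the simplification I
have in mind; purely illustrative, and it assumes the platform only
advertises Ssccfg to S-mode when Smcdeleg is actually implemented and
enabled by the M-mode firmware:

  /* Hypothetical variant of the check in rvpmu_devinit(): */
  if (riscv_isa_extension_available(NULL, SSCCFG) &&
      riscv_isa_extension_available(NULL, SSCSRIND))
          static_branch_enable(&riscv_pmu_cdeleg_available);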

> +           riscv_isa_extension_available(NULL, SSCSRIND))
> +               static_branch_enable(&riscv_pmu_cdeleg_available);
> +
> +       if (!(riscv_pmu_sbi_available_boot() || riscv_pmu_cdeleg_available_boot()))
> +               return 0;
>
>         ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_RISCV_STARTING,
>                                       "perf/riscv/pmu:starting",
>
> --
> 2.43.0
>
>

Thanks,
Yunhui


Thread overview: 30+ messages
2025-03-27 19:35 [PATCH v5 00/21] Add Counter delegation ISA extension support Atish Patra
2025-03-27 19:35 ` [PATCH v5 01/21] perf pmu-events: Add functions in jevent.py to parse counter and event info for hardware aware grouping Atish Patra
2025-04-23  0:13   ` Atish Patra
2025-03-27 19:35 ` [PATCH v5 02/21] RISC-V: Add Sxcsrind ISA extension CSR definitions Atish Patra
2025-03-27 19:35 ` [PATCH v5 03/21] RISC-V: Add Sxcsrind ISA extension definition and parsing Atish Patra
2025-03-27 19:35 ` [PATCH v5 04/21] dt-bindings: riscv: add Sxcsrind ISA extension description Atish Patra
2025-03-27 19:35 ` [PATCH v5 05/21] RISC-V: Define indirect CSR access helpers Atish Patra
2025-03-27 19:35 ` [PATCH v5 06/21] RISC-V: Add Smcntrpmf extension parsing Atish Patra
2025-03-27 19:35 ` [PATCH v5 07/21] dt-bindings: riscv: add Smcntrpmf ISA extension description Atish Patra
2025-03-27 19:35 ` [PATCH v5 08/21] RISC-V: Add Sscfg extension CSR definition Atish Patra
2025-03-27 19:35 ` [PATCH v5 09/21] RISC-V: Add Ssccfg/Smcdeleg ISA extension definition and parsing Atish Patra
2025-03-27 19:35 ` [PATCH v5 10/21] dt-bindings: riscv: add Counter delegation ISA extensions description Atish Patra
2025-03-31 15:38   ` Conor Dooley
2025-03-27 19:35 ` [PATCH v5 11/21] RISC-V: perf: Restructure the SBI PMU code Atish Patra
2025-04-04 13:49   ` Will Deacon
2025-04-23  0:02     ` Atish Patra
2025-03-27 19:35 ` [PATCH v5 12/21] RISC-V: perf: Modify the counter discovery mechanism Atish Patra
2025-09-23  6:12   ` yunhui cui [this message]
2025-03-27 19:35 ` [PATCH v5 13/21] RISC-V: perf: Add a mechanism to defined legacy event encoding Atish Patra
2025-03-27 19:35 ` [PATCH v5 14/21] RISC-V: perf: Implement supervisor counter delegation support Atish Patra
2025-08-28  9:56   ` [External] " yunhui cui
2025-03-27 19:35 ` [PATCH v5 15/21] RISC-V: perf: Skip PMU SBI extension when not implemented Atish Patra
2025-03-27 19:35 ` [PATCH v5 16/21] RISC-V: perf: Use config2/vendor table for event to counter mapping Atish Patra
2025-03-27 19:35 ` [PATCH v5 17/21] RISC-V: perf: Add legacy event encodings via sysfs Atish Patra
2025-03-27 19:35 ` [PATCH v5 18/21] RISC-V: perf: Add Qemu virt machine events Atish Patra
2025-09-11 13:24   ` [External] " yunhui cui
2025-03-27 19:36 ` [PATCH v5 19/21] tools/perf: Support event code for arch standard events Atish Patra
2025-03-27 19:36 ` [PATCH v5 20/21] tools/perf: Pass the Counter constraint values in the pmu events Atish Patra
2025-04-23  0:17   ` Atish Patra
2025-03-27 19:36 ` [PATCH v5 21/21] Sync empty-pmu-events.c with autogenerated one Atish Patra
