From: Alistair Francis <alistair23@gmail.com>
To: Jay Chang <jay.chang@sifive.com>
Cc: qemu-devel@nongnu.org, qemu-riscv@nongnu.org,
Palmer Dabbelt <palmer@dabbelt.com>,
Alistair Francis <alistair.francis@wdc.com>,
Weiwei Li <liwei1518@gmail.com>,
Daniel Henrique Barboza <dbarboza@ventanamicro.com>,
Liu Zhiwei <zhiwei_liu@linux.alibaba.com>,
Frank Chang <frank.chang@sifive.com>
Subject: Re: [PATCH v5 2/2] target/riscv: Make PMP region count configurable
Date: Mon, 9 Jun 2025 13:05:22 +1000
Message-ID: <CAKmqyKOt+QJgWPH9osWQsioFch4icJzExR69kM1vCCx6qxk_0g@mail.gmail.com>
In-Reply-To: <20250606072525.17313-3-jay.chang@sifive.com>
On Fri, Jun 6, 2025 at 5:28 PM Jay Chang <jay.chang@sifive.com> wrote:
>
> Previously, the number of PMP regions was hardcoded to 16 in QEMU.
> This patch replaces the fixed value with a new `pmp_regions` field,
> allowing platforms to configure the number of PMP regions.
>
> If no specific value is provided, the default number of PMP regions
> remains 16 to preserve the existing behavior.
>
> A new CPU parameter num-pmp-regions has been introduced to the QEMU
> command line. For example:
>
> -cpu rv64,g=true,c=true,pmp=true,num-pmp-regions=8
>
> Signed-off-by: Jay Chang <jay.chang@sifive.com>
> Reviewed-by: Frank Chang <frank.chang@sifive.com>
Thanks!
Applied to riscv-to-apply.next
Alistair
> ---
> target/riscv/cpu.c | 48 +++++++++++++++++++++++++++++--
> target/riscv/cpu.h | 3 +-
> target/riscv/cpu_cfg_fields.h.inc | 1 +
> target/riscv/csr.c | 5 +++-
> target/riscv/machine.c | 3 +-
> target/riscv/pmp.c | 28 ++++++++++++------
> 6 files changed, 74 insertions(+), 14 deletions(-)
>
> diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
> index 629ac37501..f4a09ae70f 100644
> --- a/target/riscv/cpu.c
> +++ b/target/riscv/cpu.c
> @@ -1117,6 +1117,7 @@ static void riscv_cpu_init(Object *obj)
> cpu->cfg.cbom_blocksize = 64;
> cpu->cfg.cbop_blocksize = 64;
> cpu->cfg.cboz_blocksize = 64;
> + cpu->cfg.pmp_regions = 16;
> cpu->env.vext_ver = VEXT_VERSION_1_00_0;
> cpu->cfg.max_satp_mode = -1;
>
> @@ -1568,6 +1569,46 @@ static const PropertyInfo prop_pmp = {
> .set = prop_pmp_set,
> };
>
> +static void prop_num_pmp_regions_set(Object *obj, Visitor *v, const char *name,
> + void *opaque, Error **errp)
> +{
> + RISCVCPU *cpu = RISCV_CPU(obj);
> + uint8_t value;
> +
> + visit_type_uint8(v, name, &value, errp);
> +
> + if (cpu->cfg.pmp_regions != value && riscv_cpu_is_vendor(obj)) {
> + cpu_set_prop_err(cpu, name, errp);
> + return;
> + }
> +
> + if (cpu->env.priv_ver < PRIV_VERSION_1_12_0 && value > OLD_MAX_RISCV_PMPS) {
> + error_setg(errp, "Number of PMP regions exceeds maximum available");
> + return;
> + } else if (value > MAX_RISCV_PMPS) {
> + error_setg(errp, "Number of PMP regions exceeds maximum available");
> + return;
> + }
> +
> + cpu_option_add_user_setting(name, value);
> + cpu->cfg.pmp_regions = value;
> +}
> +
> +static void prop_num_pmp_regions_get(Object *obj, Visitor *v, const char *name,
> + void *opaque, Error **errp)
> +{
> + uint8_t value = RISCV_CPU(obj)->cfg.pmp_regions;
> +
> + visit_type_uint8(v, name, &value, errp);
> +}
> +
> +static const PropertyInfo prop_num_pmp_regions = {
> + .type = "uint8",
> + .description = "num-pmp-regions",
> + .get = prop_num_pmp_regions_get,
> + .set = prop_num_pmp_regions_set,
> +};
> +
> static int priv_spec_from_str(const char *priv_spec_str)
> {
> int priv_version = -1;
> @@ -2567,6 +2608,7 @@ static const Property riscv_cpu_properties[] = {
>
> {.name = "mmu", .info = &prop_mmu},
> {.name = "pmp", .info = &prop_pmp},
> + {.name = "num-pmp-regions", .info = &prop_num_pmp_regions},
>
> {.name = "priv_spec", .info = &prop_priv_spec},
> {.name = "vext_spec", .info = &prop_vext_spec},
> @@ -2937,7 +2979,8 @@ static const TypeInfo riscv_cpu_type_infos[] = {
> .cfg.max_satp_mode = VM_1_10_MBARE,
> .cfg.ext_zifencei = true,
> .cfg.ext_zicsr = true,
> - .cfg.pmp = true
> + .cfg.pmp = true,
> + .cfg.pmp_regions = 8
> ),
>
> DEFINE_ABSTRACT_RISCV_CPU(TYPE_RISCV_CPU_SIFIVE_U, TYPE_RISCV_VENDOR_CPU,
> @@ -2948,7 +2991,8 @@ static const TypeInfo riscv_cpu_type_infos[] = {
> .cfg.ext_zifencei = true,
> .cfg.ext_zicsr = true,
> .cfg.mmu = true,
> - .cfg.pmp = true
> + .cfg.pmp = true,
> + .cfg.pmp_regions = 8
> ),
>
> #if defined(TARGET_RISCV32) || \
> diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
> index 229ade9ed9..67323a7d9d 100644
> --- a/target/riscv/cpu.h
> +++ b/target/riscv/cpu.h
> @@ -159,7 +159,8 @@ extern RISCVCPUImpliedExtsRule *riscv_multi_ext_implied_rules[];
>
> #define MMU_USER_IDX 3
>
> -#define MAX_RISCV_PMPS (16)
> +#define MAX_RISCV_PMPS (64)
> +#define OLD_MAX_RISCV_PMPS (16)
>
> #if !defined(CONFIG_USER_ONLY)
> #include "pmp.h"
> diff --git a/target/riscv/cpu_cfg_fields.h.inc b/target/riscv/cpu_cfg_fields.h.inc
> index 59f134a419..33c4f9bac8 100644
> --- a/target/riscv/cpu_cfg_fields.h.inc
> +++ b/target/riscv/cpu_cfg_fields.h.inc
> @@ -163,6 +163,7 @@ TYPED_FIELD(uint16_t, elen, 0)
> TYPED_FIELD(uint16_t, cbom_blocksize, 0)
> TYPED_FIELD(uint16_t, cbop_blocksize, 0)
> TYPED_FIELD(uint16_t, cboz_blocksize, 0)
> +TYPED_FIELD(uint8_t, pmp_regions, 0)
>
> TYPED_FIELD(int8_t, max_satp_mode, -1)
>
> diff --git a/target/riscv/csr.c b/target/riscv/csr.c
> index d6cd441133..6296ecd1e1 100644
> --- a/target/riscv/csr.c
> +++ b/target/riscv/csr.c
> @@ -738,7 +738,10 @@ static RISCVException dbltrp_hmode(CPURISCVState *env, int csrno)
> static RISCVException pmp(CPURISCVState *env, int csrno)
> {
> if (riscv_cpu_cfg(env)->pmp) {
> - if (csrno <= CSR_PMPCFG3) {
> + int max_pmpcfg = (env->priv_ver >= PRIV_VERSION_1_12_0) ?
> + CSR_PMPCFG15 : CSR_PMPCFG3;
> +
> + if (csrno <= max_pmpcfg) {
> uint32_t reg_index = csrno - CSR_PMPCFG0;
>
> /* TODO: RV128 restriction check */
> diff --git a/target/riscv/machine.c b/target/riscv/machine.c
> index c97e9ce9df..1600ec44f0 100644
> --- a/target/riscv/machine.c
> +++ b/target/riscv/machine.c
> @@ -36,8 +36,9 @@ static int pmp_post_load(void *opaque, int version_id)
> RISCVCPU *cpu = opaque;
> CPURISCVState *env = &cpu->env;
> int i;
> + uint8_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
>
> - for (i = 0; i < MAX_RISCV_PMPS; i++) {
> + for (i = 0; i < pmp_regions; i++) {
> pmp_update_rule_addr(env, i);
> }
> pmp_update_rule_nums(env);
> diff --git a/target/riscv/pmp.c b/target/riscv/pmp.c
> index 5af295e410..3540327c9a 100644
> --- a/target/riscv/pmp.c
> +++ b/target/riscv/pmp.c
> @@ -122,7 +122,9 @@ uint32_t pmp_get_num_rules(CPURISCVState *env)
> */
> static inline uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t pmp_index)
> {
> - if (pmp_index < MAX_RISCV_PMPS) {
> + uint8_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
> +
> + if (pmp_index < pmp_regions) {
> return env->pmp_state.pmp[pmp_index].cfg_reg;
> }
>
> @@ -136,7 +138,9 @@ static inline uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t pmp_index)
> */
> static bool pmp_write_cfg(CPURISCVState *env, uint32_t pmp_index, uint8_t val)
> {
> - if (pmp_index < MAX_RISCV_PMPS) {
> + uint8_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
> +
> + if (pmp_index < pmp_regions) {
> if (env->pmp_state.pmp[pmp_index].cfg_reg == val) {
> /* no change */
> return false;
> @@ -236,9 +240,10 @@ void pmp_update_rule_addr(CPURISCVState *env, uint32_t pmp_index)
> void pmp_update_rule_nums(CPURISCVState *env)
> {
> int i;
> + uint8_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
>
> env->pmp_state.num_rules = 0;
> - for (i = 0; i < MAX_RISCV_PMPS; i++) {
> + for (i = 0; i < pmp_regions; i++) {
> const uint8_t a_field =
> pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg);
> if (PMP_AMATCH_OFF != a_field) {
> @@ -332,6 +337,7 @@ bool pmp_hart_has_privs(CPURISCVState *env, hwaddr addr,
> int pmp_size = 0;
> hwaddr s = 0;
> hwaddr e = 0;
> + uint8_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
>
> /* Short cut if no rules */
> if (0 == pmp_get_num_rules(env)) {
> @@ -356,7 +362,7 @@ bool pmp_hart_has_privs(CPURISCVState *env, hwaddr addr,
> * 1.10 draft priv spec states there is an implicit order
> * from low to high
> */
> - for (i = 0; i < MAX_RISCV_PMPS; i++) {
> + for (i = 0; i < pmp_regions; i++) {
> s = pmp_is_in_range(env, i, addr);
> e = pmp_is_in_range(env, i, addr + pmp_size - 1);
>
> @@ -527,8 +533,9 @@ void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
> {
> trace_pmpaddr_csr_write(env->mhartid, addr_index, val);
> bool is_next_cfg_tor = false;
> + uint8_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
>
> - if (addr_index < MAX_RISCV_PMPS) {
> + if (addr_index < pmp_regions) {
> if (env->pmp_state.pmp[addr_index].addr_reg == val) {
> /* no change */
> return;
> @@ -538,7 +545,7 @@ void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
> * In TOR mode, need to check the lock bit of the next pmp
> * (if there is a next).
> */
> - if (addr_index + 1 < MAX_RISCV_PMPS) {
> + if (addr_index + 1 < pmp_regions) {
> uint8_t pmp_cfg = env->pmp_state.pmp[addr_index + 1].cfg_reg;
> is_next_cfg_tor = PMP_AMATCH_TOR == pmp_get_a_field(pmp_cfg);
>
> @@ -573,8 +580,9 @@ void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
> target_ulong pmpaddr_csr_read(CPURISCVState *env, uint32_t addr_index)
> {
> target_ulong val = 0;
> + uint8_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
>
> - if (addr_index < MAX_RISCV_PMPS) {
> + if (addr_index < pmp_regions) {
> val = env->pmp_state.pmp[addr_index].addr_reg;
> trace_pmpaddr_csr_read(env->mhartid, addr_index, val);
> } else {
> @@ -592,6 +600,7 @@ void mseccfg_csr_write(CPURISCVState *env, target_ulong val)
> {
> int i;
> uint64_t mask = MSECCFG_MMWP | MSECCFG_MML;
> + uint8_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
> /* Update PMM field only if the value is valid according to Zjpm v1.0 */
> if (riscv_cpu_cfg(env)->ext_smmpm &&
> riscv_cpu_mxl(env) == MXL_RV64 &&
> @@ -603,7 +612,7 @@ void mseccfg_csr_write(CPURISCVState *env, target_ulong val)
>
> /* RLB cannot be enabled if it's already 0 and if any regions are locked */
> if (!MSECCFG_RLB_ISSET(env)) {
> - for (i = 0; i < MAX_RISCV_PMPS; i++) {
> + for (i = 0; i < pmp_regions; i++) {
> if (pmp_is_locked(env, i)) {
> val &= ~MSECCFG_RLB;
> break;
> @@ -659,6 +668,7 @@ target_ulong pmp_get_tlb_size(CPURISCVState *env, hwaddr addr)
> hwaddr tlb_sa = addr & ~(TARGET_PAGE_SIZE - 1);
> hwaddr tlb_ea = tlb_sa + TARGET_PAGE_SIZE - 1;
> int i;
> + uint8_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
>
> /*
> * If PMP is not supported or there are no PMP rules, the TLB page will not
> @@ -669,7 +679,7 @@ target_ulong pmp_get_tlb_size(CPURISCVState *env, hwaddr addr)
> return TARGET_PAGE_SIZE;
> }
>
> - for (i = 0; i < MAX_RISCV_PMPS; i++) {
> + for (i = 0; i < pmp_regions; i++) {
> if (pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg) == PMP_AMATCH_OFF) {
> continue;
> }
> --
> 2.48.1
>
>
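As a usage illustration beyond the command-line example in the commit message, here is a minimal, hypothetical sketch (not part of the patch) of how a board model might set the new property programmatically before realizing a CPU. The helper name and the choice of the rv64 base CPU type are arbitrary for the example; only the "pmp" and "num-pmp-regions" property names come from the patch above.

/*
 * Illustrative sketch only, not from the patch: a board model could
 * request a smaller PMP implementation by setting "num-pmp-regions"
 * on the CPU object before realize. Helper name and the rv64 base
 * CPU type are arbitrary choices for this example.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/qdev-core.h"
#include "target/riscv/cpu.h"

static RISCVCPU *example_create_cpu_with_8_pmp_regions(void)
{
    RISCVCPU *cpu = RISCV_CPU(object_new(TYPE_RISCV_CPU_BASE64));

    /* PMP must be enabled for the region count to take effect. */
    object_property_set_bool(OBJECT(cpu), "pmp", true, &error_fatal);

    /* Limit the implementation to 8 PMP entries (default remains 16). */
    object_property_set_uint(OBJECT(cpu), "num-pmp-regions", 8, &error_fatal);

    /* Realize the CPU so the configuration is validated and applied. */
    qdev_realize(DEVICE(cpu), NULL, &error_fatal);

    return cpu;
}

The equivalent command-line invocation, per the commit message, is -cpu rv64,pmp=true,num-pmp-regions=8.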
Thread overview: 5+ messages
2025-06-06 7:25 [PATCH v5 0/2] Extend and configure PMP region count Jay Chang
2025-06-06 7:25 ` [PATCH v5 1/2] target/riscv: Extend PMP region up to 64 Jay Chang
2025-06-06 7:25 ` [PATCH v5 2/2] target/riscv: Make PMP region count configurable Jay Chang
2025-06-09 2:47 ` Alistair Francis
2025-06-09 3:05 ` Alistair Francis [this message]