qemu-devel.nongnu.org archive mirror
 help / color / mirror / Atom feed
* [PATCH 0/2] Extend and configure PMP region count
@ 2025-04-21  9:46 Jay Chang
  2025-04-21  9:46 ` [PATCH 1/2] target/riscv: Extend PMP region up to 64 Jay Chang
  2025-04-21  9:46 ` [PATCH 2/2] target/riscv: Make PMP region count configurable Jay Chang
  0 siblings, 2 replies; 8+ messages in thread
From: Jay Chang @ 2025-04-21  9:46 UTC (permalink / raw)
  To: qemu-devel, qemu-riscv
  Cc: Palmer Dabbelt, Alistair Francis, Weiwei Li,
	Daniel Henrique Barboza, Liu Zhiwei, Jay Chang

The first patch extends the number of PMP regions supported up to 64,
following the RISC-V Privileged Specification (version >1.12), where
RV32 can have up to 64 PMP regions configured through 16 CSRs.

The second patch makes the PMP region count configurable via a new
CPU parameter `num-pmp-regions`. This allows platforms to adjust
the number of PMP regions without relying on a fixed default value.
If unspecified, the default remains 16 to preserve compatibility.

Jay Chang (2):
  target/riscv: Extend PMP region up to 64
  target/riscv: Make PMP region count configurable

 target/riscv/cpu.c      |  46 ++++++++++++++
 target/riscv/cpu.h      |   2 +-
 target/riscv/cpu_bits.h |  60 +++++++++++++++++++
 target/riscv/cpu_cfg.h  |   1 +
 target/riscv/csr.c      | 129 +++++++++++++++++++++++++++++++++++++++-
 target/riscv/machine.c  |   3 +-
 target/riscv/pmp.c      |  28 ++++++---
 7 files changed, 255 insertions(+), 14 deletions(-)

-- 
2.48.1



^ permalink raw reply	[flat|nested] 8+ messages in thread

* [PATCH 1/2] target/riscv: Extend PMP region up to 64
  2025-04-21  9:46 [PATCH 0/2] Extend and configure PMP region count Jay Chang
@ 2025-04-21  9:46 ` Jay Chang
  2025-04-23 11:22   ` Daniel Henrique Barboza
  2025-04-24 10:53   ` Alistair Francis
  2025-04-21  9:46 ` [PATCH 2/2] target/riscv: Make PMP region count configurable Jay Chang
  1 sibling, 2 replies; 8+ messages in thread
From: Jay Chang @ 2025-04-21  9:46 UTC (permalink / raw)
  To: qemu-devel, qemu-riscv
  Cc: Palmer Dabbelt, Alistair Francis, Weiwei Li,
	Daniel Henrique Barboza, Liu Zhiwei, Jay Chang, Frank Chang

According to the RISC-V Privileged Specification (version >1.12),
RV32 supports 16 CSRs (pmpcfg0–pmpcfg15) to configure 64 PMP regions
(pmpaddr0–pmpaddr63).

Reviewed-by: Frank Chang <frank.chang@sifive.com>
Signed-off-by: Jay Chang <jay.chang@sifive.com>
---
 target/riscv/cpu_bits.h |  60 +++++++++++++++++++
 target/riscv/csr.c      | 124 +++++++++++++++++++++++++++++++++++++++-
 2 files changed, 182 insertions(+), 2 deletions(-)

diff --git a/target/riscv/cpu_bits.h b/target/riscv/cpu_bits.h
index a30317c617..e6b3e28386 100644
--- a/target/riscv/cpu_bits.h
+++ b/target/riscv/cpu_bits.h
@@ -372,6 +372,18 @@
 #define CSR_PMPCFG1         0x3a1
 #define CSR_PMPCFG2         0x3a2
 #define CSR_PMPCFG3         0x3a3
+#define CSR_PMPCFG4         0x3a4
+#define CSR_PMPCFG5         0x3a5
+#define CSR_PMPCFG6         0x3a6
+#define CSR_PMPCFG7         0x3a7
+#define CSR_PMPCFG8         0x3a8
+#define CSR_PMPCFG9         0x3a9
+#define CSR_PMPCFG10        0x3aa
+#define CSR_PMPCFG11        0x3ab
+#define CSR_PMPCFG12        0x3ac
+#define CSR_PMPCFG13        0x3ad
+#define CSR_PMPCFG14        0x3ae
+#define CSR_PMPCFG15        0x3af
 #define CSR_PMPADDR0        0x3b0
 #define CSR_PMPADDR1        0x3b1
 #define CSR_PMPADDR2        0x3b2
@@ -388,6 +400,54 @@
 #define CSR_PMPADDR13       0x3bd
 #define CSR_PMPADDR14       0x3be
 #define CSR_PMPADDR15       0x3bf
+#define CSR_PMPADDR16       0x3c0
+#define CSR_PMPADDR17       0x3c1
+#define CSR_PMPADDR18       0x3c2
+#define CSR_PMPADDR19       0x3c3
+#define CSR_PMPADDR20       0x3c4
+#define CSR_PMPADDR21       0x3c5
+#define CSR_PMPADDR22       0x3c6
+#define CSR_PMPADDR23       0x3c7
+#define CSR_PMPADDR24       0x3c8
+#define CSR_PMPADDR25       0x3c9
+#define CSR_PMPADDR26       0x3ca
+#define CSR_PMPADDR27       0x3cb
+#define CSR_PMPADDR28       0x3cc
+#define CSR_PMPADDR29       0x3cd
+#define CSR_PMPADDR30       0x3ce
+#define CSR_PMPADDR31       0x3cf
+#define CSR_PMPADDR32       0x3d0
+#define CSR_PMPADDR33       0x3d1
+#define CSR_PMPADDR34       0x3d2
+#define CSR_PMPADDR35       0x3d3
+#define CSR_PMPADDR36       0x3d4
+#define CSR_PMPADDR37       0x3d5
+#define CSR_PMPADDR38       0x3d6
+#define CSR_PMPADDR39       0x3d7
+#define CSR_PMPADDR40       0x3d8
+#define CSR_PMPADDR41       0x3d9
+#define CSR_PMPADDR42       0x3da
+#define CSR_PMPADDR43       0x3db
+#define CSR_PMPADDR44       0x3dc
+#define CSR_PMPADDR45       0x3dd
+#define CSR_PMPADDR46       0x3de
+#define CSR_PMPADDR47       0x3df
+#define CSR_PMPADDR48       0x3e0
+#define CSR_PMPADDR49       0x3e1
+#define CSR_PMPADDR50       0x3e2
+#define CSR_PMPADDR51       0x3e3
+#define CSR_PMPADDR52       0x3e4
+#define CSR_PMPADDR53       0x3e5
+#define CSR_PMPADDR54       0x3e6
+#define CSR_PMPADDR55       0x3e7
+#define CSR_PMPADDR56       0x3e8
+#define CSR_PMPADDR57       0x3e9
+#define CSR_PMPADDR58       0x3ea
+#define CSR_PMPADDR59       0x3eb
+#define CSR_PMPADDR60       0x3ec
+#define CSR_PMPADDR61       0x3ed
+#define CSR_PMPADDR62       0x3ee
+#define CSR_PMPADDR63       0x3ef
 
 /* RNMI */
 #define CSR_MNSCRATCH       0x740
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
index 7948188356..f8f61ffff5 100644
--- a/target/riscv/csr.c
+++ b/target/riscv/csr.c
@@ -6088,6 +6088,30 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
     [CSR_PMPCFG1]    = { "pmpcfg1",   pmp, read_pmpcfg,  write_pmpcfg  },
     [CSR_PMPCFG2]    = { "pmpcfg2",   pmp, read_pmpcfg,  write_pmpcfg  },
     [CSR_PMPCFG3]    = { "pmpcfg3",   pmp, read_pmpcfg,  write_pmpcfg  },
+    [CSR_PMPCFG4]    = { "pmpcfg4",   pmp, read_pmpcfg,  write_pmpcfg,
+                         .min_priv_ver = PRIV_VERSION_1_12_0           },
+    [CSR_PMPCFG5]    = { "pmpcfg5",   pmp, read_pmpcfg,  write_pmpcfg,
+                         .min_priv_ver = PRIV_VERSION_1_12_0           },
+    [CSR_PMPCFG6]    = { "pmpcfg6",   pmp, read_pmpcfg,  write_pmpcfg,
+                         .min_priv_ver = PRIV_VERSION_1_12_0           },
+    [CSR_PMPCFG7]    = { "pmpcfg7",   pmp, read_pmpcfg,  write_pmpcfg,
+                         .min_priv_ver = PRIV_VERSION_1_12_0           },
+    [CSR_PMPCFG8]    = { "pmpcfg8",   pmp, read_pmpcfg,  write_pmpcfg,
+                         .min_priv_ver = PRIV_VERSION_1_12_0           },
+    [CSR_PMPCFG9]    = { "pmpcfg9",   pmp, read_pmpcfg,  write_pmpcfg,
+                         .min_priv_ver = PRIV_VERSION_1_12_0           },
+    [CSR_PMPCFG10]   = { "pmpcfg10",  pmp, read_pmpcfg,  write_pmpcfg,
+                         .min_priv_ver = PRIV_VERSION_1_12_0           },
+    [CSR_PMPCFG11]   = { "pmpcfg11",  pmp, read_pmpcfg,  write_pmpcfg,
+                         .min_priv_ver = PRIV_VERSION_1_12_0           },
+    [CSR_PMPCFG12]   = { "pmpcfg12",  pmp, read_pmpcfg,  write_pmpcfg,
+                         .min_priv_ver = PRIV_VERSION_1_12_0           },
+    [CSR_PMPCFG13]   = { "pmpcfg13",  pmp, read_pmpcfg,  write_pmpcfg,
+                         .min_priv_ver = PRIV_VERSION_1_12_0           },
+    [CSR_PMPCFG14]   = { "pmpcfg14",  pmp, read_pmpcfg,  write_pmpcfg,
+                         .min_priv_ver = PRIV_VERSION_1_12_0           },
+    [CSR_PMPCFG15]   = { "pmpcfg15",  pmp, read_pmpcfg,  write_pmpcfg,
+                         .min_priv_ver = PRIV_VERSION_1_12_0           },
     [CSR_PMPADDR0]   = { "pmpaddr0",  pmp, read_pmpaddr, write_pmpaddr },
     [CSR_PMPADDR1]   = { "pmpaddr1",  pmp, read_pmpaddr, write_pmpaddr },
     [CSR_PMPADDR2]   = { "pmpaddr2",  pmp, read_pmpaddr, write_pmpaddr },
@@ -6102,8 +6126,104 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
     [CSR_PMPADDR11]  = { "pmpaddr11", pmp, read_pmpaddr, write_pmpaddr },
     [CSR_PMPADDR12]  = { "pmpaddr12", pmp, read_pmpaddr, write_pmpaddr },
     [CSR_PMPADDR13]  = { "pmpaddr13", pmp, read_pmpaddr, write_pmpaddr },
-    [CSR_PMPADDR14] =  { "pmpaddr14", pmp, read_pmpaddr, write_pmpaddr },
-    [CSR_PMPADDR15] =  { "pmpaddr15", pmp, read_pmpaddr, write_pmpaddr },
+    [CSR_PMPADDR14]  = { "pmpaddr14", pmp, read_pmpaddr, write_pmpaddr },
+    [CSR_PMPADDR15]  = { "pmpaddr15", pmp, read_pmpaddr, write_pmpaddr },
+    [CSR_PMPADDR16]  = { "pmpaddr16", pmp, read_pmpaddr, write_pmpaddr,
+                         .min_priv_ver = PRIV_VERSION_1_12_0           },
+    [CSR_PMPADDR17]  = { "pmpaddr17", pmp, read_pmpaddr, write_pmpaddr,
+                         .min_priv_ver = PRIV_VERSION_1_12_0           },
+    [CSR_PMPADDR18]  = { "pmpaddr18", pmp, read_pmpaddr, write_pmpaddr,
+                         .min_priv_ver = PRIV_VERSION_1_12_0           },
+    [CSR_PMPADDR19]  = { "pmpaddr19", pmp, read_pmpaddr, write_pmpaddr,
+                         .min_priv_ver = PRIV_VERSION_1_12_0           },
+    [CSR_PMPADDR20]  = { "pmpaddr20", pmp, read_pmpaddr, write_pmpaddr,
+                         .min_priv_ver = PRIV_VERSION_1_12_0           },
+    [CSR_PMPADDR21]  = { "pmpaddr21", pmp, read_pmpaddr, write_pmpaddr,
+                         .min_priv_ver = PRIV_VERSION_1_12_0           },
+    [CSR_PMPADDR22]  = { "pmpaddr22", pmp, read_pmpaddr, write_pmpaddr,
+                         .min_priv_ver = PRIV_VERSION_1_12_0           },
+    [CSR_PMPADDR23]  = { "pmpaddr23", pmp, read_pmpaddr, write_pmpaddr,
+                         .min_priv_ver = PRIV_VERSION_1_12_0           },
+    [CSR_PMPADDR24]  = { "pmpaddr24", pmp, read_pmpaddr, write_pmpaddr,
+                         .min_priv_ver = PRIV_VERSION_1_12_0           },
+    [CSR_PMPADDR25]  = { "pmpaddr25", pmp, read_pmpaddr, write_pmpaddr,
+                         .min_priv_ver = PRIV_VERSION_1_12_0           },
+    [CSR_PMPADDR26]  = { "pmpaddr26", pmp, read_pmpaddr, write_pmpaddr,
+                         .min_priv_ver = PRIV_VERSION_1_12_0           },
+    [CSR_PMPADDR27]  = { "pmpaddr27", pmp, read_pmpaddr, write_pmpaddr,
+                         .min_priv_ver = PRIV_VERSION_1_12_0           },
+    [CSR_PMPADDR28]  = { "pmpaddr28", pmp, read_pmpaddr, write_pmpaddr,
+                         .min_priv_ver = PRIV_VERSION_1_12_0           },
+    [CSR_PMPADDR29]  = { "pmpaddr29", pmp, read_pmpaddr, write_pmpaddr,
+                         .min_priv_ver = PRIV_VERSION_1_12_0           },
+    [CSR_PMPADDR30]  = { "pmpaddr30", pmp, read_pmpaddr, write_pmpaddr,
+                         .min_priv_ver = PRIV_VERSION_1_12_0           },
+    [CSR_PMPADDR31]  = { "pmpaddr31", pmp, read_pmpaddr, write_pmpaddr,
+                         .min_priv_ver = PRIV_VERSION_1_12_0           },
+    [CSR_PMPADDR32]  = { "pmpaddr32", pmp, read_pmpaddr, write_pmpaddr,
+                         .min_priv_ver = PRIV_VERSION_1_12_0           },
+    [CSR_PMPADDR33]  = { "pmpaddr33", pmp, read_pmpaddr, write_pmpaddr,
+                         .min_priv_ver = PRIV_VERSION_1_12_0           },
+    [CSR_PMPADDR34]  = { "pmpaddr34", pmp, read_pmpaddr, write_pmpaddr,
+                         .min_priv_ver = PRIV_VERSION_1_12_0           },
+    [CSR_PMPADDR35]  = { "pmpaddr35", pmp, read_pmpaddr, write_pmpaddr,
+                         .min_priv_ver = PRIV_VERSION_1_12_0           },
+    [CSR_PMPADDR36]  = { "pmpaddr36", pmp, read_pmpaddr, write_pmpaddr,
+                         .min_priv_ver = PRIV_VERSION_1_12_0           },
+    [CSR_PMPADDR37]  = { "pmpaddr37", pmp, read_pmpaddr, write_pmpaddr,
+                         .min_priv_ver = PRIV_VERSION_1_12_0           },
+    [CSR_PMPADDR38]  = { "pmpaddr38", pmp, read_pmpaddr, write_pmpaddr,
+                         .min_priv_ver = PRIV_VERSION_1_12_0           },
+    [CSR_PMPADDR39]  = { "pmpaddr39", pmp, read_pmpaddr, write_pmpaddr,
+                         .min_priv_ver = PRIV_VERSION_1_12_0           },
+    [CSR_PMPADDR40]  = { "pmpaddr40", pmp, read_pmpaddr, write_pmpaddr,
+                         .min_priv_ver = PRIV_VERSION_1_12_0           },
+    [CSR_PMPADDR41]  = { "pmpaddr41", pmp, read_pmpaddr, write_pmpaddr,
+                         .min_priv_ver = PRIV_VERSION_1_12_0           },
+    [CSR_PMPADDR42]  = { "pmpaddr42", pmp, read_pmpaddr, write_pmpaddr,
+                         .min_priv_ver = PRIV_VERSION_1_12_0           },
+    [CSR_PMPADDR43]  = { "pmpaddr43", pmp, read_pmpaddr, write_pmpaddr,
+                         .min_priv_ver = PRIV_VERSION_1_12_0           },
+    [CSR_PMPADDR44]  = { "pmpaddr44", pmp, read_pmpaddr, write_pmpaddr,
+                         .min_priv_ver = PRIV_VERSION_1_12_0           },
+    [CSR_PMPADDR45]  = { "pmpaddr45", pmp, read_pmpaddr, write_pmpaddr,
+                         .min_priv_ver = PRIV_VERSION_1_12_0           },
+    [CSR_PMPADDR46]  = { "pmpaddr46", pmp, read_pmpaddr, write_pmpaddr,
+                         .min_priv_ver = PRIV_VERSION_1_12_0           },
+    [CSR_PMPADDR47]  = { "pmpaddr47", pmp, read_pmpaddr, write_pmpaddr,
+                         .min_priv_ver = PRIV_VERSION_1_12_0           },
+    [CSR_PMPADDR48]  = { "pmpaddr48", pmp, read_pmpaddr, write_pmpaddr,
+                         .min_priv_ver = PRIV_VERSION_1_12_0           },
+    [CSR_PMPADDR49]  = { "pmpaddr49", pmp, read_pmpaddr, write_pmpaddr,
+                         .min_priv_ver = PRIV_VERSION_1_12_0           },
+    [CSR_PMPADDR50]  = { "pmpaddr50", pmp, read_pmpaddr, write_pmpaddr,
+                         .min_priv_ver = PRIV_VERSION_1_12_0           },
+    [CSR_PMPADDR51]  = { "pmpaddr51", pmp, read_pmpaddr, write_pmpaddr,
+                         .min_priv_ver = PRIV_VERSION_1_12_0           },
+    [CSR_PMPADDR52]  = { "pmpaddr52", pmp, read_pmpaddr, write_pmpaddr,
+                         .min_priv_ver = PRIV_VERSION_1_12_0           },
+    [CSR_PMPADDR53]  = { "pmpaddr53", pmp, read_pmpaddr, write_pmpaddr,
+                         .min_priv_ver = PRIV_VERSION_1_12_0           },
+    [CSR_PMPADDR54]  = { "pmpaddr54", pmp, read_pmpaddr, write_pmpaddr,
+                         .min_priv_ver = PRIV_VERSION_1_12_0           },
+    [CSR_PMPADDR55]  = { "pmpaddr55", pmp, read_pmpaddr, write_pmpaddr,
+                         .min_priv_ver = PRIV_VERSION_1_12_0           },
+    [CSR_PMPADDR56]  = { "pmpaddr56", pmp, read_pmpaddr, write_pmpaddr,
+                         .min_priv_ver = PRIV_VERSION_1_12_0           },
+    [CSR_PMPADDR57]  = { "pmpaddr57", pmp, read_pmpaddr, write_pmpaddr,
+                         .min_priv_ver = PRIV_VERSION_1_12_0           },
+    [CSR_PMPADDR58]  = { "pmpaddr58", pmp, read_pmpaddr, write_pmpaddr,
+                         .min_priv_ver = PRIV_VERSION_1_12_0           },
+    [CSR_PMPADDR59]  = { "pmpaddr59", pmp, read_pmpaddr, write_pmpaddr,
+                         .min_priv_ver = PRIV_VERSION_1_12_0           },
+    [CSR_PMPADDR60]  = { "pmpaddr60", pmp, read_pmpaddr, write_pmpaddr,
+                         .min_priv_ver = PRIV_VERSION_1_12_0           },
+    [CSR_PMPADDR61]  = { "pmpaddr61", pmp, read_pmpaddr, write_pmpaddr,
+                         .min_priv_ver = PRIV_VERSION_1_12_0           },
+    [CSR_PMPADDR62]  = { "pmpaddr62", pmp, read_pmpaddr, write_pmpaddr,
+                         .min_priv_ver = PRIV_VERSION_1_12_0           },
+    [CSR_PMPADDR63]  = { "pmpaddr63", pmp, read_pmpaddr, write_pmpaddr,
+                         .min_priv_ver = PRIV_VERSION_1_12_0           },
 
     /* Debug CSRs */
     [CSR_TSELECT]   =  { "tselect",  debug, read_tselect,  write_tselect  },
-- 
2.48.1



^ permalink raw reply related	[flat|nested] 8+ messages in thread

* [PATCH 2/2] target/riscv: Make PMP region count configurable
  2025-04-21  9:46 [PATCH 0/2] Extend and configure PMP region count Jay Chang
  2025-04-21  9:46 ` [PATCH 1/2] target/riscv: Extend PMP region up to 64 Jay Chang
@ 2025-04-21  9:46 ` Jay Chang
  2025-04-23 11:30   ` Daniel Henrique Barboza
  2025-04-24 10:55   ` Alistair Francis
  1 sibling, 2 replies; 8+ messages in thread
From: Jay Chang @ 2025-04-21  9:46 UTC (permalink / raw)
  To: qemu-devel, qemu-riscv
  Cc: Palmer Dabbelt, Alistair Francis, Weiwei Li,
	Daniel Henrique Barboza, Liu Zhiwei, Jay Chang, Frank Chang

Previously, the number of PMP regions was hardcoded to 16 in QEMU.
This patch replaces the fixed value with a new `pmp_regions` field,
allowing platforms to configure the number of PMP regions.

If no specific value is provided, the default number of PMP regions
remains 16 to preserve the existing behavior.

A new CPU parameter num-pmp-regions has been introduced to the QEMU
command line. For example:

	-cpu rv64,g=true,c=true,pmp=true,num-pmp-regions=8

Reviewed-by: Frank Chang <frank.chang@sifive.com>
Signed-off-by: Jay Chang <jay.chang@sifive.com>
---
 target/riscv/cpu.c     | 46 ++++++++++++++++++++++++++++++++++++++++++
 target/riscv/cpu.h     |  2 +-
 target/riscv/cpu_cfg.h |  1 +
 target/riscv/csr.c     |  5 ++++-
 target/riscv/machine.c |  3 ++-
 target/riscv/pmp.c     | 28 ++++++++++++++++---------
 6 files changed, 73 insertions(+), 12 deletions(-)

diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
index 09ded6829a..528d77b820 100644
--- a/target/riscv/cpu.c
+++ b/target/riscv/cpu.c
@@ -512,6 +512,7 @@ static void rv64_sifive_u_cpu_init(Object *obj)
     cpu->cfg.ext_zicsr = true;
     cpu->cfg.mmu = true;
     cpu->cfg.pmp = true;
+    cpu->cfg.pmp_regions = 8;
 }
 
 static void rv64_sifive_e_cpu_init(Object *obj)
@@ -529,6 +530,7 @@ static void rv64_sifive_e_cpu_init(Object *obj)
     cpu->cfg.ext_zifencei = true;
     cpu->cfg.ext_zicsr = true;
     cpu->cfg.pmp = true;
+    cpu->cfg.pmp_regions = 8;
 }
 
 static void rv64_thead_c906_cpu_init(Object *obj)
@@ -761,6 +763,7 @@ static void rv32_sifive_u_cpu_init(Object *obj)
     cpu->cfg.ext_zicsr = true;
     cpu->cfg.mmu = true;
     cpu->cfg.pmp = true;
+    cpu->cfg.pmp_regions = 8;
 }
 
 static void rv32_sifive_e_cpu_init(Object *obj)
@@ -778,6 +781,7 @@ static void rv32_sifive_e_cpu_init(Object *obj)
     cpu->cfg.ext_zifencei = true;
     cpu->cfg.ext_zicsr = true;
     cpu->cfg.pmp = true;
+    cpu->cfg.pmp_regions = 8;
 }
 
 static void rv32_ibex_cpu_init(Object *obj)
@@ -1478,6 +1482,7 @@ static void riscv_cpu_init(Object *obj)
     cpu->cfg.cbom_blocksize = 64;
     cpu->cfg.cbop_blocksize = 64;
     cpu->cfg.cboz_blocksize = 64;
+    cpu->cfg.pmp_regions = 16;
     cpu->env.vext_ver = VEXT_VERSION_1_00_0;
 }
 
@@ -1935,6 +1940,46 @@ static const PropertyInfo prop_pmp = {
     .set = prop_pmp_set,
 };
 
+static void prop_num_pmp_regions_set(Object *obj, Visitor *v, const char *name,
+                                     void *opaque, Error **errp)
+{
+    RISCVCPU *cpu = RISCV_CPU(obj);
+    uint16_t value;
+
+    visit_type_uint16(v, name, &value, errp);
+
+    if (cpu->cfg.pmp_regions != value && riscv_cpu_is_vendor(obj)) {
+        cpu_set_prop_err(cpu, name, errp);
+        return;
+    }
+
+    if (cpu->env.priv_ver < PRIV_VERSION_1_12_0 && value > 16) {
+        error_setg(errp, "Number of PMP regions exceeds maximum available");
+        return;
+    } else if (value > 64) {
+        error_setg(errp, "Number of PMP regions exceeds maximum available");
+        return;
+    }
+
+    cpu_option_add_user_setting(name, value);
+    cpu->cfg.pmp_regions = value;
+}
+
+static void prop_num_pmp_regions_get(Object *obj, Visitor *v, const char *name,
+                                     void *opaque, Error **errp)
+{
+    uint16_t value = RISCV_CPU(obj)->cfg.pmp_regions;
+
+    visit_type_uint16(v, name, &value, errp);
+}
+
+static const PropertyInfo prop_num_pmp_regions = {
+    .type = "uint16",
+    .description = "num-pmp-regions",
+    .get = prop_num_pmp_regions_get,
+    .set = prop_num_pmp_regions_set,
+};
+
 static int priv_spec_from_str(const char *priv_spec_str)
 {
     int priv_version = -1;
@@ -2934,6 +2979,7 @@ static const Property riscv_cpu_properties[] = {
 
     {.name = "mmu", .info = &prop_mmu},
     {.name = "pmp", .info = &prop_pmp},
+    {.name = "num-pmp-regions", .info = &prop_num_pmp_regions},
 
     {.name = "priv_spec", .info = &prop_priv_spec},
     {.name = "vext_spec", .info = &prop_vext_spec},
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
index 51e49e03de..50d58c15f2 100644
--- a/target/riscv/cpu.h
+++ b/target/riscv/cpu.h
@@ -162,7 +162,7 @@ extern RISCVCPUImpliedExtsRule *riscv_multi_ext_implied_rules[];
 
 #define MMU_USER_IDX 3
 
-#define MAX_RISCV_PMPS (16)
+#define MAX_RISCV_PMPS (64)
 
 #if !defined(CONFIG_USER_ONLY)
 #include "pmp.h"
diff --git a/target/riscv/cpu_cfg.h b/target/riscv/cpu_cfg.h
index 8a843482cc..8c805b45f6 100644
--- a/target/riscv/cpu_cfg.h
+++ b/target/riscv/cpu_cfg.h
@@ -189,6 +189,7 @@ struct RISCVCPUConfig {
     uint16_t cbom_blocksize;
     uint16_t cbop_blocksize;
     uint16_t cboz_blocksize;
+    uint16_t pmp_regions;
     bool mmu;
     bool pmp;
     bool debug;
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
index f8f61ffff5..65f91be9c0 100644
--- a/target/riscv/csr.c
+++ b/target/riscv/csr.c
@@ -736,7 +736,10 @@ static RISCVException dbltrp_hmode(CPURISCVState *env, int csrno)
 static RISCVException pmp(CPURISCVState *env, int csrno)
 {
     if (riscv_cpu_cfg(env)->pmp) {
-        if (csrno <= CSR_PMPCFG3) {
+        uint16_t MAX_PMPCFG = (env->priv_ver >= PRIV_VERSION_1_12_0) ?
+                              CSR_PMPCFG15 : CSR_PMPCFG3;
+
+        if (csrno <= MAX_PMPCFG) {
             uint32_t reg_index = csrno - CSR_PMPCFG0;
 
             /* TODO: RV128 restriction check */
diff --git a/target/riscv/machine.c b/target/riscv/machine.c
index 889e2b6570..c3e4e78802 100644
--- a/target/riscv/machine.c
+++ b/target/riscv/machine.c
@@ -36,8 +36,9 @@ static int pmp_post_load(void *opaque, int version_id)
     RISCVCPU *cpu = opaque;
     CPURISCVState *env = &cpu->env;
     int i;
+    uint16_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
 
-    for (i = 0; i < MAX_RISCV_PMPS; i++) {
+    for (i = 0; i < pmp_regions; i++) {
         pmp_update_rule_addr(env, i);
     }
     pmp_update_rule_nums(env);
diff --git a/target/riscv/pmp.c b/target/riscv/pmp.c
index c685f7f2c5..3439295d41 100644
--- a/target/riscv/pmp.c
+++ b/target/riscv/pmp.c
@@ -121,7 +121,9 @@ uint32_t pmp_get_num_rules(CPURISCVState *env)
  */
 static inline uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t pmp_index)
 {
-    if (pmp_index < MAX_RISCV_PMPS) {
+    uint16_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
+
+    if (pmp_index < pmp_regions) {
         return env->pmp_state.pmp[pmp_index].cfg_reg;
     }
 
@@ -135,7 +137,9 @@ static inline uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t pmp_index)
  */
 static bool pmp_write_cfg(CPURISCVState *env, uint32_t pmp_index, uint8_t val)
 {
-    if (pmp_index < MAX_RISCV_PMPS) {
+    uint16_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
+
+    if (pmp_index < pmp_regions) {
         if (env->pmp_state.pmp[pmp_index].cfg_reg == val) {
             /* no change */
             return false;
@@ -235,9 +239,10 @@ void pmp_update_rule_addr(CPURISCVState *env, uint32_t pmp_index)
 void pmp_update_rule_nums(CPURISCVState *env)
 {
     int i;
+    uint16_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
 
     env->pmp_state.num_rules = 0;
-    for (i = 0; i < MAX_RISCV_PMPS; i++) {
+    for (i = 0; i < pmp_regions; i++) {
         const uint8_t a_field =
             pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg);
         if (PMP_AMATCH_OFF != a_field) {
@@ -331,6 +336,7 @@ bool pmp_hart_has_privs(CPURISCVState *env, hwaddr addr,
     int pmp_size = 0;
     hwaddr s = 0;
     hwaddr e = 0;
+    uint16_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
 
     /* Short cut if no rules */
     if (0 == pmp_get_num_rules(env)) {
@@ -355,7 +361,7 @@ bool pmp_hart_has_privs(CPURISCVState *env, hwaddr addr,
      * 1.10 draft priv spec states there is an implicit order
      * from low to high
      */
-    for (i = 0; i < MAX_RISCV_PMPS; i++) {
+    for (i = 0; i < pmp_regions; i++) {
         s = pmp_is_in_range(env, i, addr);
         e = pmp_is_in_range(env, i, addr + pmp_size - 1);
 
@@ -526,8 +532,9 @@ void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
 {
     trace_pmpaddr_csr_write(env->mhartid, addr_index, val);
     bool is_next_cfg_tor = false;
+    uint16_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
 
-    if (addr_index < MAX_RISCV_PMPS) {
+    if (addr_index < pmp_regions) {
         if (env->pmp_state.pmp[addr_index].addr_reg == val) {
             /* no change */
             return;
@@ -537,7 +544,7 @@ void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
          * In TOR mode, need to check the lock bit of the next pmp
          * (if there is a next).
          */
-        if (addr_index + 1 < MAX_RISCV_PMPS) {
+        if (addr_index + 1 < pmp_regions) {
             uint8_t pmp_cfg = env->pmp_state.pmp[addr_index + 1].cfg_reg;
             is_next_cfg_tor = PMP_AMATCH_TOR == pmp_get_a_field(pmp_cfg);
 
@@ -572,8 +579,9 @@ void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
 target_ulong pmpaddr_csr_read(CPURISCVState *env, uint32_t addr_index)
 {
     target_ulong val = 0;
+    uint16_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
 
-    if (addr_index < MAX_RISCV_PMPS) {
+    if (addr_index < pmp_regions) {
         val = env->pmp_state.pmp[addr_index].addr_reg;
         trace_pmpaddr_csr_read(env->mhartid, addr_index, val);
     } else {
@@ -591,6 +599,7 @@ void mseccfg_csr_write(CPURISCVState *env, target_ulong val)
 {
     int i;
     uint64_t mask = MSECCFG_MMWP | MSECCFG_MML;
+    uint16_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
     /* Update PMM field only if the value is valid according to Zjpm v1.0 */
     if (riscv_cpu_cfg(env)->ext_smmpm &&
         riscv_cpu_mxl(env) == MXL_RV64 &&
@@ -602,7 +611,7 @@ void mseccfg_csr_write(CPURISCVState *env, target_ulong val)
 
     /* RLB cannot be enabled if it's already 0 and if any regions are locked */
     if (!MSECCFG_RLB_ISSET(env)) {
-        for (i = 0; i < MAX_RISCV_PMPS; i++) {
+        for (i = 0; i < pmp_regions; i++) {
             if (pmp_is_locked(env, i)) {
                 val &= ~MSECCFG_RLB;
                 break;
@@ -658,6 +667,7 @@ target_ulong pmp_get_tlb_size(CPURISCVState *env, hwaddr addr)
     hwaddr tlb_sa = addr & ~(TARGET_PAGE_SIZE - 1);
     hwaddr tlb_ea = tlb_sa + TARGET_PAGE_SIZE - 1;
     int i;
+    uint16_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
 
     /*
      * If PMP is not supported or there are no PMP rules, the TLB page will not
@@ -668,7 +678,7 @@ target_ulong pmp_get_tlb_size(CPURISCVState *env, hwaddr addr)
         return TARGET_PAGE_SIZE;
     }
 
-    for (i = 0; i < MAX_RISCV_PMPS; i++) {
+    for (i = 0; i < pmp_regions; i++) {
         if (pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg) == PMP_AMATCH_OFF) {
             continue;
         }
-- 
2.48.1



^ permalink raw reply related	[flat|nested] 8+ messages in thread

* Re: [PATCH 1/2] target/riscv: Extend PMP region up to 64
  2025-04-21  9:46 ` [PATCH 1/2] target/riscv: Extend PMP region up to 64 Jay Chang
@ 2025-04-23 11:22   ` Daniel Henrique Barboza
  2025-04-24 10:53   ` Alistair Francis
  1 sibling, 0 replies; 8+ messages in thread
From: Daniel Henrique Barboza @ 2025-04-23 11:22 UTC (permalink / raw)
  To: Jay Chang, qemu-devel, qemu-riscv
  Cc: Palmer Dabbelt, Alistair Francis, Weiwei Li, Liu Zhiwei,
	Frank Chang



On 4/21/25 6:46 AM, Jay Chang wrote:
> According to the RISC-V Privileged Specification (version >1.12),
> RV32 supports 16 CSRs (pmpcfg0–pmpcfg15) to configure 64 PMP regions
> (pmpaddr0–pmpaddr63).
> 
> Reviewed-by: Frank Chang <frank.chang@sifive.com>
> Signed-off-by: Jay Chang <jay.chang@sifive.com>
> ---


Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>

>   target/riscv/cpu_bits.h |  60 +++++++++++++++++++
>   target/riscv/csr.c      | 124 +++++++++++++++++++++++++++++++++++++++-
>   2 files changed, 182 insertions(+), 2 deletions(-)
> 
> diff --git a/target/riscv/cpu_bits.h b/target/riscv/cpu_bits.h
> index a30317c617..e6b3e28386 100644
> --- a/target/riscv/cpu_bits.h
> +++ b/target/riscv/cpu_bits.h
> @@ -372,6 +372,18 @@
>   #define CSR_PMPCFG1         0x3a1
>   #define CSR_PMPCFG2         0x3a2
>   #define CSR_PMPCFG3         0x3a3
> +#define CSR_PMPCFG4         0x3a4
> +#define CSR_PMPCFG5         0x3a5
> +#define CSR_PMPCFG6         0x3a6
> +#define CSR_PMPCFG7         0x3a7
> +#define CSR_PMPCFG8         0x3a8
> +#define CSR_PMPCFG9         0x3a9
> +#define CSR_PMPCFG10        0x3aa
> +#define CSR_PMPCFG11        0x3ab
> +#define CSR_PMPCFG12        0x3ac
> +#define CSR_PMPCFG13        0x3ad
> +#define CSR_PMPCFG14        0x3ae
> +#define CSR_PMPCFG15        0x3af
>   #define CSR_PMPADDR0        0x3b0
>   #define CSR_PMPADDR1        0x3b1
>   #define CSR_PMPADDR2        0x3b2
> @@ -388,6 +400,54 @@
>   #define CSR_PMPADDR13       0x3bd
>   #define CSR_PMPADDR14       0x3be
>   #define CSR_PMPADDR15       0x3bf
> +#define CSR_PMPADDR16       0x3c0
> +#define CSR_PMPADDR17       0x3c1
> +#define CSR_PMPADDR18       0x3c2
> +#define CSR_PMPADDR19       0x3c3
> +#define CSR_PMPADDR20       0x3c4
> +#define CSR_PMPADDR21       0x3c5
> +#define CSR_PMPADDR22       0x3c6
> +#define CSR_PMPADDR23       0x3c7
> +#define CSR_PMPADDR24       0x3c8
> +#define CSR_PMPADDR25       0x3c9
> +#define CSR_PMPADDR26       0x3ca
> +#define CSR_PMPADDR27       0x3cb
> +#define CSR_PMPADDR28       0x3cc
> +#define CSR_PMPADDR29       0x3cd
> +#define CSR_PMPADDR30       0x3ce
> +#define CSR_PMPADDR31       0x3cf
> +#define CSR_PMPADDR32       0x3d0
> +#define CSR_PMPADDR33       0x3d1
> +#define CSR_PMPADDR34       0x3d2
> +#define CSR_PMPADDR35       0x3d3
> +#define CSR_PMPADDR36       0x3d4
> +#define CSR_PMPADDR37       0x3d5
> +#define CSR_PMPADDR38       0x3d6
> +#define CSR_PMPADDR39       0x3d7
> +#define CSR_PMPADDR40       0x3d8
> +#define CSR_PMPADDR41       0x3d9
> +#define CSR_PMPADDR42       0x3da
> +#define CSR_PMPADDR43       0x3db
> +#define CSR_PMPADDR44       0x3dc
> +#define CSR_PMPADDR45       0x3dd
> +#define CSR_PMPADDR46       0x3de
> +#define CSR_PMPADDR47       0x3df
> +#define CSR_PMPADDR48       0x3e0
> +#define CSR_PMPADDR49       0x3e1
> +#define CSR_PMPADDR50       0x3e2
> +#define CSR_PMPADDR51       0x3e3
> +#define CSR_PMPADDR52       0x3e4
> +#define CSR_PMPADDR53       0x3e5
> +#define CSR_PMPADDR54       0x3e6
> +#define CSR_PMPADDR55       0x3e7
> +#define CSR_PMPADDR56       0x3e8
> +#define CSR_PMPADDR57       0x3e9
> +#define CSR_PMPADDR58       0x3ea
> +#define CSR_PMPADDR59       0x3eb
> +#define CSR_PMPADDR60       0x3ec
> +#define CSR_PMPADDR61       0x3ed
> +#define CSR_PMPADDR62       0x3ee
> +#define CSR_PMPADDR63       0x3ef
>   
>   /* RNMI */
>   #define CSR_MNSCRATCH       0x740
> diff --git a/target/riscv/csr.c b/target/riscv/csr.c
> index 7948188356..f8f61ffff5 100644
> --- a/target/riscv/csr.c
> +++ b/target/riscv/csr.c
> @@ -6088,6 +6088,30 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
>       [CSR_PMPCFG1]    = { "pmpcfg1",   pmp, read_pmpcfg,  write_pmpcfg  },
>       [CSR_PMPCFG2]    = { "pmpcfg2",   pmp, read_pmpcfg,  write_pmpcfg  },
>       [CSR_PMPCFG3]    = { "pmpcfg3",   pmp, read_pmpcfg,  write_pmpcfg  },
> +    [CSR_PMPCFG4]    = { "pmpcfg4",   pmp, read_pmpcfg,  write_pmpcfg,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPCFG5]    = { "pmpcfg5",   pmp, read_pmpcfg,  write_pmpcfg,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPCFG6]    = { "pmpcfg6",   pmp, read_pmpcfg,  write_pmpcfg,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPCFG7]    = { "pmpcfg7",   pmp, read_pmpcfg,  write_pmpcfg,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPCFG8]    = { "pmpcfg8",   pmp, read_pmpcfg,  write_pmpcfg,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPCFG9]    = { "pmpcfg9",   pmp, read_pmpcfg,  write_pmpcfg,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPCFG10]   = { "pmpcfg10",  pmp, read_pmpcfg,  write_pmpcfg,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPCFG11]   = { "pmpcfg11",  pmp, read_pmpcfg,  write_pmpcfg,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPCFG12]   = { "pmpcfg12",  pmp, read_pmpcfg,  write_pmpcfg,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPCFG13]   = { "pmpcfg13",  pmp, read_pmpcfg,  write_pmpcfg,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPCFG14]   = { "pmpcfg14",  pmp, read_pmpcfg,  write_pmpcfg,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPCFG15]   = { "pmpcfg15",  pmp, read_pmpcfg,  write_pmpcfg,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
>       [CSR_PMPADDR0]   = { "pmpaddr0",  pmp, read_pmpaddr, write_pmpaddr },
>       [CSR_PMPADDR1]   = { "pmpaddr1",  pmp, read_pmpaddr, write_pmpaddr },
>       [CSR_PMPADDR2]   = { "pmpaddr2",  pmp, read_pmpaddr, write_pmpaddr },
> @@ -6102,8 +6126,104 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
>       [CSR_PMPADDR11]  = { "pmpaddr11", pmp, read_pmpaddr, write_pmpaddr },
>       [CSR_PMPADDR12]  = { "pmpaddr12", pmp, read_pmpaddr, write_pmpaddr },
>       [CSR_PMPADDR13]  = { "pmpaddr13", pmp, read_pmpaddr, write_pmpaddr },
> -    [CSR_PMPADDR14] =  { "pmpaddr14", pmp, read_pmpaddr, write_pmpaddr },
> -    [CSR_PMPADDR15] =  { "pmpaddr15", pmp, read_pmpaddr, write_pmpaddr },
> +    [CSR_PMPADDR14]  = { "pmpaddr14", pmp, read_pmpaddr, write_pmpaddr },
> +    [CSR_PMPADDR15]  = { "pmpaddr15", pmp, read_pmpaddr, write_pmpaddr },
> +    [CSR_PMPADDR16]  = { "pmpaddr16", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR17]  = { "pmpaddr17", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR18]  = { "pmpaddr18", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR19]  = { "pmpaddr19", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR20]  = { "pmpaddr20", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR21]  = { "pmpaddr21", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR22]  = { "pmpaddr22", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR23]  = { "pmpaddr23", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR24]  = { "pmpaddr24", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR25]  = { "pmpaddr25", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR26]  = { "pmpaddr26", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR27]  = { "pmpaddr27", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR28]  = { "pmpaddr28", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR29]  = { "pmpaddr29", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR30]  = { "pmpaddr30", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR31]  = { "pmpaddr31", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR32]  = { "pmpaddr32", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR33]  = { "pmpaddr33", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR34]  = { "pmpaddr34", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR35]  = { "pmpaddr35", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR36]  = { "pmpaddr36", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR37]  = { "pmpaddr37", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR38]  = { "pmpaddr38", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR39]  = { "pmpaddr39", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR40]  = { "pmpaddr40", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR41]  = { "pmpaddr41", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR42]  = { "pmpaddr42", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR43]  = { "pmpaddr43", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR44]  = { "pmpaddr44", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR45]  = { "pmpaddr45", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR46]  = { "pmpaddr46", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR47]  = { "pmpaddr47", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR48]  = { "pmpaddr48", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR49]  = { "pmpaddr49", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR50]  = { "pmpaddr50", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR51]  = { "pmpaddr51", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR52]  = { "pmpaddr52", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR53]  = { "pmpaddr53", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR54]  = { "pmpaddr54", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR55]  = { "pmpaddr55", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR56]  = { "pmpaddr56", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR57]  = { "pmpaddr57", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR58]  = { "pmpaddr58", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR59]  = { "pmpaddr59", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR60]  = { "pmpaddr60", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR61]  = { "pmpaddr61", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR62]  = { "pmpaddr62", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR63]  = { "pmpaddr63", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
>   
>       /* Debug CSRs */
>       [CSR_TSELECT]   =  { "tselect",  debug, read_tselect,  write_tselect  },



^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH 2/2] target/riscv: Make PMP region count configurable
  2025-04-21  9:46 ` [PATCH 2/2] target/riscv: Make PMP region count configurable Jay Chang
@ 2025-04-23 11:30   ` Daniel Henrique Barboza
  2025-04-24 10:55   ` Alistair Francis
  1 sibling, 0 replies; 8+ messages in thread
From: Daniel Henrique Barboza @ 2025-04-23 11:30 UTC (permalink / raw)
  To: Jay Chang, qemu-devel, qemu-riscv
  Cc: Palmer Dabbelt, Alistair Francis, Weiwei Li, Liu Zhiwei,
	Frank Chang



On 4/21/25 6:46 AM, Jay Chang wrote:
> Previously, the number of PMP regions was hardcoded to 16 in QEMU.
> This patch replaces the fixed value with a new `pmp_regions` field,
> allowing platforms to configure the number of PMP regions.
> 
> If no specific value is provided, the default number of PMP regions
> remains 16 to preserve the existing behavior.
> 
> A new CPU parameter num-pmp-regions has been introduced to the QEMU
> command line. For example:
> 
> 	-cpu rv64,g=true,c=true,pmp=true,num-pmp-regions=8
> 
> Reviewed-by: Frank Chang <frank.chang@sifive.com>
> Signed-off-by: Jay Chang <jay.chang@sifive.com>
> ---


Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>

>   target/riscv/cpu.c     | 46 ++++++++++++++++++++++++++++++++++++++++++
>   target/riscv/cpu.h     |  2 +-
>   target/riscv/cpu_cfg.h |  1 +
>   target/riscv/csr.c     |  5 ++++-
>   target/riscv/machine.c |  3 ++-
>   target/riscv/pmp.c     | 28 ++++++++++++++++---------
>   6 files changed, 73 insertions(+), 12 deletions(-)
> 
> diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
> index 09ded6829a..528d77b820 100644
> --- a/target/riscv/cpu.c
> +++ b/target/riscv/cpu.c
> @@ -512,6 +512,7 @@ static void rv64_sifive_u_cpu_init(Object *obj)
>       cpu->cfg.ext_zicsr = true;
>       cpu->cfg.mmu = true;
>       cpu->cfg.pmp = true;
> +    cpu->cfg.pmp_regions = 8;
>   }
>   
>   static void rv64_sifive_e_cpu_init(Object *obj)
> @@ -529,6 +530,7 @@ static void rv64_sifive_e_cpu_init(Object *obj)
>       cpu->cfg.ext_zifencei = true;
>       cpu->cfg.ext_zicsr = true;
>       cpu->cfg.pmp = true;
> +    cpu->cfg.pmp_regions = 8;
>   }
>   
>   static void rv64_thead_c906_cpu_init(Object *obj)
> @@ -761,6 +763,7 @@ static void rv32_sifive_u_cpu_init(Object *obj)
>       cpu->cfg.ext_zicsr = true;
>       cpu->cfg.mmu = true;
>       cpu->cfg.pmp = true;
> +    cpu->cfg.pmp_regions = 8;
>   }
>   
>   static void rv32_sifive_e_cpu_init(Object *obj)
> @@ -778,6 +781,7 @@ static void rv32_sifive_e_cpu_init(Object *obj)
>       cpu->cfg.ext_zifencei = true;
>       cpu->cfg.ext_zicsr = true;
>       cpu->cfg.pmp = true;
> +    cpu->cfg.pmp_regions = 8;
>   }
>   
>   static void rv32_ibex_cpu_init(Object *obj)
> @@ -1478,6 +1482,7 @@ static void riscv_cpu_init(Object *obj)
>       cpu->cfg.cbom_blocksize = 64;
>       cpu->cfg.cbop_blocksize = 64;
>       cpu->cfg.cboz_blocksize = 64;
> +    cpu->cfg.pmp_regions = 16;
>       cpu->env.vext_ver = VEXT_VERSION_1_00_0;
>   }
>   
> @@ -1935,6 +1940,46 @@ static const PropertyInfo prop_pmp = {
>       .set = prop_pmp_set,
>   };
>   
> +static void prop_num_pmp_regions_set(Object *obj, Visitor *v, const char *name,
> +                                     void *opaque, Error **errp)
> +{
> +    RISCVCPU *cpu = RISCV_CPU(obj);
> +    uint16_t value;
> +
> +    visit_type_uint16(v, name, &value, errp);
> +
> +    if (cpu->cfg.pmp_regions != value && riscv_cpu_is_vendor(obj)) {
> +        cpu_set_prop_err(cpu, name, errp);
> +        return;
> +    }
> +
> +    if (cpu->env.priv_ver < PRIV_VERSION_1_12_0 && value > 16) {
> +        error_setg(errp, "Number of PMP regions exceeds maximum available");
> +        return;
> +    } else if (value > 64) {
> +        error_setg(errp, "Number of PMP regions exceeds maximum available");
> +        return;
> +    }
> +
> +    cpu_option_add_user_setting(name, value);
> +    cpu->cfg.pmp_regions = value;
> +}
> +
> +static void prop_num_pmp_regions_get(Object *obj, Visitor *v, const char *name,
> +                                     void *opaque, Error **errp)
> +{
> +    uint16_t value = RISCV_CPU(obj)->cfg.pmp_regions;
> +
> +    visit_type_uint16(v, name, &value, errp);
> +}
> +
> +static const PropertyInfo prop_num_pmp_regions = {
> +    .type = "uint16",
> +    .description = "num-pmp-regions",
> +    .get = prop_num_pmp_regions_get,
> +    .set = prop_num_pmp_regions_set,
> +};
> +
>   static int priv_spec_from_str(const char *priv_spec_str)
>   {
>       int priv_version = -1;
> @@ -2934,6 +2979,7 @@ static const Property riscv_cpu_properties[] = {
>   
>       {.name = "mmu", .info = &prop_mmu},
>       {.name = "pmp", .info = &prop_pmp},
> +    {.name = "num-pmp-regions", .info = &prop_num_pmp_regions},
>   
>       {.name = "priv_spec", .info = &prop_priv_spec},
>       {.name = "vext_spec", .info = &prop_vext_spec},
> diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
> index 51e49e03de..50d58c15f2 100644
> --- a/target/riscv/cpu.h
> +++ b/target/riscv/cpu.h
> @@ -162,7 +162,7 @@ extern RISCVCPUImpliedExtsRule *riscv_multi_ext_implied_rules[];
>   
>   #define MMU_USER_IDX 3
>   
> -#define MAX_RISCV_PMPS (16)
> +#define MAX_RISCV_PMPS (64)
>   
>   #if !defined(CONFIG_USER_ONLY)
>   #include "pmp.h"
> diff --git a/target/riscv/cpu_cfg.h b/target/riscv/cpu_cfg.h
> index 8a843482cc..8c805b45f6 100644
> --- a/target/riscv/cpu_cfg.h
> +++ b/target/riscv/cpu_cfg.h
> @@ -189,6 +189,7 @@ struct RISCVCPUConfig {
>       uint16_t cbom_blocksize;
>       uint16_t cbop_blocksize;
>       uint16_t cboz_blocksize;
> +    uint16_t pmp_regions;
>       bool mmu;
>       bool pmp;
>       bool debug;
> diff --git a/target/riscv/csr.c b/target/riscv/csr.c
> index f8f61ffff5..65f91be9c0 100644
> --- a/target/riscv/csr.c
> +++ b/target/riscv/csr.c
> @@ -736,7 +736,10 @@ static RISCVException dbltrp_hmode(CPURISCVState *env, int csrno)
>   static RISCVException pmp(CPURISCVState *env, int csrno)
>   {
>       if (riscv_cpu_cfg(env)->pmp) {
> -        if (csrno <= CSR_PMPCFG3) {
> +        uint16_t MAX_PMPCFG = (env->priv_ver >= PRIV_VERSION_1_12_0) ?
> +                              CSR_PMPCFG15 : CSR_PMPCFG3;
> +
> +        if (csrno <= MAX_PMPCFG) {
>               uint32_t reg_index = csrno - CSR_PMPCFG0;
>   
>               /* TODO: RV128 restriction check */
> diff --git a/target/riscv/machine.c b/target/riscv/machine.c
> index 889e2b6570..c3e4e78802 100644
> --- a/target/riscv/machine.c
> +++ b/target/riscv/machine.c
> @@ -36,8 +36,9 @@ static int pmp_post_load(void *opaque, int version_id)
>       RISCVCPU *cpu = opaque;
>       CPURISCVState *env = &cpu->env;
>       int i;
> +    uint16_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
>   
> -    for (i = 0; i < MAX_RISCV_PMPS; i++) {
> +    for (i = 0; i < pmp_regions; i++) {
>           pmp_update_rule_addr(env, i);
>       }
>       pmp_update_rule_nums(env);
> diff --git a/target/riscv/pmp.c b/target/riscv/pmp.c
> index c685f7f2c5..3439295d41 100644
> --- a/target/riscv/pmp.c
> +++ b/target/riscv/pmp.c
> @@ -121,7 +121,9 @@ uint32_t pmp_get_num_rules(CPURISCVState *env)
>    */
>   static inline uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t pmp_index)
>   {
> -    if (pmp_index < MAX_RISCV_PMPS) {
> +    uint16_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
> +
> +    if (pmp_index < pmp_regions) {
>           return env->pmp_state.pmp[pmp_index].cfg_reg;
>       }
>   
> @@ -135,7 +137,9 @@ static inline uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t pmp_index)
>    */
>   static bool pmp_write_cfg(CPURISCVState *env, uint32_t pmp_index, uint8_t val)
>   {
> -    if (pmp_index < MAX_RISCV_PMPS) {
> +    uint16_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
> +
> +    if (pmp_index < pmp_regions) {
>           if (env->pmp_state.pmp[pmp_index].cfg_reg == val) {
>               /* no change */
>               return false;
> @@ -235,9 +239,10 @@ void pmp_update_rule_addr(CPURISCVState *env, uint32_t pmp_index)
>   void pmp_update_rule_nums(CPURISCVState *env)
>   {
>       int i;
> +    uint16_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
>   
>       env->pmp_state.num_rules = 0;
> -    for (i = 0; i < MAX_RISCV_PMPS; i++) {
> +    for (i = 0; i < pmp_regions; i++) {
>           const uint8_t a_field =
>               pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg);
>           if (PMP_AMATCH_OFF != a_field) {
> @@ -331,6 +336,7 @@ bool pmp_hart_has_privs(CPURISCVState *env, hwaddr addr,
>       int pmp_size = 0;
>       hwaddr s = 0;
>       hwaddr e = 0;
> +    uint16_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
>   
>       /* Short cut if no rules */
>       if (0 == pmp_get_num_rules(env)) {
> @@ -355,7 +361,7 @@ bool pmp_hart_has_privs(CPURISCVState *env, hwaddr addr,
>        * 1.10 draft priv spec states there is an implicit order
>        * from low to high
>        */
> -    for (i = 0; i < MAX_RISCV_PMPS; i++) {
> +    for (i = 0; i < pmp_regions; i++) {
>           s = pmp_is_in_range(env, i, addr);
>           e = pmp_is_in_range(env, i, addr + pmp_size - 1);
>   
> @@ -526,8 +532,9 @@ void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
>   {
>       trace_pmpaddr_csr_write(env->mhartid, addr_index, val);
>       bool is_next_cfg_tor = false;
> +    uint16_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
>   
> -    if (addr_index < MAX_RISCV_PMPS) {
> +    if (addr_index < pmp_regions) {
>           if (env->pmp_state.pmp[addr_index].addr_reg == val) {
>               /* no change */
>               return;
> @@ -537,7 +544,7 @@ void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
>            * In TOR mode, need to check the lock bit of the next pmp
>            * (if there is a next).
>            */
> -        if (addr_index + 1 < MAX_RISCV_PMPS) {
> +        if (addr_index + 1 < pmp_regions) {
>               uint8_t pmp_cfg = env->pmp_state.pmp[addr_index + 1].cfg_reg;
>               is_next_cfg_tor = PMP_AMATCH_TOR == pmp_get_a_field(pmp_cfg);
>   
> @@ -572,8 +579,9 @@ void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
>   target_ulong pmpaddr_csr_read(CPURISCVState *env, uint32_t addr_index)
>   {
>       target_ulong val = 0;
> +    uint16_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
>   
> -    if (addr_index < MAX_RISCV_PMPS) {
> +    if (addr_index < pmp_regions) {
>           val = env->pmp_state.pmp[addr_index].addr_reg;
>           trace_pmpaddr_csr_read(env->mhartid, addr_index, val);
>       } else {
> @@ -591,6 +599,7 @@ void mseccfg_csr_write(CPURISCVState *env, target_ulong val)
>   {
>       int i;
>       uint64_t mask = MSECCFG_MMWP | MSECCFG_MML;
> +    uint16_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
>       /* Update PMM field only if the value is valid according to Zjpm v1.0 */
>       if (riscv_cpu_cfg(env)->ext_smmpm &&
>           riscv_cpu_mxl(env) == MXL_RV64 &&
> @@ -602,7 +611,7 @@ void mseccfg_csr_write(CPURISCVState *env, target_ulong val)
>   
>       /* RLB cannot be enabled if it's already 0 and if any regions are locked */
>       if (!MSECCFG_RLB_ISSET(env)) {
> -        for (i = 0; i < MAX_RISCV_PMPS; i++) {
> +        for (i = 0; i < pmp_regions; i++) {
>               if (pmp_is_locked(env, i)) {
>                   val &= ~MSECCFG_RLB;
>                   break;
> @@ -658,6 +667,7 @@ target_ulong pmp_get_tlb_size(CPURISCVState *env, hwaddr addr)
>       hwaddr tlb_sa = addr & ~(TARGET_PAGE_SIZE - 1);
>       hwaddr tlb_ea = tlb_sa + TARGET_PAGE_SIZE - 1;
>       int i;
> +    uint16_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
>   
>       /*
>        * If PMP is not supported or there are no PMP rules, the TLB page will not
> @@ -668,7 +678,7 @@ target_ulong pmp_get_tlb_size(CPURISCVState *env, hwaddr addr)
>           return TARGET_PAGE_SIZE;
>       }
>   
> -    for (i = 0; i < MAX_RISCV_PMPS; i++) {
> +    for (i = 0; i < pmp_regions; i++) {
>           if (pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg) == PMP_AMATCH_OFF) {
>               continue;
>           }



^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH 1/2] target/riscv: Extend PMP region up to 64
  2025-04-21  9:46 ` [PATCH 1/2] target/riscv: Extend PMP region up to 64 Jay Chang
  2025-04-23 11:22   ` Daniel Henrique Barboza
@ 2025-04-24 10:53   ` Alistair Francis
  1 sibling, 0 replies; 8+ messages in thread
From: Alistair Francis @ 2025-04-24 10:53 UTC (permalink / raw)
  To: Jay Chang
  Cc: qemu-devel, qemu-riscv, Palmer Dabbelt, Alistair Francis,
	Weiwei Li, Daniel Henrique Barboza, Liu Zhiwei, Frank Chang

On Mon, Apr 21, 2025 at 7:48 PM Jay Chang <jay.chang@sifive.com> wrote:
>
> According to the RISC-V Privileged Specification (version >1.12),
> RV32 supports 16 CSRs (pmpcfg0–pmpcfg15) to configure 64 PMP regions
> (pmpaddr0–pmpaddr63).
>
> Reviewed-by: Frank Chang <frank.chang@sifive.com>
> Signed-off-by: Jay Chang <jay.chang@sifive.com>

Reviewed-by: Alistair Francis <alistair.francis@wdc.com>

Alistair

> ---
>  target/riscv/cpu_bits.h |  60 +++++++++++++++++++
>  target/riscv/csr.c      | 124 +++++++++++++++++++++++++++++++++++++++-
>  2 files changed, 182 insertions(+), 2 deletions(-)
>
> diff --git a/target/riscv/cpu_bits.h b/target/riscv/cpu_bits.h
> index a30317c617..e6b3e28386 100644
> --- a/target/riscv/cpu_bits.h
> +++ b/target/riscv/cpu_bits.h
> @@ -372,6 +372,18 @@
>  #define CSR_PMPCFG1         0x3a1
>  #define CSR_PMPCFG2         0x3a2
>  #define CSR_PMPCFG3         0x3a3
> +#define CSR_PMPCFG4         0x3a4
> +#define CSR_PMPCFG5         0x3a5
> +#define CSR_PMPCFG6         0x3a6
> +#define CSR_PMPCFG7         0x3a7
> +#define CSR_PMPCFG8         0x3a8
> +#define CSR_PMPCFG9         0x3a9
> +#define CSR_PMPCFG10        0x3aa
> +#define CSR_PMPCFG11        0x3ab
> +#define CSR_PMPCFG12        0x3ac
> +#define CSR_PMPCFG13        0x3ad
> +#define CSR_PMPCFG14        0x3ae
> +#define CSR_PMPCFG15        0x3af
>  #define CSR_PMPADDR0        0x3b0
>  #define CSR_PMPADDR1        0x3b1
>  #define CSR_PMPADDR2        0x3b2
> @@ -388,6 +400,54 @@
>  #define CSR_PMPADDR13       0x3bd
>  #define CSR_PMPADDR14       0x3be
>  #define CSR_PMPADDR15       0x3bf
> +#define CSR_PMPADDR16       0x3c0
> +#define CSR_PMPADDR17       0x3c1
> +#define CSR_PMPADDR18       0x3c2
> +#define CSR_PMPADDR19       0x3c3
> +#define CSR_PMPADDR20       0x3c4
> +#define CSR_PMPADDR21       0x3c5
> +#define CSR_PMPADDR22       0x3c6
> +#define CSR_PMPADDR23       0x3c7
> +#define CSR_PMPADDR24       0x3c8
> +#define CSR_PMPADDR25       0x3c9
> +#define CSR_PMPADDR26       0x3ca
> +#define CSR_PMPADDR27       0x3cb
> +#define CSR_PMPADDR28       0x3cc
> +#define CSR_PMPADDR29       0x3cd
> +#define CSR_PMPADDR30       0x3ce
> +#define CSR_PMPADDR31       0x3cf
> +#define CSR_PMPADDR32       0x3d0
> +#define CSR_PMPADDR33       0x3d1
> +#define CSR_PMPADDR34       0x3d2
> +#define CSR_PMPADDR35       0x3d3
> +#define CSR_PMPADDR36       0x3d4
> +#define CSR_PMPADDR37       0x3d5
> +#define CSR_PMPADDR38       0x3d6
> +#define CSR_PMPADDR39       0x3d7
> +#define CSR_PMPADDR40       0x3d8
> +#define CSR_PMPADDR41       0x3d9
> +#define CSR_PMPADDR42       0x3da
> +#define CSR_PMPADDR43       0x3db
> +#define CSR_PMPADDR44       0x3dc
> +#define CSR_PMPADDR45       0x3dd
> +#define CSR_PMPADDR46       0x3de
> +#define CSR_PMPADDR47       0x3df
> +#define CSR_PMPADDR48       0x3e0
> +#define CSR_PMPADDR49       0x3e1
> +#define CSR_PMPADDR50       0x3e2
> +#define CSR_PMPADDR51       0x3e3
> +#define CSR_PMPADDR52       0x3e4
> +#define CSR_PMPADDR53       0x3e5
> +#define CSR_PMPADDR54       0x3e6
> +#define CSR_PMPADDR55       0x3e7
> +#define CSR_PMPADDR56       0x3e8
> +#define CSR_PMPADDR57       0x3e9
> +#define CSR_PMPADDR58       0x3ea
> +#define CSR_PMPADDR59       0x3eb
> +#define CSR_PMPADDR60       0x3ec
> +#define CSR_PMPADDR61       0x3ed
> +#define CSR_PMPADDR62       0x3ee
> +#define CSR_PMPADDR63       0x3ef
>
>  /* RNMI */
>  #define CSR_MNSCRATCH       0x740
> diff --git a/target/riscv/csr.c b/target/riscv/csr.c
> index 7948188356..f8f61ffff5 100644
> --- a/target/riscv/csr.c
> +++ b/target/riscv/csr.c
> @@ -6088,6 +6088,30 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
>      [CSR_PMPCFG1]    = { "pmpcfg1",   pmp, read_pmpcfg,  write_pmpcfg  },
>      [CSR_PMPCFG2]    = { "pmpcfg2",   pmp, read_pmpcfg,  write_pmpcfg  },
>      [CSR_PMPCFG3]    = { "pmpcfg3",   pmp, read_pmpcfg,  write_pmpcfg  },
> +    [CSR_PMPCFG4]    = { "pmpcfg4",   pmp, read_pmpcfg,  write_pmpcfg,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPCFG5]    = { "pmpcfg5",   pmp, read_pmpcfg,  write_pmpcfg,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPCFG6]    = { "pmpcfg6",   pmp, read_pmpcfg,  write_pmpcfg,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPCFG7]    = { "pmpcfg7",   pmp, read_pmpcfg,  write_pmpcfg,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPCFG8]    = { "pmpcfg8",   pmp, read_pmpcfg,  write_pmpcfg,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPCFG9]    = { "pmpcfg9",   pmp, read_pmpcfg,  write_pmpcfg,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPCFG10]   = { "pmpcfg10",  pmp, read_pmpcfg,  write_pmpcfg,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPCFG11]   = { "pmpcfg11",  pmp, read_pmpcfg,  write_pmpcfg,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPCFG12]   = { "pmpcfg12",  pmp, read_pmpcfg,  write_pmpcfg,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPCFG13]   = { "pmpcfg13",  pmp, read_pmpcfg,  write_pmpcfg,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPCFG14]   = { "pmpcfg14",  pmp, read_pmpcfg,  write_pmpcfg,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPCFG15]   = { "pmpcfg15",  pmp, read_pmpcfg,  write_pmpcfg,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
>      [CSR_PMPADDR0]   = { "pmpaddr0",  pmp, read_pmpaddr, write_pmpaddr },
>      [CSR_PMPADDR1]   = { "pmpaddr1",  pmp, read_pmpaddr, write_pmpaddr },
>      [CSR_PMPADDR2]   = { "pmpaddr2",  pmp, read_pmpaddr, write_pmpaddr },
> @@ -6102,8 +6126,104 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
>      [CSR_PMPADDR11]  = { "pmpaddr11", pmp, read_pmpaddr, write_pmpaddr },
>      [CSR_PMPADDR12]  = { "pmpaddr12", pmp, read_pmpaddr, write_pmpaddr },
>      [CSR_PMPADDR13]  = { "pmpaddr13", pmp, read_pmpaddr, write_pmpaddr },
> -    [CSR_PMPADDR14] =  { "pmpaddr14", pmp, read_pmpaddr, write_pmpaddr },
> -    [CSR_PMPADDR15] =  { "pmpaddr15", pmp, read_pmpaddr, write_pmpaddr },
> +    [CSR_PMPADDR14]  = { "pmpaddr14", pmp, read_pmpaddr, write_pmpaddr },
> +    [CSR_PMPADDR15]  = { "pmpaddr15", pmp, read_pmpaddr, write_pmpaddr },
> +    [CSR_PMPADDR16]  = { "pmpaddr16", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR17]  = { "pmpaddr17", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR18]  = { "pmpaddr18", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR19]  = { "pmpaddr19", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR20]  = { "pmpaddr20", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR21]  = { "pmpaddr21", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR22]  = { "pmpaddr22", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR23]  = { "pmpaddr23", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR24]  = { "pmpaddr24", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR25]  = { "pmpaddr25", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR26]  = { "pmpaddr26", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR27]  = { "pmpaddr27", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR28]  = { "pmpaddr28", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR29]  = { "pmpaddr29", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR30]  = { "pmpaddr30", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR31]  = { "pmpaddr31", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR32]  = { "pmpaddr32", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR33]  = { "pmpaddr33", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR34]  = { "pmpaddr34", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR35]  = { "pmpaddr35", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR36]  = { "pmpaddr36", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR37]  = { "pmpaddr37", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR38]  = { "pmpaddr38", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR39]  = { "pmpaddr39", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR40]  = { "pmpaddr40", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR41]  = { "pmpaddr41", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR42]  = { "pmpaddr42", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR43]  = { "pmpaddr43", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR44]  = { "pmpaddr44", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR45]  = { "pmpaddr45", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR46]  = { "pmpaddr46", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR47]  = { "pmpaddr47", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR48]  = { "pmpaddr48", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR49]  = { "pmpaddr49", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR50]  = { "pmpaddr50", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR51]  = { "pmpaddr51", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR52]  = { "pmpaddr52", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR53]  = { "pmpaddr53", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR54]  = { "pmpaddr54", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR55]  = { "pmpaddr55", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR56]  = { "pmpaddr56", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR57]  = { "pmpaddr57", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR58]  = { "pmpaddr58", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR59]  = { "pmpaddr59", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR60]  = { "pmpaddr60", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR61]  = { "pmpaddr61", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR62]  = { "pmpaddr62", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
> +    [CSR_PMPADDR63]  = { "pmpaddr63", pmp, read_pmpaddr, write_pmpaddr,
> +                         .min_priv_ver = PRIV_VERSION_1_12_0           },
>
>      /* Debug CSRs */
>      [CSR_TSELECT]   =  { "tselect",  debug, read_tselect,  write_tselect  },
> --
> 2.48.1
>
>


^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH 2/2] target/riscv: Make PMP region count configurable
  2025-04-21  9:46 ` [PATCH 2/2] target/riscv: Make PMP region count configurable Jay Chang
  2025-04-23 11:30   ` Daniel Henrique Barboza
@ 2025-04-24 10:55   ` Alistair Francis
  2025-04-25  9:43     ` Jay Chang
  1 sibling, 1 reply; 8+ messages in thread
From: Alistair Francis @ 2025-04-24 10:55 UTC (permalink / raw)
  To: Jay Chang
  Cc: qemu-devel, qemu-riscv, Palmer Dabbelt, Alistair Francis,
	Weiwei Li, Daniel Henrique Barboza, Liu Zhiwei, Frank Chang

On Mon, Apr 21, 2025 at 7:48 PM Jay Chang <jay.chang@sifive.com> wrote:
>
> Previously, the number of PMP regions was hardcoded to 16 in QEMU.
> This patch replaces the fixed value with a new `pmp_regions` field,
> allowing platforms to configure the number of PMP regions.
>
> If no specific value is provided, the default number of PMP regions
> remains 16 to preserve the existing behavior.
>
> A new CPU parameter num-pmp-regions has been introduced to the QEMU
> command line. For example:
>
>         -cpu rv64, g=true, c=true, pmp=true, num-pmp-regions=8
>
> Reviewed-by: Frank Chang <frank.chang@sifive.com>
> Signed-off-by: Jay Chang <jay.chang@sifive.com>
> ---
>  target/riscv/cpu.c     | 46 ++++++++++++++++++++++++++++++++++++++++++
>  target/riscv/cpu.h     |  2 +-
>  target/riscv/cpu_cfg.h |  1 +
>  target/riscv/csr.c     |  5 ++++-
>  target/riscv/machine.c |  3 ++-
>  target/riscv/pmp.c     | 28 ++++++++++++++++---------
>  6 files changed, 73 insertions(+), 12 deletions(-)
>
> diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
> index 09ded6829a..528d77b820 100644
> --- a/target/riscv/cpu.c
> +++ b/target/riscv/cpu.c
> @@ -512,6 +512,7 @@ static void rv64_sifive_u_cpu_init(Object *obj)
>      cpu->cfg.ext_zicsr = true;
>      cpu->cfg.mmu = true;
>      cpu->cfg.pmp = true;
> +    cpu->cfg.pmp_regions = 8;
>  }
>
>  static void rv64_sifive_e_cpu_init(Object *obj)
> @@ -529,6 +530,7 @@ static void rv64_sifive_e_cpu_init(Object *obj)
>      cpu->cfg.ext_zifencei = true;
>      cpu->cfg.ext_zicsr = true;
>      cpu->cfg.pmp = true;
> +    cpu->cfg.pmp_regions = 8;
>  }
>
>  static void rv64_thead_c906_cpu_init(Object *obj)
> @@ -761,6 +763,7 @@ static void rv32_sifive_u_cpu_init(Object *obj)
>      cpu->cfg.ext_zicsr = true;
>      cpu->cfg.mmu = true;
>      cpu->cfg.pmp = true;
> +    cpu->cfg.pmp_regions = 8;
>  }
>
>  static void rv32_sifive_e_cpu_init(Object *obj)
> @@ -778,6 +781,7 @@ static void rv32_sifive_e_cpu_init(Object *obj)
>      cpu->cfg.ext_zifencei = true;
>      cpu->cfg.ext_zicsr = true;
>      cpu->cfg.pmp = true;
> +    cpu->cfg.pmp_regions = 8;
>  }
>
>  static void rv32_ibex_cpu_init(Object *obj)
> @@ -1478,6 +1482,7 @@ static void riscv_cpu_init(Object *obj)
>      cpu->cfg.cbom_blocksize = 64;
>      cpu->cfg.cbop_blocksize = 64;
>      cpu->cfg.cboz_blocksize = 64;
> +    cpu->cfg.pmp_regions = 16;
>      cpu->env.vext_ver = VEXT_VERSION_1_00_0;
>  }

Thanks for the patch

These CPU init properties will need a rebase on:
https://github.com/alistair23/qemu/tree/riscv-to-apply.next

Do you mind rebasing and sending a new version

Alistair

>
> @@ -1935,6 +1940,46 @@ static const PropertyInfo prop_pmp = {
>      .set = prop_pmp_set,
>  };
>
> +static void prop_num_pmp_regions_set(Object *obj, Visitor *v, const char *name,
> +                                     void *opaque, Error **errp)
> +{
> +    RISCVCPU *cpu = RISCV_CPU(obj);
> +    uint16_t value;
> +
> +    visit_type_uint16(v, name, &value, errp);
> +
> +    if (cpu->cfg.pmp_regions != value && riscv_cpu_is_vendor(obj)) {
> +        cpu_set_prop_err(cpu, name, errp);
> +        return;
> +    }
> +
> +    if (cpu->env.priv_ver < PRIV_VERSION_1_12_0 && value > 16) {
> +        error_setg(errp, "Number of PMP regions exceeds maximum available");
> +        return;
> +    } else if (value > 64) {
> +        error_setg(errp, "Number of PMP regions exceeds maximum available");
> +        return;
> +    }
> +
> +    cpu_option_add_user_setting(name, value);
> +    cpu->cfg.pmp_regions = value;
> +}
> +
> +static void prop_num_pmp_regions_get(Object *obj, Visitor *v, const char *name,
> +                                     void *opaque, Error **errp)
> +{
> +    uint16_t value = RISCV_CPU(obj)->cfg.pmp_regions;
> +
> +    visit_type_uint16(v, name, &value, errp);
> +}
> +
> +static const PropertyInfo prop_num_pmp_regions = {
> +    .type = "uint16",
> +    .description = "num-pmp-regions",
> +    .get = prop_num_pmp_regions_get,
> +    .set = prop_num_pmp_regions_set,
> +};
> +
>  static int priv_spec_from_str(const char *priv_spec_str)
>  {
>      int priv_version = -1;
> @@ -2934,6 +2979,7 @@ static const Property riscv_cpu_properties[] = {
>
>      {.name = "mmu", .info = &prop_mmu},
>      {.name = "pmp", .info = &prop_pmp},
> +    {.name = "num-pmp-regions", .info = &prop_num_pmp_regions},
>
>      {.name = "priv_spec", .info = &prop_priv_spec},
>      {.name = "vext_spec", .info = &prop_vext_spec},
> diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
> index 51e49e03de..50d58c15f2 100644
> --- a/target/riscv/cpu.h
> +++ b/target/riscv/cpu.h
> @@ -162,7 +162,7 @@ extern RISCVCPUImpliedExtsRule *riscv_multi_ext_implied_rules[];
>
>  #define MMU_USER_IDX 3
>
> -#define MAX_RISCV_PMPS (16)
> +#define MAX_RISCV_PMPS (64)
>
>  #if !defined(CONFIG_USER_ONLY)
>  #include "pmp.h"
> diff --git a/target/riscv/cpu_cfg.h b/target/riscv/cpu_cfg.h
> index 8a843482cc..8c805b45f6 100644
> --- a/target/riscv/cpu_cfg.h
> +++ b/target/riscv/cpu_cfg.h
> @@ -189,6 +189,7 @@ struct RISCVCPUConfig {
>      uint16_t cbom_blocksize;
>      uint16_t cbop_blocksize;
>      uint16_t cboz_blocksize;
> +    uint16_t pmp_regions;
>      bool mmu;
>      bool pmp;
>      bool debug;
> diff --git a/target/riscv/csr.c b/target/riscv/csr.c
> index f8f61ffff5..65f91be9c0 100644
> --- a/target/riscv/csr.c
> +++ b/target/riscv/csr.c
> @@ -736,7 +736,10 @@ static RISCVException dbltrp_hmode(CPURISCVState *env, int csrno)
>  static RISCVException pmp(CPURISCVState *env, int csrno)
>  {
>      if (riscv_cpu_cfg(env)->pmp) {
> -        if (csrno <= CSR_PMPCFG3) {
> +        uint16_t MAX_PMPCFG = (env->priv_ver >= PRIV_VERSION_1_12_0) ?
> +                              CSR_PMPCFG15 : CSR_PMPCFG3;
> +
> +        if (csrno <= MAX_PMPCFG) {
>              uint32_t reg_index = csrno - CSR_PMPCFG0;
>
>              /* TODO: RV128 restriction check */
> diff --git a/target/riscv/machine.c b/target/riscv/machine.c
> index 889e2b6570..c3e4e78802 100644
> --- a/target/riscv/machine.c
> +++ b/target/riscv/machine.c
> @@ -36,8 +36,9 @@ static int pmp_post_load(void *opaque, int version_id)
>      RISCVCPU *cpu = opaque;
>      CPURISCVState *env = &cpu->env;
>      int i;
> +    uint16_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
>
> -    for (i = 0; i < MAX_RISCV_PMPS; i++) {
> +    for (i = 0; i < pmp_regions; i++) {
>          pmp_update_rule_addr(env, i);
>      }
>      pmp_update_rule_nums(env);
> diff --git a/target/riscv/pmp.c b/target/riscv/pmp.c
> index c685f7f2c5..3439295d41 100644
> --- a/target/riscv/pmp.c
> +++ b/target/riscv/pmp.c
> @@ -121,7 +121,9 @@ uint32_t pmp_get_num_rules(CPURISCVState *env)
>   */
>  static inline uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t pmp_index)
>  {
> -    if (pmp_index < MAX_RISCV_PMPS) {
> +    uint16_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
> +
> +    if (pmp_index < pmp_regions) {
>          return env->pmp_state.pmp[pmp_index].cfg_reg;
>      }
>
> @@ -135,7 +137,9 @@ static inline uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t pmp_index)
>   */
>  static bool pmp_write_cfg(CPURISCVState *env, uint32_t pmp_index, uint8_t val)
>  {
> -    if (pmp_index < MAX_RISCV_PMPS) {
> +    uint16_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
> +
> +    if (pmp_index < pmp_regions) {
>          if (env->pmp_state.pmp[pmp_index].cfg_reg == val) {
>              /* no change */
>              return false;
> @@ -235,9 +239,10 @@ void pmp_update_rule_addr(CPURISCVState *env, uint32_t pmp_index)
>  void pmp_update_rule_nums(CPURISCVState *env)
>  {
>      int i;
> +    uint16_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
>
>      env->pmp_state.num_rules = 0;
> -    for (i = 0; i < MAX_RISCV_PMPS; i++) {
> +    for (i = 0; i < pmp_regions; i++) {
>          const uint8_t a_field =
>              pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg);
>          if (PMP_AMATCH_OFF != a_field) {
> @@ -331,6 +336,7 @@ bool pmp_hart_has_privs(CPURISCVState *env, hwaddr addr,
>      int pmp_size = 0;
>      hwaddr s = 0;
>      hwaddr e = 0;
> +    uint16_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
>
>      /* Short cut if no rules */
>      if (0 == pmp_get_num_rules(env)) {
> @@ -355,7 +361,7 @@ bool pmp_hart_has_privs(CPURISCVState *env, hwaddr addr,
>       * 1.10 draft priv spec states there is an implicit order
>       * from low to high
>       */
> -    for (i = 0; i < MAX_RISCV_PMPS; i++) {
> +    for (i = 0; i < pmp_regions; i++) {
>          s = pmp_is_in_range(env, i, addr);
>          e = pmp_is_in_range(env, i, addr + pmp_size - 1);
>
> @@ -526,8 +532,9 @@ void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
>  {
>      trace_pmpaddr_csr_write(env->mhartid, addr_index, val);
>      bool is_next_cfg_tor = false;
> +    uint16_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
>
> -    if (addr_index < MAX_RISCV_PMPS) {
> +    if (addr_index < pmp_regions) {
>          if (env->pmp_state.pmp[addr_index].addr_reg == val) {
>              /* no change */
>              return;
> @@ -537,7 +544,7 @@ void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
>           * In TOR mode, need to check the lock bit of the next pmp
>           * (if there is a next).
>           */
> -        if (addr_index + 1 < MAX_RISCV_PMPS) {
> +        if (addr_index + 1 < pmp_regions) {
>              uint8_t pmp_cfg = env->pmp_state.pmp[addr_index + 1].cfg_reg;
>              is_next_cfg_tor = PMP_AMATCH_TOR == pmp_get_a_field(pmp_cfg);
>
> @@ -572,8 +579,9 @@ void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
>  target_ulong pmpaddr_csr_read(CPURISCVState *env, uint32_t addr_index)
>  {
>      target_ulong val = 0;
> +    uint16_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
>
> -    if (addr_index < MAX_RISCV_PMPS) {
> +    if (addr_index < pmp_regions) {
>          val = env->pmp_state.pmp[addr_index].addr_reg;
>          trace_pmpaddr_csr_read(env->mhartid, addr_index, val);
>      } else {
> @@ -591,6 +599,7 @@ void mseccfg_csr_write(CPURISCVState *env, target_ulong val)
>  {
>      int i;
>      uint64_t mask = MSECCFG_MMWP | MSECCFG_MML;
> +    uint16_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
>      /* Update PMM field only if the value is valid according to Zjpm v1.0 */
>      if (riscv_cpu_cfg(env)->ext_smmpm &&
>          riscv_cpu_mxl(env) == MXL_RV64 &&
> @@ -602,7 +611,7 @@ void mseccfg_csr_write(CPURISCVState *env, target_ulong val)
>
>      /* RLB cannot be enabled if it's already 0 and if any regions are locked */
>      if (!MSECCFG_RLB_ISSET(env)) {
> -        for (i = 0; i < MAX_RISCV_PMPS; i++) {
> +        for (i = 0; i < pmp_regions; i++) {
>              if (pmp_is_locked(env, i)) {
>                  val &= ~MSECCFG_RLB;
>                  break;
> @@ -658,6 +667,7 @@ target_ulong pmp_get_tlb_size(CPURISCVState *env, hwaddr addr)
>      hwaddr tlb_sa = addr & ~(TARGET_PAGE_SIZE - 1);
>      hwaddr tlb_ea = tlb_sa + TARGET_PAGE_SIZE - 1;
>      int i;
> +    uint16_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
>
>      /*
>       * If PMP is not supported or there are no PMP rules, the TLB page will not
> @@ -668,7 +678,7 @@ target_ulong pmp_get_tlb_size(CPURISCVState *env, hwaddr addr)
>          return TARGET_PAGE_SIZE;
>      }
>
> -    for (i = 0; i < MAX_RISCV_PMPS; i++) {
> +    for (i = 0; i < pmp_regions; i++) {
>          if (pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg) == PMP_AMATCH_OFF) {
>              continue;
>          }
> --
> 2.48.1
>
>


^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH 2/2] target/riscv: Make PMP region count configurable
  2025-04-24 10:55   ` Alistair Francis
@ 2025-04-25  9:43     ` Jay Chang
  0 siblings, 0 replies; 8+ messages in thread
From: Jay Chang @ 2025-04-25  9:43 UTC (permalink / raw)
  To: Alistair Francis
  Cc: qemu-devel, qemu-riscv, Palmer Dabbelt, Alistair Francis,
	Weiwei Li, Daniel Henrique Barboza, Liu Zhiwei, Frank Chang

[-- Attachment #1: Type: text/plain, Size: 12663 bytes --]

I will send v2 patch

Jay Chang

On Thu, Apr 24, 2025 at 6:55 PM Alistair Francis <alistair23@gmail.com>
wrote:

> On Mon, Apr 21, 2025 at 7:48 PM Jay Chang <jay.chang@sifive.com> wrote:
> >
> > Previously, the number of PMP regions was hardcoded to 16 in QEMU.
> > This patch replaces the fixed value with a new `pmp_regions` field,
> > allowing platforms to configure the number of PMP regions.
> >
> > If no specific value is provided, the default number of PMP regions
> > remains 16 to preserve the existing behavior.
> >
> > A new CPU parameter num-pmp-regions has been introduced to the QEMU
> > command line. For example:
> >
> >         -cpu rv64, g=true, c=true, pmp=true, num-pmp-regions=8
> >
> > Reviewed-by: Frank Chang <frank.chang@sifive.com>
> > Signed-off-by: Jay Chang <jay.chang@sifive.com>
> > ---
> >  target/riscv/cpu.c     | 46 ++++++++++++++++++++++++++++++++++++++++++
> >  target/riscv/cpu.h     |  2 +-
> >  target/riscv/cpu_cfg.h |  1 +
> >  target/riscv/csr.c     |  5 ++++-
> >  target/riscv/machine.c |  3 ++-
> >  target/riscv/pmp.c     | 28 ++++++++++++++++---------
> >  6 files changed, 73 insertions(+), 12 deletions(-)
> >
> > diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
> > index 09ded6829a..528d77b820 100644
> > --- a/target/riscv/cpu.c
> > +++ b/target/riscv/cpu.c
> > @@ -512,6 +512,7 @@ static void rv64_sifive_u_cpu_init(Object *obj)
> >      cpu->cfg.ext_zicsr = true;
> >      cpu->cfg.mmu = true;
> >      cpu->cfg.pmp = true;
> > +    cpu->cfg.pmp_regions = 8;
> >  }
> >
> >  static void rv64_sifive_e_cpu_init(Object *obj)
> > @@ -529,6 +530,7 @@ static void rv64_sifive_e_cpu_init(Object *obj)
> >      cpu->cfg.ext_zifencei = true;
> >      cpu->cfg.ext_zicsr = true;
> >      cpu->cfg.pmp = true;
> > +    cpu->cfg.pmp_regions = 8;
> >  }
> >
> >  static void rv64_thead_c906_cpu_init(Object *obj)
> > @@ -761,6 +763,7 @@ static void rv32_sifive_u_cpu_init(Object *obj)
> >      cpu->cfg.ext_zicsr = true;
> >      cpu->cfg.mmu = true;
> >      cpu->cfg.pmp = true;
> > +    cpu->cfg.pmp_regions = 8;
> >  }
> >
> >  static void rv32_sifive_e_cpu_init(Object *obj)
> > @@ -778,6 +781,7 @@ static void rv32_sifive_e_cpu_init(Object *obj)
> >      cpu->cfg.ext_zifencei = true;
> >      cpu->cfg.ext_zicsr = true;
> >      cpu->cfg.pmp = true;
> > +    cpu->cfg.pmp_regions = 8;
> >  }
> >
> >  static void rv32_ibex_cpu_init(Object *obj)
> > @@ -1478,6 +1482,7 @@ static void riscv_cpu_init(Object *obj)
> >      cpu->cfg.cbom_blocksize = 64;
> >      cpu->cfg.cbop_blocksize = 64;
> >      cpu->cfg.cboz_blocksize = 64;
> > +    cpu->cfg.pmp_regions = 16;
> >      cpu->env.vext_ver = VEXT_VERSION_1_00_0;
> >  }
>
> Thanks for the patch
>
> These CPU init properties will need a rebase on:
> https://github.com/alistair23/qemu/tree/riscv-to-apply.next
>
> Do you mind rebasing and sending a new version
>
> Alistair
>
> >
> > @@ -1935,6 +1940,46 @@ static const PropertyInfo prop_pmp = {
> >      .set = prop_pmp_set,
> >  };
> >
> > +static void prop_num_pmp_regions_set(Object *obj, Visitor *v, const
> char *name,
> > +                                     void *opaque, Error **errp)
> > +{
> > +    RISCVCPU *cpu = RISCV_CPU(obj);
> > +    uint16_t value;
> > +
> > +    visit_type_uint16(v, name, &value, errp);
> > +
> > +    if (cpu->cfg.pmp_regions != value && riscv_cpu_is_vendor(obj)) {
> > +        cpu_set_prop_err(cpu, name, errp);
> > +        return;
> > +    }
> > +
> > +    if (cpu->env.priv_ver < PRIV_VERSION_1_12_0 && value > 16) {
> > +        error_setg(errp, "Number of PMP regions exceeds maximum
> available");
> > +        return;
> > +    } else if (value > 64) {
> > +        error_setg(errp, "Number of PMP regions exceeds maximum
> available");
> > +        return;
> > +    }
> > +
> > +    cpu_option_add_user_setting(name, value);
> > +    cpu->cfg.pmp_regions = value;
> > +}
> > +
> > +static void prop_num_pmp_regions_get(Object *obj, Visitor *v, const
> char *name,
> > +                                     void *opaque, Error **errp)
> > +{
> > +    uint16_t value = RISCV_CPU(obj)->cfg.pmp_regions;
> > +
> > +    visit_type_uint16(v, name, &value, errp);
> > +}
> > +
> > +static const PropertyInfo prop_num_pmp_regions = {
> > +    .type = "uint16",
> > +    .description = "num-pmp-regions",
> > +    .get = prop_num_pmp_regions_get,
> > +    .set = prop_num_pmp_regions_set,
> > +};
> > +
> >  static int priv_spec_from_str(const char *priv_spec_str)
> >  {
> >      int priv_version = -1;
> > @@ -2934,6 +2979,7 @@ static const Property riscv_cpu_properties[] = {
> >
> >      {.name = "mmu", .info = &prop_mmu},
> >      {.name = "pmp", .info = &prop_pmp},
> > +    {.name = "num-pmp-regions", .info = &prop_num_pmp_regions},
> >
> >      {.name = "priv_spec", .info = &prop_priv_spec},
> >      {.name = "vext_spec", .info = &prop_vext_spec},
> > diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
> > index 51e49e03de..50d58c15f2 100644
> > --- a/target/riscv/cpu.h
> > +++ b/target/riscv/cpu.h
> > @@ -162,7 +162,7 @@ extern RISCVCPUImpliedExtsRule
> *riscv_multi_ext_implied_rules[];
> >
> >  #define MMU_USER_IDX 3
> >
> > -#define MAX_RISCV_PMPS (16)
> > +#define MAX_RISCV_PMPS (64)
> >
> >  #if !defined(CONFIG_USER_ONLY)
> >  #include "pmp.h"
> > diff --git a/target/riscv/cpu_cfg.h b/target/riscv/cpu_cfg.h
> > index 8a843482cc..8c805b45f6 100644
> > --- a/target/riscv/cpu_cfg.h
> > +++ b/target/riscv/cpu_cfg.h
> > @@ -189,6 +189,7 @@ struct RISCVCPUConfig {
> >      uint16_t cbom_blocksize;
> >      uint16_t cbop_blocksize;
> >      uint16_t cboz_blocksize;
> > +    uint16_t pmp_regions;
> >      bool mmu;
> >      bool pmp;
> >      bool debug;
> > diff --git a/target/riscv/csr.c b/target/riscv/csr.c
> > index f8f61ffff5..65f91be9c0 100644
> > --- a/target/riscv/csr.c
> > +++ b/target/riscv/csr.c
> > @@ -736,7 +736,10 @@ static RISCVException dbltrp_hmode(CPURISCVState
> *env, int csrno)
> >  static RISCVException pmp(CPURISCVState *env, int csrno)
> >  {
> >      if (riscv_cpu_cfg(env)->pmp) {
> > -        if (csrno <= CSR_PMPCFG3) {
> > +        uint16_t MAX_PMPCFG = (env->priv_ver >= PRIV_VERSION_1_12_0) ?
> > +                              CSR_PMPCFG15 : CSR_PMPCFG3;
> > +
> > +        if (csrno <= MAX_PMPCFG) {
> >              uint32_t reg_index = csrno - CSR_PMPCFG0;
> >
> >              /* TODO: RV128 restriction check */
> > diff --git a/target/riscv/machine.c b/target/riscv/machine.c
> > index 889e2b6570..c3e4e78802 100644
> > --- a/target/riscv/machine.c
> > +++ b/target/riscv/machine.c
> > @@ -36,8 +36,9 @@ static int pmp_post_load(void *opaque, int version_id)
> >      RISCVCPU *cpu = opaque;
> >      CPURISCVState *env = &cpu->env;
> >      int i;
> > +    uint16_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
> >
> > -    for (i = 0; i < MAX_RISCV_PMPS; i++) {
> > +    for (i = 0; i < pmp_regions; i++) {
> >          pmp_update_rule_addr(env, i);
> >      }
> >      pmp_update_rule_nums(env);
> > diff --git a/target/riscv/pmp.c b/target/riscv/pmp.c
> > index c685f7f2c5..3439295d41 100644
> > --- a/target/riscv/pmp.c
> > +++ b/target/riscv/pmp.c
> > @@ -121,7 +121,9 @@ uint32_t pmp_get_num_rules(CPURISCVState *env)
> >   */
> >  static inline uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t
> pmp_index)
> >  {
> > -    if (pmp_index < MAX_RISCV_PMPS) {
> > +    uint16_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
> > +
> > +    if (pmp_index < pmp_regions) {
> >          return env->pmp_state.pmp[pmp_index].cfg_reg;
> >      }
> >
> > @@ -135,7 +137,9 @@ static inline uint8_t pmp_read_cfg(CPURISCVState
> *env, uint32_t pmp_index)
> >   */
> >  static bool pmp_write_cfg(CPURISCVState *env, uint32_t pmp_index,
> uint8_t val)
> >  {
> > -    if (pmp_index < MAX_RISCV_PMPS) {
> > +    uint16_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
> > +
> > +    if (pmp_index < pmp_regions) {
> >          if (env->pmp_state.pmp[pmp_index].cfg_reg == val) {
> >              /* no change */
> >              return false;
> > @@ -235,9 +239,10 @@ void pmp_update_rule_addr(CPURISCVState *env,
> uint32_t pmp_index)
> >  void pmp_update_rule_nums(CPURISCVState *env)
> >  {
> >      int i;
> > +    uint16_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
> >
> >      env->pmp_state.num_rules = 0;
> > -    for (i = 0; i < MAX_RISCV_PMPS; i++) {
> > +    for (i = 0; i < pmp_regions; i++) {
> >          const uint8_t a_field =
> >              pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg);
> >          if (PMP_AMATCH_OFF != a_field) {
> > @@ -331,6 +336,7 @@ bool pmp_hart_has_privs(CPURISCVState *env, hwaddr
> addr,
> >      int pmp_size = 0;
> >      hwaddr s = 0;
> >      hwaddr e = 0;
> > +    uint16_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
> >
> >      /* Short cut if no rules */
> >      if (0 == pmp_get_num_rules(env)) {
> > @@ -355,7 +361,7 @@ bool pmp_hart_has_privs(CPURISCVState *env, hwaddr
> addr,
> >       * 1.10 draft priv spec states there is an implicit order
> >       * from low to high
> >       */
> > -    for (i = 0; i < MAX_RISCV_PMPS; i++) {
> > +    for (i = 0; i < pmp_regions; i++) {
> >          s = pmp_is_in_range(env, i, addr);
> >          e = pmp_is_in_range(env, i, addr + pmp_size - 1);
> >
> > @@ -526,8 +532,9 @@ void pmpaddr_csr_write(CPURISCVState *env, uint32_t
> addr_index,
> >  {
> >      trace_pmpaddr_csr_write(env->mhartid, addr_index, val);
> >      bool is_next_cfg_tor = false;
> > +    uint16_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
> >
> > -    if (addr_index < MAX_RISCV_PMPS) {
> > +    if (addr_index < pmp_regions) {
> >          if (env->pmp_state.pmp[addr_index].addr_reg == val) {
> >              /* no change */
> >              return;
> > @@ -537,7 +544,7 @@ void pmpaddr_csr_write(CPURISCVState *env, uint32_t
> addr_index,
> >           * In TOR mode, need to check the lock bit of the next pmp
> >           * (if there is a next).
> >           */
> > -        if (addr_index + 1 < MAX_RISCV_PMPS) {
> > +        if (addr_index + 1 < pmp_regions) {
> >              uint8_t pmp_cfg = env->pmp_state.pmp[addr_index +
> 1].cfg_reg;
> >              is_next_cfg_tor = PMP_AMATCH_TOR ==
> pmp_get_a_field(pmp_cfg);
> >
> > @@ -572,8 +579,9 @@ void pmpaddr_csr_write(CPURISCVState *env, uint32_t
> addr_index,
> >  target_ulong pmpaddr_csr_read(CPURISCVState *env, uint32_t addr_index)
> >  {
> >      target_ulong val = 0;
> > +    uint16_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
> >
> > -    if (addr_index < MAX_RISCV_PMPS) {
> > +    if (addr_index < pmp_regions) {
> >          val = env->pmp_state.pmp[addr_index].addr_reg;
> >          trace_pmpaddr_csr_read(env->mhartid, addr_index, val);
> >      } else {
> > @@ -591,6 +599,7 @@ void mseccfg_csr_write(CPURISCVState *env,
> target_ulong val)
> >  {
> >      int i;
> >      uint64_t mask = MSECCFG_MMWP | MSECCFG_MML;
> > +    uint16_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
> >      /* Update PMM field only if the value is valid according to Zjpm
> v1.0 */
> >      if (riscv_cpu_cfg(env)->ext_smmpm &&
> >          riscv_cpu_mxl(env) == MXL_RV64 &&
> > @@ -602,7 +611,7 @@ void mseccfg_csr_write(CPURISCVState *env,
> target_ulong val)
> >
> >      /* RLB cannot be enabled if it's already 0 and if any regions are
> locked */
> >      if (!MSECCFG_RLB_ISSET(env)) {
> > -        for (i = 0; i < MAX_RISCV_PMPS; i++) {
> > +        for (i = 0; i < pmp_regions; i++) {
> >              if (pmp_is_locked(env, i)) {
> >                  val &= ~MSECCFG_RLB;
> >                  break;
> > @@ -658,6 +667,7 @@ target_ulong pmp_get_tlb_size(CPURISCVState *env,
> hwaddr addr)
> >      hwaddr tlb_sa = addr & ~(TARGET_PAGE_SIZE - 1);
> >      hwaddr tlb_ea = tlb_sa + TARGET_PAGE_SIZE - 1;
> >      int i;
> > +    uint16_t pmp_regions = riscv_cpu_cfg(env)->pmp_regions;
> >
> >      /*
> >       * If PMP is not supported or there are no PMP rules, the TLB page
> will not
> > @@ -668,7 +678,7 @@ target_ulong pmp_get_tlb_size(CPURISCVState *env,
> hwaddr addr)
> >          return TARGET_PAGE_SIZE;
> >      }
> >
> > -    for (i = 0; i < MAX_RISCV_PMPS; i++) {
> > +    for (i = 0; i < pmp_regions; i++) {
> >          if (pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg) ==
> PMP_AMATCH_OFF) {
> >              continue;
> >          }
> > --
> > 2.48.1
> >
> >
>

[-- Attachment #2: Type: text/html, Size: 15757 bytes --]

^ permalink raw reply	[flat|nested] 8+ messages in thread

end of thread, other threads:[~2025-04-25  9:44 UTC | newest]

Thread overview: 8+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2025-04-21  9:46 [PATCH 0/2] Extend and configure PMP region count Jay Chang
2025-04-21  9:46 ` [PATCH 1/2] target/riscv: Extend PMP region up to 64 Jay Chang
2025-04-23 11:22   ` Daniel Henrique Barboza
2025-04-24 10:53   ` Alistair Francis
2025-04-21  9:46 ` [PATCH 2/2] target/riscv: Make PMP region count configurable Jay Chang
2025-04-23 11:30   ` Daniel Henrique Barboza
2025-04-24 10:55   ` Alistair Francis
2025-04-25  9:43     ` Jay Chang

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).