From: Yu-Chien Peter Lin <peter.lin@sifive.com>
To: opensbi@lists.infradead.org
Cc: greentime.hu@sifive.com,
Yu-Chien Peter Lin <peter.lin@sifive.com>,
zong.li@sifive.com
Subject: [RFC PATCH 6/7] lib: sbi: sbi_hart: extend PMP handling to support multiple reserved entries
Date: Fri, 15 Aug 2025 18:01:14 +0800 [thread overview]
Message-ID: <20250815100116.27776-7-peter.lin@sifive.com> (raw)
In-Reply-To: <20250815100116.27776-1-peter.lin@sifive.com>
Previously, OpenSBI supported only a single reserved PMP entry. This
change adds support for multiple reserved PMP entries, configurable
via the `reserved-pmp-count` property in the opensbi-config DT node.
Signed-off-by: Yu-Chien Peter Lin <peter.lin@sifive.com>
---
include/sbi/sbi_hart.h | 15 ----------
lib/sbi/sbi_domain_context.c | 6 +++-
lib/sbi/sbi_hart.c | 56 +++++++++++++++++++++++++++---------
3 files changed, 47 insertions(+), 30 deletions(-)
diff --git a/include/sbi/sbi_hart.h b/include/sbi/sbi_hart.h
index 82b19dcf..86c2675b 100644
--- a/include/sbi/sbi_hart.h
+++ b/include/sbi/sbi_hart.h
@@ -101,21 +101,6 @@ enum sbi_hart_csrs {
SBI_HART_CSR_MAX,
};
-/*
- * Smepmp enforces access boundaries between M-mode and
- * S/U-mode. When it is enabled, the PMPs are programmed
- * such that M-mode doesn't have access to S/U-mode memory.
- *
- * To give M-mode R/W access to the shared memory between M and
- * S/U-mode, first entry is reserved. It is disabled at boot.
- * When shared memory access is required, the physical address
- * should be programmed into the first PMP entry with R/W
- * permissions to the M-mode. Once the work is done, it should be
- * unmapped. sbi_hart_map_saddr/sbi_hart_unmap_saddr function
- * pair should be used to map/unmap the shared memory.
- */
-#define SBI_SMEPMP_RESV_ENTRY 0
-
struct sbi_hart_features {
bool detected;
int priv_version;
diff --git a/lib/sbi/sbi_domain_context.c b/lib/sbi/sbi_domain_context.c
index fb04d81d..a78bd28c 100644
--- a/lib/sbi/sbi_domain_context.c
+++ b/lib/sbi/sbi_domain_context.c
@@ -101,6 +101,7 @@ static void switch_to_next_domain_context(struct hart_context *ctx,
struct sbi_domain *current_dom = ctx->dom;
struct sbi_domain *target_dom = dom_ctx->dom;
struct sbi_scratch *scratch = sbi_scratch_thishart_ptr();
+ const struct sbi_platform *plat = sbi_platform_thishart_ptr();
unsigned int pmp_count = sbi_hart_pmp_count(scratch);
/* Assign current hart to target domain */
@@ -115,7 +116,10 @@ static void switch_to_next_domain_context(struct hart_context *ctx,
spin_unlock(&target_dom->assigned_harts_lock);
/* Reconfigure PMP settings for the new domain */
- for (int i = 0; i < pmp_count; i++) {
+ for (int i = plat->reserved_pmp_count; i < pmp_count; i++) {
+ if (pmp_is_fw_region(i, current_dom))
+ continue;
+
sbi_platform_pmp_disable(sbi_platform_thishart_ptr(), i);
pmp_disable(i);
}
diff --git a/lib/sbi/sbi_hart.c b/lib/sbi/sbi_hart.c
index 6a2d7d6f..e8762084 100644
--- a/lib/sbi/sbi_hart.c
+++ b/lib/sbi/sbi_hart.c
@@ -17,6 +17,7 @@
#include <sbi/sbi_csr_detect.h>
#include <sbi/sbi_error.h>
#include <sbi/sbi_hart.h>
+#include <sbi/sbi_init.h>
#include <sbi/sbi_math.h>
#include <sbi/sbi_platform.h>
#include <sbi/sbi_pmu.h>
@@ -30,6 +31,7 @@ extern void __sbi_expected_trap_hext(void);
void (*sbi_hart_expected_trap)(void) = &__sbi_expected_trap;
static unsigned long hart_features_offset;
+static unsigned int saddr_pmp_id;
static void mstatus_init(struct sbi_scratch *scratch)
{
@@ -393,6 +395,7 @@ static int sbi_hart_smepmp_configure(struct sbi_scratch *scratch,
unsigned long pmp_addr_max)
{
struct sbi_domain_memregion *reg;
+ const struct sbi_platform *plat = sbi_platform_thishart_ptr();
struct sbi_domain *dom = sbi_domain_thishart_ptr();
unsigned int pmp_idx, pmp_flags;
@@ -402,16 +405,19 @@ static int sbi_hart_smepmp_configure(struct sbi_scratch *scratch,
*/
csr_set(CSR_MSECCFG, MSECCFG_RLB);
- /* Disable the reserved entry */
- pmp_disable(SBI_SMEPMP_RESV_ENTRY);
+ /* Disable the reserved entries */
+ for (int i = 0; i < plat->reserved_pmp_count; i++)
+ pmp_disable(i);
/* Program M-only regions when MML is not set. */
pmp_idx = 0;
sbi_domain_for_each_memregion(dom, reg) {
/* Skip reserved entry */
- if (pmp_idx == SBI_SMEPMP_RESV_ENTRY)
- pmp_idx++;
- if (pmp_count <= pmp_idx)
+ if (pmp_idx < plat->reserved_pmp_count)
+ pmp_idx += plat->reserved_pmp_count;
+ if (pmp_count <= pmp_idx) {
+ sbi_printf("%s: ERR: region %#lx cannot be protected - "
+ "insufficient PMP entries\n", __func__, reg->base);
break;
/* Skip shared and SU-only regions */
@@ -435,9 +441,11 @@ static int sbi_hart_smepmp_configure(struct sbi_scratch *scratch,
pmp_idx = 0;
sbi_domain_for_each_memregion(dom, reg) {
/* Skip reserved entry */
- if (pmp_idx == SBI_SMEPMP_RESV_ENTRY)
- pmp_idx++;
- if (pmp_count <= pmp_idx)
+ if (pmp_idx < plat->reserved_pmp_count)
+ pmp_idx += plat->reserved_pmp_count;
+ if (pmp_count <= pmp_idx) {
+ sbi_printf("%s: ERR: region %#lx cannot be protected - "
+ "insufficient PMP entries\n", __func__, reg->base);
break;
/* Skip M-only regions */
@@ -468,13 +476,19 @@ static int sbi_hart_oldpmp_configure(struct sbi_scratch *scratch,
unsigned long pmp_addr_max)
{
struct sbi_domain_memregion *reg;
+ const struct sbi_platform *plat = sbi_platform_thishart_ptr();
struct sbi_domain *dom = sbi_domain_thishart_ptr();
unsigned int pmp_idx = 0;
unsigned int pmp_flags;
unsigned long pmp_addr;
sbi_domain_for_each_memregion(dom, reg) {
- if (pmp_count <= pmp_idx)
+ /* Skip reserved entry */
+ if (pmp_idx < plat->reserved_pmp_count)
+ pmp_idx += plat->reserved_pmp_count;
+ if (pmp_count <= pmp_idx) {
+ sbi_printf("%s: ERR: region %#lx cannot be protected - "
+ "insufficient PMP entries\n", __func__, reg->base);
break;
pmp_flags = 0;
@@ -510,6 +524,19 @@ static int sbi_hart_oldpmp_configure(struct sbi_scratch *scratch,
return 0;
}
+/*
+ * Smepmp enforces access boundaries between M-mode and
+ * S/U-mode. When it is enabled, the PMPs are programmed
+ * such that M-mode doesn't have access to S/U-mode memory.
+ *
+ * To give M-mode R/W access to the shared memory between M and
+ * S/U-mode, a high-priority entry is reserved. It is disabled at boot.
+ * When shared memory access is required, the physical address
+ * should be programmed into the reserved PMP entry with R/W
+ * permissions to the M-mode. Once the work is done, it should be
+ * unmapped. sbi_hart_map_saddr/sbi_hart_unmap_saddr function
+ * pair should be used to map/unmap the shared memory.
+ */
int sbi_hart_map_saddr(unsigned long addr, unsigned long size)
{
/* shared R/W access for M and S/U mode */
@@ -521,8 +548,9 @@ int sbi_hart_map_saddr(unsigned long addr, unsigned long size)
if (!sbi_hart_has_extension(scratch, SBI_HART_EXT_SMEPMP))
return SBI_OK;
- if (is_pmp_entry_mapped(SBI_SMEPMP_RESV_ENTRY))
+ if (reserved_pmp_alloc(&saddr_pmp_id)) {
return SBI_ENOSPC;
+ }
for (order = MAX(sbi_hart_pmp_log2gran(scratch), log2roundup(size));
order <= __riscv_xlen; order++) {
@@ -538,10 +566,10 @@ int sbi_hart_map_saddr(unsigned long addr, unsigned long size)
}
}
- sbi_platform_pmp_set(sbi_platform_ptr(scratch), SBI_SMEPMP_RESV_ENTRY,
+ sbi_platform_pmp_set(sbi_platform_ptr(scratch), saddr_pmp_id,
SBI_DOMAIN_MEMREGION_SHARED_SURW_MRW,
pmp_flags, base, order);
- pmp_set(SBI_SMEPMP_RESV_ENTRY, pmp_flags, base, order);
+ pmp_set(saddr_pmp_id, pmp_flags, base, order);
return SBI_OK;
}
@@ -553,8 +581,8 @@ int sbi_hart_unmap_saddr(void)
if (!sbi_hart_has_extension(scratch, SBI_HART_EXT_SMEPMP))
return SBI_OK;
- sbi_platform_pmp_disable(sbi_platform_ptr(scratch), SBI_SMEPMP_RESV_ENTRY);
- return pmp_disable(SBI_SMEPMP_RESV_ENTRY);
+ sbi_platform_pmp_disable(sbi_platform_ptr(scratch), saddr_pmp_id);
+ return pmp_disable(saddr_pmp_id);
}
int sbi_hart_pmp_configure(struct sbi_scratch *scratch)
--
2.48.0
--
opensbi mailing list
opensbi@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/opensbi
next prev parent reply other threads:[~2025-08-15 13:14 UTC|newest]
Thread overview: 9+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-08-15 10:01 [RFC PATCH 0/7] Extend the reserved PMP entries Yu-Chien Peter Lin
2025-08-15 10:01 ` [RFC PATCH 1/7] lib: utils: fdt_helper: add fdt_has_isa_extension() helper Yu-Chien Peter Lin
2025-08-15 10:01 ` [RFC PATCH 2/7] include: sbi: sbi_platform: add reserved_pmp_count field Yu-Chien Peter Lin
2025-08-15 10:01 ` [RFC PATCH 3/7] platform: generic: platform: introduce fw_platform_get_reserved_pmp_count() Yu-Chien Peter Lin
2025-08-15 10:01 ` [RFC PATCH 4/7] lib: sbi_init: print total and reserved PMP counts Yu-Chien Peter Lin
2025-08-15 10:01 ` [RFC PATCH 5/7] lib: sbi: riscv_asm: add reserved_pmp_alloc() to allocate a reserved PMP Yu-Chien Peter Lin
2025-08-15 10:01 ` Yu-Chien Peter Lin [this message]
2025-08-15 10:01 ` [RFC PATCH 7/7] docs: opensbi_config: add description for reserved-pmp-count Yu-Chien Peter Lin
2025-10-06 5:21 ` [RFC PATCH 0/7] Extend the reserved PMP entries Anup Patel
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20250815100116.27776-7-peter.lin@sifive.com \
--to=peter.lin@sifive.com \
--cc=greentime.hu@sifive.com \
--cc=opensbi@lists.infradead.org \
--cc=zong.li@sifive.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox