From: Anup Patel <apatel@ventanamicro.com>
To: Atish Patra <atishp@rivosinc.com>
Cc: Andrew Jones <ajones@ventanamicro.com>,
Anup Patel <anup@brainfault.org>,
opensbi@lists.infradead.org, Anup Patel <apatel@ventanamicro.com>
Subject: [PATCH 3/5] lib: sbi: Implement hart protection for PMP and ePMP
Date: Wed, 26 Nov 2025 19:48:42 +0530 [thread overview]
Message-ID: <20251126141845.248697-4-apatel@ventanamicro.com> (raw)
In-Reply-To: <20251126141845.248697-1-apatel@ventanamicro.com>
Implement PMP and ePMP based hart protection abstraction so
that usage of sbi_hart_pmp_xyz() functions can be replaced
with sbi_hart_protection_xyz() functions.
Signed-off-by: Anup Patel <apatel@ventanamicro.com>
---
lib/sbi/sbi_hart.c | 209 ++++++++++++++++++++++++++++-----------------
1 file changed, 133 insertions(+), 76 deletions(-)
diff --git a/lib/sbi/sbi_hart.c b/lib/sbi/sbi_hart.c
index c5a8d248..8869839d 100644
--- a/lib/sbi/sbi_hart.c
+++ b/lib/sbi/sbi_hart.c
@@ -17,6 +17,7 @@
#include <sbi/sbi_csr_detect.h>
#include <sbi/sbi_error.h>
#include <sbi/sbi_hart.h>
+#include <sbi/sbi_hart_protection.h>
#include <sbi/sbi_math.h>
#include <sbi/sbi_platform.h>
#include <sbi/sbi_pmu.h>
@@ -311,6 +312,30 @@ bool sbi_hart_smepmp_is_fw_region(unsigned int pmp_idx)
return bitmap_test(fw_smepmp_ids, pmp_idx) ? true : false;
}
+static void sbi_hart_pmp_fence(void)
+{
+ /*
+ * As per section 3.7.2 of privileged specification v1.12,
+ * virtual address translations can be speculatively performed
+ (even before actual access). These, along with PMP translations,
+ * can be cached. This can pose a problem with CPU hotplug
+ * and non-retentive suspend scenario because PMP states are
+ * not preserved.
+ * It is advisable to flush the caching structures under such
+ * conditions.
+ */
+ if (misa_extension('S')) {
+ __asm__ __volatile__("sfence.vma");
+
+ /*
+ * If hypervisor mode is supported, flush caching
+ * structures in guest mode too.
+ */
+ if (misa_extension('H'))
+ __sbi_hfence_gvma_all();
+ }
+}
+
static void sbi_hart_smepmp_set(struct sbi_scratch *scratch,
struct sbi_domain *dom,
struct sbi_domain_memregion *reg,
@@ -343,14 +368,19 @@ static bool is_valid_pmp_idx(unsigned int pmp_count, unsigned int pmp_idx)
return false;
}
-static int sbi_hart_smepmp_configure(struct sbi_scratch *scratch,
- unsigned int pmp_count,
- unsigned int pmp_log2gran,
- unsigned long pmp_addr_max)
+static int sbi_hart_smepmp_configure(struct sbi_scratch *scratch)
{
struct sbi_domain_memregion *reg;
struct sbi_domain *dom = sbi_domain_thishart_ptr();
- unsigned int pmp_idx, pmp_flags;
+ unsigned int pmp_log2gran, pmp_bits;
+ unsigned int pmp_idx, pmp_count;
+ unsigned long pmp_addr_max;
+ unsigned int pmp_flags;
+
+ pmp_count = sbi_hart_pmp_count(scratch);
+ pmp_log2gran = sbi_hart_pmp_log2gran(scratch);
+ pmp_bits = sbi_hart_pmp_addrbits(scratch) - 1;
+ pmp_addr_max = (1UL << pmp_bits) | ((1UL << pmp_bits) - 1);
/*
* Set the RLB so that, we can write to PMP entries without
@@ -430,20 +460,64 @@ static int sbi_hart_smepmp_configure(struct sbi_scratch *scratch,
* Keep the RLB bit so that dynamic mappings can be done.
*/
+ sbi_hart_pmp_fence();
return 0;
}
-static int sbi_hart_oldpmp_configure(struct sbi_scratch *scratch,
- unsigned int pmp_count,
- unsigned int pmp_log2gran,
- unsigned long pmp_addr_max)
+static int sbi_hart_smepmp_map_range(struct sbi_scratch *scratch,
+ unsigned long addr, unsigned long size)
+{
+ /* shared R/W access for M and S/U mode */
+ unsigned int pmp_flags = (PMP_W | PMP_X);
+ unsigned long order, base = 0;
+
+ if (is_pmp_entry_mapped(SBI_SMEPMP_RESV_ENTRY))
+ return SBI_ENOSPC;
+
+ for (order = MAX(sbi_hart_pmp_log2gran(scratch), log2roundup(size));
+ order <= __riscv_xlen; order++) {
+ if (order < __riscv_xlen) {
+ base = addr & ~((1UL << order) - 1UL);
+ if ((base <= addr) &&
+ (addr < (base + (1UL << order))) &&
+ (base <= (addr + size - 1UL)) &&
+ ((addr + size - 1UL) < (base + (1UL << order))))
+ break;
+ } else {
+ return SBI_EFAIL;
+ }
+ }
+
+ sbi_platform_pmp_set(sbi_platform_ptr(scratch), SBI_SMEPMP_RESV_ENTRY,
+ SBI_DOMAIN_MEMREGION_SHARED_SURW_MRW,
+ pmp_flags, base, order);
+ pmp_set(SBI_SMEPMP_RESV_ENTRY, pmp_flags, base, order);
+
+ return SBI_OK;
+}
+
+static int sbi_hart_smepmp_unmap_range(struct sbi_scratch *scratch,
+ unsigned long addr, unsigned long size)
+{
+ sbi_platform_pmp_disable(sbi_platform_ptr(scratch), SBI_SMEPMP_RESV_ENTRY);
+ return pmp_disable(SBI_SMEPMP_RESV_ENTRY);
+}
+
+static int sbi_hart_oldpmp_configure(struct sbi_scratch *scratch)
{
struct sbi_domain_memregion *reg;
struct sbi_domain *dom = sbi_domain_thishart_ptr();
- unsigned int pmp_idx = 0;
+ unsigned long pmp_addr, pmp_addr_max;
+ unsigned int pmp_log2gran, pmp_bits;
+ unsigned int pmp_idx, pmp_count;
unsigned int pmp_flags;
- unsigned long pmp_addr;
+ pmp_count = sbi_hart_pmp_count(scratch);
+ pmp_log2gran = sbi_hart_pmp_log2gran(scratch);
+ pmp_bits = sbi_hart_pmp_addrbits(scratch) - 1;
+ pmp_addr_max = (1UL << pmp_bits) | ((1UL << pmp_bits) - 1);
+
+ pmp_idx = 0;
sbi_domain_for_each_memregion(dom, reg) {
if (!is_valid_pmp_idx(pmp_count, pmp_idx))
return SBI_EFAIL;
@@ -478,43 +552,19 @@ static int sbi_hart_oldpmp_configure(struct sbi_scratch *scratch,
}
}
+ sbi_hart_pmp_fence();
return 0;
}
int sbi_hart_map_saddr(unsigned long addr, unsigned long size)
{
- /* shared R/W access for M and S/U mode */
- unsigned int pmp_flags = (PMP_W | PMP_X);
- unsigned long order, base = 0;
struct sbi_scratch *scratch = sbi_scratch_thishart_ptr();
/* If Smepmp is not supported no special mapping is required */
if (!sbi_hart_has_extension(scratch, SBI_HART_EXT_SMEPMP))
return SBI_OK;
- if (is_pmp_entry_mapped(SBI_SMEPMP_RESV_ENTRY))
- return SBI_ENOSPC;
-
- for (order = MAX(sbi_hart_pmp_log2gran(scratch), log2roundup(size));
- order <= __riscv_xlen; order++) {
- if (order < __riscv_xlen) {
- base = addr & ~((1UL << order) - 1UL);
- if ((base <= addr) &&
- (addr < (base + (1UL << order))) &&
- (base <= (addr + size - 1UL)) &&
- ((addr + size - 1UL) < (base + (1UL << order))))
- break;
- } else {
- return SBI_EFAIL;
- }
- }
-
- sbi_platform_pmp_set(sbi_platform_ptr(scratch), SBI_SMEPMP_RESV_ENTRY,
- SBI_DOMAIN_MEMREGION_SHARED_SURW_MRW,
- pmp_flags, base, order);
- pmp_set(SBI_SMEPMP_RESV_ENTRY, pmp_flags, base, order);
-
- return SBI_OK;
+ return sbi_hart_smepmp_map_range(scratch, addr, size);
}
int sbi_hart_unmap_saddr(void)
@@ -524,53 +574,18 @@ int sbi_hart_unmap_saddr(void)
if (!sbi_hart_has_extension(scratch, SBI_HART_EXT_SMEPMP))
return SBI_OK;
- sbi_platform_pmp_disable(sbi_platform_ptr(scratch), SBI_SMEPMP_RESV_ENTRY);
- return pmp_disable(SBI_SMEPMP_RESV_ENTRY);
+ return sbi_hart_smepmp_unmap_range(scratch, 0, 0);
}
int sbi_hart_pmp_configure(struct sbi_scratch *scratch)
{
- int rc;
- unsigned int pmp_bits, pmp_log2gran;
- unsigned int pmp_count = sbi_hart_pmp_count(scratch);
- unsigned long pmp_addr_max;
-
- if (!pmp_count)
+ if (!sbi_hart_pmp_count(scratch))
return 0;
- pmp_log2gran = sbi_hart_pmp_log2gran(scratch);
- pmp_bits = sbi_hart_pmp_addrbits(scratch) - 1;
- pmp_addr_max = (1UL << pmp_bits) | ((1UL << pmp_bits) - 1);
-
if (sbi_hart_has_extension(scratch, SBI_HART_EXT_SMEPMP))
- rc = sbi_hart_smepmp_configure(scratch, pmp_count,
- pmp_log2gran, pmp_addr_max);
+ return sbi_hart_smepmp_configure(scratch);
else
- rc = sbi_hart_oldpmp_configure(scratch, pmp_count,
- pmp_log2gran, pmp_addr_max);
-
- /*
- * As per section 3.7.2 of privileged specification v1.12,
- * virtual address translations can be speculatively performed
- * (even before actual access). These, along with PMP traslations,
- * can be cached. This can pose a problem with CPU hotplug
- * and non-retentive suspend scenario because PMP states are
- * not preserved.
- * It is advisable to flush the caching structures under such
- * conditions.
- */
- if (misa_extension('S')) {
- __asm__ __volatile__("sfence.vma");
-
- /*
- * If hypervisor mode is supported, flush caching
- * structures in guest mode too.
- */
- if (misa_extension('H'))
- __sbi_hfence_gvma_all();
- }
-
- return rc;
+ return sbi_hart_oldpmp_configure(scratch);
}
void sbi_hart_pmp_unconfigure(struct sbi_scratch *scratch)
@@ -587,6 +602,42 @@ void sbi_hart_pmp_unconfigure(struct sbi_scratch *scratch)
}
}
+static struct sbi_hart_protection pmp_protection = {
+ .name = "pmp",
+ .rating = 100,
+ .configure = sbi_hart_oldpmp_configure,
+ .unconfigure = sbi_hart_pmp_unconfigure,
+};
+
+static struct sbi_hart_protection epmp_protection = {
+ .name = "epmp",
+ .rating = 200,
+ .configure = sbi_hart_smepmp_configure,
+ .unconfigure = sbi_hart_pmp_unconfigure,
+ .map_range = sbi_hart_smepmp_map_range,
+ .unmap_range = sbi_hart_smepmp_unmap_range,
+};
+
+static int sbi_hart_pmp_init(struct sbi_scratch *scratch)
+{
+ int rc;
+
+ if (!sbi_hart_pmp_count(scratch))
+ return 0;
+
+ rc = sbi_hart_protection_register(&pmp_protection);
+ if (rc)
+ return rc;
+
+ if (sbi_hart_has_extension(scratch, SBI_HART_EXT_SMEPMP)) {
+ rc = sbi_hart_protection_register(&epmp_protection);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
int sbi_hart_priv_version(struct sbi_scratch *scratch)
{
struct sbi_hart_features *hfeatures =
@@ -1051,6 +1102,12 @@ int sbi_hart_init(struct sbi_scratch *scratch, bool cold_boot)
if (rc)
return rc;
+ if (cold_boot) {
+ rc = sbi_hart_pmp_init(scratch);
+ if (rc)
+ return rc;
+ }
+
rc = delegate_traps(scratch);
if (rc)
return rc;
--
2.43.0
--
opensbi mailing list
opensbi@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/opensbi
next prev parent reply other threads:[~2025-11-26 14:19 UTC|newest]
Thread overview: 12+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-11-26 14:18 [PATCH 0/5] OpenSBI hart protection abstraction Anup Patel
2025-11-26 14:18 ` [PATCH 1/5] lib: sbi: Introduce sbi_hart_pmp_unconfigure() function Anup Patel
2025-11-26 14:18 ` [PATCH 2/5] lib: sbi: Introduce hart protection abstraction Anup Patel
2025-12-07 11:10 ` Samuel Holland
2025-12-07 15:58 ` Anup Patel
2025-12-09 7:04 ` Samuel Holland
2025-12-09 8:30 ` Anup Patel
2025-11-26 14:18 ` Anup Patel [this message]
2025-12-07 11:12 ` [PATCH 3/5] lib: sbi: Implement hart protection for PMP and ePMP Samuel Holland
2025-12-09 12:55 ` Anup Patel
2025-11-26 14:18 ` [PATCH 4/5] lib: sbi: Replace sbi_hart_pmp_xyz() and sbi_hart_map/unmap_addr() Anup Patel
2025-11-26 14:18 ` [PATCH 5/5] lib: sbi: Factor-out PMP programming into separate sources Anup Patel
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20251126141845.248697-4-apatel@ventanamicro.com \
--to=apatel@ventanamicro.com \
--cc=ajones@ventanamicro.com \
--cc=anup@brainfault.org \
--cc=atishp@rivosinc.com \
--cc=opensbi@lists.infradead.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox