* [PATCH v2 1/8] target/riscv: Update pmp_get_tlb_size()
2023-04-18 14:06 [PATCH v2 0/8] target/riscv: Fix PMP related problem Weiwei Li
@ 2023-04-18 14:06 ` Weiwei Li
2023-04-18 14:06 ` [PATCH v2 2/8] target/riscv: Move pmp_get_tlb_size apart from get_physical_address_pmp Weiwei Li
` (6 subsequent siblings)
7 siblings, 0 replies; 11+ messages in thread
From: Weiwei Li @ 2023-04-18 14:06 UTC (permalink / raw)
To: qemu-riscv, qemu-devel
Cc: palmer, alistair.francis, bin.meng, dbarboza, zhiwei_liu,
richard.henderson, wangjunqiang, lazyparser, Weiwei Li
PMP entries before the matched PMP entry (including the matched PMP entry)
may partially overlap the TLB page, which may give different regions in
that page different permission rights. For example, with
PMP0 (0x80000008~0x8000000F, R) and PMP1 (0x80000000~0x80000FFF, RWX),
a write access to 0x80000000 will match PMP1. However, we cannot cache the
TLB entry for it, since that would let a write access to 0x80000008 bypass
the check of PMP0. So we should check all of them and set the tlb size to 1
in this case.
Signed-off-by: Weiwei Li <liweiwei@iscas.ac.cn>
Signed-off-by: Junqiang Wang <wangjunqiang@iscas.ac.cn>
---
target/riscv/cpu_helper.c | 7 ++-----
target/riscv/pmp.c | 35 ++++++++++++++++++++++-------------
target/riscv/pmp.h | 3 +--
3 files changed, 25 insertions(+), 20 deletions(-)
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
index 433ea529b0..075fc0538a 100644
--- a/target/riscv/cpu_helper.c
+++ b/target/riscv/cpu_helper.c
@@ -703,11 +703,8 @@ static int get_physical_address_pmp(CPURISCVState *env, int *prot,
}
*prot = pmp_priv_to_page_prot(pmp_priv);
- if ((tlb_size != NULL) && pmp_index != MAX_RISCV_PMPS) {
- target_ulong tlb_sa = addr & ~(TARGET_PAGE_SIZE - 1);
- target_ulong tlb_ea = tlb_sa + TARGET_PAGE_SIZE - 1;
-
- *tlb_size = pmp_get_tlb_size(env, pmp_index, tlb_sa, tlb_ea);
+ if (tlb_size != NULL) {
+ *tlb_size = pmp_get_tlb_size(env, addr);
}
return TRANSLATE_SUCCESS;
diff --git a/target/riscv/pmp.c b/target/riscv/pmp.c
index 1f5aca42e8..78bcd969ec 100644
--- a/target/riscv/pmp.c
+++ b/target/riscv/pmp.c
@@ -601,28 +601,37 @@ target_ulong mseccfg_csr_read(CPURISCVState *env)
}
/*
- * Calculate the TLB size if the start address or the end address of
+ * Calculate the TLB size if any start address or the end address of
* PMP entry is presented in the TLB page.
*/
-target_ulong pmp_get_tlb_size(CPURISCVState *env, int pmp_index,
- target_ulong tlb_sa, target_ulong tlb_ea)
+target_ulong pmp_get_tlb_size(CPURISCVState *env, target_ulong addr)
{
- target_ulong pmp_sa = env->pmp_state.addr[pmp_index].sa;
- target_ulong pmp_ea = env->pmp_state.addr[pmp_index].ea;
+ target_ulong pmp_sa;
+ target_ulong pmp_ea;
+ target_ulong tlb_sa = addr & ~(TARGET_PAGE_SIZE - 1);
+ target_ulong tlb_ea = tlb_sa + TARGET_PAGE_SIZE - 1;
+ int i;
+
+ for (i = 0; i < MAX_RISCV_PMPS; i++) {
+ pmp_sa = env->pmp_state.addr[i].sa;
+ pmp_ea = env->pmp_state.addr[i].ea;
- if (pmp_sa <= tlb_sa && pmp_ea >= tlb_ea) {
- return TARGET_PAGE_SIZE;
- } else {
/*
- * At this point we have a tlb_size that is the smallest possible size
- * That fits within a TARGET_PAGE_SIZE and the PMP region.
- *
- * If the size is less then TARGET_PAGE_SIZE we drop the size to 1.
+ * If any start address or the end address of PMP entry is presented
+ * in the TLB page and cannot override the whole TLB page we drop the
+ * size to 1.
* This means the result isn't cached in the TLB and is only used for
* a single translation.
*/
- return 1;
+ if (pmp_sa <= tlb_sa && pmp_ea >= tlb_ea) {
+ return TARGET_PAGE_SIZE;
+ } else if ((pmp_sa >= tlb_sa && pmp_sa <= tlb_ea) ||
+ (pmp_ea >= tlb_sa && pmp_ea <= tlb_ea)) {
+ return 1;
+ }
}
+
+ return TARGET_PAGE_SIZE;
}
/*
diff --git a/target/riscv/pmp.h b/target/riscv/pmp.h
index b296ea1fc6..0a7e24750b 100644
--- a/target/riscv/pmp.h
+++ b/target/riscv/pmp.h
@@ -76,8 +76,7 @@ int pmp_hart_has_privs(CPURISCVState *env, target_ulong addr,
target_ulong size, pmp_priv_t privs,
pmp_priv_t *allowed_privs,
target_ulong mode);
-target_ulong pmp_get_tlb_size(CPURISCVState *env, int pmp_index,
- target_ulong tlb_sa, target_ulong tlb_ea);
+target_ulong pmp_get_tlb_size(CPURISCVState *env, target_ulong addr);
void pmp_update_rule_addr(CPURISCVState *env, uint32_t pmp_index);
void pmp_update_rule_nums(CPURISCVState *env);
uint32_t pmp_get_num_rules(CPURISCVState *env);
--
2.25.1
^ permalink raw reply related [flat|nested] 11+ messages in thread
* [PATCH v2 2/8] target/riscv: Move pmp_get_tlb_size apart from get_physical_address_pmp
2023-04-18 14:06 [PATCH v2 0/8] target/riscv: Fix PMP related problem Weiwei Li
2023-04-18 14:06 ` [PATCH v2 1/8] target/riscv: Update pmp_get_tlb_size() Weiwei Li
@ 2023-04-18 14:06 ` Weiwei Li
2023-04-18 14:06 ` [PATCH v2 3/8] target/riscv: flush tlb when pmpaddr is updated Weiwei Li
` (5 subsequent siblings)
7 siblings, 0 replies; 11+ messages in thread
From: Weiwei Li @ 2023-04-18 14:06 UTC (permalink / raw)
To: qemu-riscv, qemu-devel
Cc: palmer, alistair.francis, bin.meng, dbarboza, zhiwei_liu,
richard.henderson, wangjunqiang, lazyparser, Weiwei Li
pmp_get_tlb_size can be separated from get_physical_address_pmp and is only
needed when ret == TRANSLATE_SUCCESS.
Signed-off-by: Weiwei Li <liweiwei@iscas.ac.cn>
Signed-off-by: Junqiang Wang <wangjunqiang@iscas.ac.cn>
---
target/riscv/cpu_helper.c | 21 +++++++--------------
target/riscv/pmp.c | 4 ++++
2 files changed, 11 insertions(+), 14 deletions(-)
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
index 075fc0538a..ea08ca9fbb 100644
--- a/target/riscv/cpu_helper.c
+++ b/target/riscv/cpu_helper.c
@@ -676,14 +676,11 @@ void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv)
*
* @env: CPURISCVState
* @prot: The returned protection attributes
- * @tlb_size: TLB page size containing addr. It could be modified after PMP
- * permission checking. NULL if not set TLB page for addr.
* @addr: The physical address to be checked permission
* @access_type: The type of MMU access
* @mode: Indicates current privilege level.
*/
-static int get_physical_address_pmp(CPURISCVState *env, int *prot,
- target_ulong *tlb_size, hwaddr addr,
+static int get_physical_address_pmp(CPURISCVState *env, int *prot, hwaddr addr,
int size, MMUAccessType access_type,
int mode)
{
@@ -703,9 +700,6 @@ static int get_physical_address_pmp(CPURISCVState *env, int *prot,
}
*prot = pmp_priv_to_page_prot(pmp_priv);
- if (tlb_size != NULL) {
- *tlb_size = pmp_get_tlb_size(env, addr);
- }
return TRANSLATE_SUCCESS;
}
@@ -905,7 +899,7 @@ restart:
}
int pmp_prot;
- int pmp_ret = get_physical_address_pmp(env, &pmp_prot, NULL, pte_addr,
+ int pmp_ret = get_physical_address_pmp(env, &pmp_prot, pte_addr,
sizeof(target_ulong),
MMU_DATA_LOAD, PRV_S);
if (pmp_ret != TRANSLATE_SUCCESS) {
@@ -1300,13 +1294,12 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
prot &= prot2;
if (ret == TRANSLATE_SUCCESS) {
- ret = get_physical_address_pmp(env, &prot_pmp, &tlb_size, pa,
+ ret = get_physical_address_pmp(env, &prot_pmp, pa,
size, access_type, mode);
qemu_log_mask(CPU_LOG_MMU,
"%s PMP address=" HWADDR_FMT_plx " ret %d prot"
- " %d tlb_size " TARGET_FMT_lu "\n",
- __func__, pa, ret, prot_pmp, tlb_size);
+ " %d\n", __func__, pa, ret, prot_pmp);
prot &= prot_pmp;
}
@@ -1333,13 +1326,12 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
__func__, address, ret, pa, prot);
if (ret == TRANSLATE_SUCCESS) {
- ret = get_physical_address_pmp(env, &prot_pmp, &tlb_size, pa,
+ ret = get_physical_address_pmp(env, &prot_pmp, pa,
size, access_type, mode);
qemu_log_mask(CPU_LOG_MMU,
"%s PMP address=" HWADDR_FMT_plx " ret %d prot"
- " %d tlb_size " TARGET_FMT_lu "\n",
- __func__, pa, ret, prot_pmp, tlb_size);
+ " %d\n", __func__, pa, ret, prot_pmp);
prot &= prot_pmp;
}
@@ -1350,6 +1342,7 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
}
if (ret == TRANSLATE_SUCCESS) {
+ tlb_size = pmp_get_tlb_size(env, pa);
tlb_set_page(cs, address & ~(tlb_size - 1), pa & ~(tlb_size - 1),
prot, mmu_idx, tlb_size);
return true;
diff --git a/target/riscv/pmp.c b/target/riscv/pmp.c
index 78bcd969ec..643388dc23 100644
--- a/target/riscv/pmp.c
+++ b/target/riscv/pmp.c
@@ -612,6 +612,10 @@ target_ulong pmp_get_tlb_size(CPURISCVState *env, target_ulong addr)
target_ulong tlb_ea = tlb_sa + TARGET_PAGE_SIZE - 1;
int i;
+ if (!riscv_cpu_cfg(env)->pmp || !pmp_get_num_rules(env)) {
+ return TARGET_PAGE_SIZE;
+ }
+
for (i = 0; i < MAX_RISCV_PMPS; i++) {
pmp_sa = env->pmp_state.addr[i].sa;
pmp_ea = env->pmp_state.addr[i].ea;
--
2.25.1
^ permalink raw reply related [flat|nested] 11+ messages in thread
* [PATCH v2 3/8] target/riscv: flush tlb when pmpaddr is updated
2023-04-18 14:06 [PATCH v2 0/8] target/riscv: Fix PMP related problem Weiwei Li
2023-04-18 14:06 ` [PATCH v2 1/8] target/riscv: Update pmp_get_tlb_size() Weiwei Li
2023-04-18 14:06 ` [PATCH v2 2/8] target/riscv: Move pmp_get_tlb_size apart from get_physical_address_pmp Weiwei Li
@ 2023-04-18 14:06 ` Weiwei Li
2023-04-18 14:06 ` [PATCH v2 4/8] target/riscv: Flush TLB only when pmpcfg/pmpaddr really changes Weiwei Li
` (4 subsequent siblings)
7 siblings, 0 replies; 11+ messages in thread
From: Weiwei Li @ 2023-04-18 14:06 UTC (permalink / raw)
To: qemu-riscv, qemu-devel
Cc: palmer, alistair.francis, bin.meng, dbarboza, zhiwei_liu,
richard.henderson, wangjunqiang, lazyparser, Weiwei Li
TLB should be flushed not only for pmpcfg csr changes, but also for
pmpaddr csr changes.
Signed-off-by: Weiwei Li <liweiwei@iscas.ac.cn>
Signed-off-by: Junqiang Wang <wangjunqiang@iscas.ac.cn>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
---
target/riscv/pmp.c | 1 +
1 file changed, 1 insertion(+)
diff --git a/target/riscv/pmp.c b/target/riscv/pmp.c
index 643388dc23..8645b1e1c1 100644
--- a/target/riscv/pmp.c
+++ b/target/riscv/pmp.c
@@ -537,6 +537,7 @@ void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
if (!pmp_is_locked(env, addr_index)) {
env->pmp_state.pmp[addr_index].addr_reg = val;
pmp_update_rule(env, addr_index);
+ tlb_flush(env_cpu(env));
} else {
qemu_log_mask(LOG_GUEST_ERROR,
"ignoring pmpaddr write - locked\n");
--
2.25.1
^ permalink raw reply related [flat|nested] 11+ messages in thread
* [PATCH v2 4/8] target/riscv: Flush TLB only when pmpcfg/pmpaddr really changes
2023-04-18 14:06 [PATCH v2 0/8] target/riscv: Fix PMP related problem Weiwei Li
` (2 preceding siblings ...)
2023-04-18 14:06 ` [PATCH v2 3/8] target/riscv: flush tlb when pmpaddr is updated Weiwei Li
@ 2023-04-18 14:06 ` Weiwei Li
2023-04-18 14:06 ` [PATCH v2 5/8] target/riscv: flush tb when PMP entry changes Weiwei Li
` (3 subsequent siblings)
7 siblings, 0 replies; 11+ messages in thread
From: Weiwei Li @ 2023-04-18 14:06 UTC (permalink / raw)
To: qemu-riscv, qemu-devel
Cc: palmer, alistair.francis, bin.meng, dbarboza, zhiwei_liu,
richard.henderson, wangjunqiang, lazyparser, Weiwei Li
The TLB needn't be flushed when pmpcfg/pmpaddr doesn't change.
Signed-off-by: Weiwei Li <liweiwei@iscas.ac.cn>
Signed-off-by: Junqiang Wang <wangjunqiang@iscas.ac.cn>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
---
target/riscv/pmp.c | 24 ++++++++++++++++--------
1 file changed, 16 insertions(+), 8 deletions(-)
diff --git a/target/riscv/pmp.c b/target/riscv/pmp.c
index 8645b1e1c1..ec86fccd2e 100644
--- a/target/riscv/pmp.c
+++ b/target/riscv/pmp.c
@@ -26,7 +26,7 @@
#include "trace.h"
#include "exec/exec-all.h"
-static void pmp_write_cfg(CPURISCVState *env, uint32_t addr_index,
+static bool pmp_write_cfg(CPURISCVState *env, uint32_t addr_index,
uint8_t val);
static uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t addr_index);
static void pmp_update_rule(CPURISCVState *env, uint32_t pmp_index);
@@ -83,7 +83,7 @@ static inline uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t pmp_index)
* Accessor to set the cfg reg for a specific PMP/HART
* Bounds checks and relevant lock bit.
*/
-static void pmp_write_cfg(CPURISCVState *env, uint32_t pmp_index, uint8_t val)
+static bool pmp_write_cfg(CPURISCVState *env, uint32_t pmp_index, uint8_t val)
{
if (pmp_index < MAX_RISCV_PMPS) {
bool locked = true;
@@ -119,14 +119,17 @@ static void pmp_write_cfg(CPURISCVState *env, uint32_t pmp_index, uint8_t val)
if (locked) {
qemu_log_mask(LOG_GUEST_ERROR, "ignoring pmpcfg write - locked\n");
- } else {
+ } else if (env->pmp_state.pmp[pmp_index].cfg_reg != val) {
env->pmp_state.pmp[pmp_index].cfg_reg = val;
pmp_update_rule(env, pmp_index);
+ return true;
}
} else {
qemu_log_mask(LOG_GUEST_ERROR,
"ignoring pmpcfg write - out of bounds\n");
}
+
+ return false;
}
static void pmp_decode_napot(target_ulong a, target_ulong *sa,
@@ -477,16 +480,19 @@ void pmpcfg_csr_write(CPURISCVState *env, uint32_t reg_index,
int i;
uint8_t cfg_val;
int pmpcfg_nums = 2 << riscv_cpu_mxl(env);
+ bool modified = false;
trace_pmpcfg_csr_write(env->mhartid, reg_index, val);
for (i = 0; i < pmpcfg_nums; i++) {
cfg_val = (val >> 8 * i) & 0xff;
- pmp_write_cfg(env, (reg_index * 4) + i, cfg_val);
+ modified |= pmp_write_cfg(env, (reg_index * 4) + i, cfg_val);
}
/* If PMP permission of any addr has been changed, flush TLB pages. */
- tlb_flush(env_cpu(env));
+ if (modified) {
+ tlb_flush(env_cpu(env));
+ }
}
@@ -535,9 +541,11 @@ void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
}
if (!pmp_is_locked(env, addr_index)) {
- env->pmp_state.pmp[addr_index].addr_reg = val;
- pmp_update_rule(env, addr_index);
- tlb_flush(env_cpu(env));
+ if (env->pmp_state.pmp[addr_index].addr_reg != val) {
+ env->pmp_state.pmp[addr_index].addr_reg = val;
+ pmp_update_rule(env, addr_index);
+ tlb_flush(env_cpu(env));
+ }
} else {
qemu_log_mask(LOG_GUEST_ERROR,
"ignoring pmpaddr write - locked\n");
--
2.25.1
^ permalink raw reply related [flat|nested] 11+ messages in thread
* [PATCH v2 5/8] target/riscv: flush tb when PMP entry changes
2023-04-18 14:06 [PATCH v2 0/8] target/riscv: Fix PMP related problem Weiwei Li
` (3 preceding siblings ...)
2023-04-18 14:06 ` [PATCH v2 4/8] target/riscv: Flush TLB only when pmpcfg/pmpaddr really changes Weiwei Li
@ 2023-04-18 14:06 ` Weiwei Li
2023-04-18 14:06 ` [PATCH v2 6/8] accel/tcg: Uncache the host address for instruction fetch when tlb size < 1 Weiwei Li
` (2 subsequent siblings)
7 siblings, 0 replies; 11+ messages in thread
From: Weiwei Li @ 2023-04-18 14:06 UTC (permalink / raw)
To: qemu-riscv, qemu-devel
Cc: palmer, alistair.francis, bin.meng, dbarboza, zhiwei_liu,
richard.henderson, wangjunqiang, lazyparser, Weiwei Li
Translation blocks may also be affected when a PMP entry changes.
Signed-off-by: Weiwei Li <liweiwei@iscas.ac.cn>
Signed-off-by: Junqiang Wang <wangjunqiang@iscas.ac.cn>
---
target/riscv/pmp.c | 3 +++
1 file changed, 3 insertions(+)
diff --git a/target/riscv/pmp.c b/target/riscv/pmp.c
index ec86fccd2e..37bc76c474 100644
--- a/target/riscv/pmp.c
+++ b/target/riscv/pmp.c
@@ -25,6 +25,7 @@
#include "cpu.h"
#include "trace.h"
#include "exec/exec-all.h"
+#include "exec/tb-flush.h"
static bool pmp_write_cfg(CPURISCVState *env, uint32_t addr_index,
uint8_t val);
@@ -492,6 +493,7 @@ void pmpcfg_csr_write(CPURISCVState *env, uint32_t reg_index,
/* If PMP permission of any addr has been changed, flush TLB pages. */
if (modified) {
tlb_flush(env_cpu(env));
+ tb_flush(env_cpu(env));
}
}
@@ -545,6 +547,7 @@ void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
env->pmp_state.pmp[addr_index].addr_reg = val;
pmp_update_rule(env, addr_index);
tlb_flush(env_cpu(env));
+ tb_flush(env_cpu(env));
}
} else {
qemu_log_mask(LOG_GUEST_ERROR,
--
2.25.1
^ permalink raw reply related [flat|nested] 11+ messages in thread
* [PATCH v2 6/8] accel/tcg: Uncache the host address for instruction fetch when tlb size < 1
2023-04-18 14:06 [PATCH v2 0/8] target/riscv: Fix PMP related problem Weiwei Li
` (4 preceding siblings ...)
2023-04-18 14:06 ` [PATCH v2 5/8] target/riscv: flush tb when PMP entry changes Weiwei Li
@ 2023-04-18 14:06 ` Weiwei Li
2023-04-19 1:36 ` LIU Zhiwei
2023-04-19 5:41 ` Richard Henderson
2023-04-18 14:06 ` [PATCH v2 7/8] target/riscv: Make the short cut really work in pmp_hart_has_privs Weiwei Li
2023-04-18 14:06 ` [PATCH v2 8/8] target/riscv: Separate pmp_update_rule() in pmpcfg_csr_write Use pmp_update_rule_addr() and pmp_update_rule_nums() separately to update rule nums only once for each pmpcfg_csr_write. Then we can also move tlb_flush and tb_flush into pmp_update_rule_nums() Weiwei Li
7 siblings, 2 replies; 11+ messages in thread
From: Weiwei Li @ 2023-04-18 14:06 UTC (permalink / raw)
To: qemu-riscv, qemu-devel
Cc: palmer, alistair.francis, bin.meng, dbarboza, zhiwei_liu,
richard.henderson, wangjunqiang, lazyparser, Weiwei Li
When a PMP entry overlaps part of the page, we'll set the tlb_size to 1,
which will cause the address in the TLB entry to be set with
TLB_INVALID_MASK, so the next access will again go through tlb_fill.
However, this way does not work in tb_gen_code() =>
get_page_addr_code_hostp(): the TLB host address will be cached, and the
following instructions can use this host address directly, which may lead
to the bypass of the PMP related check.
Signed-off-by: Weiwei Li <liweiwei@iscas.ac.cn>
Signed-off-by: Junqiang Wang <wangjunqiang@iscas.ac.cn>
---
accel/tcg/cputlb.c | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index e984a98dc4..efa0cb67c9 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -1696,6 +1696,11 @@ tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
if (p == NULL) {
return -1;
}
+
+ if (full->lg_page_size < TARGET_PAGE_BITS) {
+ return -1;
+ }
+
if (hostp) {
*hostp = p;
}
--
2.25.1
^ permalink raw reply related [flat|nested] 11+ messages in thread
* Re: [PATCH v2 6/8] accel/tcg: Uncache the host address for instruction fetch when tlb size < 1
2023-04-18 14:06 ` [PATCH v2 6/8] accel/tcg: Uncache the host address for instruction fetch when tlb size < 1 Weiwei Li
@ 2023-04-19 1:36 ` LIU Zhiwei
2023-04-19 5:41 ` Richard Henderson
1 sibling, 0 replies; 11+ messages in thread
From: LIU Zhiwei @ 2023-04-19 1:36 UTC (permalink / raw)
To: Weiwei Li, qemu-riscv, qemu-devel
Cc: palmer, alistair.francis, bin.meng, dbarboza, richard.henderson,
wangjunqiang, lazyparser
On 2023/4/18 22:06, Weiwei Li wrote:
> When PMP entry overlap part of the page, we'll set the tlb_size to 1, which
> will make the address in tlb entry set with TLB_INVALID_MASK, and the next
> access will again go through tlb_fill.However, this way will not work in
> tb_gen_code() => get_page_addr_code_hostp(): the TLB host address will be
> cached, and the following instructions can use this host address directly
> which may lead to the bypass of PMP related check.
We can add a link to the issue in the commit message,
https://gitlab.com/qemu-project/qemu/-/issues/1542
Reviewed-by: LIU Zhiwei <zhiwei_liu@linux.alibaba.com>
Zhiwei
>
> Signed-off-by: Weiwei Li <liweiwei@iscas.ac.cn>
> Signed-off-by: Junqiang Wang <wangjunqiang@iscas.ac.cn>
> ---
> accel/tcg/cputlb.c | 5 +++++
> 1 file changed, 5 insertions(+)
>
> diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
> index e984a98dc4..efa0cb67c9 100644
> --- a/accel/tcg/cputlb.c
> +++ b/accel/tcg/cputlb.c
> @@ -1696,6 +1696,11 @@ tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
> if (p == NULL) {
> return -1;
> }
> +
> + if (full->lg_page_size < TARGET_PAGE_BITS) {
> + return -1;
> + }
> +
> if (hostp) {
> *hostp = p;
> }
^ permalink raw reply [flat|nested] 11+ messages in thread
* Re: [PATCH v2 6/8] accel/tcg: Uncache the host address for instruction fetch when tlb size < 1
2023-04-18 14:06 ` [PATCH v2 6/8] accel/tcg: Uncache the host address for instruction fetch when tlb size < 1 Weiwei Li
2023-04-19 1:36 ` LIU Zhiwei
@ 2023-04-19 5:41 ` Richard Henderson
1 sibling, 0 replies; 11+ messages in thread
From: Richard Henderson @ 2023-04-19 5:41 UTC (permalink / raw)
To: Weiwei Li, qemu-riscv, qemu-devel
Cc: palmer, alistair.francis, bin.meng, dbarboza, zhiwei_liu,
wangjunqiang, lazyparser
On 4/18/23 16:06, Weiwei Li wrote:
> When PMP entry overlap part of the page, we'll set the tlb_size to 1, which
> will make the address in tlb entry set with TLB_INVALID_MASK, and the next
> access will again go through tlb_fill.However, this way will not work in
> tb_gen_code() => get_page_addr_code_hostp(): the TLB host address will be
> cached, and the following instructions can use this host address directly
> which may lead to the bypass of PMP related check.
>
> Signed-off-by: Weiwei Li<liweiwei@iscas.ac.cn>
> Signed-off-by: Junqiang Wang<wangjunqiang@iscas.ac.cn>
> ---
> accel/tcg/cputlb.c | 5 +++++
> 1 file changed, 5 insertions(+)
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
r~
^ permalink raw reply [flat|nested] 11+ messages in thread
* [PATCH v2 7/8] target/riscv: Make the short cut really work in pmp_hart_has_privs
2023-04-18 14:06 [PATCH v2 0/8] target/riscv: Fix PMP related problem Weiwei Li
` (5 preceding siblings ...)
2023-04-18 14:06 ` [PATCH v2 6/8] accel/tcg: Uncache the host address for instruction fetch when tlb size < 1 Weiwei Li
@ 2023-04-18 14:06 ` Weiwei Li
2023-04-18 14:06 ` [PATCH v2 8/8] target/riscv: Separate pmp_update_rule() in pmpcfg_csr_write Use pmp_update_rule_addr() and pmp_update_rule_nums() separately to update rule nums only once for each pmpcfg_csr_write. Then we can also move tlb_flush and tb_flush into pmp_update_rule_nums() Weiwei Li
7 siblings, 0 replies; 11+ messages in thread
From: Weiwei Li @ 2023-04-18 14:06 UTC (permalink / raw)
To: qemu-riscv, qemu-devel
Cc: palmer, alistair.francis, bin.meng, dbarboza, zhiwei_liu,
richard.henderson, wangjunqiang, lazyparser, Weiwei Li
We needn't check the PMP entries if there are no PMP rules.
Signed-off-by: Weiwei Li <liweiwei@iscas.ac.cn>
Signed-off-by: Junqiang Wang <wangjunqiang@iscas.ac.cn>
---
target/riscv/pmp.c | 251 ++++++++++++++++++++++-----------------------
1 file changed, 123 insertions(+), 128 deletions(-)
diff --git a/target/riscv/pmp.c b/target/riscv/pmp.c
index 37bc76c474..67347c5887 100644
--- a/target/riscv/pmp.c
+++ b/target/riscv/pmp.c
@@ -315,149 +315,144 @@ int pmp_hart_has_privs(CPURISCVState *env, target_ulong addr,
target_ulong e = 0;
/* Short cut if no rules */
- if (0 == pmp_get_num_rules(env)) {
- if (pmp_hart_has_privs_default(env, addr, size, privs,
- allowed_privs, mode)) {
- ret = MAX_RISCV_PMPS;
- }
- }
-
- if (size == 0) {
- if (riscv_cpu_cfg(env)->mmu) {
- /*
- * If size is unknown (0), assume that all bytes
- * from addr to the end of the page will be accessed.
- */
- pmp_size = -(addr | TARGET_PAGE_MASK);
+ if (pmp_get_num_rules(env) != 0) {
+ if (size == 0) {
+ if (riscv_cpu_cfg(env)->mmu) {
+ /*
+ * If size is unknown (0), assume that all bytes
+ * from addr to the end of the page will be accessed.
+ */
+ pmp_size = -(addr | TARGET_PAGE_MASK);
+ } else {
+ pmp_size = sizeof(target_ulong);
+ }
} else {
- pmp_size = sizeof(target_ulong);
- }
- } else {
- pmp_size = size;
- }
-
- /*
- * 1.10 draft priv spec states there is an implicit order
- * from low to high
- */
- for (i = 0; i < MAX_RISCV_PMPS; i++) {
- s = pmp_is_in_range(env, i, addr);
- e = pmp_is_in_range(env, i, addr + pmp_size - 1);
-
- /* partially inside */
- if ((s + e) == 1) {
- qemu_log_mask(LOG_GUEST_ERROR,
- "pmp violation - access is partially inside\n");
- ret = -1;
- break;
+ pmp_size = size;
}
- /* fully inside */
- const uint8_t a_field =
- pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg);
-
/*
- * Convert the PMP permissions to match the truth table in the
- * ePMP spec.
+ * 1.10 draft priv spec states there is an implicit order
+ * from low to high
*/
- const uint8_t epmp_operation =
- ((env->pmp_state.pmp[i].cfg_reg & PMP_LOCK) >> 4) |
- ((env->pmp_state.pmp[i].cfg_reg & PMP_READ) << 2) |
- (env->pmp_state.pmp[i].cfg_reg & PMP_WRITE) |
- ((env->pmp_state.pmp[i].cfg_reg & PMP_EXEC) >> 2);
+ for (i = 0; i < MAX_RISCV_PMPS; i++) {
+ s = pmp_is_in_range(env, i, addr);
+ e = pmp_is_in_range(env, i, addr + pmp_size - 1);
+
+ /* partially inside */
+ if ((s + e) == 1) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "pmp violation - access is partially inside\n");
+ ret = -1;
+ break;
+ }
+
+ /* fully inside */
+ const uint8_t a_field =
+ pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg);
- if (((s + e) == 2) && (PMP_AMATCH_OFF != a_field)) {
/*
- * If the PMP entry is not off and the address is in range,
- * do the priv check
+ * Convert the PMP permissions to match the truth table in the
+ * ePMP spec.
*/
- if (!MSECCFG_MML_ISSET(env)) {
- /*
- * If mseccfg.MML Bit is not set, do pmp priv check
- * This will always apply to regular PMP.
- */
- *allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC;
- if ((mode != PRV_M) || pmp_is_locked(env, i)) {
- *allowed_privs &= env->pmp_state.pmp[i].cfg_reg;
- }
- } else {
+ const uint8_t epmp_operation =
+ ((env->pmp_state.pmp[i].cfg_reg & PMP_LOCK) >> 4) |
+ ((env->pmp_state.pmp[i].cfg_reg & PMP_READ) << 2) |
+ (env->pmp_state.pmp[i].cfg_reg & PMP_WRITE) |
+ ((env->pmp_state.pmp[i].cfg_reg & PMP_EXEC) >> 2);
+
+ if (((s + e) == 2) && (PMP_AMATCH_OFF != a_field)) {
/*
- * If mseccfg.MML Bit set, do the enhanced pmp priv check
+ * If the PMP entry is not off and the address is in range,
+ * do the priv check
*/
- if (mode == PRV_M) {
- switch (epmp_operation) {
- case 0:
- case 1:
- case 4:
- case 5:
- case 6:
- case 7:
- case 8:
- *allowed_privs = 0;
- break;
- case 2:
- case 3:
- case 14:
- *allowed_privs = PMP_READ | PMP_WRITE;
- break;
- case 9:
- case 10:
- *allowed_privs = PMP_EXEC;
- break;
- case 11:
- case 13:
- *allowed_privs = PMP_READ | PMP_EXEC;
- break;
- case 12:
- case 15:
- *allowed_privs = PMP_READ;
- break;
- default:
- g_assert_not_reached();
+ if (!MSECCFG_MML_ISSET(env)) {
+ /*
+ * If mseccfg.MML Bit is not set, do pmp priv check
+ * This will always apply to regular PMP.
+ */
+ *allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC;
+ if ((mode != PRV_M) || pmp_is_locked(env, i)) {
+ *allowed_privs &= env->pmp_state.pmp[i].cfg_reg;
}
} else {
- switch (epmp_operation) {
- case 0:
- case 8:
- case 9:
- case 12:
- case 13:
- case 14:
- *allowed_privs = 0;
- break;
- case 1:
- case 10:
- case 11:
- *allowed_privs = PMP_EXEC;
- break;
- case 2:
- case 4:
- case 15:
- *allowed_privs = PMP_READ;
- break;
- case 3:
- case 6:
- *allowed_privs = PMP_READ | PMP_WRITE;
- break;
- case 5:
- *allowed_privs = PMP_READ | PMP_EXEC;
- break;
- case 7:
- *allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC;
- break;
- default:
- g_assert_not_reached();
+ /*
+ * If mseccfg.MML Bit set, do the enhanced pmp priv check
+ */
+ if (mode == PRV_M) {
+ switch (epmp_operation) {
+ case 0:
+ case 1:
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ case 8:
+ *allowed_privs = 0;
+ break;
+ case 2:
+ case 3:
+ case 14:
+ *allowed_privs = PMP_READ | PMP_WRITE;
+ break;
+ case 9:
+ case 10:
+ *allowed_privs = PMP_EXEC;
+ break;
+ case 11:
+ case 13:
+ *allowed_privs = PMP_READ | PMP_EXEC;
+ break;
+ case 12:
+ case 15:
+ *allowed_privs = PMP_READ;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+ } else {
+ switch (epmp_operation) {
+ case 0:
+ case 8:
+ case 9:
+ case 12:
+ case 13:
+ case 14:
+ *allowed_privs = 0;
+ break;
+ case 1:
+ case 10:
+ case 11:
+ *allowed_privs = PMP_EXEC;
+ break;
+ case 2:
+ case 4:
+ case 15:
+ *allowed_privs = PMP_READ;
+ break;
+ case 3:
+ case 6:
+ *allowed_privs = PMP_READ | PMP_WRITE;
+ break;
+ case 5:
+ *allowed_privs = PMP_READ | PMP_EXEC;
+ break;
+ case 7:
+ *allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC;
+ break;
+ default:
+ g_assert_not_reached();
+ }
}
}
- }
- /*
- * If matching address range was found, the protection bits
- * defined with PMP must be used. We shouldn't fallback on
- * finding default privileges.
- */
- ret = i;
- break;
+ /*
+ * If matching address range was found, the protection bits
+ * defined with PMP must be used. We shouldn't fallback on
+ * finding default privileges.
+ */
+ ret = i;
+ break;
+ }
}
}
--
2.25.1
^ permalink raw reply related [flat|nested] 11+ messages in thread
* [PATCH v2 8/8] target/riscv: Separate pmp_update_rule() in pmpcfg_csr_write Use pmp_update_rule_addr() and pmp_update_rule_nums() separately to update rule nums only once for each pmpcfg_csr_write. Then we can also move tlb_flush and tb_flush into pmp_update_rule_nums().
2023-04-18 14:06 [PATCH v2 0/8] target/riscv: Fix PMP related problem Weiwei Li
` (6 preceding siblings ...)
2023-04-18 14:06 ` [PATCH v2 7/8] target/riscv: Make the short cut really work in pmp_hart_has_privs Weiwei Li
@ 2023-04-18 14:06 ` Weiwei Li
7 siblings, 0 replies; 11+ messages in thread
From: Weiwei Li @ 2023-04-18 14:06 UTC (permalink / raw)
To: qemu-riscv, qemu-devel
Cc: palmer, alistair.francis, bin.meng, dbarboza, zhiwei_liu,
richard.henderson, wangjunqiang, lazyparser, Weiwei Li
Signed-off-by: Weiwei Li <liweiwei@iscas.ac.cn>
Signed-off-by: Junqiang Wang <wangjunqiang@iscas.ac.cn>
---
target/riscv/pmp.c | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/target/riscv/pmp.c b/target/riscv/pmp.c
index 67347c5887..1cce3f0ce4 100644
--- a/target/riscv/pmp.c
+++ b/target/riscv/pmp.c
@@ -122,7 +122,7 @@ static bool pmp_write_cfg(CPURISCVState *env, uint32_t pmp_index, uint8_t val)
qemu_log_mask(LOG_GUEST_ERROR, "ignoring pmpcfg write - locked\n");
} else if (env->pmp_state.pmp[pmp_index].cfg_reg != val) {
env->pmp_state.pmp[pmp_index].cfg_reg = val;
- pmp_update_rule(env, pmp_index);
+ pmp_update_rule_addr(env, pmp_index);
return true;
}
} else {
@@ -208,6 +208,9 @@ void pmp_update_rule_nums(CPURISCVState *env)
env->pmp_state.num_rules++;
}
}
+
+ tlb_flush(env_cpu(env));
+ tb_flush(env_cpu(env));
}
/*
@@ -487,8 +490,7 @@ void pmpcfg_csr_write(CPURISCVState *env, uint32_t reg_index,
/* If PMP permission of any addr has been changed, flush TLB pages. */
if (modified) {
- tlb_flush(env_cpu(env));
- tb_flush(env_cpu(env));
+ pmp_update_rule_nums(env);
}
}
@@ -541,8 +543,6 @@ void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
if (env->pmp_state.pmp[addr_index].addr_reg != val) {
env->pmp_state.pmp[addr_index].addr_reg = val;
pmp_update_rule(env, addr_index);
- tlb_flush(env_cpu(env));
- tb_flush(env_cpu(env));
}
} else {
qemu_log_mask(LOG_GUEST_ERROR,
--
2.25.1
^ permalink raw reply related [flat|nested] 11+ messages in thread