From: LIU Zhiwei <zhiwei_liu@linux.alibaba.com>
To: Weiwei Li <liweiwei@iscas.ac.cn>,
qemu-riscv@nongnu.org, qemu-devel@nongnu.org
Cc: palmer@dabbelt.com, alistair.francis@wdc.com,
bin.meng@windriver.com, dbarboza@ventanamicro.com,
richard.henderson@linaro.org, wangjunqiang@iscas.ac.cn,
lazyparser@gmail.com
Subject: Re: [PATCH v3 4/7] target/riscv: Flush TLB only when pmpcfg/pmpaddr really changes
Date: Thu, 20 Apr 2023 21:23:26 +0800
Message-ID: <ddaada2a-fd9d-9e53-4852-26a15dbe70b2@linux.alibaba.com>
In-Reply-To: <20230419032725.29721-5-liweiwei@iscas.ac.cn>

On 2023/4/19 11:27, Weiwei Li wrote:
> The TLB needn't be flushed when a pmpcfg/pmpaddr write doesn't change the value.
>
> Signed-off-by: Weiwei Li <liweiwei@iscas.ac.cn>
> Signed-off-by: Junqiang Wang <wangjunqiang@iscas.ac.cn>
> Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
> ---
> target/riscv/pmp.c | 24 ++++++++++++++++--------
> 1 file changed, 16 insertions(+), 8 deletions(-)
>
> diff --git a/target/riscv/pmp.c b/target/riscv/pmp.c
> index bcd190d3a3..7feaddd7eb 100644
> --- a/target/riscv/pmp.c
> +++ b/target/riscv/pmp.c
> @@ -26,7 +26,7 @@
> #include "trace.h"
> #include "exec/exec-all.h"
>
> -static void pmp_write_cfg(CPURISCVState *env, uint32_t addr_index,
> +static bool pmp_write_cfg(CPURISCVState *env, uint32_t addr_index,
> uint8_t val);
> static uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t addr_index);
> static void pmp_update_rule(CPURISCVState *env, uint32_t pmp_index);
> @@ -83,7 +83,7 @@ static inline uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t pmp_index)
> * Accessor to set the cfg reg for a specific PMP/HART
> * Bounds checks and relevant lock bit.
> */
> -static void pmp_write_cfg(CPURISCVState *env, uint32_t pmp_index, uint8_t val)
> +static bool pmp_write_cfg(CPURISCVState *env, uint32_t pmp_index, uint8_t val)
> {
> if (pmp_index < MAX_RISCV_PMPS) {
> bool locked = true;
> @@ -119,14 +119,17 @@ static void pmp_write_cfg(CPURISCVState *env, uint32_t pmp_index, uint8_t val)
>
> if (locked) {
> qemu_log_mask(LOG_GUEST_ERROR, "ignoring pmpcfg write - locked\n");
> - } else {
> + } else if (env->pmp_state.pmp[pmp_index].cfg_reg != val) {
> env->pmp_state.pmp[pmp_index].cfg_reg = val;
> pmp_update_rule(env, pmp_index);
> + return true;
> }
> } else {
> qemu_log_mask(LOG_GUEST_ERROR,
> "ignoring pmpcfg write - out of bounds\n");
> }
> +
> + return false;
> }
>
> static void pmp_decode_napot(target_ulong a, target_ulong *sa,
> @@ -477,16 +480,19 @@ void pmpcfg_csr_write(CPURISCVState *env, uint32_t reg_index,
> int i;
> uint8_t cfg_val;
> int pmpcfg_nums = 2 << riscv_cpu_mxl(env);
> + bool modified = false;
>
> trace_pmpcfg_csr_write(env->mhartid, reg_index, val);
>
> for (i = 0; i < pmpcfg_nums; i++) {
> cfg_val = (val >> 8 * i) & 0xff;
> - pmp_write_cfg(env, (reg_index * 4) + i, cfg_val);
> + modified |= pmp_write_cfg(env, (reg_index * 4) + i, cfg_val);
> }
>
> /* If PMP permission of any addr has been changed, flush TLB pages. */
> - tlb_flush(env_cpu(env));
> + if (modified) {
> + tlb_flush(env_cpu(env));
> + }
> }
>
>
> @@ -535,9 +541,11 @@ void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
> }
>
> if (!pmp_is_locked(env, addr_index)) {
> - env->pmp_state.pmp[addr_index].addr_reg = val;
> - pmp_update_rule(env, addr_index);
> - tlb_flush(env_cpu(env));
> + if (env->pmp_state.pmp[addr_index].addr_reg != val) {
> + env->pmp_state.pmp[addr_index].addr_reg = val;
> + pmp_update_rule(env, addr_index);
> + tlb_flush(env_cpu(env));
> + }
Reviewed-by: LIU Zhiwei <zhiwei_liu@linux.alibaba.com>
Zhiwei
> } else {
> qemu_log_mask(LOG_GUEST_ERROR,
> "ignoring pmpaddr write - locked\n");