* [PATCH v7 01/11] target/riscv: Combine set_mode and set_virt functions.
2024-06-26 23:57 [PATCH v7 00/11] Add RISC-V ISA extension smcntrpmf support Atish Patra
@ 2024-06-26 23:57 ` Atish Patra
2024-07-01 18:21 ` Daniel Henrique Barboza
2024-07-03 1:07 ` Alistair Francis
2024-06-26 23:57 ` [PATCH v7 02/11] target/riscv: Fix the predicate functions for mhpmeventhX CSRs Atish Patra
` (9 subsequent siblings)
10 siblings, 2 replies; 27+ messages in thread
From: Atish Patra @ 2024-06-26 23:57 UTC (permalink / raw)
To: qemu-riscv, qemu-devel
Cc: Rajnesh Kanwal, Atish Patra, palmer, liwei1518, zhiwei_liu,
bin.meng, dbarboza, alistair.francis
From: Rajnesh Kanwal <rkanwal@rivosinc.com>
Combine the riscv_cpu_set_virt_enabled() and riscv_cpu_set_mode()
functions so that the complete mode-change information is available
through a single function.
This makes it easy to differentiate between HS->VS, VS->HS and VS->VS
transitions when executing state-update code. For example, one use
case that inspired this change is updating the mode-specific
instruction and cycle counters, which requires knowledge of both the
previous and the current mode.
Signed-off-by: Rajnesh Kanwal <rkanwal@rivosinc.com>
---
target/riscv/cpu.h | 2 +-
target/riscv/cpu_helper.c | 57 +++++++++++++++++++++++------------------------
target/riscv/op_helper.c | 17 +++++---------
3 files changed, 35 insertions(+), 41 deletions(-)
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
index 90b8f1b08f83..46faefd24e09 100644
--- a/target/riscv/cpu.h
+++ b/target/riscv/cpu.h
@@ -544,7 +544,7 @@ void riscv_cpu_set_aia_ireg_rmw_fn(CPURISCVState *env, uint32_t priv,
RISCVException smstateen_acc_ok(CPURISCVState *env, int index, uint64_t bit);
#endif /* !CONFIG_USER_ONLY */
-void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv);
+void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv, bool virt_en);
void riscv_translate_init(void);
G_NORETURN void riscv_raise_exception(CPURISCVState *env,
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
index 6709622dd3ab..10d3fdaed376 100644
--- a/target/riscv/cpu_helper.c
+++ b/target/riscv/cpu_helper.c
@@ -619,30 +619,6 @@ void riscv_cpu_set_geilen(CPURISCVState *env, target_ulong geilen)
env->geilen = geilen;
}
-/* This function can only be called to set virt when RVH is enabled */
-void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable)
-{
- /* Flush the TLB on all virt mode changes. */
- if (env->virt_enabled != enable) {
- tlb_flush(env_cpu(env));
- }
-
- env->virt_enabled = enable;
-
- if (enable) {
- /*
- * The guest external interrupts from an interrupt controller are
- * delivered only when the Guest/VM is running (i.e. V=1). This means
- * any guest external interrupt which is triggered while the Guest/VM
- * is not running (i.e. V=0) will be missed on QEMU resulting in guest
- * with sluggish response to serial console input and other I/O events.
- *
- * To solve this, we check and inject interrupt after setting V=1.
- */
- riscv_cpu_update_mip(env, 0, 0);
- }
-}
-
int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint64_t interrupts)
{
CPURISCVState *env = &cpu->env;
@@ -715,7 +691,7 @@ void riscv_cpu_set_aia_ireg_rmw_fn(CPURISCVState *env, uint32_t priv,
}
}
-void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv)
+void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv, bool virt_en)
{
g_assert(newpriv <= PRV_M && newpriv != PRV_RESERVED);
@@ -736,6 +712,28 @@ void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv)
* preemptive context switch. As a result, do both.
*/
env->load_res = -1;
+
+ if (riscv_has_ext(env, RVH)) {
+ /* Flush the TLB on all virt mode changes. */
+ if (env->virt_enabled != virt_en) {
+ tlb_flush(env_cpu(env));
+ }
+
+ env->virt_enabled = virt_en;
+ if (virt_en) {
+ /*
+ * The guest external interrupts from an interrupt controller are
+ * delivered only when the Guest/VM is running (i.e. V=1). This
+ * means any guest external interrupt which is triggered while the
+ * Guest/VM is not running (i.e. V=0) will be missed on QEMU
+ * resulting in guest with sluggish response to serial console
+ * input and other I/O events.
+ *
+ * To solve this, we check and inject interrupt after setting V=1.
+ */
+ riscv_cpu_update_mip(env, 0, 0);
+ }
+ }
}
/*
@@ -1648,6 +1646,7 @@ void riscv_cpu_do_interrupt(CPUState *cs)
{
RISCVCPU *cpu = RISCV_CPU(cs);
CPURISCVState *env = &cpu->env;
+ bool virt = env->virt_enabled;
bool write_gva = false;
uint64_t s;
@@ -1778,7 +1777,7 @@ void riscv_cpu_do_interrupt(CPUState *cs)
htval = env->guest_phys_fault_addr;
- riscv_cpu_set_virt_enabled(env, 0);
+ virt = false;
} else {
/* Trap into HS mode */
env->hstatus = set_field(env->hstatus, HSTATUS_SPV, false);
@@ -1799,7 +1798,7 @@ void riscv_cpu_do_interrupt(CPUState *cs)
env->htinst = tinst;
env->pc = (env->stvec >> 2 << 2) +
((async && (env->stvec & 3) == 1) ? cause * 4 : 0);
- riscv_cpu_set_mode(env, PRV_S);
+ riscv_cpu_set_mode(env, PRV_S, virt);
} else {
/* handle the trap in M-mode */
if (riscv_has_ext(env, RVH)) {
@@ -1815,7 +1814,7 @@ void riscv_cpu_do_interrupt(CPUState *cs)
mtval2 = env->guest_phys_fault_addr;
/* Trapping to M mode, virt is disabled */
- riscv_cpu_set_virt_enabled(env, 0);
+ virt = false;
}
s = env->mstatus;
@@ -1830,7 +1829,7 @@ void riscv_cpu_do_interrupt(CPUState *cs)
env->mtinst = tinst;
env->pc = (env->mtvec >> 2 << 2) +
((async && (env->mtvec & 3) == 1) ? cause * 4 : 0);
- riscv_cpu_set_mode(env, PRV_M);
+ riscv_cpu_set_mode(env, PRV_M, virt);
}
/*
diff --git a/target/riscv/op_helper.c b/target/riscv/op_helper.c
index 2baf5bc3ca19..ec1408ba0fb1 100644
--- a/target/riscv/op_helper.c
+++ b/target/riscv/op_helper.c
@@ -264,7 +264,7 @@ void helper_cbo_inval(CPURISCVState *env, target_ulong address)
target_ulong helper_sret(CPURISCVState *env)
{
uint64_t mstatus;
- target_ulong prev_priv, prev_virt;
+ target_ulong prev_priv, prev_virt = env->virt_enabled;
if (!(env->priv >= PRV_S)) {
riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
@@ -307,11 +307,9 @@ target_ulong helper_sret(CPURISCVState *env)
if (prev_virt) {
riscv_cpu_swap_hypervisor_regs(env);
}
-
- riscv_cpu_set_virt_enabled(env, prev_virt);
}
- riscv_cpu_set_mode(env, prev_priv);
+ riscv_cpu_set_mode(env, prev_priv, prev_virt);
return retpc;
}
@@ -347,16 +345,13 @@ target_ulong helper_mret(CPURISCVState *env)
mstatus = set_field(mstatus, MSTATUS_MPRV, 0);
}
env->mstatus = mstatus;
- riscv_cpu_set_mode(env, prev_priv);
-
- if (riscv_has_ext(env, RVH)) {
- if (prev_virt) {
- riscv_cpu_swap_hypervisor_regs(env);
- }
- riscv_cpu_set_virt_enabled(env, prev_virt);
+ if (riscv_has_ext(env, RVH) && prev_virt) {
+ riscv_cpu_swap_hypervisor_regs(env);
}
+ riscv_cpu_set_mode(env, prev_priv, prev_virt);
+
return retpc;
}
--
2.34.1
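The motivation above — telling HS->VS, VS->HS and VS->VS transitions apart once riscv_cpu_set_mode() receives the new virtualization state next to the new privilege level — can be pictured with a small standalone sketch. The classify() helper and its strings below are illustrative only and are not code from this series:

/*
 * Standalone sketch (not QEMU code): with the previous and the new
 * virtualization state visible in one place, the transition kind the
 * commit message mentions can be classified directly; a later patch can
 * combine this with the privilege level to update mode-filtered counters.
 */
#include <stdbool.h>
#include <stdio.h>

static const char *classify(bool prev_virt, bool new_virt)
{
    if (prev_virt && !new_virt) {
        return "VS/VU -> HS/M (leaving virtualization)";
    }
    if (!prev_virt && new_virt) {
        return "HS -> VS/VU (entering virtualization)";
    }
    if (prev_virt && new_virt) {
        return "VS/VU -> VS/VU (staying virtualized)";
    }
    return "no virtualization state change";
}

int main(void)
{
    /* e.g. an SRET from HS-mode returning into a guest's VS-mode */
    printf("%s\n", classify(false, true));
    /* e.g. a guest trap taken into HS-mode (virt becomes false) */
    printf("%s\n", classify(true, false));
    return 0;
}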
* Re: [PATCH v7 01/11] target/riscv: Combine set_mode and set_virt functions.
2024-06-26 23:57 ` [PATCH v7 01/11] target/riscv: Combine set_mode and set_virt functions Atish Patra
@ 2024-07-01 18:21 ` Daniel Henrique Barboza
2024-07-03 1:07 ` Alistair Francis
1 sibling, 0 replies; 27+ messages in thread
From: Daniel Henrique Barboza @ 2024-07-01 18:21 UTC (permalink / raw)
To: Atish Patra, qemu-riscv, qemu-devel
Cc: Rajnesh Kanwal, palmer, liwei1518, zhiwei_liu, bin.meng,
alistair.francis
On 6/26/24 8:57 PM, Atish Patra wrote:
> From: Rajnesh Kanwal <rkanwal@rivosinc.com>
>
> Combining riscv_cpu_set_virt_enabled() and riscv_cpu_set_mode()
> functions. This is to make complete mode change information
> available through a single function.
>
> This allows to easily differentiate between HS->VS, VS->HS
> and VS->VS transitions when executing state update codes.
> For example: One use-case which inspired this change is
> to update mode-specific instruction and cycle counters
> which requires information of both prev mode and current
> mode.
>
> Signed-off-by: Rajnesh Kanwal <rkanwal@rivosinc.com>
> ---
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
> target/riscv/cpu.h | 2 +-
> target/riscv/cpu_helper.c | 57 +++++++++++++++++++++++------------------------
> target/riscv/op_helper.c | 17 +++++---------
> 3 files changed, 35 insertions(+), 41 deletions(-)
>
> diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
> index 90b8f1b08f83..46faefd24e09 100644
> --- a/target/riscv/cpu.h
> +++ b/target/riscv/cpu.h
> @@ -544,7 +544,7 @@ void riscv_cpu_set_aia_ireg_rmw_fn(CPURISCVState *env, uint32_t priv,
> RISCVException smstateen_acc_ok(CPURISCVState *env, int index, uint64_t bit);
> #endif /* !CONFIG_USER_ONLY */
>
> -void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv);
> +void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv, bool virt_en);
>
> void riscv_translate_init(void);
> G_NORETURN void riscv_raise_exception(CPURISCVState *env,
> diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
> index 6709622dd3ab..10d3fdaed376 100644
> --- a/target/riscv/cpu_helper.c
> +++ b/target/riscv/cpu_helper.c
> @@ -619,30 +619,6 @@ void riscv_cpu_set_geilen(CPURISCVState *env, target_ulong geilen)
> env->geilen = geilen;
> }
>
> -/* This function can only be called to set virt when RVH is enabled */
> -void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable)
> -{
> - /* Flush the TLB on all virt mode changes. */
> - if (env->virt_enabled != enable) {
> - tlb_flush(env_cpu(env));
> - }
> -
> - env->virt_enabled = enable;
> -
> - if (enable) {
> - /*
> - * The guest external interrupts from an interrupt controller are
> - * delivered only when the Guest/VM is running (i.e. V=1). This means
> - * any guest external interrupt which is triggered while the Guest/VM
> - * is not running (i.e. V=0) will be missed on QEMU resulting in guest
> - * with sluggish response to serial console input and other I/O events.
> - *
> - * To solve this, we check and inject interrupt after setting V=1.
> - */
> - riscv_cpu_update_mip(env, 0, 0);
> - }
> -}
> -
> int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint64_t interrupts)
> {
> CPURISCVState *env = &cpu->env;
> @@ -715,7 +691,7 @@ void riscv_cpu_set_aia_ireg_rmw_fn(CPURISCVState *env, uint32_t priv,
> }
> }
>
> -void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv)
> +void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv, bool virt_en)
> {
> g_assert(newpriv <= PRV_M && newpriv != PRV_RESERVED);
>
> @@ -736,6 +712,28 @@ void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv)
> * preemptive context switch. As a result, do both.
> */
> env->load_res = -1;
> +
> + if (riscv_has_ext(env, RVH)) {
> + /* Flush the TLB on all virt mode changes. */
> + if (env->virt_enabled != virt_en) {
> + tlb_flush(env_cpu(env));
> + }
> +
> + env->virt_enabled = virt_en;
> + if (virt_en) {
> + /*
> + * The guest external interrupts from an interrupt controller are
> + * delivered only when the Guest/VM is running (i.e. V=1). This
> + * means any guest external interrupt which is triggered while the
> + * Guest/VM is not running (i.e. V=0) will be missed on QEMU
> + * resulting in guest with sluggish response to serial console
> + * input and other I/O events.
> + *
> + * To solve this, we check and inject interrupt after setting V=1.
> + */
> + riscv_cpu_update_mip(env, 0, 0);
> + }
> + }
> }
>
> /*
> @@ -1648,6 +1646,7 @@ void riscv_cpu_do_interrupt(CPUState *cs)
> {
> RISCVCPU *cpu = RISCV_CPU(cs);
> CPURISCVState *env = &cpu->env;
> + bool virt = env->virt_enabled;
> bool write_gva = false;
> uint64_t s;
>
> @@ -1778,7 +1777,7 @@ void riscv_cpu_do_interrupt(CPUState *cs)
>
> htval = env->guest_phys_fault_addr;
>
> - riscv_cpu_set_virt_enabled(env, 0);
> + virt = false;
> } else {
> /* Trap into HS mode */
> env->hstatus = set_field(env->hstatus, HSTATUS_SPV, false);
> @@ -1799,7 +1798,7 @@ void riscv_cpu_do_interrupt(CPUState *cs)
> env->htinst = tinst;
> env->pc = (env->stvec >> 2 << 2) +
> ((async && (env->stvec & 3) == 1) ? cause * 4 : 0);
> - riscv_cpu_set_mode(env, PRV_S);
> + riscv_cpu_set_mode(env, PRV_S, virt);
> } else {
> /* handle the trap in M-mode */
> if (riscv_has_ext(env, RVH)) {
> @@ -1815,7 +1814,7 @@ void riscv_cpu_do_interrupt(CPUState *cs)
> mtval2 = env->guest_phys_fault_addr;
>
> /* Trapping to M mode, virt is disabled */
> - riscv_cpu_set_virt_enabled(env, 0);
> + virt = false;
> }
>
> s = env->mstatus;
> @@ -1830,7 +1829,7 @@ void riscv_cpu_do_interrupt(CPUState *cs)
> env->mtinst = tinst;
> env->pc = (env->mtvec >> 2 << 2) +
> ((async && (env->mtvec & 3) == 1) ? cause * 4 : 0);
> - riscv_cpu_set_mode(env, PRV_M);
> + riscv_cpu_set_mode(env, PRV_M, virt);
> }
>
> /*
> diff --git a/target/riscv/op_helper.c b/target/riscv/op_helper.c
> index 2baf5bc3ca19..ec1408ba0fb1 100644
> --- a/target/riscv/op_helper.c
> +++ b/target/riscv/op_helper.c
> @@ -264,7 +264,7 @@ void helper_cbo_inval(CPURISCVState *env, target_ulong address)
> target_ulong helper_sret(CPURISCVState *env)
> {
> uint64_t mstatus;
> - target_ulong prev_priv, prev_virt;
> + target_ulong prev_priv, prev_virt = env->virt_enabled;
>
> if (!(env->priv >= PRV_S)) {
> riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
> @@ -307,11 +307,9 @@ target_ulong helper_sret(CPURISCVState *env)
> if (prev_virt) {
> riscv_cpu_swap_hypervisor_regs(env);
> }
> -
> - riscv_cpu_set_virt_enabled(env, prev_virt);
> }
>
> - riscv_cpu_set_mode(env, prev_priv);
> + riscv_cpu_set_mode(env, prev_priv, prev_virt);
>
> return retpc;
> }
> @@ -347,16 +345,13 @@ target_ulong helper_mret(CPURISCVState *env)
> mstatus = set_field(mstatus, MSTATUS_MPRV, 0);
> }
> env->mstatus = mstatus;
> - riscv_cpu_set_mode(env, prev_priv);
> -
> - if (riscv_has_ext(env, RVH)) {
> - if (prev_virt) {
> - riscv_cpu_swap_hypervisor_regs(env);
> - }
>
> - riscv_cpu_set_virt_enabled(env, prev_virt);
> + if (riscv_has_ext(env, RVH) && prev_virt) {
> + riscv_cpu_swap_hypervisor_regs(env);
> }
>
> + riscv_cpu_set_mode(env, prev_priv, prev_virt);
> +
> return retpc;
> }
>
>
* Re: [PATCH v7 01/11] target/riscv: Combine set_mode and set_virt functions.
2024-06-26 23:57 ` [PATCH v7 01/11] target/riscv: Combine set_mode and set_virt functions Atish Patra
2024-07-01 18:21 ` Daniel Henrique Barboza
@ 2024-07-03 1:07 ` Alistair Francis
1 sibling, 0 replies; 27+ messages in thread
From: Alistair Francis @ 2024-07-03 1:07 UTC (permalink / raw)
To: Atish Patra
Cc: qemu-riscv, qemu-devel, Rajnesh Kanwal, palmer, liwei1518,
zhiwei_liu, bin.meng, dbarboza, alistair.francis
On Thu, Jun 27, 2024 at 10:02 AM Atish Patra <atishp@rivosinc.com> wrote:
>
> From: Rajnesh Kanwal <rkanwal@rivosinc.com>
>
> Combining riscv_cpu_set_virt_enabled() and riscv_cpu_set_mode()
> functions. This is to make complete mode change information
> available through a single function.
>
> This allows to easily differentiate between HS->VS, VS->HS
> and VS->VS transitions when executing state update codes.
> For example: One use-case which inspired this change is
> to update mode-specific instruction and cycle counters
> which requires information of both prev mode and current
> mode.
>
> Signed-off-by: Rajnesh Kanwal <rkanwal@rivosinc.com>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Alistair
> ---
> target/riscv/cpu.h | 2 +-
> target/riscv/cpu_helper.c | 57 +++++++++++++++++++++++------------------------
> target/riscv/op_helper.c | 17 +++++---------
> 3 files changed, 35 insertions(+), 41 deletions(-)
>
> diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
> index 90b8f1b08f83..46faefd24e09 100644
> --- a/target/riscv/cpu.h
> +++ b/target/riscv/cpu.h
> @@ -544,7 +544,7 @@ void riscv_cpu_set_aia_ireg_rmw_fn(CPURISCVState *env, uint32_t priv,
> RISCVException smstateen_acc_ok(CPURISCVState *env, int index, uint64_t bit);
> #endif /* !CONFIG_USER_ONLY */
>
> -void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv);
> +void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv, bool virt_en);
>
> void riscv_translate_init(void);
> G_NORETURN void riscv_raise_exception(CPURISCVState *env,
> diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
> index 6709622dd3ab..10d3fdaed376 100644
> --- a/target/riscv/cpu_helper.c
> +++ b/target/riscv/cpu_helper.c
> @@ -619,30 +619,6 @@ void riscv_cpu_set_geilen(CPURISCVState *env, target_ulong geilen)
> env->geilen = geilen;
> }
>
> -/* This function can only be called to set virt when RVH is enabled */
> -void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable)
> -{
> - /* Flush the TLB on all virt mode changes. */
> - if (env->virt_enabled != enable) {
> - tlb_flush(env_cpu(env));
> - }
> -
> - env->virt_enabled = enable;
> -
> - if (enable) {
> - /*
> - * The guest external interrupts from an interrupt controller are
> - * delivered only when the Guest/VM is running (i.e. V=1). This means
> - * any guest external interrupt which is triggered while the Guest/VM
> - * is not running (i.e. V=0) will be missed on QEMU resulting in guest
> - * with sluggish response to serial console input and other I/O events.
> - *
> - * To solve this, we check and inject interrupt after setting V=1.
> - */
> - riscv_cpu_update_mip(env, 0, 0);
> - }
> -}
> -
> int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint64_t interrupts)
> {
> CPURISCVState *env = &cpu->env;
> @@ -715,7 +691,7 @@ void riscv_cpu_set_aia_ireg_rmw_fn(CPURISCVState *env, uint32_t priv,
> }
> }
>
> -void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv)
> +void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv, bool virt_en)
> {
> g_assert(newpriv <= PRV_M && newpriv != PRV_RESERVED);
>
> @@ -736,6 +712,28 @@ void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv)
> * preemptive context switch. As a result, do both.
> */
> env->load_res = -1;
> +
> + if (riscv_has_ext(env, RVH)) {
> + /* Flush the TLB on all virt mode changes. */
> + if (env->virt_enabled != virt_en) {
> + tlb_flush(env_cpu(env));
> + }
> +
> + env->virt_enabled = virt_en;
> + if (virt_en) {
> + /*
> + * The guest external interrupts from an interrupt controller are
> + * delivered only when the Guest/VM is running (i.e. V=1). This
> + * means any guest external interrupt which is triggered while the
> + * Guest/VM is not running (i.e. V=0) will be missed on QEMU
> + * resulting in guest with sluggish response to serial console
> + * input and other I/O events.
> + *
> + * To solve this, we check and inject interrupt after setting V=1.
> + */
> + riscv_cpu_update_mip(env, 0, 0);
> + }
> + }
> }
>
> /*
> @@ -1648,6 +1646,7 @@ void riscv_cpu_do_interrupt(CPUState *cs)
> {
> RISCVCPU *cpu = RISCV_CPU(cs);
> CPURISCVState *env = &cpu->env;
> + bool virt = env->virt_enabled;
> bool write_gva = false;
> uint64_t s;
>
> @@ -1778,7 +1777,7 @@ void riscv_cpu_do_interrupt(CPUState *cs)
>
> htval = env->guest_phys_fault_addr;
>
> - riscv_cpu_set_virt_enabled(env, 0);
> + virt = false;
> } else {
> /* Trap into HS mode */
> env->hstatus = set_field(env->hstatus, HSTATUS_SPV, false);
> @@ -1799,7 +1798,7 @@ void riscv_cpu_do_interrupt(CPUState *cs)
> env->htinst = tinst;
> env->pc = (env->stvec >> 2 << 2) +
> ((async && (env->stvec & 3) == 1) ? cause * 4 : 0);
> - riscv_cpu_set_mode(env, PRV_S);
> + riscv_cpu_set_mode(env, PRV_S, virt);
> } else {
> /* handle the trap in M-mode */
> if (riscv_has_ext(env, RVH)) {
> @@ -1815,7 +1814,7 @@ void riscv_cpu_do_interrupt(CPUState *cs)
> mtval2 = env->guest_phys_fault_addr;
>
> /* Trapping to M mode, virt is disabled */
> - riscv_cpu_set_virt_enabled(env, 0);
> + virt = false;
> }
>
> s = env->mstatus;
> @@ -1830,7 +1829,7 @@ void riscv_cpu_do_interrupt(CPUState *cs)
> env->mtinst = tinst;
> env->pc = (env->mtvec >> 2 << 2) +
> ((async && (env->mtvec & 3) == 1) ? cause * 4 : 0);
> - riscv_cpu_set_mode(env, PRV_M);
> + riscv_cpu_set_mode(env, PRV_M, virt);
> }
>
> /*
> diff --git a/target/riscv/op_helper.c b/target/riscv/op_helper.c
> index 2baf5bc3ca19..ec1408ba0fb1 100644
> --- a/target/riscv/op_helper.c
> +++ b/target/riscv/op_helper.c
> @@ -264,7 +264,7 @@ void helper_cbo_inval(CPURISCVState *env, target_ulong address)
> target_ulong helper_sret(CPURISCVState *env)
> {
> uint64_t mstatus;
> - target_ulong prev_priv, prev_virt;
> + target_ulong prev_priv, prev_virt = env->virt_enabled;
>
> if (!(env->priv >= PRV_S)) {
> riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
> @@ -307,11 +307,9 @@ target_ulong helper_sret(CPURISCVState *env)
> if (prev_virt) {
> riscv_cpu_swap_hypervisor_regs(env);
> }
> -
> - riscv_cpu_set_virt_enabled(env, prev_virt);
> }
>
> - riscv_cpu_set_mode(env, prev_priv);
> + riscv_cpu_set_mode(env, prev_priv, prev_virt);
>
> return retpc;
> }
> @@ -347,16 +345,13 @@ target_ulong helper_mret(CPURISCVState *env)
> mstatus = set_field(mstatus, MSTATUS_MPRV, 0);
> }
> env->mstatus = mstatus;
> - riscv_cpu_set_mode(env, prev_priv);
> -
> - if (riscv_has_ext(env, RVH)) {
> - if (prev_virt) {
> - riscv_cpu_swap_hypervisor_regs(env);
> - }
>
> - riscv_cpu_set_virt_enabled(env, prev_virt);
> + if (riscv_has_ext(env, RVH) && prev_virt) {
> + riscv_cpu_swap_hypervisor_regs(env);
> }
>
> + riscv_cpu_set_mode(env, prev_priv, prev_virt);
> +
> return retpc;
> }
>
>
> --
> 2.34.1
>
>
* [PATCH v7 02/11] target/riscv: Fix the predicate functions for mhpmeventhX CSRs
2024-06-26 23:57 [PATCH v7 00/11] Add RISC-V ISA extension smcntrpmf support Atish Patra
2024-06-26 23:57 ` [PATCH v7 01/11] target/riscv: Combine set_mode and set_virt functions Atish Patra
@ 2024-06-26 23:57 ` Atish Patra
2024-06-26 23:57 ` [PATCH v7 03/11] target/riscv: Add cycle & instret privilege mode filtering properties Atish Patra
` (8 subsequent siblings)
10 siblings, 0 replies; 27+ messages in thread
From: Atish Patra @ 2024-06-26 23:57 UTC (permalink / raw)
To: qemu-riscv, qemu-devel
Cc: Rajnesh Kanwal, Atish Patra, palmer, liwei1518, zhiwei_liu,
bin.meng, dbarboza, alistair.francis
The mhpmeventhX CSRs are only available on RV32. The predicate function
should check that first, before checking for the sscofpmf extension.
Fixes: 14664483457b ("target/riscv: Add sscofpmf extension support")
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Signed-off-by: Atish Patra <atishp@rivosinc.com>
---
target/riscv/csr.c | 67 +++++++++++++++++++++++++++++++-----------------------
1 file changed, 38 insertions(+), 29 deletions(-)
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
index 432c59dc66be..3ad851707e5c 100644
--- a/target/riscv/csr.c
+++ b/target/riscv/csr.c
@@ -227,6 +227,15 @@ static RISCVException sscofpmf(CPURISCVState *env, int csrno)
return RISCV_EXCP_NONE;
}
+static RISCVException sscofpmf_32(CPURISCVState *env, int csrno)
+{
+ if (riscv_cpu_mxl(env) != MXL_RV32) {
+ return RISCV_EXCP_ILLEGAL_INST;
+ }
+
+ return sscofpmf(env, csrno);
+}
+
static RISCVException any(CPURISCVState *env, int csrno)
{
return RISCV_EXCP_NONE;
@@ -5101,91 +5110,91 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
[CSR_MHPMEVENT31] = { "mhpmevent31", any, read_mhpmevent,
write_mhpmevent },
- [CSR_MHPMEVENT3H] = { "mhpmevent3h", sscofpmf, read_mhpmeventh,
+ [CSR_MHPMEVENT3H] = { "mhpmevent3h", sscofpmf_32, read_mhpmeventh,
write_mhpmeventh,
.min_priv_ver = PRIV_VERSION_1_12_0 },
- [CSR_MHPMEVENT4H] = { "mhpmevent4h", sscofpmf, read_mhpmeventh,
+ [CSR_MHPMEVENT4H] = { "mhpmevent4h", sscofpmf_32, read_mhpmeventh,
write_mhpmeventh,
.min_priv_ver = PRIV_VERSION_1_12_0 },
- [CSR_MHPMEVENT5H] = { "mhpmevent5h", sscofpmf, read_mhpmeventh,
+ [CSR_MHPMEVENT5H] = { "mhpmevent5h", sscofpmf_32, read_mhpmeventh,
write_mhpmeventh,
.min_priv_ver = PRIV_VERSION_1_12_0 },
- [CSR_MHPMEVENT6H] = { "mhpmevent6h", sscofpmf, read_mhpmeventh,
+ [CSR_MHPMEVENT6H] = { "mhpmevent6h", sscofpmf_32, read_mhpmeventh,
write_mhpmeventh,
.min_priv_ver = PRIV_VERSION_1_12_0 },
- [CSR_MHPMEVENT7H] = { "mhpmevent7h", sscofpmf, read_mhpmeventh,
+ [CSR_MHPMEVENT7H] = { "mhpmevent7h", sscofpmf_32, read_mhpmeventh,
write_mhpmeventh,
.min_priv_ver = PRIV_VERSION_1_12_0 },
- [CSR_MHPMEVENT8H] = { "mhpmevent8h", sscofpmf, read_mhpmeventh,
+ [CSR_MHPMEVENT8H] = { "mhpmevent8h", sscofpmf_32, read_mhpmeventh,
write_mhpmeventh,
.min_priv_ver = PRIV_VERSION_1_12_0 },
- [CSR_MHPMEVENT9H] = { "mhpmevent9h", sscofpmf, read_mhpmeventh,
+ [CSR_MHPMEVENT9H] = { "mhpmevent9h", sscofpmf_32, read_mhpmeventh,
write_mhpmeventh,
.min_priv_ver = PRIV_VERSION_1_12_0 },
- [CSR_MHPMEVENT10H] = { "mhpmevent10h", sscofpmf, read_mhpmeventh,
+ [CSR_MHPMEVENT10H] = { "mhpmevent10h", sscofpmf_32, read_mhpmeventh,
write_mhpmeventh,
.min_priv_ver = PRIV_VERSION_1_12_0 },
- [CSR_MHPMEVENT11H] = { "mhpmevent11h", sscofpmf, read_mhpmeventh,
+ [CSR_MHPMEVENT11H] = { "mhpmevent11h", sscofpmf_32, read_mhpmeventh,
write_mhpmeventh,
.min_priv_ver = PRIV_VERSION_1_12_0 },
- [CSR_MHPMEVENT12H] = { "mhpmevent12h", sscofpmf, read_mhpmeventh,
+ [CSR_MHPMEVENT12H] = { "mhpmevent12h", sscofpmf_32, read_mhpmeventh,
write_mhpmeventh,
.min_priv_ver = PRIV_VERSION_1_12_0 },
- [CSR_MHPMEVENT13H] = { "mhpmevent13h", sscofpmf, read_mhpmeventh,
+ [CSR_MHPMEVENT13H] = { "mhpmevent13h", sscofpmf_32, read_mhpmeventh,
write_mhpmeventh,
.min_priv_ver = PRIV_VERSION_1_12_0 },
- [CSR_MHPMEVENT14H] = { "mhpmevent14h", sscofpmf, read_mhpmeventh,
+ [CSR_MHPMEVENT14H] = { "mhpmevent14h", sscofpmf_32, read_mhpmeventh,
write_mhpmeventh,
.min_priv_ver = PRIV_VERSION_1_12_0 },
- [CSR_MHPMEVENT15H] = { "mhpmevent15h", sscofpmf, read_mhpmeventh,
+ [CSR_MHPMEVENT15H] = { "mhpmevent15h", sscofpmf_32, read_mhpmeventh,
write_mhpmeventh,
.min_priv_ver = PRIV_VERSION_1_12_0 },
- [CSR_MHPMEVENT16H] = { "mhpmevent16h", sscofpmf, read_mhpmeventh,
+ [CSR_MHPMEVENT16H] = { "mhpmevent16h", sscofpmf_32, read_mhpmeventh,
write_mhpmeventh,
.min_priv_ver = PRIV_VERSION_1_12_0 },
- [CSR_MHPMEVENT17H] = { "mhpmevent17h", sscofpmf, read_mhpmeventh,
+ [CSR_MHPMEVENT17H] = { "mhpmevent17h", sscofpmf_32, read_mhpmeventh,
write_mhpmeventh,
.min_priv_ver = PRIV_VERSION_1_12_0 },
- [CSR_MHPMEVENT18H] = { "mhpmevent18h", sscofpmf, read_mhpmeventh,
+ [CSR_MHPMEVENT18H] = { "mhpmevent18h", sscofpmf_32, read_mhpmeventh,
write_mhpmeventh,
.min_priv_ver = PRIV_VERSION_1_12_0 },
- [CSR_MHPMEVENT19H] = { "mhpmevent19h", sscofpmf, read_mhpmeventh,
+ [CSR_MHPMEVENT19H] = { "mhpmevent19h", sscofpmf_32, read_mhpmeventh,
write_mhpmeventh,
.min_priv_ver = PRIV_VERSION_1_12_0 },
- [CSR_MHPMEVENT20H] = { "mhpmevent20h", sscofpmf, read_mhpmeventh,
+ [CSR_MHPMEVENT20H] = { "mhpmevent20h", sscofpmf_32, read_mhpmeventh,
write_mhpmeventh,
.min_priv_ver = PRIV_VERSION_1_12_0 },
- [CSR_MHPMEVENT21H] = { "mhpmevent21h", sscofpmf, read_mhpmeventh,
+ [CSR_MHPMEVENT21H] = { "mhpmevent21h", sscofpmf_32, read_mhpmeventh,
write_mhpmeventh,
.min_priv_ver = PRIV_VERSION_1_12_0 },
- [CSR_MHPMEVENT22H] = { "mhpmevent22h", sscofpmf, read_mhpmeventh,
+ [CSR_MHPMEVENT22H] = { "mhpmevent22h", sscofpmf_32, read_mhpmeventh,
write_mhpmeventh,
.min_priv_ver = PRIV_VERSION_1_12_0 },
- [CSR_MHPMEVENT23H] = { "mhpmevent23h", sscofpmf, read_mhpmeventh,
+ [CSR_MHPMEVENT23H] = { "mhpmevent23h", sscofpmf_32, read_mhpmeventh,
write_mhpmeventh,
.min_priv_ver = PRIV_VERSION_1_12_0 },
- [CSR_MHPMEVENT24H] = { "mhpmevent24h", sscofpmf, read_mhpmeventh,
+ [CSR_MHPMEVENT24H] = { "mhpmevent24h", sscofpmf_32, read_mhpmeventh,
write_mhpmeventh,
.min_priv_ver = PRIV_VERSION_1_12_0 },
- [CSR_MHPMEVENT25H] = { "mhpmevent25h", sscofpmf, read_mhpmeventh,
+ [CSR_MHPMEVENT25H] = { "mhpmevent25h", sscofpmf_32, read_mhpmeventh,
write_mhpmeventh,
.min_priv_ver = PRIV_VERSION_1_12_0 },
- [CSR_MHPMEVENT26H] = { "mhpmevent26h", sscofpmf, read_mhpmeventh,
+ [CSR_MHPMEVENT26H] = { "mhpmevent26h", sscofpmf_32, read_mhpmeventh,
write_mhpmeventh,
.min_priv_ver = PRIV_VERSION_1_12_0 },
- [CSR_MHPMEVENT27H] = { "mhpmevent27h", sscofpmf, read_mhpmeventh,
+ [CSR_MHPMEVENT27H] = { "mhpmevent27h", sscofpmf_32, read_mhpmeventh,
write_mhpmeventh,
.min_priv_ver = PRIV_VERSION_1_12_0 },
- [CSR_MHPMEVENT28H] = { "mhpmevent28h", sscofpmf, read_mhpmeventh,
+ [CSR_MHPMEVENT28H] = { "mhpmevent28h", sscofpmf_32, read_mhpmeventh,
write_mhpmeventh,
.min_priv_ver = PRIV_VERSION_1_12_0 },
- [CSR_MHPMEVENT29H] = { "mhpmevent29h", sscofpmf, read_mhpmeventh,
+ [CSR_MHPMEVENT29H] = { "mhpmevent29h", sscofpmf_32, read_mhpmeventh,
write_mhpmeventh,
.min_priv_ver = PRIV_VERSION_1_12_0 },
- [CSR_MHPMEVENT30H] = { "mhpmevent30h", sscofpmf, read_mhpmeventh,
+ [CSR_MHPMEVENT30H] = { "mhpmevent30h", sscofpmf_32, read_mhpmeventh,
write_mhpmeventh,
.min_priv_ver = PRIV_VERSION_1_12_0 },
- [CSR_MHPMEVENT31H] = { "mhpmevent31h", sscofpmf, read_mhpmeventh,
+ [CSR_MHPMEVENT31H] = { "mhpmevent31h", sscofpmf_32, read_mhpmeventh,
write_mhpmeventh,
.min_priv_ver = PRIV_VERSION_1_12_0 },
--
2.34.1
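The fix in this patch is a matter of predicate ordering: on an RV64 hart the mhpmeventhX CSRs must raise an illegal-instruction exception regardless of whether sscofpmf is enabled. The following standalone toy model (not QEMU's real CSR dispatch; the is_rv32/has_sscofpmf flags are assumed values) contrasts the old and the new behaviour:

#include <stdbool.h>
#include <stdio.h>

typedef enum { EXCP_NONE, EXCP_ILLEGAL_INST } Excp;

/* Toy configuration: an RV64 hart with sscofpmf enabled. */
static const bool is_rv32 = false;
static const bool has_sscofpmf = true;

static Excp sscofpmf_pred(void)
{
    return has_sscofpmf ? EXCP_NONE : EXCP_ILLEGAL_INST;
}

/* New predicate: reject the mhpmeventhX CSRs before looking at sscofpmf. */
static Excp sscofpmf_32_pred(void)
{
    if (!is_rv32) {
        return EXCP_ILLEGAL_INST;
    }
    return sscofpmf_pred();
}

int main(void)
{
    printf("old predicate: %s\n",
           sscofpmf_pred() == EXCP_NONE ? "mhpmevent3h exposed (bug)"
                                        : "mhpmevent3h rejected");
    printf("new predicate: %s\n",
           sscofpmf_32_pred() == EXCP_NONE ? "mhpmevent3h exposed"
                                           : "mhpmevent3h rejected (fixed)");
    return 0;
}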
* [PATCH v7 03/11] target/riscv: Add cycle & instret privilege mode filtering properties
2024-06-26 23:57 [PATCH v7 00/11] Add RISC-V ISA extension smcntrpmf support Atish Patra
2024-06-26 23:57 ` [PATCH v7 01/11] target/riscv: Combine set_mode and set_virt functions Atish Patra
2024-06-26 23:57 ` [PATCH v7 02/11] target/riscv: Fix the predicate functions for mhpmeventhX CSRs Atish Patra
@ 2024-06-26 23:57 ` Atish Patra
2024-07-01 19:10 ` Daniel Henrique Barboza
2024-07-03 2:02 ` Alistair Francis
2024-06-26 23:57 ` [PATCH v7 04/11] target/riscv: Add cycle & instret privilege mode filtering definitions Atish Patra
` (7 subsequent siblings)
10 siblings, 2 replies; 27+ messages in thread
From: Atish Patra @ 2024-06-26 23:57 UTC (permalink / raw)
To: qemu-riscv, qemu-devel
Cc: Rajnesh Kanwal, Atish Patra, palmer, liwei1518, zhiwei_liu,
bin.meng, dbarboza, alistair.francis, Kaiwen Xue
From: Kaiwen Xue <kaiwenx@rivosinc.com>
This adds the properties for ISA extension smcntrpmf. Patches
implementing it will follow.
Signed-off-by: Atish Patra <atishp@rivosinc.com>
Signed-off-by: Kaiwen Xue <kaiwenx@rivosinc.com>
---
target/riscv/cpu.c | 2 ++
target/riscv/cpu_cfg.h | 1 +
2 files changed, 3 insertions(+)
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
index 4760cb2cc17f..ef50130a91e7 100644
--- a/target/riscv/cpu.c
+++ b/target/riscv/cpu.c
@@ -178,6 +178,7 @@ const RISCVIsaExtData isa_edata_arr[] = {
ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
+ ISA_EXT_DATA_ENTRY(smcntrpmf, PRIV_VERSION_1_12_0, ext_smcntrpmf),
ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, ext_smepmp),
ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen),
ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia),
@@ -1467,6 +1468,7 @@ const char *riscv_get_misa_ext_description(uint32_t bit)
const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
/* Defaults for standard extensions */
MULTI_EXT_CFG_BOOL("sscofpmf", ext_sscofpmf, false),
+ MULTI_EXT_CFG_BOOL("smcntrpmf", ext_smcntrpmf, false),
MULTI_EXT_CFG_BOOL("zifencei", ext_zifencei, true),
MULTI_EXT_CFG_BOOL("zicsr", ext_zicsr, true),
MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true),
diff --git a/target/riscv/cpu_cfg.h b/target/riscv/cpu_cfg.h
index fb7eebde523b..b1376beb1dab 100644
--- a/target/riscv/cpu_cfg.h
+++ b/target/riscv/cpu_cfg.h
@@ -74,6 +74,7 @@ struct RISCVCPUConfig {
bool ext_ztso;
bool ext_smstateen;
bool ext_sstc;
+ bool ext_smcntrpmf;
bool ext_svadu;
bool ext_svinval;
bool ext_svnapot;
--
2.34.1
* Re: [PATCH v7 03/11] target/riscv: Add cycle & instret privilege mode filtering properties
2024-06-26 23:57 ` [PATCH v7 03/11] target/riscv: Add cycle & instret privilege mode filtering properties Atish Patra
@ 2024-07-01 19:10 ` Daniel Henrique Barboza
2024-07-03 2:02 ` Alistair Francis
1 sibling, 0 replies; 27+ messages in thread
From: Daniel Henrique Barboza @ 2024-07-01 19:10 UTC (permalink / raw)
To: Atish Patra, qemu-riscv, qemu-devel
Cc: Rajnesh Kanwal, palmer, liwei1518, zhiwei_liu, bin.meng,
alistair.francis, Kaiwen Xue
On 6/26/24 8:57 PM, Atish Patra wrote:
> From: Kaiwen Xue <kaiwenx@rivosinc.com>
>
> This adds the properties for ISA extension smcntrpmf. Patches
> implementing it will follow.
>
> Signed-off-by: Atish Patra <atishp@rivosinc.com>
> Signed-off-by: Kaiwen Xue <kaiwenx@rivosinc.com>
> ---
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
> target/riscv/cpu.c | 2 ++
> target/riscv/cpu_cfg.h | 1 +
> 2 files changed, 3 insertions(+)
>
> diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
> index 4760cb2cc17f..ef50130a91e7 100644
> --- a/target/riscv/cpu.c
> +++ b/target/riscv/cpu.c
> @@ -178,6 +178,7 @@ const RISCVIsaExtData isa_edata_arr[] = {
> ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
> ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
> ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
> + ISA_EXT_DATA_ENTRY(smcntrpmf, PRIV_VERSION_1_12_0, ext_smcntrpmf),
> ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, ext_smepmp),
> ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen),
> ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia),
> @@ -1467,6 +1468,7 @@ const char *riscv_get_misa_ext_description(uint32_t bit)
> const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
> /* Defaults for standard extensions */
> MULTI_EXT_CFG_BOOL("sscofpmf", ext_sscofpmf, false),
> + MULTI_EXT_CFG_BOOL("smcntrpmf", ext_smcntrpmf, false),
> MULTI_EXT_CFG_BOOL("zifencei", ext_zifencei, true),
> MULTI_EXT_CFG_BOOL("zicsr", ext_zicsr, true),
> MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true),
> diff --git a/target/riscv/cpu_cfg.h b/target/riscv/cpu_cfg.h
> index fb7eebde523b..b1376beb1dab 100644
> --- a/target/riscv/cpu_cfg.h
> +++ b/target/riscv/cpu_cfg.h
> @@ -74,6 +74,7 @@ struct RISCVCPUConfig {
> bool ext_ztso;
> bool ext_smstateen;
> bool ext_sstc;
> + bool ext_smcntrpmf;
> bool ext_svadu;
> bool ext_svinval;
> bool ext_svnapot;
>
* Re: [PATCH v7 03/11] target/riscv: Add cycle & instret privilege mode filtering properties
2024-06-26 23:57 ` [PATCH v7 03/11] target/riscv: Add cycle & instret privilege mode filtering properties Atish Patra
2024-07-01 19:10 ` Daniel Henrique Barboza
@ 2024-07-03 2:02 ` Alistair Francis
2024-07-10 7:03 ` Atish Kumar Patra
1 sibling, 1 reply; 27+ messages in thread
From: Alistair Francis @ 2024-07-03 2:02 UTC (permalink / raw)
To: Atish Patra
Cc: qemu-riscv, qemu-devel, Rajnesh Kanwal, palmer, liwei1518,
zhiwei_liu, bin.meng, dbarboza, alistair.francis, Kaiwen Xue
On Thu, Jun 27, 2024 at 9:59 AM Atish Patra <atishp@rivosinc.com> wrote:
>
> From: Kaiwen Xue <kaiwenx@rivosinc.com>
>
> This adds the properties for ISA extension smcntrpmf. Patches
> implementing it will follow.
>
> Signed-off-by: Atish Patra <atishp@rivosinc.com>
> Signed-off-by: Kaiwen Xue <kaiwenx@rivosinc.com>
> ---
> target/riscv/cpu.c | 2 ++
> target/riscv/cpu_cfg.h | 1 +
> 2 files changed, 3 insertions(+)
>
> diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
> index 4760cb2cc17f..ef50130a91e7 100644
> --- a/target/riscv/cpu.c
> +++ b/target/riscv/cpu.c
> @@ -178,6 +178,7 @@ const RISCVIsaExtData isa_edata_arr[] = {
> ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
> ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
> ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
> + ISA_EXT_DATA_ENTRY(smcntrpmf, PRIV_VERSION_1_12_0, ext_smcntrpmf),
> ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, ext_smepmp),
> ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen),
> ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia),
> @@ -1467,6 +1468,7 @@ const char *riscv_get_misa_ext_description(uint32_t bit)
> const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
> /* Defaults for standard extensions */
> MULTI_EXT_CFG_BOOL("sscofpmf", ext_sscofpmf, false),
> + MULTI_EXT_CFG_BOOL("smcntrpmf", ext_smcntrpmf, false),
Exposing the config should be at the end of the series. Implement first, then expose.
Alistair
> MULTI_EXT_CFG_BOOL("zifencei", ext_zifencei, true),
> MULTI_EXT_CFG_BOOL("zicsr", ext_zicsr, true),
> MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true),
> diff --git a/target/riscv/cpu_cfg.h b/target/riscv/cpu_cfg.h
> index fb7eebde523b..b1376beb1dab 100644
> --- a/target/riscv/cpu_cfg.h
> +++ b/target/riscv/cpu_cfg.h
> @@ -74,6 +74,7 @@ struct RISCVCPUConfig {
> bool ext_ztso;
> bool ext_smstateen;
> bool ext_sstc;
> + bool ext_smcntrpmf;
> bool ext_svadu;
> bool ext_svinval;
> bool ext_svnapot;
>
> --
> 2.34.1
>
>
* Re: [PATCH v7 03/11] target/riscv: Add cycle & instret privilege mode filtering properties
2024-07-03 2:02 ` Alistair Francis
@ 2024-07-10 7:03 ` Atish Kumar Patra
0 siblings, 0 replies; 27+ messages in thread
From: Atish Kumar Patra @ 2024-07-10 7:03 UTC (permalink / raw)
To: Alistair Francis
Cc: qemu-riscv, qemu-devel, Rajnesh Kanwal, palmer, liwei1518,
zhiwei_liu, bin.meng, dbarboza, alistair.francis, Kaiwen Xue
On Tue, Jul 2, 2024 at 7:03 PM Alistair Francis <alistair23@gmail.com> wrote:
>
> On Thu, Jun 27, 2024 at 9:59 AM Atish Patra <atishp@rivosinc.com> wrote:
> >
> > From: Kaiwen Xue <kaiwenx@rivosinc.com>
> >
> > This adds the properties for ISA extension smcntrpmf. Patches
> > implementing it will follow.
> >
> > Signed-off-by: Atish Patra <atishp@rivosinc.com>
> > Signed-off-by: Kaiwen Xue <kaiwenx@rivosinc.com>
> > ---
> > target/riscv/cpu.c | 2 ++
> > target/riscv/cpu_cfg.h | 1 +
> > 2 files changed, 3 insertions(+)
> >
> > diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
> > index 4760cb2cc17f..ef50130a91e7 100644
> > --- a/target/riscv/cpu.c
> > +++ b/target/riscv/cpu.c
> > @@ -178,6 +178,7 @@ const RISCVIsaExtData isa_edata_arr[] = {
> > ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
> > ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
> > ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
> > + ISA_EXT_DATA_ENTRY(smcntrpmf, PRIV_VERSION_1_12_0, ext_smcntrpmf),
> > ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, ext_smepmp),
> > ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen),
> > ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia),
> > @@ -1467,6 +1468,7 @@ const char *riscv_get_misa_ext_description(uint32_t bit)
> > const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = {
> > /* Defaults for standard extensions */
> > MULTI_EXT_CFG_BOOL("sscofpmf", ext_sscofpmf, false),
> > + MULTI_EXT_CFG_BOOL("smcntrpmf", ext_smcntrpmf, false),
>
> Exposing the config should be at the end of the series. Implement then expose
>
Sure. I will move this to the end of the series.
> Alistair
>
> > MULTI_EXT_CFG_BOOL("zifencei", ext_zifencei, true),
> > MULTI_EXT_CFG_BOOL("zicsr", ext_zicsr, true),
> > MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true),
> > diff --git a/target/riscv/cpu_cfg.h b/target/riscv/cpu_cfg.h
> > index fb7eebde523b..b1376beb1dab 100644
> > --- a/target/riscv/cpu_cfg.h
> > +++ b/target/riscv/cpu_cfg.h
> > @@ -74,6 +74,7 @@ struct RISCVCPUConfig {
> > bool ext_ztso;
> > bool ext_smstateen;
> > bool ext_sstc;
> > + bool ext_smcntrpmf;
> > bool ext_svadu;
> > bool ext_svinval;
> > bool ext_svnapot;
> >
> > --
> > 2.34.1
> >
> >
* [PATCH v7 04/11] target/riscv: Add cycle & instret privilege mode filtering definitions
2024-06-26 23:57 [PATCH v7 00/11] Add RISC-V ISA extension smcntrpmf support Atish Patra
` (2 preceding siblings ...)
2024-06-26 23:57 ` [PATCH v7 03/11] target/riscv: Add cycle & instret privilege mode filtering properties Atish Patra
@ 2024-06-26 23:57 ` Atish Patra
2024-07-03 1:12 ` Alistair Francis
2024-06-26 23:57 ` [PATCH v7 05/11] target/riscv: Add cycle & instret privilege mode filtering support Atish Patra
` (6 subsequent siblings)
10 siblings, 1 reply; 27+ messages in thread
From: Atish Patra @ 2024-06-26 23:57 UTC (permalink / raw)
To: qemu-riscv, qemu-devel
Cc: Rajnesh Kanwal, Atish Patra, palmer, liwei1518, zhiwei_liu,
bin.meng, dbarboza, alistair.francis, Kaiwen Xue
From: Kaiwen Xue <kaiwenx@rivosinc.com>
This adds the definitions for ISA extension smcntrpmf.
Signed-off-by: Kaiwen Xue <kaiwenx@rivosinc.com>
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
Signed-off-by: Atish Patra <atishp@rivosinc.com>
---
target/riscv/cpu.h | 6 ++++++
target/riscv/cpu_bits.h | 29 +++++++++++++++++++++++++++++
2 files changed, 35 insertions(+)
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
index 46faefd24e09..c5d289e5f4b9 100644
--- a/target/riscv/cpu.h
+++ b/target/riscv/cpu.h
@@ -339,6 +339,12 @@ struct CPUArchState {
uint32_t mcountinhibit;
+ /* PMU cycle & instret privilege mode filtering */
+ target_ulong mcyclecfg;
+ target_ulong mcyclecfgh;
+ target_ulong minstretcfg;
+ target_ulong minstretcfgh;
+
/* PMU counter state */
PMUCTRState pmu_ctrs[RV_MAX_MHPMCOUNTERS];
diff --git a/target/riscv/cpu_bits.h b/target/riscv/cpu_bits.h
index c257c5ed7dc9..5faa817453bb 100644
--- a/target/riscv/cpu_bits.h
+++ b/target/riscv/cpu_bits.h
@@ -397,6 +397,10 @@
/* Machine counter-inhibit register */
#define CSR_MCOUNTINHIBIT 0x320
+/* Machine counter configuration registers */
+#define CSR_MCYCLECFG 0x321
+#define CSR_MINSTRETCFG 0x322
+
#define CSR_MHPMEVENT3 0x323
#define CSR_MHPMEVENT4 0x324
#define CSR_MHPMEVENT5 0x325
@@ -427,6 +431,9 @@
#define CSR_MHPMEVENT30 0x33e
#define CSR_MHPMEVENT31 0x33f
+#define CSR_MCYCLECFGH 0x721
+#define CSR_MINSTRETCFGH 0x722
+
#define CSR_MHPMEVENT3H 0x723
#define CSR_MHPMEVENT4H 0x724
#define CSR_MHPMEVENT5H 0x725
@@ -884,6 +891,28 @@ typedef enum RISCVException {
/* PMU related bits */
#define MIE_LCOFIE (1 << IRQ_PMU_OVF)
+#define MCYCLECFG_BIT_MINH BIT_ULL(62)
+#define MCYCLECFGH_BIT_MINH BIT(30)
+#define MCYCLECFG_BIT_SINH BIT_ULL(61)
+#define MCYCLECFGH_BIT_SINH BIT(29)
+#define MCYCLECFG_BIT_UINH BIT_ULL(60)
+#define MCYCLECFGH_BIT_UINH BIT(28)
+#define MCYCLECFG_BIT_VSINH BIT_ULL(59)
+#define MCYCLECFGH_BIT_VSINH BIT(27)
+#define MCYCLECFG_BIT_VUINH BIT_ULL(58)
+#define MCYCLECFGH_BIT_VUINH BIT(26)
+
+#define MINSTRETCFG_BIT_MINH BIT_ULL(62)
+#define MINSTRETCFGH_BIT_MINH BIT(30)
+#define MINSTRETCFG_BIT_SINH BIT_ULL(61)
+#define MINSTRETCFGH_BIT_SINH BIT(29)
+#define MINSTRETCFG_BIT_UINH BIT_ULL(60)
+#define MINSTRETCFGH_BIT_UINH BIT(28)
+#define MINSTRETCFG_BIT_VSINH BIT_ULL(59)
+#define MINSTRETCFGH_BIT_VSINH BIT(27)
+#define MINSTRETCFG_BIT_VUINH BIT_ULL(58)
+#define MINSTRETCFGH_BIT_VUINH BIT(26)
+
#define MHPMEVENT_BIT_OF BIT_ULL(63)
#define MHPMEVENTH_BIT_OF BIT(31)
#define MHPMEVENT_BIT_MINH BIT_ULL(62)
--
2.34.1
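A note on the definitions above: each *CFGH_BIT_* value is the RV32 upper-half view of the matching 64-bit *CFG_BIT_* value, i.e. bit n of mcyclecfg shows up as bit n-32 of mcyclecfgh. Below is a tiny standalone check of that relation, using locally defined BIT/BIT_ULL macros rather than QEMU's headers:

#include <assert.h>
#include <stdio.h>

#define BIT(n)     (1U << (n))
#define BIT_ULL(n) (1ULL << (n))

int main(void)
{
    /* MINH: bit 62 of the 64-bit register == bit 30 of the high half */
    assert((BIT_ULL(62) >> 32) == BIT(30));
    /* SINH / UINH / VSINH / VUINH follow the same pattern */
    assert((BIT_ULL(61) >> 32) == BIT(29));
    assert((BIT_ULL(60) >> 32) == BIT(28));
    assert((BIT_ULL(59) >> 32) == BIT(27));
    assert((BIT_ULL(58) >> 32) == BIT(26));
    printf("xCFGH bits are the upper-half view of the xCFG bits\n");
    return 0;
}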
* Re: [PATCH v7 04/11] target/riscv: Add cycle & instret privilege mode filtering definitions
2024-06-26 23:57 ` [PATCH v7 04/11] target/riscv: Add cycle & instret privilege mode filtering definitions Atish Patra
@ 2024-07-03 1:12 ` Alistair Francis
0 siblings, 0 replies; 27+ messages in thread
From: Alistair Francis @ 2024-07-03 1:12 UTC (permalink / raw)
To: Atish Patra
Cc: qemu-riscv, qemu-devel, Rajnesh Kanwal, palmer, liwei1518,
zhiwei_liu, bin.meng, dbarboza, alistair.francis, Kaiwen Xue
On Thu, Jun 27, 2024 at 9:58 AM Atish Patra <atishp@rivosinc.com> wrote:
>
> From: Kaiwen Xue <kaiwenx@rivosinc.com>
>
> This adds the definitions for ISA extension smcntrpmf.
>
> Signed-off-by: Kaiwen Xue <kaiwenx@rivosinc.com>
> Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
> Signed-off-by: Atish Patra <atishp@rivosinc.com>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Alistair
> ---
> target/riscv/cpu.h | 6 ++++++
> target/riscv/cpu_bits.h | 29 +++++++++++++++++++++++++++++
> 2 files changed, 35 insertions(+)
>
> diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
> index 46faefd24e09..c5d289e5f4b9 100644
> --- a/target/riscv/cpu.h
> +++ b/target/riscv/cpu.h
> @@ -339,6 +339,12 @@ struct CPUArchState {
>
> uint32_t mcountinhibit;
>
> + /* PMU cycle & instret privilege mode filtering */
> + target_ulong mcyclecfg;
> + target_ulong mcyclecfgh;
> + target_ulong minstretcfg;
> + target_ulong minstretcfgh;
> +
> /* PMU counter state */
> PMUCTRState pmu_ctrs[RV_MAX_MHPMCOUNTERS];
>
> diff --git a/target/riscv/cpu_bits.h b/target/riscv/cpu_bits.h
> index c257c5ed7dc9..5faa817453bb 100644
> --- a/target/riscv/cpu_bits.h
> +++ b/target/riscv/cpu_bits.h
> @@ -397,6 +397,10 @@
> /* Machine counter-inhibit register */
> #define CSR_MCOUNTINHIBIT 0x320
>
> +/* Machine counter configuration registers */
> +#define CSR_MCYCLECFG 0x321
> +#define CSR_MINSTRETCFG 0x322
> +
> #define CSR_MHPMEVENT3 0x323
> #define CSR_MHPMEVENT4 0x324
> #define CSR_MHPMEVENT5 0x325
> @@ -427,6 +431,9 @@
> #define CSR_MHPMEVENT30 0x33e
> #define CSR_MHPMEVENT31 0x33f
>
> +#define CSR_MCYCLECFGH 0x721
> +#define CSR_MINSTRETCFGH 0x722
> +
> #define CSR_MHPMEVENT3H 0x723
> #define CSR_MHPMEVENT4H 0x724
> #define CSR_MHPMEVENT5H 0x725
> @@ -884,6 +891,28 @@ typedef enum RISCVException {
> /* PMU related bits */
> #define MIE_LCOFIE (1 << IRQ_PMU_OVF)
>
> +#define MCYCLECFG_BIT_MINH BIT_ULL(62)
> +#define MCYCLECFGH_BIT_MINH BIT(30)
> +#define MCYCLECFG_BIT_SINH BIT_ULL(61)
> +#define MCYCLECFGH_BIT_SINH BIT(29)
> +#define MCYCLECFG_BIT_UINH BIT_ULL(60)
> +#define MCYCLECFGH_BIT_UINH BIT(28)
> +#define MCYCLECFG_BIT_VSINH BIT_ULL(59)
> +#define MCYCLECFGH_BIT_VSINH BIT(27)
> +#define MCYCLECFG_BIT_VUINH BIT_ULL(58)
> +#define MCYCLECFGH_BIT_VUINH BIT(26)
> +
> +#define MINSTRETCFG_BIT_MINH BIT_ULL(62)
> +#define MINSTRETCFGH_BIT_MINH BIT(30)
> +#define MINSTRETCFG_BIT_SINH BIT_ULL(61)
> +#define MINSTRETCFGH_BIT_SINH BIT(29)
> +#define MINSTRETCFG_BIT_UINH BIT_ULL(60)
> +#define MINSTRETCFGH_BIT_UINH BIT(28)
> +#define MINSTRETCFG_BIT_VSINH BIT_ULL(59)
> +#define MINSTRETCFGH_BIT_VSINH BIT(27)
> +#define MINSTRETCFG_BIT_VUINH BIT_ULL(58)
> +#define MINSTRETCFGH_BIT_VUINH BIT(26)
> +
> #define MHPMEVENT_BIT_OF BIT_ULL(63)
> #define MHPMEVENTH_BIT_OF BIT(31)
> #define MHPMEVENT_BIT_MINH BIT_ULL(62)
>
> --
> 2.34.1
>
>
* [PATCH v7 05/11] target/riscv: Add cycle & instret privilege mode filtering support
2024-06-26 23:57 [PATCH v7 00/11] Add RISC-V ISA extension smcntrpmf support Atish Patra
` (3 preceding siblings ...)
2024-06-26 23:57 ` [PATCH v7 04/11] target/riscv: Add cycle & instret privilege mode filtering definitions Atish Patra
@ 2024-06-26 23:57 ` Atish Patra
2024-07-03 1:19 ` Alistair Francis
2024-06-26 23:57 ` [PATCH v7 06/11] target/riscv: Implement privilege mode filtering for cycle/instret Atish Patra
` (5 subsequent siblings)
10 siblings, 1 reply; 27+ messages in thread
From: Atish Patra @ 2024-06-26 23:57 UTC (permalink / raw)
To: qemu-riscv, qemu-devel
Cc: Rajnesh Kanwal, Atish Patra, palmer, liwei1518, zhiwei_liu,
bin.meng, dbarboza, alistair.francis, Kaiwen Xue
From: Kaiwen Xue <kaiwenx@rivosinc.com>
QEMU only calculates dummy cycle and instruction counts, so there is no
actual means of stopping the icount in QEMU. Hence this patch merely
adds access to the cfg registers and has no actual effect on the
counting of the cycle and instret counters.
Signed-off-by: Atish Patra <atishp@rivosinc.com>
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
Signed-off-by: Kaiwen Xue <kaiwenx@rivosinc.com>
---
target/riscv/csr.c | 88 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 88 insertions(+)
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
index 3ad851707e5c..665c534db1a0 100644
--- a/target/riscv/csr.c
+++ b/target/riscv/csr.c
@@ -236,6 +236,24 @@ static RISCVException sscofpmf_32(CPURISCVState *env, int csrno)
return sscofpmf(env, csrno);
}
+static RISCVException smcntrpmf(CPURISCVState *env, int csrno)
+{
+ if (!riscv_cpu_cfg(env)->ext_smcntrpmf) {
+ return RISCV_EXCP_ILLEGAL_INST;
+ }
+
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException smcntrpmf_32(CPURISCVState *env, int csrno)
+{
+ if (riscv_cpu_mxl(env) != MXL_RV32) {
+ return RISCV_EXCP_ILLEGAL_INST;
+ }
+
+ return smcntrpmf(env, csrno);
+}
+
static RISCVException any(CPURISCVState *env, int csrno)
{
return RISCV_EXCP_NONE;
@@ -830,6 +848,62 @@ static RISCVException read_hpmcounterh(CPURISCVState *env, int csrno,
#else /* CONFIG_USER_ONLY */
+static RISCVException read_mcyclecfg(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ *val = env->mcyclecfg;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_mcyclecfg(CPURISCVState *env, int csrno,
+ target_ulong val)
+{
+ env->mcyclecfg = val;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException read_mcyclecfgh(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ *val = env->mcyclecfgh;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_mcyclecfgh(CPURISCVState *env, int csrno,
+ target_ulong val)
+{
+ env->mcyclecfgh = val;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException read_minstretcfg(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ *val = env->minstretcfg;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_minstretcfg(CPURISCVState *env, int csrno,
+ target_ulong val)
+{
+ env->minstretcfg = val;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException read_minstretcfgh(CPURISCVState *env, int csrno,
+ target_ulong *val)
+{
+ *val = env->minstretcfgh;
+ return RISCV_EXCP_NONE;
+}
+
+static RISCVException write_minstretcfgh(CPURISCVState *env, int csrno,
+ target_ulong val)
+{
+ env->minstretcfgh = val;
+ return RISCV_EXCP_NONE;
+}
+
static RISCVException read_mhpmevent(CPURISCVState *env, int csrno,
target_ulong *val)
{
@@ -5051,6 +5125,13 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
write_mcountinhibit,
.min_priv_ver = PRIV_VERSION_1_11_0 },
+ [CSR_MCYCLECFG] = { "mcyclecfg", smcntrpmf, read_mcyclecfg,
+ write_mcyclecfg,
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
+ [CSR_MINSTRETCFG] = { "minstretcfg", smcntrpmf, read_minstretcfg,
+ write_minstretcfg,
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
+
[CSR_MHPMEVENT3] = { "mhpmevent3", any, read_mhpmevent,
write_mhpmevent },
[CSR_MHPMEVENT4] = { "mhpmevent4", any, read_mhpmevent,
@@ -5110,6 +5191,13 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
[CSR_MHPMEVENT31] = { "mhpmevent31", any, read_mhpmevent,
write_mhpmevent },
+ [CSR_MCYCLECFGH] = { "mcyclecfgh", smcntrpmf_32, read_mcyclecfgh,
+ write_mcyclecfgh,
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
+ [CSR_MINSTRETCFGH] = { "minstretcfgh", smcntrpmf_32, read_minstretcfgh,
+ write_minstretcfgh,
+ .min_priv_ver = PRIV_VERSION_1_12_0 },
+
[CSR_MHPMEVENT3H] = { "mhpmevent3h", sscofpmf_32, read_mhpmeventh,
write_mhpmeventh,
.min_priv_ver = PRIV_VERSION_1_12_0 },
--
2.34.1
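The handlers in this patch only store the register values, but the semantics the inhibit bits are expected to gain in later patches of the series can be sketched on their own: an xINH bit, when set, stops the counter from advancing while the hart is in mode x. The helper below is an assumed illustration of that rule, not the implementation added later in the series; the bit macros are redefined locally so the sketch compiles standalone:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(n)          (1ULL << (n))
#define MCYCLECFG_BIT_MINH  BIT_ULL(62)
#define MCYCLECFG_BIT_SINH  BIT_ULL(61)
#define MCYCLECFG_BIT_UINH  BIT_ULL(60)
#define MCYCLECFG_BIT_VSINH BIT_ULL(59)
#define MCYCLECFG_BIT_VUINH BIT_ULL(58)

enum { PRV_U = 0, PRV_S = 1, PRV_M = 3 };

/* Should mcycle advance in the current (priv, virt) mode? */
static bool cycle_counting_enabled(uint64_t mcyclecfg, int priv, bool virt)
{
    if (priv == PRV_M) {
        return !(mcyclecfg & MCYCLECFG_BIT_MINH);
    }
    if (virt) {
        return !(mcyclecfg & (priv == PRV_S ? MCYCLECFG_BIT_VSINH
                                            : MCYCLECFG_BIT_VUINH));
    }
    return !(mcyclecfg & (priv == PRV_S ? MCYCLECFG_BIT_SINH
                                        : MCYCLECFG_BIT_UINH));
}

int main(void)
{
    uint64_t cfg = MCYCLECFG_BIT_UINH;   /* inhibit counting in U-mode only */

    printf("count in M-mode: %d\n", cycle_counting_enabled(cfg, PRV_M, false));
    printf("count in U-mode: %d\n", cycle_counting_enabled(cfg, PRV_U, false));
    return 0;
}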
* Re: [PATCH v7 05/11] target/riscv: Add cycle & instret privilege mode filtering support
2024-06-26 23:57 ` [PATCH v7 05/11] target/riscv: Add cycle & instret privilege mode filtering support Atish Patra
@ 2024-07-03 1:19 ` Alistair Francis
2024-07-10 7:38 ` Atish Kumar Patra
0 siblings, 1 reply; 27+ messages in thread
From: Alistair Francis @ 2024-07-03 1:19 UTC (permalink / raw)
To: Atish Patra
Cc: qemu-riscv, qemu-devel, Rajnesh Kanwal, palmer, liwei1518,
zhiwei_liu, bin.meng, dbarboza, alistair.francis, Kaiwen Xue
On Thu, Jun 27, 2024 at 10:00 AM Atish Patra <atishp@rivosinc.com> wrote:
>
> From: Kaiwen Xue <kaiwenx@rivosinc.com>
>
> QEMU only calculates dummy cycles and instructions, so there is no
> actual means to stop the icount in QEMU. Hence this patch merely adds
> the functionality of accessing the cfg registers, and cause no actual
> effects on the counting of cycle and instret counters.
>
> Signed-off-by: Atish Patra <atishp@rivosinc.com>
> Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
> Signed-off-by: Kaiwen Xue <kaiwenx@rivosinc.com>
> ---
> target/riscv/csr.c | 88 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
> 1 file changed, 88 insertions(+)
>
> diff --git a/target/riscv/csr.c b/target/riscv/csr.c
> index 3ad851707e5c..665c534db1a0 100644
> --- a/target/riscv/csr.c
> +++ b/target/riscv/csr.c
> @@ -236,6 +236,24 @@ static RISCVException sscofpmf_32(CPURISCVState *env, int csrno)
> return sscofpmf(env, csrno);
> }
>
> +static RISCVException smcntrpmf(CPURISCVState *env, int csrno)
> +{
> + if (!riscv_cpu_cfg(env)->ext_smcntrpmf) {
> + return RISCV_EXCP_ILLEGAL_INST;
> + }
> +
> + return RISCV_EXCP_NONE;
> +}
> +
> +static RISCVException smcntrpmf_32(CPURISCVState *env, int csrno)
> +{
> + if (riscv_cpu_mxl(env) != MXL_RV32) {
> + return RISCV_EXCP_ILLEGAL_INST;
> + }
> +
> + return smcntrpmf(env, csrno);
> +}
> +
> static RISCVException any(CPURISCVState *env, int csrno)
> {
> return RISCV_EXCP_NONE;
> @@ -830,6 +848,62 @@ static RISCVException read_hpmcounterh(CPURISCVState *env, int csrno,
>
> #else /* CONFIG_USER_ONLY */
>
> +static RISCVException read_mcyclecfg(CPURISCVState *env, int csrno,
> + target_ulong *val)
> +{
> + *val = env->mcyclecfg;
We don't do a good job of this in other places, but we should check
for RVU and RVS to determine if the bits can actually be set.
This is especially important for Hypervisor support (VS/VU-modes), as
that is often not supported, so we should report that here.
Alistair
> + return RISCV_EXCP_NONE;
> +}
> +
> +static RISCVException write_mcyclecfg(CPURISCVState *env, int csrno,
> + target_ulong val)
> +{
> + env->mcyclecfg = val;
> + return RISCV_EXCP_NONE;
> +}
> +
> +static RISCVException read_mcyclecfgh(CPURISCVState *env, int csrno,
> + target_ulong *val)
> +{
> + *val = env->mcyclecfgh;
> + return RISCV_EXCP_NONE;
> +}
> +
> +static RISCVException write_mcyclecfgh(CPURISCVState *env, int csrno,
> + target_ulong val)
> +{
> + env->mcyclecfgh = val;
> + return RISCV_EXCP_NONE;
> +}
> +
> +static RISCVException read_minstretcfg(CPURISCVState *env, int csrno,
> + target_ulong *val)
> +{
> + *val = env->minstretcfg;
> + return RISCV_EXCP_NONE;
> +}
> +
> +static RISCVException write_minstretcfg(CPURISCVState *env, int csrno,
> + target_ulong val)
> +{
> + env->minstretcfg = val;
> + return RISCV_EXCP_NONE;
> +}
> +
> +static RISCVException read_minstretcfgh(CPURISCVState *env, int csrno,
> + target_ulong *val)
> +{
> + *val = env->minstretcfgh;
> + return RISCV_EXCP_NONE;
> +}
> +
> +static RISCVException write_minstretcfgh(CPURISCVState *env, int csrno,
> + target_ulong val)
> +{
> + env->minstretcfgh = val;
> + return RISCV_EXCP_NONE;
> +}
> +
> static RISCVException read_mhpmevent(CPURISCVState *env, int csrno,
> target_ulong *val)
> {
> @@ -5051,6 +5125,13 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
> write_mcountinhibit,
> .min_priv_ver = PRIV_VERSION_1_11_0 },
>
> + [CSR_MCYCLECFG] = { "mcyclecfg", smcntrpmf, read_mcyclecfg,
> + write_mcyclecfg,
> + .min_priv_ver = PRIV_VERSION_1_12_0 },
> + [CSR_MINSTRETCFG] = { "minstretcfg", smcntrpmf, read_minstretcfg,
> + write_minstretcfg,
> + .min_priv_ver = PRIV_VERSION_1_12_0 },
> +
> [CSR_MHPMEVENT3] = { "mhpmevent3", any, read_mhpmevent,
> write_mhpmevent },
> [CSR_MHPMEVENT4] = { "mhpmevent4", any, read_mhpmevent,
> @@ -5110,6 +5191,13 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
> [CSR_MHPMEVENT31] = { "mhpmevent31", any, read_mhpmevent,
> write_mhpmevent },
>
> + [CSR_MCYCLECFGH] = { "mcyclecfgh", smcntrpmf_32, read_mcyclecfgh,
> + write_mcyclecfgh,
> + .min_priv_ver = PRIV_VERSION_1_12_0 },
> + [CSR_MINSTRETCFGH] = { "minstretcfgh", smcntrpmf_32, read_minstretcfgh,
> + write_minstretcfgh,
> + .min_priv_ver = PRIV_VERSION_1_12_0 },
> +
> [CSR_MHPMEVENT3H] = { "mhpmevent3h", sscofpmf_32, read_mhpmeventh,
> write_mhpmeventh,
> .min_priv_ver = PRIV_VERSION_1_12_0 },
>
> --
> 2.34.1
>
>
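Alistair's comment above is about WARL behaviour: inhibit bits for privilege modes the hart does not implement should read back as zero rather than being stored. A minimal standalone sketch of such a masked write follows; the ToyEnv struct stands in for the relevant CPURISCVState/misa state, the bit macros are redefined locally, and none of this is the code that eventually landed in the series:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(n)          (1ULL << (n))
#define MCYCLECFG_BIT_MINH  BIT_ULL(62)
#define MCYCLECFG_BIT_SINH  BIT_ULL(61)
#define MCYCLECFG_BIT_UINH  BIT_ULL(60)
#define MCYCLECFG_BIT_VSINH BIT_ULL(59)
#define MCYCLECFG_BIT_VUINH BIT_ULL(58)

/* Toy stand-in for the relevant parts of CPURISCVState/misa. */
typedef struct {
    bool has_rvu, has_rvs, has_rvh;
    uint64_t mcyclecfg;
} ToyEnv;

/* Only accept inhibit bits for modes the hart actually implements (WARL). */
static uint64_t mcyclecfg_writable_mask(const ToyEnv *env)
{
    uint64_t mask = MCYCLECFG_BIT_MINH;

    if (env->has_rvu) {
        mask |= MCYCLECFG_BIT_UINH;
    }
    if (env->has_rvs) {
        mask |= MCYCLECFG_BIT_SINH;
    }
    if (env->has_rvh) {
        mask |= MCYCLECFG_BIT_VSINH | MCYCLECFG_BIT_VUINH;
    }
    return mask;
}

static void write_mcyclecfg_masked(ToyEnv *env, uint64_t val)
{
    env->mcyclecfg = val & mcyclecfg_writable_mask(env);
}

int main(void)
{
    ToyEnv env = { .has_rvu = true, .has_rvs = true, .has_rvh = false };

    /* A write tries to set every inhibit bit; VSINH/VUINH are dropped. */
    write_mcyclecfg_masked(&env, MCYCLECFG_BIT_MINH | MCYCLECFG_BIT_SINH |
                                 MCYCLECFG_BIT_UINH | MCYCLECFG_BIT_VSINH |
                                 MCYCLECFG_BIT_VUINH);
    printf("stored mcyclecfg = 0x%016llx\n",
           (unsigned long long)env.mcyclecfg);
    return 0;
}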
* Re: [PATCH v7 05/11] target/riscv: Add cycle & instret privilege mode filtering support
2024-07-03 1:19 ` Alistair Francis
@ 2024-07-10 7:38 ` Atish Kumar Patra
0 siblings, 0 replies; 27+ messages in thread
From: Atish Kumar Patra @ 2024-07-10 7:38 UTC (permalink / raw)
To: Alistair Francis
Cc: qemu-riscv, qemu-devel, Rajnesh Kanwal, palmer, liwei1518,
zhiwei_liu, bin.meng, dbarboza, alistair.francis, Kaiwen Xue
On Tue, Jul 2, 2024 at 6:19 PM Alistair Francis <alistair23@gmail.com> wrote:
>
> On Thu, Jun 27, 2024 at 10:00 AM Atish Patra <atishp@rivosinc.com> wrote:
> >
> > From: Kaiwen Xue <kaiwenx@rivosinc.com>
> >
> > QEMU only calculates dummy cycles and instructions, so there is no
> > actual means to stop the icount in QEMU. Hence this patch merely adds
> > the functionality of accessing the cfg registers, and cause no actual
> > effects on the counting of cycle and instret counters.
> >
> > Signed-off-by: Atish Patra <atishp@rivosinc.com>
> > Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
> > Signed-off-by: Kaiwen Xue <kaiwenx@rivosinc.com>
> > ---
> > target/riscv/csr.c | 88 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
> > 1 file changed, 88 insertions(+)
> >
> > diff --git a/target/riscv/csr.c b/target/riscv/csr.c
> > index 3ad851707e5c..665c534db1a0 100644
> > --- a/target/riscv/csr.c
> > +++ b/target/riscv/csr.c
> > @@ -236,6 +236,24 @@ static RISCVException sscofpmf_32(CPURISCVState *env, int csrno)
> > return sscofpmf(env, csrno);
> > }
> >
> > +static RISCVException smcntrpmf(CPURISCVState *env, int csrno)
> > +{
> > + if (!riscv_cpu_cfg(env)->ext_smcntrpmf) {
> > + return RISCV_EXCP_ILLEGAL_INST;
> > + }
> > +
> > + return RISCV_EXCP_NONE;
> > +}
> > +
> > +static RISCVException smcntrpmf_32(CPURISCVState *env, int csrno)
> > +{
> > + if (riscv_cpu_mxl(env) != MXL_RV32) {
> > + return RISCV_EXCP_ILLEGAL_INST;
> > + }
> > +
> > + return smcntrpmf(env, csrno);
> > +}
> > +
> > static RISCVException any(CPURISCVState *env, int csrno)
> > {
> > return RISCV_EXCP_NONE;
> > @@ -830,6 +848,62 @@ static RISCVException read_hpmcounterh(CPURISCVState *env, int csrno,
> >
> > #else /* CONFIG_USER_ONLY */
> >
> > +static RISCVException read_mcyclecfg(CPURISCVState *env, int csrno,
> > + target_ulong *val)
> > +{
> > + *val = env->mcyclecfg;
>
> We don't do a good job of this in other places, but we should check
> for RVU and RVS to determine if the bits can actually be set.
>
> This is especially important for Hypervisor support (VS/VU-modes), as
> that is often not supported so we should report that here
>
Agreed. I have fixed that here and added a patch that applies the same
check while updating the INH bits for mhpmevent as well.
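A rough sketch of that check (not the actual follow-up patch; it assumes
the MCYCLECFG_BIT_* macros from this series plus the existing
riscv_has_ext() helper, and only shows the RV64 layout, since on RV32 the
upper filter bits live in mcyclecfgh):

/*
 * Sketch only: mask off filter bits for privilege modes the hart does
 * not implement, so the CSR keeps WARL behaviour.
 */
static uint64_t mcyclecfg_filter_mask(CPURISCVState *env)
{
    uint64_t mask = MCYCLECFG_BIT_MINH;

    if (riscv_has_ext(env, RVS)) {
        mask |= MCYCLECFG_BIT_SINH;
    }
    if (riscv_has_ext(env, RVU)) {
        mask |= MCYCLECFG_BIT_UINH;
    }
    if (riscv_has_ext(env, RVH)) {
        mask |= MCYCLECFG_BIT_VSINH | MCYCLECFG_BIT_VUINH;
    }
    return mask;
}

static RISCVException write_mcyclecfg(CPURISCVState *env, int csrno,
                                      target_ulong val)
{
    /* Keep only the bits whose modes actually exist on this hart. */
    env->mcyclecfg = val & mcyclecfg_filter_mask(env);
    return RISCV_EXCP_NONE;
}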
> Alistair
>
> > + return RISCV_EXCP_NONE;
> > +}
> > +
> > +static RISCVException write_mcyclecfg(CPURISCVState *env, int csrno,
> > + target_ulong val)
> > +{
> > + env->mcyclecfg = val;
> > + return RISCV_EXCP_NONE;
> > +}
> > +
> > +static RISCVException read_mcyclecfgh(CPURISCVState *env, int csrno,
> > + target_ulong *val)
> > +{
> > + *val = env->mcyclecfgh;
> > + return RISCV_EXCP_NONE;
> > +}
> > +
> > +static RISCVException write_mcyclecfgh(CPURISCVState *env, int csrno,
> > + target_ulong val)
> > +{
> > + env->mcyclecfgh = val;
> > + return RISCV_EXCP_NONE;
> > +}
> > +
> > +static RISCVException read_minstretcfg(CPURISCVState *env, int csrno,
> > + target_ulong *val)
> > +{
> > + *val = env->minstretcfg;
> > + return RISCV_EXCP_NONE;
> > +}
> > +
> > +static RISCVException write_minstretcfg(CPURISCVState *env, int csrno,
> > + target_ulong val)
> > +{
> > + env->minstretcfg = val;
> > + return RISCV_EXCP_NONE;
> > +}
> > +
> > +static RISCVException read_minstretcfgh(CPURISCVState *env, int csrno,
> > + target_ulong *val)
> > +{
> > + *val = env->minstretcfgh;
> > + return RISCV_EXCP_NONE;
> > +}
> > +
> > +static RISCVException write_minstretcfgh(CPURISCVState *env, int csrno,
> > + target_ulong val)
> > +{
> > + env->minstretcfgh = val;
> > + return RISCV_EXCP_NONE;
> > +}
> > +
> > static RISCVException read_mhpmevent(CPURISCVState *env, int csrno,
> > target_ulong *val)
> > {
> > @@ -5051,6 +5125,13 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
> > write_mcountinhibit,
> > .min_priv_ver = PRIV_VERSION_1_11_0 },
> >
> > + [CSR_MCYCLECFG] = { "mcyclecfg", smcntrpmf, read_mcyclecfg,
> > + write_mcyclecfg,
> > + .min_priv_ver = PRIV_VERSION_1_12_0 },
> > + [CSR_MINSTRETCFG] = { "minstretcfg", smcntrpmf, read_minstretcfg,
> > + write_minstretcfg,
> > + .min_priv_ver = PRIV_VERSION_1_12_0 },
> > +
> > [CSR_MHPMEVENT3] = { "mhpmevent3", any, read_mhpmevent,
> > write_mhpmevent },
> > [CSR_MHPMEVENT4] = { "mhpmevent4", any, read_mhpmevent,
> > @@ -5110,6 +5191,13 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
> > [CSR_MHPMEVENT31] = { "mhpmevent31", any, read_mhpmevent,
> > write_mhpmevent },
> >
> > + [CSR_MCYCLECFGH] = { "mcyclecfgh", smcntrpmf_32, read_mcyclecfgh,
> > + write_mcyclecfgh,
> > + .min_priv_ver = PRIV_VERSION_1_12_0 },
> > + [CSR_MINSTRETCFGH] = { "minstretcfgh", smcntrpmf_32, read_minstretcfgh,
> > + write_minstretcfgh,
> > + .min_priv_ver = PRIV_VERSION_1_12_0 },
> > +
> > [CSR_MHPMEVENT3H] = { "mhpmevent3h", sscofpmf_32, read_mhpmeventh,
> > write_mhpmeventh,
> > .min_priv_ver = PRIV_VERSION_1_12_0 },
> >
> > --
> > 2.34.1
> >
> >
^ permalink raw reply [flat|nested] 27+ messages in thread
* [PATCH v7 06/11] target/riscv: Implement privilege mode filtering for cycle/instret
2024-06-26 23:57 [PATCH v7 00/11] Add RISC-V ISA extension smcntrpmf support Atish Patra
` (4 preceding siblings ...)
2024-06-26 23:57 ` [PATCH v7 05/11] target/riscv: Add cycle & instret privilege mode filtering support Atish Patra
@ 2024-06-26 23:57 ` Atish Patra
2024-07-01 19:29 ` Daniel Henrique Barboza
2024-07-03 1:25 ` Alistair Francis
2024-06-26 23:57 ` [PATCH v7 07/11] target/riscv: Save counter values during countinhibit update Atish Patra
` (4 subsequent siblings)
10 siblings, 2 replies; 27+ messages in thread
From: Atish Patra @ 2024-06-26 23:57 UTC (permalink / raw)
To: qemu-riscv, qemu-devel
Cc: Rajnesh Kanwal, Atish Patra, palmer, liwei1518, zhiwei_liu,
bin.meng, dbarboza, alistair.francis
Privilege mode filtering can also be emulated for cycle/instret by
tracking host_ticks/icount during each privilege mode switch. This
patch implements that for both cycle/instret and mhpmcounters. The
first one requires Smcntrpmf while the other one requires Sscofpmf
to be enabled.
The cycle/instret are still computed using host ticks when icount
is not enabled. Otherwise, they are computed using raw icount which
is more accurate in icount mode.
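In other words, every mode switch charges the ticks that elapsed since the
last switch to the mode being left, and a read adds up only the buckets
whose inhibit bit is clear. A minimal standalone model of that bookkeeping
(a sketch only; it leaves out the V=1 buckets and the in-flight delta that
the real code handles):

#include <assert.h>
#include <stdint.h>

enum { PRV_U = 0, PRV_S = 1, PRV_M = 3, NUM_PRV = 4 };

static uint64_t bucket[NUM_PRV];    /* ticks accumulated per mode */
static uint64_t snapshot[NUM_PRV];  /* tick value when the mode was entered */

/* Charge the elapsed ticks to the mode being left, snapshot the new one. */
static void switch_priv(uint64_t now, int old_priv, int new_priv)
{
    bucket[old_priv] += now - snapshot[old_priv];
    snapshot[new_priv] = now;
}

/* A read sums only the buckets whose inhibit bit is clear. */
static uint64_t read_cycle(uint32_t inh_mask)
{
    uint64_t val = 0;

    for (int prv = 0; prv < NUM_PRV; prv++) {
        if (!(inh_mask & (1u << prv))) {
            val += bucket[prv];
        }
    }
    return val;
}

int main(void)
{
    switch_priv(100, PRV_U, PRV_S);   /* U ran for ticks   0..100 */
    switch_priv(130, PRV_S, PRV_U);   /* S ran for ticks 100..130 */
    switch_priv(150, PRV_U, PRV_M);   /* U ran for ticks 130..150 */

    /* Count U-mode only: inhibit M and S. */
    assert(read_cycle((1u << PRV_M) | (1u << PRV_S)) == 120);
    return 0;
}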
Co-Developed-by: Rajnesh Kanwal <rkanwal@rivosinc.com>
Signed-off-by: Atish Patra <atishp@rivosinc.com>
Signed-off-by: Rajnesh Kanwal <rkanwal@rivosinc.com>
---
target/riscv/cpu.h | 11 +++++
target/riscv/cpu_bits.h | 5 ++
target/riscv/cpu_helper.c | 9 +++-
target/riscv/csr.c | 117 ++++++++++++++++++++++++++++++++--------------
target/riscv/pmu.c | 92 ++++++++++++++++++++++++++++++++++++
target/riscv/pmu.h | 2 +
6 files changed, 199 insertions(+), 37 deletions(-)
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
index c5d289e5f4b9..d56d640b06be 100644
--- a/target/riscv/cpu.h
+++ b/target/riscv/cpu.h
@@ -158,6 +158,15 @@ typedef struct PMUCTRState {
target_ulong irq_overflow_left;
} PMUCTRState;
+typedef struct PMUFixedCtrState {
+ /* Track cycle and icount for each privilege mode */
+ uint64_t counter[4];
+ uint64_t counter_prev[4];
+ /* Track cycle and icount for each privilege mode when V = 1*/
+ uint64_t counter_virt[2];
+ uint64_t counter_virt_prev[2];
+} PMUFixedCtrState;
+
struct CPUArchState {
target_ulong gpr[32];
target_ulong gprh[32]; /* 64 top bits of the 128-bit registers */
@@ -354,6 +363,8 @@ struct CPUArchState {
/* PMU event selector configured values for RV32 */
target_ulong mhpmeventh_val[RV_MAX_MHPMEVENTS];
+ PMUFixedCtrState pmu_fixed_ctrs[2];
+
target_ulong sscratch;
target_ulong mscratch;
diff --git a/target/riscv/cpu_bits.h b/target/riscv/cpu_bits.h
index 5faa817453bb..18f19615e4fe 100644
--- a/target/riscv/cpu_bits.h
+++ b/target/riscv/cpu_bits.h
@@ -926,6 +926,11 @@ typedef enum RISCVException {
#define MHPMEVENT_BIT_VUINH BIT_ULL(58)
#define MHPMEVENTH_BIT_VUINH BIT(26)
+#define MHPMEVENT_FILTER_MASK (MHPMEVENT_BIT_MINH | \
+ MHPMEVENT_BIT_SINH | \
+ MHPMEVENT_BIT_UINH | \
+ MHPMEVENT_BIT_VSINH | \
+ MHPMEVENT_BIT_VUINH)
#define MHPMEVENT_SSCOF_MASK _ULL(0xFFFF000000000000)
#define MHPMEVENT_IDX_MASK 0xFFFFF
#define MHPMEVENT_SSCOF_RESVD 16
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
index 10d3fdaed376..395a1d914061 100644
--- a/target/riscv/cpu_helper.c
+++ b/target/riscv/cpu_helper.c
@@ -695,9 +695,14 @@ void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv, bool virt_en)
{
g_assert(newpriv <= PRV_M && newpriv != PRV_RESERVED);
- if (icount_enabled() && newpriv != env->priv) {
- riscv_itrigger_update_priv(env);
+ if (newpriv != env->priv || env->virt_enabled != virt_en) {
+ if (icount_enabled()) {
+ riscv_itrigger_update_priv(env);
+ }
+
+ riscv_pmu_update_fixed_ctrs(env, newpriv, virt_en);
}
+
/* tlb_flush is unnecessary as mode is contained in mmu_idx */
env->priv = newpriv;
env->xl = cpu_recompute_xl(env);
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
index 665c534db1a0..c292d036bcb2 100644
--- a/target/riscv/csr.c
+++ b/target/riscv/csr.c
@@ -788,36 +788,16 @@ static RISCVException write_vcsr(CPURISCVState *env, int csrno,
return RISCV_EXCP_NONE;
}
+#if defined(CONFIG_USER_ONLY)
/* User Timers and Counters */
-static target_ulong get_ticks(bool shift, bool instructions)
+static target_ulong get_ticks(bool shift)
{
- int64_t val;
- target_ulong result;
-
-#if !defined(CONFIG_USER_ONLY)
- if (icount_enabled()) {
- if (instructions) {
- val = icount_get_raw();
- } else {
- val = icount_get();
- }
- } else {
- val = cpu_get_host_ticks();
- }
-#else
- val = cpu_get_host_ticks();
-#endif
-
- if (shift) {
- result = val >> 32;
- } else {
- result = val;
- }
+ int64_t val = cpu_get_host_ticks();
+ target_ulong result = shift ? val >> 32 : val;
return result;
}
-#if defined(CONFIG_USER_ONLY)
static RISCVException read_time(CPURISCVState *env, int csrno,
target_ulong *val)
{
@@ -835,14 +815,14 @@ static RISCVException read_timeh(CPURISCVState *env, int csrno,
static RISCVException read_hpmcounter(CPURISCVState *env, int csrno,
target_ulong *val)
{
- *val = get_ticks(false, (csrno == CSR_INSTRET));
+ *val = get_ticks(false);
return RISCV_EXCP_NONE;
}
static RISCVException read_hpmcounterh(CPURISCVState *env, int csrno,
target_ulong *val)
{
- *val = get_ticks(true, (csrno == CSR_INSTRETH));
+ *val = get_ticks(true);
return RISCV_EXCP_NONE;
}
@@ -956,17 +936,82 @@ static RISCVException write_mhpmeventh(CPURISCVState *env, int csrno,
return RISCV_EXCP_NONE;
}
+static target_ulong riscv_pmu_ctr_get_fixed_counters_val(CPURISCVState *env,
+ int counter_idx,
+ bool upper_half)
+{
+ int inst = riscv_pmu_ctr_monitor_instructions(env, counter_idx);
+ uint64_t *counter_arr_virt = env->pmu_fixed_ctrs[inst].counter_virt;
+ uint64_t *counter_arr = env->pmu_fixed_ctrs[inst].counter;
+ target_ulong result = 0;
+ uint64_t curr_val = 0;
+ uint64_t cfg_val = 0;
+
+ if (counter_idx == 0) {
+ cfg_val = upper_half ? ((uint64_t)env->mcyclecfgh << 32) :
+ env->mcyclecfg;
+ } else if (counter_idx == 2) {
+ cfg_val = upper_half ? ((uint64_t)env->minstretcfgh << 32) :
+ env->minstretcfg;
+ } else {
+ cfg_val = upper_half ?
+ ((uint64_t)env->mhpmeventh_val[counter_idx] << 32) :
+ env->mhpmevent_val[counter_idx];
+ cfg_val &= MHPMEVENT_FILTER_MASK;
+ }
+
+ if (!cfg_val) {
+ if (icount_enabled()) {
+ curr_val = inst ? icount_get_raw() : icount_get();
+ } else {
+ curr_val = cpu_get_host_ticks();
+ }
+
+ goto done;
+ }
+
+ if (!(cfg_val & MCYCLECFG_BIT_MINH)) {
+ curr_val += counter_arr[PRV_M];
+ }
+
+ if (!(cfg_val & MCYCLECFG_BIT_SINH)) {
+ curr_val += counter_arr[PRV_S];
+ }
+
+ if (!(cfg_val & MCYCLECFG_BIT_UINH)) {
+ curr_val += counter_arr[PRV_U];
+ }
+
+ if (!(cfg_val & MCYCLECFG_BIT_VSINH)) {
+ curr_val += counter_arr_virt[PRV_S];
+ }
+
+ if (!(cfg_val & MCYCLECFG_BIT_VUINH)) {
+ curr_val += counter_arr_virt[PRV_U];
+ }
+
+done:
+ if (riscv_cpu_mxl(env) == MXL_RV32) {
+ result = upper_half ? curr_val >> 32 : curr_val;
+ } else {
+ result = curr_val;
+ }
+
+ return result;
+}
+
static RISCVException write_mhpmcounter(CPURISCVState *env, int csrno,
target_ulong val)
{
int ctr_idx = csrno - CSR_MCYCLE;
PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
uint64_t mhpmctr_val = val;
- bool instr = riscv_pmu_ctr_monitor_instructions(env, ctr_idx);
counter->mhpmcounter_val = val;
- if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) || instr) {
- counter->mhpmcounter_prev = get_ticks(false, instr);
+ if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
+ riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) {
+ counter->mhpmcounter_prev = riscv_pmu_ctr_get_fixed_counters_val(env,
+ ctr_idx, false);
if (ctr_idx > 2) {
if (riscv_cpu_mxl(env) == MXL_RV32) {
mhpmctr_val = mhpmctr_val |
@@ -989,12 +1034,13 @@ static RISCVException write_mhpmcounterh(CPURISCVState *env, int csrno,
PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
uint64_t mhpmctr_val = counter->mhpmcounter_val;
uint64_t mhpmctrh_val = val;
- bool instr = riscv_pmu_ctr_monitor_instructions(env, ctr_idx);
counter->mhpmcounterh_val = val;
mhpmctr_val = mhpmctr_val | (mhpmctrh_val << 32);
- if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) || instr) {
- counter->mhpmcounterh_prev = get_ticks(true, instr);
+ if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
+ riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) {
+ counter->mhpmcounterh_prev = riscv_pmu_ctr_get_fixed_counters_val(env,
+ ctr_idx, true);
if (ctr_idx > 2) {
riscv_pmu_setup_timer(env, mhpmctr_val, ctr_idx);
}
@@ -1013,7 +1059,6 @@ static RISCVException riscv_pmu_read_ctr(CPURISCVState *env, target_ulong *val,
counter->mhpmcounter_prev;
target_ulong ctr_val = upper_half ? counter->mhpmcounterh_val :
counter->mhpmcounter_val;
- bool instr = riscv_pmu_ctr_monitor_instructions(env, ctr_idx);
if (get_field(env->mcountinhibit, BIT(ctr_idx))) {
/*
@@ -1034,8 +1079,10 @@ static RISCVException riscv_pmu_read_ctr(CPURISCVState *env, target_ulong *val,
* The kernel computes the perf delta by subtracting the current value from
* the value it initialized previously (ctr_val).
*/
- if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) || instr) {
- *val = get_ticks(upper_half, instr) - ctr_prev + ctr_val;
+ if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
+ riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) {
+ *val = riscv_pmu_ctr_get_fixed_counters_val(env, ctr_idx, upper_half) -
+ ctr_prev + ctr_val;
} else {
*val = ctr_val;
}
diff --git a/target/riscv/pmu.c b/target/riscv/pmu.c
index 0e7d58b8a5c2..ac648cff8d7c 100644
--- a/target/riscv/pmu.c
+++ b/target/riscv/pmu.c
@@ -19,6 +19,7 @@
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
+#include "qemu/timer.h"
#include "cpu.h"
#include "pmu.h"
#include "sysemu/cpu-timers.h"
@@ -176,6 +177,97 @@ static int riscv_pmu_incr_ctr_rv64(RISCVCPU *cpu, uint32_t ctr_idx)
return 0;
}
+/*
+ * Information needed to update counters:
+ * new_priv, new_virt: To correctly save starting snapshot for the newly
+ * started mode. Look at array being indexed with newprv.
+ * old_priv, old_virt: To correctly select previous snapshot for old priv
+ * and compute delta. Also to select correct counter
+ * to inc. Look at arrays being indexed with env->priv.
+ *
+ * To avoid the complexity of calling this function, we assume that
+ * env->priv and env->virt_enabled contain old priv and old virt and
+ * new priv and new virt values are passed in as arguments.
+ */
+static void riscv_pmu_icount_update_priv(CPURISCVState *env,
+ target_ulong newpriv, bool new_virt)
+{
+ uint64_t *snapshot_prev, *snapshot_new;
+ uint64_t current_icount;
+ uint64_t *counter_arr;
+ uint64_t delta;
+
+ if (icount_enabled()) {
+ current_icount = icount_get_raw();
+ } else {
+ current_icount = cpu_get_host_ticks();
+ }
+
+ if (env->virt_enabled) {
+ counter_arr = env->pmu_fixed_ctrs[1].counter_virt;
+ snapshot_prev = env->pmu_fixed_ctrs[1].counter_virt_prev;
+ } else {
+ counter_arr = env->pmu_fixed_ctrs[1].counter;
+ snapshot_prev = env->pmu_fixed_ctrs[1].counter_prev;
+ }
+
+ if (new_virt) {
+ snapshot_new = env->pmu_fixed_ctrs[1].counter_virt_prev;
+ } else {
+ snapshot_new = env->pmu_fixed_ctrs[1].counter_prev;
+ }
+
+ /*
+ * new_priv can be same as env->priv. So we need to calculate
+ * delta first before updating snapshot_new[new_priv].
+ */
+ delta = current_icount - snapshot_prev[env->priv];
+ snapshot_new[newpriv] = current_icount;
+
+ counter_arr[env->priv] += delta;
+}
+
+static void riscv_pmu_cycle_update_priv(CPURISCVState *env,
+ target_ulong newpriv, bool new_virt)
+{
+ uint64_t *snapshot_prev, *snapshot_new;
+ uint64_t current_ticks;
+ uint64_t *counter_arr;
+ uint64_t delta;
+
+ if (icount_enabled()) {
+ current_ticks = icount_get();
+ } else {
+ current_ticks = cpu_get_host_ticks();
+ }
+
+ if (env->virt_enabled) {
+ counter_arr = env->pmu_fixed_ctrs[0].counter_virt;
+ snapshot_prev = env->pmu_fixed_ctrs[0].counter_virt_prev;
+ } else {
+ counter_arr = env->pmu_fixed_ctrs[0].counter;
+ snapshot_prev = env->pmu_fixed_ctrs[0].counter_prev;
+ }
+
+ if (new_virt) {
+ snapshot_new = env->pmu_fixed_ctrs[0].counter_virt_prev;
+ } else {
+ snapshot_new = env->pmu_fixed_ctrs[0].counter_prev;
+ }
+
+ delta = current_ticks - snapshot_prev[env->priv];
+ snapshot_new[newpriv] = current_ticks;
+
+ counter_arr[env->priv] += delta;
+}
+
+void riscv_pmu_update_fixed_ctrs(CPURISCVState *env, target_ulong newpriv,
+ bool new_virt)
+{
+ riscv_pmu_cycle_update_priv(env, newpriv, new_virt);
+ riscv_pmu_icount_update_priv(env, newpriv, new_virt);
+}
+
int riscv_pmu_incr_ctr(RISCVCPU *cpu, enum riscv_pmu_event_idx event_idx)
{
uint32_t ctr_idx;
diff --git a/target/riscv/pmu.h b/target/riscv/pmu.h
index 7c0ad661e050..ca40cfeed647 100644
--- a/target/riscv/pmu.h
+++ b/target/riscv/pmu.h
@@ -34,5 +34,7 @@ int riscv_pmu_incr_ctr(RISCVCPU *cpu, enum riscv_pmu_event_idx event_idx);
void riscv_pmu_generate_fdt_node(void *fdt, uint32_t cmask, char *pmu_name);
int riscv_pmu_setup_timer(CPURISCVState *env, uint64_t value,
uint32_t ctr_idx);
+void riscv_pmu_update_fixed_ctrs(CPURISCVState *env, target_ulong newpriv,
+ bool new_virt);
#endif /* RISCV_PMU_H */
--
2.34.1
^ permalink raw reply related [flat|nested] 27+ messages in thread
* Re: [PATCH v7 06/11] target/riscv: Implement privilege mode filtering for cycle/instret
2024-06-26 23:57 ` [PATCH v7 06/11] target/riscv: Implement privilege mode filtering for cycle/instret Atish Patra
@ 2024-07-01 19:29 ` Daniel Henrique Barboza
2024-07-03 1:25 ` Alistair Francis
1 sibling, 0 replies; 27+ messages in thread
From: Daniel Henrique Barboza @ 2024-07-01 19:29 UTC (permalink / raw)
To: Atish Patra, qemu-riscv, qemu-devel
Cc: Rajnesh Kanwal, palmer, liwei1518, zhiwei_liu, bin.meng,
alistair.francis
On 6/26/24 8:57 PM, Atish Patra wrote:
> Privilege mode filtering can also be emulated for cycle/instret by
> tracking host_ticks/icount during each privilege mode switch. This
> patch implements that for both cycle/instret and mhpmcounters. The
> first one requires Smcntrpmf while the other one requires Sscofpmf
> to be enabled.
>
> The cycle/instret are still computed using host ticks when icount
> is not enabled. Otherwise, they are computed using raw icount which
> is more accurate in icount mode.
>
> Co-Developed-by: Rajnesh Kanwal <rkanwal@rivosinc.com>
> Signed-off-by: Atish Patra <atishp@rivosinc.com>
> Signed-off-by: Rajnesh Kanwal <rkanwal@rivosinc.com>
> ---
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
> target/riscv/cpu.h | 11 +++++
> target/riscv/cpu_bits.h | 5 ++
> target/riscv/cpu_helper.c | 9 +++-
> target/riscv/csr.c | 117 ++++++++++++++++++++++++++++++++--------------
> target/riscv/pmu.c | 92 ++++++++++++++++++++++++++++++++++++
> target/riscv/pmu.h | 2 +
> 6 files changed, 199 insertions(+), 37 deletions(-)
>
> diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
> index c5d289e5f4b9..d56d640b06be 100644
> --- a/target/riscv/cpu.h
> +++ b/target/riscv/cpu.h
> @@ -158,6 +158,15 @@ typedef struct PMUCTRState {
> target_ulong irq_overflow_left;
> } PMUCTRState;
>
> +typedef struct PMUFixedCtrState {
> + /* Track cycle and icount for each privilege mode */
> + uint64_t counter[4];
> + uint64_t counter_prev[4];
> + /* Track cycle and icount for each privilege mode when V = 1*/
> + uint64_t counter_virt[2];
> + uint64_t counter_virt_prev[2];
> +} PMUFixedCtrState;
> +
> struct CPUArchState {
> target_ulong gpr[32];
> target_ulong gprh[32]; /* 64 top bits of the 128-bit registers */
> @@ -354,6 +363,8 @@ struct CPUArchState {
> /* PMU event selector configured values for RV32 */
> target_ulong mhpmeventh_val[RV_MAX_MHPMEVENTS];
>
> + PMUFixedCtrState pmu_fixed_ctrs[2];
> +
> target_ulong sscratch;
> target_ulong mscratch;
>
> diff --git a/target/riscv/cpu_bits.h b/target/riscv/cpu_bits.h
> index 5faa817453bb..18f19615e4fe 100644
> --- a/target/riscv/cpu_bits.h
> +++ b/target/riscv/cpu_bits.h
> @@ -926,6 +926,11 @@ typedef enum RISCVException {
> #define MHPMEVENT_BIT_VUINH BIT_ULL(58)
> #define MHPMEVENTH_BIT_VUINH BIT(26)
>
> +#define MHPMEVENT_FILTER_MASK (MHPMEVENT_BIT_MINH | \
> + MHPMEVENT_BIT_SINH | \
> + MHPMEVENT_BIT_UINH | \
> + MHPMEVENT_BIT_VSINH | \
> + MHPMEVENT_BIT_VUINH)
> #define MHPMEVENT_SSCOF_MASK _ULL(0xFFFF000000000000)
> #define MHPMEVENT_IDX_MASK 0xFFFFF
> #define MHPMEVENT_SSCOF_RESVD 16
> diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
> index 10d3fdaed376..395a1d914061 100644
> --- a/target/riscv/cpu_helper.c
> +++ b/target/riscv/cpu_helper.c
> @@ -695,9 +695,14 @@ void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv, bool virt_en)
> {
> g_assert(newpriv <= PRV_M && newpriv != PRV_RESERVED);
>
> - if (icount_enabled() && newpriv != env->priv) {
> - riscv_itrigger_update_priv(env);
> + if (newpriv != env->priv || env->virt_enabled != virt_en) {
> + if (icount_enabled()) {
> + riscv_itrigger_update_priv(env);
> + }
> +
> + riscv_pmu_update_fixed_ctrs(env, newpriv, virt_en);
> }
> +
> /* tlb_flush is unnecessary as mode is contained in mmu_idx */
> env->priv = newpriv;
> env->xl = cpu_recompute_xl(env);
> diff --git a/target/riscv/csr.c b/target/riscv/csr.c
> index 665c534db1a0..c292d036bcb2 100644
> --- a/target/riscv/csr.c
> +++ b/target/riscv/csr.c
> @@ -788,36 +788,16 @@ static RISCVException write_vcsr(CPURISCVState *env, int csrno,
> return RISCV_EXCP_NONE;
> }
>
> +#if defined(CONFIG_USER_ONLY)
> /* User Timers and Counters */
> -static target_ulong get_ticks(bool shift, bool instructions)
> +static target_ulong get_ticks(bool shift)
> {
> - int64_t val;
> - target_ulong result;
> -
> -#if !defined(CONFIG_USER_ONLY)
> - if (icount_enabled()) {
> - if (instructions) {
> - val = icount_get_raw();
> - } else {
> - val = icount_get();
> - }
> - } else {
> - val = cpu_get_host_ticks();
> - }
> -#else
> - val = cpu_get_host_ticks();
> -#endif
> -
> - if (shift) {
> - result = val >> 32;
> - } else {
> - result = val;
> - }
> + int64_t val = cpu_get_host_ticks();
> + target_ulong result = shift ? val >> 32 : val;
>
> return result;
> }
>
> -#if defined(CONFIG_USER_ONLY)
> static RISCVException read_time(CPURISCVState *env, int csrno,
> target_ulong *val)
> {
> @@ -835,14 +815,14 @@ static RISCVException read_timeh(CPURISCVState *env, int csrno,
> static RISCVException read_hpmcounter(CPURISCVState *env, int csrno,
> target_ulong *val)
> {
> - *val = get_ticks(false, (csrno == CSR_INSTRET));
> + *val = get_ticks(false);
> return RISCV_EXCP_NONE;
> }
>
> static RISCVException read_hpmcounterh(CPURISCVState *env, int csrno,
> target_ulong *val)
> {
> - *val = get_ticks(true, (csrno == CSR_INSTRETH));
> + *val = get_ticks(true);
> return RISCV_EXCP_NONE;
> }
>
> @@ -956,17 +936,82 @@ static RISCVException write_mhpmeventh(CPURISCVState *env, int csrno,
> return RISCV_EXCP_NONE;
> }
>
> +static target_ulong riscv_pmu_ctr_get_fixed_counters_val(CPURISCVState *env,
> + int counter_idx,
> + bool upper_half)
> +{
> + int inst = riscv_pmu_ctr_monitor_instructions(env, counter_idx);
> + uint64_t *counter_arr_virt = env->pmu_fixed_ctrs[inst].counter_virt;
> + uint64_t *counter_arr = env->pmu_fixed_ctrs[inst].counter;
> + target_ulong result = 0;
> + uint64_t curr_val = 0;
> + uint64_t cfg_val = 0;
> +
> + if (counter_idx == 0) {
> + cfg_val = upper_half ? ((uint64_t)env->mcyclecfgh << 32) :
> + env->mcyclecfg;
> + } else if (counter_idx == 2) {
> + cfg_val = upper_half ? ((uint64_t)env->minstretcfgh << 32) :
> + env->minstretcfg;
> + } else {
> + cfg_val = upper_half ?
> + ((uint64_t)env->mhpmeventh_val[counter_idx] << 32) :
> + env->mhpmevent_val[counter_idx];
> + cfg_val &= MHPMEVENT_FILTER_MASK;
> + }
> +
> + if (!cfg_val) {
> + if (icount_enabled()) {
> + curr_val = inst ? icount_get_raw() : icount_get();
> + } else {
> + curr_val = cpu_get_host_ticks();
> + }
> +
> + goto done;
> + }
> +
> + if (!(cfg_val & MCYCLECFG_BIT_MINH)) {
> + curr_val += counter_arr[PRV_M];
> + }
> +
> + if (!(cfg_val & MCYCLECFG_BIT_SINH)) {
> + curr_val += counter_arr[PRV_S];
> + }
> +
> + if (!(cfg_val & MCYCLECFG_BIT_UINH)) {
> + curr_val += counter_arr[PRV_U];
> + }
> +
> + if (!(cfg_val & MCYCLECFG_BIT_VSINH)) {
> + curr_val += counter_arr_virt[PRV_S];
> + }
> +
> + if (!(cfg_val & MCYCLECFG_BIT_VUINH)) {
> + curr_val += counter_arr_virt[PRV_U];
> + }
> +
> +done:
> + if (riscv_cpu_mxl(env) == MXL_RV32) {
> + result = upper_half ? curr_val >> 32 : curr_val;
> + } else {
> + result = curr_val;
> + }
> +
> + return result;
> +}
> +
> static RISCVException write_mhpmcounter(CPURISCVState *env, int csrno,
> target_ulong val)
> {
> int ctr_idx = csrno - CSR_MCYCLE;
> PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
> uint64_t mhpmctr_val = val;
> - bool instr = riscv_pmu_ctr_monitor_instructions(env, ctr_idx);
>
> counter->mhpmcounter_val = val;
> - if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) || instr) {
> - counter->mhpmcounter_prev = get_ticks(false, instr);
> + if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
> + riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) {
> + counter->mhpmcounter_prev = riscv_pmu_ctr_get_fixed_counters_val(env,
> + ctr_idx, false);
> if (ctr_idx > 2) {
> if (riscv_cpu_mxl(env) == MXL_RV32) {
> mhpmctr_val = mhpmctr_val |
> @@ -989,12 +1034,13 @@ static RISCVException write_mhpmcounterh(CPURISCVState *env, int csrno,
> PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
> uint64_t mhpmctr_val = counter->mhpmcounter_val;
> uint64_t mhpmctrh_val = val;
> - bool instr = riscv_pmu_ctr_monitor_instructions(env, ctr_idx);
>
> counter->mhpmcounterh_val = val;
> mhpmctr_val = mhpmctr_val | (mhpmctrh_val << 32);
> - if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) || instr) {
> - counter->mhpmcounterh_prev = get_ticks(true, instr);
> + if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
> + riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) {
> + counter->mhpmcounterh_prev = riscv_pmu_ctr_get_fixed_counters_val(env,
> + ctr_idx, true);
> if (ctr_idx > 2) {
> riscv_pmu_setup_timer(env, mhpmctr_val, ctr_idx);
> }
> @@ -1013,7 +1059,6 @@ static RISCVException riscv_pmu_read_ctr(CPURISCVState *env, target_ulong *val,
> counter->mhpmcounter_prev;
> target_ulong ctr_val = upper_half ? counter->mhpmcounterh_val :
> counter->mhpmcounter_val;
> - bool instr = riscv_pmu_ctr_monitor_instructions(env, ctr_idx);
>
> if (get_field(env->mcountinhibit, BIT(ctr_idx))) {
> /*
> @@ -1034,8 +1079,10 @@ static RISCVException riscv_pmu_read_ctr(CPURISCVState *env, target_ulong *val,
> * The kernel computes the perf delta by subtracting the current value from
> * the value it initialized previously (ctr_val).
> */
> - if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) || instr) {
> - *val = get_ticks(upper_half, instr) - ctr_prev + ctr_val;
> + if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
> + riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) {
> + *val = riscv_pmu_ctr_get_fixed_counters_val(env, ctr_idx, upper_half) -
> + ctr_prev + ctr_val;
> } else {
> *val = ctr_val;
> }
> diff --git a/target/riscv/pmu.c b/target/riscv/pmu.c
> index 0e7d58b8a5c2..ac648cff8d7c 100644
> --- a/target/riscv/pmu.c
> +++ b/target/riscv/pmu.c
> @@ -19,6 +19,7 @@
> #include "qemu/osdep.h"
> #include "qemu/log.h"
> #include "qemu/error-report.h"
> +#include "qemu/timer.h"
> #include "cpu.h"
> #include "pmu.h"
> #include "sysemu/cpu-timers.h"
> @@ -176,6 +177,97 @@ static int riscv_pmu_incr_ctr_rv64(RISCVCPU *cpu, uint32_t ctr_idx)
> return 0;
> }
>
> +/*
> + * Information needed to update counters:
> + * new_priv, new_virt: To correctly save starting snapshot for the newly
> + * started mode. Look at array being indexed with newprv.
> + * old_priv, old_virt: To correctly select previous snapshot for old priv
> + * and compute delta. Also to select correct counter
> + * to inc. Look at arrays being indexed with env->priv.
> + *
> + * To avoid the complexity of calling this function, we assume that
> + * env->priv and env->virt_enabled contain old priv and old virt and
> + * new priv and new virt values are passed in as arguments.
> + */
> +static void riscv_pmu_icount_update_priv(CPURISCVState *env,
> + target_ulong newpriv, bool new_virt)
> +{
> + uint64_t *snapshot_prev, *snapshot_new;
> + uint64_t current_icount;
> + uint64_t *counter_arr;
> + uint64_t delta;
> +
> + if (icount_enabled()) {
> + current_icount = icount_get_raw();
> + } else {
> + current_icount = cpu_get_host_ticks();
> + }
> +
> + if (env->virt_enabled) {
> + counter_arr = env->pmu_fixed_ctrs[1].counter_virt;
> + snapshot_prev = env->pmu_fixed_ctrs[1].counter_virt_prev;
> + } else {
> + counter_arr = env->pmu_fixed_ctrs[1].counter;
> + snapshot_prev = env->pmu_fixed_ctrs[1].counter_prev;
> + }
> +
> + if (new_virt) {
> + snapshot_new = env->pmu_fixed_ctrs[1].counter_virt_prev;
> + } else {
> + snapshot_new = env->pmu_fixed_ctrs[1].counter_prev;
> + }
> +
> + /*
> + * new_priv can be same as env->priv. So we need to calculate
> + * delta first before updating snapshot_new[new_priv].
> + */
> + delta = current_icount - snapshot_prev[env->priv];
> + snapshot_new[newpriv] = current_icount;
> +
> + counter_arr[env->priv] += delta;
> +}
> +
> +static void riscv_pmu_cycle_update_priv(CPURISCVState *env,
> + target_ulong newpriv, bool new_virt)
> +{
> + uint64_t *snapshot_prev, *snapshot_new;
> + uint64_t current_ticks;
> + uint64_t *counter_arr;
> + uint64_t delta;
> +
> + if (icount_enabled()) {
> + current_ticks = icount_get();
> + } else {
> + current_ticks = cpu_get_host_ticks();
> + }
> +
> + if (env->virt_enabled) {
> + counter_arr = env->pmu_fixed_ctrs[0].counter_virt;
> + snapshot_prev = env->pmu_fixed_ctrs[0].counter_virt_prev;
> + } else {
> + counter_arr = env->pmu_fixed_ctrs[0].counter;
> + snapshot_prev = env->pmu_fixed_ctrs[0].counter_prev;
> + }
> +
> + if (new_virt) {
> + snapshot_new = env->pmu_fixed_ctrs[0].counter_virt_prev;
> + } else {
> + snapshot_new = env->pmu_fixed_ctrs[0].counter_prev;
> + }
> +
> + delta = current_ticks - snapshot_prev[env->priv];
> + snapshot_new[newpriv] = current_ticks;
> +
> + counter_arr[env->priv] += delta;
> +}
> +
> +void riscv_pmu_update_fixed_ctrs(CPURISCVState *env, target_ulong newpriv,
> + bool new_virt)
> +{
> + riscv_pmu_cycle_update_priv(env, newpriv, new_virt);
> + riscv_pmu_icount_update_priv(env, newpriv, new_virt);
> +}
> +
> int riscv_pmu_incr_ctr(RISCVCPU *cpu, enum riscv_pmu_event_idx event_idx)
> {
> uint32_t ctr_idx;
> diff --git a/target/riscv/pmu.h b/target/riscv/pmu.h
> index 7c0ad661e050..ca40cfeed647 100644
> --- a/target/riscv/pmu.h
> +++ b/target/riscv/pmu.h
> @@ -34,5 +34,7 @@ int riscv_pmu_incr_ctr(RISCVCPU *cpu, enum riscv_pmu_event_idx event_idx);
> void riscv_pmu_generate_fdt_node(void *fdt, uint32_t cmask, char *pmu_name);
> int riscv_pmu_setup_timer(CPURISCVState *env, uint64_t value,
> uint32_t ctr_idx);
> +void riscv_pmu_update_fixed_ctrs(CPURISCVState *env, target_ulong newpriv,
> + bool new_virt);
>
> #endif /* RISCV_PMU_H */
>
^ permalink raw reply [flat|nested] 27+ messages in thread
* Re: [PATCH v7 06/11] target/riscv: Implement privilege mode filtering for cycle/instret
2024-06-26 23:57 ` [PATCH v7 06/11] target/riscv: Implement privilege mode filtering for cycle/instret Atish Patra
2024-07-01 19:29 ` Daniel Henrique Barboza
@ 2024-07-03 1:25 ` Alistair Francis
1 sibling, 0 replies; 27+ messages in thread
From: Alistair Francis @ 2024-07-03 1:25 UTC (permalink / raw)
To: Atish Patra
Cc: qemu-riscv, qemu-devel, Rajnesh Kanwal, palmer, liwei1518,
zhiwei_liu, bin.meng, dbarboza, alistair.francis
On Thu, Jun 27, 2024 at 9:59 AM Atish Patra <atishp@rivosinc.com> wrote:
>
> Privilege mode filtering can also be emulated for cycle/instret by
> tracking host_ticks/icount during each privilege mode switch. This
> patch implements that for both cycle/instret and mhpmcounters. The
> first one requires Smcntrpmf while the other one requires Sscofpmf
> to be enabled.
>
> The cycle/instret are still computed using host ticks when icount
> is not enabled. Otherwise, they are computed using raw icount which
> is more accurate in icount mode.
>
> Co-Developed-by: Rajnesh Kanwal <rkanwal@rivosinc.com>
> Signed-off-by: Atish Patra <atishp@rivosinc.com>
> Signed-off-by: Rajnesh Kanwal <rkanwal@rivosinc.com>
Acked-by: Alistair Francis <alistair.francis@wdc.com>
Alistair
> ---
> target/riscv/cpu.h | 11 +++++
> target/riscv/cpu_bits.h | 5 ++
> target/riscv/cpu_helper.c | 9 +++-
> target/riscv/csr.c | 117 ++++++++++++++++++++++++++++++++--------------
> target/riscv/pmu.c | 92 ++++++++++++++++++++++++++++++++++++
> target/riscv/pmu.h | 2 +
> 6 files changed, 199 insertions(+), 37 deletions(-)
>
> diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
> index c5d289e5f4b9..d56d640b06be 100644
> --- a/target/riscv/cpu.h
> +++ b/target/riscv/cpu.h
> @@ -158,6 +158,15 @@ typedef struct PMUCTRState {
> target_ulong irq_overflow_left;
> } PMUCTRState;
>
> +typedef struct PMUFixedCtrState {
> + /* Track cycle and icount for each privilege mode */
> + uint64_t counter[4];
> + uint64_t counter_prev[4];
> + /* Track cycle and icount for each privilege mode when V = 1*/
> + uint64_t counter_virt[2];
> + uint64_t counter_virt_prev[2];
> +} PMUFixedCtrState;
> +
> struct CPUArchState {
> target_ulong gpr[32];
> target_ulong gprh[32]; /* 64 top bits of the 128-bit registers */
> @@ -354,6 +363,8 @@ struct CPUArchState {
> /* PMU event selector configured values for RV32 */
> target_ulong mhpmeventh_val[RV_MAX_MHPMEVENTS];
>
> + PMUFixedCtrState pmu_fixed_ctrs[2];
> +
> target_ulong sscratch;
> target_ulong mscratch;
>
> diff --git a/target/riscv/cpu_bits.h b/target/riscv/cpu_bits.h
> index 5faa817453bb..18f19615e4fe 100644
> --- a/target/riscv/cpu_bits.h
> +++ b/target/riscv/cpu_bits.h
> @@ -926,6 +926,11 @@ typedef enum RISCVException {
> #define MHPMEVENT_BIT_VUINH BIT_ULL(58)
> #define MHPMEVENTH_BIT_VUINH BIT(26)
>
> +#define MHPMEVENT_FILTER_MASK (MHPMEVENT_BIT_MINH | \
> + MHPMEVENT_BIT_SINH | \
> + MHPMEVENT_BIT_UINH | \
> + MHPMEVENT_BIT_VSINH | \
> + MHPMEVENT_BIT_VUINH)
> #define MHPMEVENT_SSCOF_MASK _ULL(0xFFFF000000000000)
> #define MHPMEVENT_IDX_MASK 0xFFFFF
> #define MHPMEVENT_SSCOF_RESVD 16
> diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
> index 10d3fdaed376..395a1d914061 100644
> --- a/target/riscv/cpu_helper.c
> +++ b/target/riscv/cpu_helper.c
> @@ -695,9 +695,14 @@ void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv, bool virt_en)
> {
> g_assert(newpriv <= PRV_M && newpriv != PRV_RESERVED);
>
> - if (icount_enabled() && newpriv != env->priv) {
> - riscv_itrigger_update_priv(env);
> + if (newpriv != env->priv || env->virt_enabled != virt_en) {
> + if (icount_enabled()) {
> + riscv_itrigger_update_priv(env);
> + }
> +
> + riscv_pmu_update_fixed_ctrs(env, newpriv, virt_en);
> }
> +
> /* tlb_flush is unnecessary as mode is contained in mmu_idx */
> env->priv = newpriv;
> env->xl = cpu_recompute_xl(env);
> diff --git a/target/riscv/csr.c b/target/riscv/csr.c
> index 665c534db1a0..c292d036bcb2 100644
> --- a/target/riscv/csr.c
> +++ b/target/riscv/csr.c
> @@ -788,36 +788,16 @@ static RISCVException write_vcsr(CPURISCVState *env, int csrno,
> return RISCV_EXCP_NONE;
> }
>
> +#if defined(CONFIG_USER_ONLY)
> /* User Timers and Counters */
> -static target_ulong get_ticks(bool shift, bool instructions)
> +static target_ulong get_ticks(bool shift)
> {
> - int64_t val;
> - target_ulong result;
> -
> -#if !defined(CONFIG_USER_ONLY)
> - if (icount_enabled()) {
> - if (instructions) {
> - val = icount_get_raw();
> - } else {
> - val = icount_get();
> - }
> - } else {
> - val = cpu_get_host_ticks();
> - }
> -#else
> - val = cpu_get_host_ticks();
> -#endif
> -
> - if (shift) {
> - result = val >> 32;
> - } else {
> - result = val;
> - }
> + int64_t val = cpu_get_host_ticks();
> + target_ulong result = shift ? val >> 32 : val;
>
> return result;
> }
>
> -#if defined(CONFIG_USER_ONLY)
> static RISCVException read_time(CPURISCVState *env, int csrno,
> target_ulong *val)
> {
> @@ -835,14 +815,14 @@ static RISCVException read_timeh(CPURISCVState *env, int csrno,
> static RISCVException read_hpmcounter(CPURISCVState *env, int csrno,
> target_ulong *val)
> {
> - *val = get_ticks(false, (csrno == CSR_INSTRET));
> + *val = get_ticks(false);
> return RISCV_EXCP_NONE;
> }
>
> static RISCVException read_hpmcounterh(CPURISCVState *env, int csrno,
> target_ulong *val)
> {
> - *val = get_ticks(true, (csrno == CSR_INSTRETH));
> + *val = get_ticks(true);
> return RISCV_EXCP_NONE;
> }
>
> @@ -956,17 +936,82 @@ static RISCVException write_mhpmeventh(CPURISCVState *env, int csrno,
> return RISCV_EXCP_NONE;
> }
>
> +static target_ulong riscv_pmu_ctr_get_fixed_counters_val(CPURISCVState *env,
> + int counter_idx,
> + bool upper_half)
> +{
> + int inst = riscv_pmu_ctr_monitor_instructions(env, counter_idx);
> + uint64_t *counter_arr_virt = env->pmu_fixed_ctrs[inst].counter_virt;
> + uint64_t *counter_arr = env->pmu_fixed_ctrs[inst].counter;
> + target_ulong result = 0;
> + uint64_t curr_val = 0;
> + uint64_t cfg_val = 0;
> +
> + if (counter_idx == 0) {
> + cfg_val = upper_half ? ((uint64_t)env->mcyclecfgh << 32) :
> + env->mcyclecfg;
> + } else if (counter_idx == 2) {
> + cfg_val = upper_half ? ((uint64_t)env->minstretcfgh << 32) :
> + env->minstretcfg;
> + } else {
> + cfg_val = upper_half ?
> + ((uint64_t)env->mhpmeventh_val[counter_idx] << 32) :
> + env->mhpmevent_val[counter_idx];
> + cfg_val &= MHPMEVENT_FILTER_MASK;
> + }
> +
> + if (!cfg_val) {
> + if (icount_enabled()) {
> + curr_val = inst ? icount_get_raw() : icount_get();
> + } else {
> + curr_val = cpu_get_host_ticks();
> + }
> +
> + goto done;
> + }
> +
> + if (!(cfg_val & MCYCLECFG_BIT_MINH)) {
> + curr_val += counter_arr[PRV_M];
> + }
> +
> + if (!(cfg_val & MCYCLECFG_BIT_SINH)) {
> + curr_val += counter_arr[PRV_S];
> + }
> +
> + if (!(cfg_val & MCYCLECFG_BIT_UINH)) {
> + curr_val += counter_arr[PRV_U];
> + }
> +
> + if (!(cfg_val & MCYCLECFG_BIT_VSINH)) {
> + curr_val += counter_arr_virt[PRV_S];
> + }
> +
> + if (!(cfg_val & MCYCLECFG_BIT_VUINH)) {
> + curr_val += counter_arr_virt[PRV_U];
> + }
> +
> +done:
> + if (riscv_cpu_mxl(env) == MXL_RV32) {
> + result = upper_half ? curr_val >> 32 : curr_val;
> + } else {
> + result = curr_val;
> + }
> +
> + return result;
> +}
> +
> static RISCVException write_mhpmcounter(CPURISCVState *env, int csrno,
> target_ulong val)
> {
> int ctr_idx = csrno - CSR_MCYCLE;
> PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
> uint64_t mhpmctr_val = val;
> - bool instr = riscv_pmu_ctr_monitor_instructions(env, ctr_idx);
>
> counter->mhpmcounter_val = val;
> - if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) || instr) {
> - counter->mhpmcounter_prev = get_ticks(false, instr);
> + if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
> + riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) {
> + counter->mhpmcounter_prev = riscv_pmu_ctr_get_fixed_counters_val(env,
> + ctr_idx, false);
> if (ctr_idx > 2) {
> if (riscv_cpu_mxl(env) == MXL_RV32) {
> mhpmctr_val = mhpmctr_val |
> @@ -989,12 +1034,13 @@ static RISCVException write_mhpmcounterh(CPURISCVState *env, int csrno,
> PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
> uint64_t mhpmctr_val = counter->mhpmcounter_val;
> uint64_t mhpmctrh_val = val;
> - bool instr = riscv_pmu_ctr_monitor_instructions(env, ctr_idx);
>
> counter->mhpmcounterh_val = val;
> mhpmctr_val = mhpmctr_val | (mhpmctrh_val << 32);
> - if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) || instr) {
> - counter->mhpmcounterh_prev = get_ticks(true, instr);
> + if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
> + riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) {
> + counter->mhpmcounterh_prev = riscv_pmu_ctr_get_fixed_counters_val(env,
> + ctr_idx, true);
> if (ctr_idx > 2) {
> riscv_pmu_setup_timer(env, mhpmctr_val, ctr_idx);
> }
> @@ -1013,7 +1059,6 @@ static RISCVException riscv_pmu_read_ctr(CPURISCVState *env, target_ulong *val,
> counter->mhpmcounter_prev;
> target_ulong ctr_val = upper_half ? counter->mhpmcounterh_val :
> counter->mhpmcounter_val;
> - bool instr = riscv_pmu_ctr_monitor_instructions(env, ctr_idx);
>
> if (get_field(env->mcountinhibit, BIT(ctr_idx))) {
> /*
> @@ -1034,8 +1079,10 @@ static RISCVException riscv_pmu_read_ctr(CPURISCVState *env, target_ulong *val,
> * The kernel computes the perf delta by subtracting the current value from
> * the value it initialized previously (ctr_val).
> */
> - if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) || instr) {
> - *val = get_ticks(upper_half, instr) - ctr_prev + ctr_val;
> + if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
> + riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) {
> + *val = riscv_pmu_ctr_get_fixed_counters_val(env, ctr_idx, upper_half) -
> + ctr_prev + ctr_val;
> } else {
> *val = ctr_val;
> }
> diff --git a/target/riscv/pmu.c b/target/riscv/pmu.c
> index 0e7d58b8a5c2..ac648cff8d7c 100644
> --- a/target/riscv/pmu.c
> +++ b/target/riscv/pmu.c
> @@ -19,6 +19,7 @@
> #include "qemu/osdep.h"
> #include "qemu/log.h"
> #include "qemu/error-report.h"
> +#include "qemu/timer.h"
> #include "cpu.h"
> #include "pmu.h"
> #include "sysemu/cpu-timers.h"
> @@ -176,6 +177,97 @@ static int riscv_pmu_incr_ctr_rv64(RISCVCPU *cpu, uint32_t ctr_idx)
> return 0;
> }
>
> +/*
> + * Information needed to update counters:
> + * new_priv, new_virt: To correctly save starting snapshot for the newly
> + * started mode. Look at array being indexed with newprv.
> + * old_priv, old_virt: To correctly select previous snapshot for old priv
> + * and compute delta. Also to select correct counter
> + * to inc. Look at arrays being indexed with env->priv.
> + *
> + * To avoid the complexity of calling this function, we assume that
> + * env->priv and env->virt_enabled contain old priv and old virt and
> + * new priv and new virt values are passed in as arguments.
> + */
> +static void riscv_pmu_icount_update_priv(CPURISCVState *env,
> + target_ulong newpriv, bool new_virt)
> +{
> + uint64_t *snapshot_prev, *snapshot_new;
> + uint64_t current_icount;
> + uint64_t *counter_arr;
> + uint64_t delta;
> +
> + if (icount_enabled()) {
> + current_icount = icount_get_raw();
> + } else {
> + current_icount = cpu_get_host_ticks();
> + }
> +
> + if (env->virt_enabled) {
> + counter_arr = env->pmu_fixed_ctrs[1].counter_virt;
> + snapshot_prev = env->pmu_fixed_ctrs[1].counter_virt_prev;
> + } else {
> + counter_arr = env->pmu_fixed_ctrs[1].counter;
> + snapshot_prev = env->pmu_fixed_ctrs[1].counter_prev;
> + }
> +
> + if (new_virt) {
> + snapshot_new = env->pmu_fixed_ctrs[1].counter_virt_prev;
> + } else {
> + snapshot_new = env->pmu_fixed_ctrs[1].counter_prev;
> + }
> +
> + /*
> + * new_priv can be same as env->priv. So we need to calculate
> + * delta first before updating snapshot_new[new_priv].
> + */
> + delta = current_icount - snapshot_prev[env->priv];
> + snapshot_new[newpriv] = current_icount;
> +
> + counter_arr[env->priv] += delta;
> +}
> +
> +static void riscv_pmu_cycle_update_priv(CPURISCVState *env,
> + target_ulong newpriv, bool new_virt)
> +{
> + uint64_t *snapshot_prev, *snapshot_new;
> + uint64_t current_ticks;
> + uint64_t *counter_arr;
> + uint64_t delta;
> +
> + if (icount_enabled()) {
> + current_ticks = icount_get();
> + } else {
> + current_ticks = cpu_get_host_ticks();
> + }
> +
> + if (env->virt_enabled) {
> + counter_arr = env->pmu_fixed_ctrs[0].counter_virt;
> + snapshot_prev = env->pmu_fixed_ctrs[0].counter_virt_prev;
> + } else {
> + counter_arr = env->pmu_fixed_ctrs[0].counter;
> + snapshot_prev = env->pmu_fixed_ctrs[0].counter_prev;
> + }
> +
> + if (new_virt) {
> + snapshot_new = env->pmu_fixed_ctrs[0].counter_virt_prev;
> + } else {
> + snapshot_new = env->pmu_fixed_ctrs[0].counter_prev;
> + }
> +
> + delta = current_ticks - snapshot_prev[env->priv];
> + snapshot_new[newpriv] = current_ticks;
> +
> + counter_arr[env->priv] += delta;
> +}
> +
> +void riscv_pmu_update_fixed_ctrs(CPURISCVState *env, target_ulong newpriv,
> + bool new_virt)
> +{
> + riscv_pmu_cycle_update_priv(env, newpriv, new_virt);
> + riscv_pmu_icount_update_priv(env, newpriv, new_virt);
> +}
> +
> int riscv_pmu_incr_ctr(RISCVCPU *cpu, enum riscv_pmu_event_idx event_idx)
> {
> uint32_t ctr_idx;
> diff --git a/target/riscv/pmu.h b/target/riscv/pmu.h
> index 7c0ad661e050..ca40cfeed647 100644
> --- a/target/riscv/pmu.h
> +++ b/target/riscv/pmu.h
> @@ -34,5 +34,7 @@ int riscv_pmu_incr_ctr(RISCVCPU *cpu, enum riscv_pmu_event_idx event_idx);
> void riscv_pmu_generate_fdt_node(void *fdt, uint32_t cmask, char *pmu_name);
> int riscv_pmu_setup_timer(CPURISCVState *env, uint64_t value,
> uint32_t ctr_idx);
> +void riscv_pmu_update_fixed_ctrs(CPURISCVState *env, target_ulong newpriv,
> + bool new_virt);
>
> #endif /* RISCV_PMU_H */
>
> --
> 2.34.1
>
>
^ permalink raw reply [flat|nested] 27+ messages in thread
* [PATCH v7 07/11] target/riscv: Save counter values during countinhibit update
2024-06-26 23:57 [PATCH v7 00/11] Add RISC-V ISA extension smcntrpmf support Atish Patra
` (5 preceding siblings ...)
2024-06-26 23:57 ` [PATCH v7 06/11] target/riscv: Implement privilege mode filtering for cycle/instret Atish Patra
@ 2024-06-26 23:57 ` Atish Patra
2024-07-01 19:34 ` Daniel Henrique Barboza
2024-07-03 2:03 ` Alistair Francis
2024-06-26 23:57 ` [PATCH v7 08/11] target/riscv: Enforce WARL behavior for scounteren/hcounteren Atish Patra
` (3 subsequent siblings)
10 siblings, 2 replies; 27+ messages in thread
From: Atish Patra @ 2024-06-26 23:57 UTC (permalink / raw)
To: qemu-riscv, qemu-devel
Cc: Rajnesh Kanwal, Atish Patra, palmer, liwei1518, zhiwei_liu,
bin.meng, dbarboza, alistair.francis
Currently, if a counter monitoring cycle/instret is stopped via
mcountinhibit, we only update the state, and the value is saved
during the next read. This is not accurate, as the read may happen
many cycles after the counter is stopped. Ideally, the read should
return the value saved when the counter was stopped.
Thus, save the value of the counter during the inhibit update
operation and return that value during the read if the corresponding
bit in mcountinhibit is set.
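The effect is that the inhibit write folds the live delta into the stored
value once, and later reads return that frozen value. A small sketch of
just that arithmetic (not the QEMU code):

#include <assert.h>
#include <stdint.h>

struct ctr {
    uint64_t val;   /* architectural value, frozen while inhibited */
    uint64_t prev;  /* live tick snapshot taken when val was set */
};

/* On writing the inhibit bit, fold in everything elapsed so far. */
static void stop_counter(struct ctr *c, uint64_t live_ticks)
{
    c->val += live_ticks - c->prev;
    c->prev = live_ticks;
}

/* Reads return the frozen value while inhibited, a moving one otherwise. */
static uint64_t read_counter(const struct ctr *c, uint64_t live_ticks,
                             int inhibited)
{
    return inhibited ? c->val : c->val + (live_ticks - c->prev);
}

int main(void)
{
    struct ctr c = { .val = 5, .prev = 1000 };

    stop_counter(&c, 1042);                  /* inhibit written at t=1042 */
    assert(read_counter(&c, 9999, 1) == 47); /* later reads stay frozen   */
    return 0;
}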
Signed-off-by: Atish Patra <atishp@rivosinc.com>
---
target/riscv/cpu.h | 1 -
target/riscv/csr.c | 34 ++++++++++++++++++++++------------
target/riscv/machine.c | 5 ++---
3 files changed, 24 insertions(+), 16 deletions(-)
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
index d56d640b06be..91fe2a46ba35 100644
--- a/target/riscv/cpu.h
+++ b/target/riscv/cpu.h
@@ -153,7 +153,6 @@ typedef struct PMUCTRState {
target_ulong mhpmcounter_prev;
/* Snapshort value of a counter in RV32 */
target_ulong mhpmcounterh_prev;
- bool started;
/* Value beyond UINT32_MAX/UINT64_MAX before overflow interrupt trigger */
target_ulong irq_overflow_left;
} PMUCTRState;
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
index c292d036bcb2..e4adfa324efe 100644
--- a/target/riscv/csr.c
+++ b/target/riscv/csr.c
@@ -1062,17 +1062,11 @@ static RISCVException riscv_pmu_read_ctr(CPURISCVState *env, target_ulong *val,
if (get_field(env->mcountinhibit, BIT(ctr_idx))) {
/*
- * Counter should not increment if inhibit bit is set. We can't really
- * stop the icount counting. Just return the counter value written by
- * the supervisor to indicate that counter was not incremented.
+ * Counter should not increment if inhibit bit is set. Just return the
+ * current counter value.
*/
- if (!counter->started) {
- *val = ctr_val;
- return RISCV_EXCP_NONE;
- } else {
- /* Mark that the counter has been stopped */
- counter->started = false;
- }
+ *val = ctr_val;
+ return RISCV_EXCP_NONE;
}
/*
@@ -2114,9 +2108,25 @@ static RISCVException write_mcountinhibit(CPURISCVState *env, int csrno,
/* Check if any other counter is also monitoring cycles/instructions */
for (cidx = 0; cidx < RV_MAX_MHPMCOUNTERS; cidx++) {
- if (!get_field(env->mcountinhibit, BIT(cidx))) {
counter = &env->pmu_ctrs[cidx];
- counter->started = true;
+ if (get_field(env->mcountinhibit, BIT(cidx)) && (val & BIT(cidx))) {
+ /*
+ * Update the counter value for cycle/instret as we can't stop the
+ * host ticks. But we should show the current value at this moment.
+ */
+ if (riscv_pmu_ctr_monitor_cycles(env, cidx) ||
+ riscv_pmu_ctr_monitor_instructions(env, cidx)) {
+ counter->mhpmcounter_val =
+ riscv_pmu_ctr_get_fixed_counters_val(env, cidx, false) -
+ counter->mhpmcounter_prev +
+ counter->mhpmcounter_val;
+ if (riscv_cpu_mxl(env) == MXL_RV32) {
+ counter->mhpmcounterh_val =
+ riscv_pmu_ctr_get_fixed_counters_val(env, cidx, true) -
+ counter->mhpmcounterh_prev +
+ counter->mhpmcounterh_val;
+ }
+ }
}
}
diff --git a/target/riscv/machine.c b/target/riscv/machine.c
index 76f2150f78b5..492c2c6d9d79 100644
--- a/target/riscv/machine.c
+++ b/target/riscv/machine.c
@@ -320,15 +320,14 @@ static bool pmu_needed(void *opaque)
static const VMStateDescription vmstate_pmu_ctr_state = {
.name = "cpu/pmu",
- .version_id = 1,
- .minimum_version_id = 1,
+ .version_id = 2,
+ .minimum_version_id = 2,
.needed = pmu_needed,
.fields = (const VMStateField[]) {
VMSTATE_UINTTL(mhpmcounter_val, PMUCTRState),
VMSTATE_UINTTL(mhpmcounterh_val, PMUCTRState),
VMSTATE_UINTTL(mhpmcounter_prev, PMUCTRState),
VMSTATE_UINTTL(mhpmcounterh_prev, PMUCTRState),
- VMSTATE_BOOL(started, PMUCTRState),
VMSTATE_END_OF_LIST()
}
};
--
2.34.1
^ permalink raw reply related [flat|nested] 27+ messages in thread
* Re: [PATCH v7 07/11] target/riscv: Save counter values during countinhibit update
2024-06-26 23:57 ` [PATCH v7 07/11] target/riscv: Save counter values during countinhibit update Atish Patra
@ 2024-07-01 19:34 ` Daniel Henrique Barboza
2024-07-03 2:03 ` Alistair Francis
1 sibling, 0 replies; 27+ messages in thread
From: Daniel Henrique Barboza @ 2024-07-01 19:34 UTC (permalink / raw)
To: Atish Patra, qemu-riscv, qemu-devel
Cc: Rajnesh Kanwal, palmer, liwei1518, zhiwei_liu, bin.meng,
alistair.francis
On 6/26/24 8:57 PM, Atish Patra wrote:
> Currently, if a counter monitoring cycle/instret is stopped via
> mcountinhibit, we only update the state, and the value is saved
> during the next read. This is not accurate, as the read may happen
> many cycles after the counter is stopped. Ideally, the read should
> return the value saved when the counter was stopped.
>
> Thus, save the value of the counter during the inhibit update
> operation and return that value during the read if the corresponding
> bit in mcountinhibit is set.
>
> Signed-off-by: Atish Patra <atishp@rivosinc.com>
> ---
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
> target/riscv/cpu.h | 1 -
> target/riscv/csr.c | 34 ++++++++++++++++++++++------------
> target/riscv/machine.c | 5 ++---
> 3 files changed, 24 insertions(+), 16 deletions(-)
>
> diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
> index d56d640b06be..91fe2a46ba35 100644
> --- a/target/riscv/cpu.h
> +++ b/target/riscv/cpu.h
> @@ -153,7 +153,6 @@ typedef struct PMUCTRState {
> target_ulong mhpmcounter_prev;
> /* Snapshort value of a counter in RV32 */
> target_ulong mhpmcounterh_prev;
> - bool started;
> /* Value beyond UINT32_MAX/UINT64_MAX before overflow interrupt trigger */
> target_ulong irq_overflow_left;
> } PMUCTRState;
> diff --git a/target/riscv/csr.c b/target/riscv/csr.c
> index c292d036bcb2..e4adfa324efe 100644
> --- a/target/riscv/csr.c
> +++ b/target/riscv/csr.c
> @@ -1062,17 +1062,11 @@ static RISCVException riscv_pmu_read_ctr(CPURISCVState *env, target_ulong *val,
>
> if (get_field(env->mcountinhibit, BIT(ctr_idx))) {
> /*
> - * Counter should not increment if inhibit bit is set. We can't really
> - * stop the icount counting. Just return the counter value written by
> - * the supervisor to indicate that counter was not incremented.
> + * Counter should not increment if inhibit bit is set. Just return the
> + * current counter value.
> */
> - if (!counter->started) {
> - *val = ctr_val;
> - return RISCV_EXCP_NONE;
> - } else {
> - /* Mark that the counter has been stopped */
> - counter->started = false;
> - }
> + *val = ctr_val;
> + return RISCV_EXCP_NONE;
> }
>
> /*
> @@ -2114,9 +2108,25 @@ static RISCVException write_mcountinhibit(CPURISCVState *env, int csrno,
>
> /* Check if any other counter is also monitoring cycles/instructions */
> for (cidx = 0; cidx < RV_MAX_MHPMCOUNTERS; cidx++) {
> - if (!get_field(env->mcountinhibit, BIT(cidx))) {
> counter = &env->pmu_ctrs[cidx];
> - counter->started = true;
> + if (get_field(env->mcountinhibit, BIT(cidx)) && (val & BIT(cidx))) {
> + /*
> + * Update the counter value for cycle/instret as we can't stop the
> + * host ticks. But we should show the current value at this moment.
> + */
> + if (riscv_pmu_ctr_monitor_cycles(env, cidx) ||
> + riscv_pmu_ctr_monitor_instructions(env, cidx)) {
> + counter->mhpmcounter_val =
> + riscv_pmu_ctr_get_fixed_counters_val(env, cidx, false) -
> + counter->mhpmcounter_prev +
> + counter->mhpmcounter_val;
> + if (riscv_cpu_mxl(env) == MXL_RV32) {
> + counter->mhpmcounterh_val =
> + riscv_pmu_ctr_get_fixed_counters_val(env, cidx, true) -
> + counter->mhpmcounterh_prev +
> + counter->mhpmcounterh_val;
> + }
> + }
> }
> }
>
> diff --git a/target/riscv/machine.c b/target/riscv/machine.c
> index 76f2150f78b5..492c2c6d9d79 100644
> --- a/target/riscv/machine.c
> +++ b/target/riscv/machine.c
> @@ -320,15 +320,14 @@ static bool pmu_needed(void *opaque)
>
> static const VMStateDescription vmstate_pmu_ctr_state = {
> .name = "cpu/pmu",
> - .version_id = 1,
> - .minimum_version_id = 1,
> + .version_id = 2,
> + .minimum_version_id = 2,
> .needed = pmu_needed,
> .fields = (const VMStateField[]) {
> VMSTATE_UINTTL(mhpmcounter_val, PMUCTRState),
> VMSTATE_UINTTL(mhpmcounterh_val, PMUCTRState),
> VMSTATE_UINTTL(mhpmcounter_prev, PMUCTRState),
> VMSTATE_UINTTL(mhpmcounterh_prev, PMUCTRState),
> - VMSTATE_BOOL(started, PMUCTRState),
> VMSTATE_END_OF_LIST()
> }
> };
>
^ permalink raw reply [flat|nested] 27+ messages in thread
* Re: [PATCH v7 07/11] target/riscv: Save counter values during countinhibit update
2024-06-26 23:57 ` [PATCH v7 07/11] target/riscv: Save counter values during countinhibit update Atish Patra
2024-07-01 19:34 ` Daniel Henrique Barboza
@ 2024-07-03 2:03 ` Alistair Francis
1 sibling, 0 replies; 27+ messages in thread
From: Alistair Francis @ 2024-07-03 2:03 UTC (permalink / raw)
To: Atish Patra
Cc: qemu-riscv, qemu-devel, Rajnesh Kanwal, palmer, liwei1518,
zhiwei_liu, bin.meng, dbarboza, alistair.francis
On Thu, Jun 27, 2024 at 9:58 AM Atish Patra <atishp@rivosinc.com> wrote:
>
> Currently, if a counter monitoring cycle/instret is stopped via
> mcountinhibit, we only update the state, and the value is saved
> during the next read. This is not accurate, as the read may happen
> many cycles after the counter is stopped. Ideally, the read should
> return the value saved when the counter was stopped.
>
> Thus, save the value of the counter during the inhibit update
> operation and return that value during the read if the corresponding
> bit in mcountinhibit is set.
>
> Signed-off-by: Atish Patra <atishp@rivosinc.com>
Acked-by: Alistair Francis <alistair.francis@wdc.com>
Alistair
> ---
> target/riscv/cpu.h | 1 -
> target/riscv/csr.c | 34 ++++++++++++++++++++++------------
> target/riscv/machine.c | 5 ++---
> 3 files changed, 24 insertions(+), 16 deletions(-)
>
> diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
> index d56d640b06be..91fe2a46ba35 100644
> --- a/target/riscv/cpu.h
> +++ b/target/riscv/cpu.h
> @@ -153,7 +153,6 @@ typedef struct PMUCTRState {
> target_ulong mhpmcounter_prev;
> /* Snapshort value of a counter in RV32 */
> target_ulong mhpmcounterh_prev;
> - bool started;
> /* Value beyond UINT32_MAX/UINT64_MAX before overflow interrupt trigger */
> target_ulong irq_overflow_left;
> } PMUCTRState;
> diff --git a/target/riscv/csr.c b/target/riscv/csr.c
> index c292d036bcb2..e4adfa324efe 100644
> --- a/target/riscv/csr.c
> +++ b/target/riscv/csr.c
> @@ -1062,17 +1062,11 @@ static RISCVException riscv_pmu_read_ctr(CPURISCVState *env, target_ulong *val,
>
> if (get_field(env->mcountinhibit, BIT(ctr_idx))) {
> /*
> - * Counter should not increment if inhibit bit is set. We can't really
> - * stop the icount counting. Just return the counter value written by
> - * the supervisor to indicate that counter was not incremented.
> + * Counter should not increment if inhibit bit is set. Just return the
> + * current counter value.
> */
> - if (!counter->started) {
> - *val = ctr_val;
> - return RISCV_EXCP_NONE;
> - } else {
> - /* Mark that the counter has been stopped */
> - counter->started = false;
> - }
> + *val = ctr_val;
> + return RISCV_EXCP_NONE;
> }
>
> /*
> @@ -2114,9 +2108,25 @@ static RISCVException write_mcountinhibit(CPURISCVState *env, int csrno,
>
> /* Check if any other counter is also monitoring cycles/instructions */
> for (cidx = 0; cidx < RV_MAX_MHPMCOUNTERS; cidx++) {
> - if (!get_field(env->mcountinhibit, BIT(cidx))) {
> counter = &env->pmu_ctrs[cidx];
> - counter->started = true;
> + if (get_field(env->mcountinhibit, BIT(cidx)) && (val & BIT(cidx))) {
> + /*
> + * Update the counter value for cycle/instret as we can't stop the
> + * host ticks. But we should show the current value at this moment.
> + */
> + if (riscv_pmu_ctr_monitor_cycles(env, cidx) ||
> + riscv_pmu_ctr_monitor_instructions(env, cidx)) {
> + counter->mhpmcounter_val =
> + riscv_pmu_ctr_get_fixed_counters_val(env, cidx, false) -
> + counter->mhpmcounter_prev +
> + counter->mhpmcounter_val;
> + if (riscv_cpu_mxl(env) == MXL_RV32) {
> + counter->mhpmcounterh_val =
> + riscv_pmu_ctr_get_fixed_counters_val(env, cidx, true) -
> + counter->mhpmcounterh_prev +
> + counter->mhpmcounterh_val;
> + }
> + }
> }
> }
>
> diff --git a/target/riscv/machine.c b/target/riscv/machine.c
> index 76f2150f78b5..492c2c6d9d79 100644
> --- a/target/riscv/machine.c
> +++ b/target/riscv/machine.c
> @@ -320,15 +320,14 @@ static bool pmu_needed(void *opaque)
>
> static const VMStateDescription vmstate_pmu_ctr_state = {
> .name = "cpu/pmu",
> - .version_id = 1,
> - .minimum_version_id = 1,
> + .version_id = 2,
> + .minimum_version_id = 2,
> .needed = pmu_needed,
> .fields = (const VMStateField[]) {
> VMSTATE_UINTTL(mhpmcounter_val, PMUCTRState),
> VMSTATE_UINTTL(mhpmcounterh_val, PMUCTRState),
> VMSTATE_UINTTL(mhpmcounter_prev, PMUCTRState),
> VMSTATE_UINTTL(mhpmcounterh_prev, PMUCTRState),
> - VMSTATE_BOOL(started, PMUCTRState),
> VMSTATE_END_OF_LIST()
> }
> };
>
> --
> 2.34.1
>
>
^ permalink raw reply [flat|nested] 27+ messages in thread
* [PATCH v7 08/11] target/riscv: Enforce WARL behavior for scounteren/hcounteren
2024-06-26 23:57 [PATCH v7 00/11] Add RISC-V ISA extension smcntrpmf support Atish Patra
` (6 preceding siblings ...)
2024-06-26 23:57 ` [PATCH v7 07/11] target/riscv: Save counter values during countinhibit update Atish Patra
@ 2024-06-26 23:57 ` Atish Patra
2024-06-26 23:57 ` [PATCH v7 09/11] target/riscv: Start counters from both mhpmcounter and mcountinhibit Atish Patra
` (2 subsequent siblings)
10 siblings, 0 replies; 27+ messages in thread
From: Atish Patra @ 2024-06-26 23:57 UTC (permalink / raw)
To: qemu-riscv, qemu-devel
Cc: Rajnesh Kanwal, Atish Patra, palmer, liwei1518, zhiwei_liu,
bin.meng, dbarboza, alistair.francis
scounteren/hcounteren are also WARL registers, similar to mcounteren.
Only set the bits for the available counters during the write to
preserve the WARL behavior.
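For illustration, the masking boils down to the following (a standalone
sketch with assumed bit positions and an assumed set of implemented
counters, not the QEMU code):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Assumed bit positions for the sketch: CY=0, TM=1, IR=2. */
    #define COUNTEREN_CY (1u << 0)
    #define COUNTEREN_TM (1u << 1)
    #define COUNTEREN_IR (1u << 2)

    int main(void)
    {
        uint32_t pmu_avail_ctrs = 0x78;       /* hpmcounter3..6 implemented (assumed) */
        uint32_t written        = 0xffffffff; /* guest writes all ones */

        /* WARL: only bits for implemented counters are kept. */
        uint32_t scounteren = written &
            (pmu_avail_ctrs | COUNTEREN_CY | COUNTEREN_TM | COUNTEREN_IR);

        printf("scounteren = 0x%" PRIx32 "\n", scounteren); /* prints 0x7f */
        return 0;
    }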
Signed-off-by: Atish Patra <atishp@rivosinc.com>
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
---
target/riscv/csr.c | 12 ++++++++++--
1 file changed, 10 insertions(+), 2 deletions(-)
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
index e4adfa324efe..6c1a884eec82 100644
--- a/target/riscv/csr.c
+++ b/target/riscv/csr.c
@@ -2994,7 +2994,11 @@ static RISCVException read_scounteren(CPURISCVState *env, int csrno,
static RISCVException write_scounteren(CPURISCVState *env, int csrno,
target_ulong val)
{
- env->scounteren = val;
+ RISCVCPU *cpu = env_archcpu(env);
+
+ /* WARL register - disable unavailable counters */
+ env->scounteren = val & (cpu->pmu_avail_ctrs | COUNTEREN_CY | COUNTEREN_TM |
+ COUNTEREN_IR);
return RISCV_EXCP_NONE;
}
@@ -3653,7 +3657,11 @@ static RISCVException read_hcounteren(CPURISCVState *env, int csrno,
static RISCVException write_hcounteren(CPURISCVState *env, int csrno,
target_ulong val)
{
- env->hcounteren = val;
+ RISCVCPU *cpu = env_archcpu(env);
+
+ /* WARL register - disable unavailable counters */
+ env->hcounteren = val & (cpu->pmu_avail_ctrs | COUNTEREN_CY | COUNTEREN_TM |
+ COUNTEREN_IR);
return RISCV_EXCP_NONE;
}
--
2.34.1
^ permalink raw reply related [flat|nested] 27+ messages in thread
* [PATCH v7 09/11] target/riscv: Start counters from both mhpmcounter and mcountinhibit
2024-06-26 23:57 [PATCH v7 00/11] Add RISC-V ISA extension smcntrpmf support Atish Patra
` (7 preceding siblings ...)
2024-06-26 23:57 ` [PATCH v7 08/11] target/riscv: Enforce WARL behavior for scounteren/hcounteren Atish Patra
@ 2024-06-26 23:57 ` Atish Patra
2024-07-01 19:37 ` Daniel Henrique Barboza
2024-06-26 23:57 ` [PATCH v7 10/11] target/riscv: More accurately model priv mode filtering Atish Patra
2024-06-26 23:57 ` [PATCH v7 11/11] target/riscv: Do not setup pmu timer if OF is disabled Atish Patra
10 siblings, 1 reply; 27+ messages in thread
From: Atish Patra @ 2024-06-26 23:57 UTC (permalink / raw)
To: qemu-riscv, qemu-devel
Cc: Rajnesh Kanwal, Atish Patra, palmer, liwei1518, zhiwei_liu,
bin.meng, dbarboza, alistair.francis
From: Rajnesh Kanwal <rkanwal@rivosinc.com>
Currently we start the timer counter from the write_mhpmcounter path
only, without checking the mcountinhibit bit. This change adds the
mcountinhibit check and also programs the counter from
write_mcountinhibit as well.
When a counter is stopped using mcountinhibit, we simply update the
value of the counter based on the current host ticks and save it for
future reads.
We don't need to disable a running timer, as pmu_timer_trigger_irq
will discard the interrupt if the counter has been inhibited.
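The stop-time adjustment is just "new value = current ticks - ticks at
start + software-written value". A standalone sketch of that arithmetic
for the RV64 case (names are illustrative, not the QEMU code):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Fold the host ticks elapsed since the counter was started into the
     * value software last wrote, so later reads see the stopped value. */
    static uint64_t snapshot_on_inhibit(uint64_t curr_ticks, uint64_t prev_ticks,
                                        uint64_t mhpmcounter_val)
    {
        return curr_ticks - prev_ticks + mhpmcounter_val;
    }

    int main(void)
    {
        /* Written with 100 at start (host tick 5000); inhibited at tick 5300. */
        uint64_t saved = snapshot_on_inhibit(5300, 5000, 100);
        printf("saved counter value = %" PRIu64 "\n", saved); /* prints 400 */
        return 0;
    }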
Signed-off-by: Rajnesh Kanwal <rkanwal@rivosinc.com>
---
target/riscv/csr.c | 75 ++++++++++++++++++++++++++++++++++++++----------------
target/riscv/pmu.c | 3 +--
2 files changed, 54 insertions(+), 24 deletions(-)
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
index 6c1a884eec82..150e02f080ec 100644
--- a/target/riscv/csr.c
+++ b/target/riscv/csr.c
@@ -1008,8 +1008,9 @@ static RISCVException write_mhpmcounter(CPURISCVState *env, int csrno,
uint64_t mhpmctr_val = val;
counter->mhpmcounter_val = val;
- if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
- riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) {
+ if (!get_field(env->mcountinhibit, BIT(ctr_idx)) &&
+ (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
+ riscv_pmu_ctr_monitor_instructions(env, ctr_idx))) {
counter->mhpmcounter_prev = riscv_pmu_ctr_get_fixed_counters_val(env,
ctr_idx, false);
if (ctr_idx > 2) {
@@ -1037,8 +1038,9 @@ static RISCVException write_mhpmcounterh(CPURISCVState *env, int csrno,
counter->mhpmcounterh_val = val;
mhpmctr_val = mhpmctr_val | (mhpmctrh_val << 32);
- if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
- riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) {
+ if (!get_field(env->mcountinhibit, BIT(ctr_idx)) &&
+ (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
+ riscv_pmu_ctr_monitor_instructions(env, ctr_idx))) {
counter->mhpmcounterh_prev = riscv_pmu_ctr_get_fixed_counters_val(env,
ctr_idx, true);
if (ctr_idx > 2) {
@@ -2101,31 +2103,60 @@ static RISCVException write_mcountinhibit(CPURISCVState *env, int csrno,
int cidx;
PMUCTRState *counter;
RISCVCPU *cpu = env_archcpu(env);
+ uint32_t present_ctrs = cpu->pmu_avail_ctrs | COUNTEREN_CY | COUNTEREN_IR;
+ target_ulong updated_ctrs = (env->mcountinhibit ^ val) & present_ctrs;
+ uint64_t mhpmctr_val, prev_count, curr_count;
/* WARL register - disable unavailable counters; TM bit is always 0 */
- env->mcountinhibit =
- val & (cpu->pmu_avail_ctrs | COUNTEREN_CY | COUNTEREN_IR);
+ env->mcountinhibit = val & present_ctrs;
/* Check if any other counter is also monitoring cycles/instructions */
for (cidx = 0; cidx < RV_MAX_MHPMCOUNTERS; cidx++) {
- counter = &env->pmu_ctrs[cidx];
- if (get_field(env->mcountinhibit, BIT(cidx)) && (val & BIT(cidx))) {
- /*
- * Update the counter value for cycle/instret as we can't stop the
- * host ticks. But we should show the current value at this moment.
- */
- if (riscv_pmu_ctr_monitor_cycles(env, cidx) ||
- riscv_pmu_ctr_monitor_instructions(env, cidx)) {
- counter->mhpmcounter_val =
- riscv_pmu_ctr_get_fixed_counters_val(env, cidx, false) -
- counter->mhpmcounter_prev +
- counter->mhpmcounter_val;
+ if (!(updated_ctrs & BIT(cidx)) ||
+ (!riscv_pmu_ctr_monitor_cycles(env, cidx) &&
+ !riscv_pmu_ctr_monitor_instructions(env, cidx))) {
+ continue;
+ }
+
+ counter = &env->pmu_ctrs[cidx];
+
+ if (!get_field(env->mcountinhibit, BIT(cidx))) {
+ counter->mhpmcounter_prev =
+ riscv_pmu_ctr_get_fixed_counters_val(env, cidx, false);
+ if (riscv_cpu_mxl(env) == MXL_RV32) {
+ counter->mhpmcounterh_prev =
+ riscv_pmu_ctr_get_fixed_counters_val(env, cidx, true);
+ }
+
+ if (cidx > 2) {
+ mhpmctr_val = counter->mhpmcounter_val;
if (riscv_cpu_mxl(env) == MXL_RV32) {
- counter->mhpmcounterh_val =
- riscv_pmu_ctr_get_fixed_counters_val(env, cidx, true) -
- counter->mhpmcounterh_prev +
- counter->mhpmcounterh_val;
+ mhpmctr_val = mhpmctr_val |
+ ((uint64_t)counter->mhpmcounterh_val << 32);
}
+ riscv_pmu_setup_timer(env, mhpmctr_val, cidx);
+ }
+ } else {
+ curr_count = riscv_pmu_ctr_get_fixed_counters_val(env, cidx, false);
+
+ mhpmctr_val = counter->mhpmcounter_val;
+ prev_count = counter->mhpmcounter_prev;
+ if (riscv_cpu_mxl(env) == MXL_RV32) {
+ uint64_t tmp =
+ riscv_pmu_ctr_get_fixed_counters_val(env, cidx, true);
+
+ curr_count = curr_count | (tmp << 32);
+ mhpmctr_val = mhpmctr_val |
+ ((uint64_t)counter->mhpmcounterh_val << 32);
+ prev_count = prev_count |
+ ((uint64_t)counter->mhpmcounterh_prev << 32);
+ }
+
+ /* Adjust the counter for later reads. */
+ mhpmctr_val = curr_count - prev_count + mhpmctr_val;
+ counter->mhpmcounter_val = mhpmctr_val;
+ if (riscv_cpu_mxl(env) == MXL_RV32) {
+ counter->mhpmcounterh_val = mhpmctr_val >> 32;
}
}
}
diff --git a/target/riscv/pmu.c b/target/riscv/pmu.c
index ac648cff8d7c..63420d9f3679 100644
--- a/target/riscv/pmu.c
+++ b/target/riscv/pmu.c
@@ -285,8 +285,7 @@ int riscv_pmu_incr_ctr(RISCVCPU *cpu, enum riscv_pmu_event_idx event_idx)
}
ctr_idx = GPOINTER_TO_UINT(value);
- if (!riscv_pmu_counter_enabled(cpu, ctr_idx) ||
- get_field(env->mcountinhibit, BIT(ctr_idx))) {
+ if (!riscv_pmu_counter_enabled(cpu, ctr_idx)) {
return -1;
}
--
2.34.1
^ permalink raw reply related [flat|nested] 27+ messages in thread
* Re: [PATCH v7 09/11] target/riscv: Start counters from both mhpmcounter and mcountinhibit
2024-06-26 23:57 ` [PATCH v7 09/11] target/riscv: Start counters from both mhpmcounter and mcountinhibit Atish Patra
@ 2024-07-01 19:37 ` Daniel Henrique Barboza
0 siblings, 0 replies; 27+ messages in thread
From: Daniel Henrique Barboza @ 2024-07-01 19:37 UTC (permalink / raw)
To: Atish Patra, qemu-riscv, qemu-devel
Cc: Rajnesh Kanwal, palmer, liwei1518, zhiwei_liu, bin.meng,
alistair.francis
On 6/26/24 8:57 PM, Atish Patra wrote:
> From: Rajnesh Kanwal <rkanwal@rivosinc.com>
>
> Currently we start the timer counter from the write_mhpmcounter path
> only, without checking the mcountinhibit bit. This change adds the
> mcountinhibit check and also programs the counter from
> write_mcountinhibit as well.
>
> When a counter is stopped using mcountinhibit, we simply update the
> value of the counter based on the current host ticks and save it for
> future reads.
>
> We don't need to disable a running timer, as pmu_timer_trigger_irq
> will discard the interrupt if the counter has been inhibited.
>
> Signed-off-by: Rajnesh Kanwal <rkanwal@rivosinc.com>
> ---
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
> target/riscv/csr.c | 75 ++++++++++++++++++++++++++++++++++++++----------------
> target/riscv/pmu.c | 3 +--
> 2 files changed, 54 insertions(+), 24 deletions(-)
>
> diff --git a/target/riscv/csr.c b/target/riscv/csr.c
> index 6c1a884eec82..150e02f080ec 100644
> --- a/target/riscv/csr.c
> +++ b/target/riscv/csr.c
> @@ -1008,8 +1008,9 @@ static RISCVException write_mhpmcounter(CPURISCVState *env, int csrno,
> uint64_t mhpmctr_val = val;
>
> counter->mhpmcounter_val = val;
> - if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
> - riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) {
> + if (!get_field(env->mcountinhibit, BIT(ctr_idx)) &&
> + (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
> + riscv_pmu_ctr_monitor_instructions(env, ctr_idx))) {
> counter->mhpmcounter_prev = riscv_pmu_ctr_get_fixed_counters_val(env,
> ctr_idx, false);
> if (ctr_idx > 2) {
> @@ -1037,8 +1038,9 @@ static RISCVException write_mhpmcounterh(CPURISCVState *env, int csrno,
>
> counter->mhpmcounterh_val = val;
> mhpmctr_val = mhpmctr_val | (mhpmctrh_val << 32);
> - if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
> - riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) {
> + if (!get_field(env->mcountinhibit, BIT(ctr_idx)) &&
> + (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) ||
> + riscv_pmu_ctr_monitor_instructions(env, ctr_idx))) {
> counter->mhpmcounterh_prev = riscv_pmu_ctr_get_fixed_counters_val(env,
> ctr_idx, true);
> if (ctr_idx > 2) {
> @@ -2101,31 +2103,60 @@ static RISCVException write_mcountinhibit(CPURISCVState *env, int csrno,
> int cidx;
> PMUCTRState *counter;
> RISCVCPU *cpu = env_archcpu(env);
> + uint32_t present_ctrs = cpu->pmu_avail_ctrs | COUNTEREN_CY | COUNTEREN_IR;
> + target_ulong updated_ctrs = (env->mcountinhibit ^ val) & present_ctrs;
> + uint64_t mhpmctr_val, prev_count, curr_count;
>
> /* WARL register - disable unavailable counters; TM bit is always 0 */
> - env->mcountinhibit =
> - val & (cpu->pmu_avail_ctrs | COUNTEREN_CY | COUNTEREN_IR);
> + env->mcountinhibit = val & present_ctrs;
>
> /* Check if any other counter is also monitoring cycles/instructions */
> for (cidx = 0; cidx < RV_MAX_MHPMCOUNTERS; cidx++) {
> - counter = &env->pmu_ctrs[cidx];
> - if (get_field(env->mcountinhibit, BIT(cidx)) && (val & BIT(cidx))) {
> - /*
> - * Update the counter value for cycle/instret as we can't stop the
> - * host ticks. But we should show the current value at this moment.
> - */
> - if (riscv_pmu_ctr_monitor_cycles(env, cidx) ||
> - riscv_pmu_ctr_monitor_instructions(env, cidx)) {
> - counter->mhpmcounter_val =
> - riscv_pmu_ctr_get_fixed_counters_val(env, cidx, false) -
> - counter->mhpmcounter_prev +
> - counter->mhpmcounter_val;
> + if (!(updated_ctrs & BIT(cidx)) ||
> + (!riscv_pmu_ctr_monitor_cycles(env, cidx) &&
> + !riscv_pmu_ctr_monitor_instructions(env, cidx))) {
> + continue;
> + }
> +
> + counter = &env->pmu_ctrs[cidx];
> +
> + if (!get_field(env->mcountinhibit, BIT(cidx))) {
> + counter->mhpmcounter_prev =
> + riscv_pmu_ctr_get_fixed_counters_val(env, cidx, false);
> + if (riscv_cpu_mxl(env) == MXL_RV32) {
> + counter->mhpmcounterh_prev =
> + riscv_pmu_ctr_get_fixed_counters_val(env, cidx, true);
> + }
> +
> + if (cidx > 2) {
> + mhpmctr_val = counter->mhpmcounter_val;
> if (riscv_cpu_mxl(env) == MXL_RV32) {
> - counter->mhpmcounterh_val =
> - riscv_pmu_ctr_get_fixed_counters_val(env, cidx, true) -
> - counter->mhpmcounterh_prev +
> - counter->mhpmcounterh_val;
> + mhpmctr_val = mhpmctr_val |
> + ((uint64_t)counter->mhpmcounterh_val << 32);
> }
> + riscv_pmu_setup_timer(env, mhpmctr_val, cidx);
> + }
> + } else {
> + curr_count = riscv_pmu_ctr_get_fixed_counters_val(env, cidx, false);
> +
> + mhpmctr_val = counter->mhpmcounter_val;
> + prev_count = counter->mhpmcounter_prev;
> + if (riscv_cpu_mxl(env) == MXL_RV32) {
> + uint64_t tmp =
> + riscv_pmu_ctr_get_fixed_counters_val(env, cidx, true);
> +
> + curr_count = curr_count | (tmp << 32);
> + mhpmctr_val = mhpmctr_val |
> + ((uint64_t)counter->mhpmcounterh_val << 32);
> + prev_count = prev_count |
> + ((uint64_t)counter->mhpmcounterh_prev << 32);
> + }
> +
> + /* Adjust the counter for later reads. */
> + mhpmctr_val = curr_count - prev_count + mhpmctr_val;
> + counter->mhpmcounter_val = mhpmctr_val;
> + if (riscv_cpu_mxl(env) == MXL_RV32) {
> + counter->mhpmcounterh_val = mhpmctr_val >> 32;
> }
> }
> }
> diff --git a/target/riscv/pmu.c b/target/riscv/pmu.c
> index ac648cff8d7c..63420d9f3679 100644
> --- a/target/riscv/pmu.c
> +++ b/target/riscv/pmu.c
> @@ -285,8 +285,7 @@ int riscv_pmu_incr_ctr(RISCVCPU *cpu, enum riscv_pmu_event_idx event_idx)
> }
>
> ctr_idx = GPOINTER_TO_UINT(value);
> - if (!riscv_pmu_counter_enabled(cpu, ctr_idx) ||
> - get_field(env->mcountinhibit, BIT(ctr_idx))) {
> + if (!riscv_pmu_counter_enabled(cpu, ctr_idx)) {
> return -1;
> }
>
>
^ permalink raw reply [flat|nested] 27+ messages in thread
* [PATCH v7 10/11] target/riscv: More accurately model priv mode filtering.
2024-06-26 23:57 [PATCH v7 00/11] Add RISC-V ISA extension smcntrpmf support Atish Patra
` (8 preceding siblings ...)
2024-06-26 23:57 ` [PATCH v7 09/11] target/riscv: Start counters from both mhpmcounter and mcountinhibit Atish Patra
@ 2024-06-26 23:57 ` Atish Patra
2024-07-01 19:37 ` Daniel Henrique Barboza
2024-06-26 23:57 ` [PATCH v7 11/11] target/riscv: Do not setup pmu timer if OF is disabled Atish Patra
10 siblings, 1 reply; 27+ messages in thread
From: Atish Patra @ 2024-06-26 23:57 UTC (permalink / raw)
To: qemu-riscv, qemu-devel
Cc: Rajnesh Kanwal, Atish Patra, palmer, liwei1518, zhiwei_liu,
bin.meng, dbarboza, alistair.francis
From: Rajnesh Kanwal <rkanwal@rivosinc.com>
In the case of programmable counters configured to count
instructions/cycles, we often end up with the counter not incrementing
at all from the kernel's perspective.
For example:
- The kernel configures hpm3 to count instructions, sets the counter
to -10000, and inhibits all modes except U-mode.
- In QEMU we configure a timer to expire after ~10000 instructions.
- The problem is that the kernel often doesn't schedule a U-mode task
before we hit the timer callback in QEMU.
- In the timer callback we inject the interrupt into the kernel; the
kernel runs the handler and reads the hpmcounter3 value.
- Given that QEMU maintains an individual count per privilege mode, and
given that U-mode never ran, the U-mode counter didn't increment and
QEMU returns the same value the kernel programmed when starting the
counter.
- The kernel checks for overflow using the previous and current values
of the counter and, since the counter value shows no overflow,
reprograms the counter. (Which is itself a problem: we have QEMU
telling the kernel that counter3 overflowed, but the counter value
returned by QEMU doesn't reflect that.)
This change makes sure that the timer is reprogrammed from the handler
if, based on the counter value, the counter didn't actually overflow.
Second, this change makes sure that whenever the counter is read, its
value is updated to reflect the latest count.
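In other words, the timer callback now rebuilds the software-visible
(mode-filtered) counter value and only raises LCOFI if that value has
actually wrapped past the value the kernel programmed; otherwise it
re-arms the timer. A standalone sketch of just that decision (names
are illustrative, not the QEMU code):

    #include <stdint.h>
    #include <stdio.h>

    /* Overflow means the mode-filtered value wrapped past zero, i.e. it is
     * now below the value the kernel wrote when arming the counter. */
    static const char *decide(uint64_t curr, uint64_t programmed)
    {
        return curr < programmed ? "raise LCOFI" : "re-arm timer";
    }

    int main(void)
    {
        uint64_t programmed = (uint64_t)-10000; /* 10000-event sampling period */

        /* U-mode never ran: the visible value has not moved, so no overflow. */
        printf("curr == programmed: %s\n", decide(programmed, programmed));
        /* The counter really counted past zero. */
        printf("curr wrapped to 42: %s\n", decide(42, programmed));
        return 0;
    }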
Signed-off-by: Rajnesh Kanwal <rkanwal@rivosinc.com>
---
target/riscv/csr.c | 5 ++++-
target/riscv/pmu.c | 30 +++++++++++++++++++++++++++---
target/riscv/pmu.h | 2 ++
3 files changed, 33 insertions(+), 4 deletions(-)
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
index 150e02f080ec..91172b90e000 100644
--- a/target/riscv/csr.c
+++ b/target/riscv/csr.c
@@ -970,6 +970,9 @@ static target_ulong riscv_pmu_ctr_get_fixed_counters_val(CPURISCVState *env,
goto done;
}
+ /* Update counter before reading. */
+ riscv_pmu_update_fixed_ctrs(env, env->priv, env->virt_enabled);
+
if (!(cfg_val & MCYCLECFG_BIT_MINH)) {
curr_val += counter_arr[PRV_M];
}
@@ -1053,7 +1056,7 @@ static RISCVException write_mhpmcounterh(CPURISCVState *env, int csrno,
return RISCV_EXCP_NONE;
}
-static RISCVException riscv_pmu_read_ctr(CPURISCVState *env, target_ulong *val,
+RISCVException riscv_pmu_read_ctr(CPURISCVState *env, target_ulong *val,
bool upper_half, uint32_t ctr_idx)
{
PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
diff --git a/target/riscv/pmu.c b/target/riscv/pmu.c
index 63420d9f3679..a4729f6c53bb 100644
--- a/target/riscv/pmu.c
+++ b/target/riscv/pmu.c
@@ -425,6 +425,8 @@ static void pmu_timer_trigger_irq(RISCVCPU *cpu,
target_ulong *mhpmevent_val;
uint64_t of_bit_mask;
int64_t irq_trigger_at;
+ uint64_t curr_ctr_val, curr_ctrh_val;
+ uint64_t ctr_val;
if (evt_idx != RISCV_PMU_EVENT_HW_CPU_CYCLES &&
evt_idx != RISCV_PMU_EVENT_HW_INSTRUCTIONS) {
@@ -454,6 +456,26 @@ static void pmu_timer_trigger_irq(RISCVCPU *cpu,
return;
}
+ riscv_pmu_read_ctr(env, (target_ulong *)&curr_ctr_val, false, ctr_idx);
+ ctr_val = counter->mhpmcounter_val;
+ if (riscv_cpu_mxl(env) == MXL_RV32) {
+ riscv_pmu_read_ctr(env, (target_ulong *)&curr_ctrh_val, true, ctr_idx);
+ curr_ctr_val = curr_ctr_val | (curr_ctrh_val << 32);
+ ctr_val = ctr_val |
+ ((uint64_t)counter->mhpmcounterh_val << 32);
+ }
+
+ /*
+ * We can not accommodate for inhibited modes when setting up timer. Check
+ * if the counter has actually overflowed or not by comparing current
+ * counter value (accommodated for inhibited modes) with software written
+ * counter value.
+ */
+ if (curr_ctr_val >= ctr_val) {
+ riscv_pmu_setup_timer(env, curr_ctr_val, ctr_idx);
+ return;
+ }
+
if (cpu->pmu_avail_ctrs & BIT(ctr_idx)) {
/* Generate interrupt only if OF bit is clear */
if (!(*mhpmevent_val & of_bit_mask)) {
@@ -475,7 +497,7 @@ void riscv_pmu_timer_cb(void *priv)
int riscv_pmu_setup_timer(CPURISCVState *env, uint64_t value, uint32_t ctr_idx)
{
- uint64_t overflow_delta, overflow_at;
+ uint64_t overflow_delta, overflow_at, curr_ns;
int64_t overflow_ns, overflow_left = 0;
RISCVCPU *cpu = env_archcpu(env);
PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
@@ -506,8 +528,10 @@ int riscv_pmu_setup_timer(CPURISCVState *env, uint64_t value, uint32_t ctr_idx)
} else {
return -1;
}
- overflow_at = (uint64_t)qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
- overflow_ns;
+ curr_ns = (uint64_t)qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
+ overflow_at = curr_ns + overflow_ns;
+ if (overflow_at <= curr_ns)
+ overflow_at = UINT64_MAX;
if (overflow_at > INT64_MAX) {
overflow_left += overflow_at - INT64_MAX;
diff --git a/target/riscv/pmu.h b/target/riscv/pmu.h
index ca40cfeed647..3853d0e2629e 100644
--- a/target/riscv/pmu.h
+++ b/target/riscv/pmu.h
@@ -36,5 +36,7 @@ int riscv_pmu_setup_timer(CPURISCVState *env, uint64_t value,
uint32_t ctr_idx);
void riscv_pmu_update_fixed_ctrs(CPURISCVState *env, target_ulong newpriv,
bool new_virt);
+RISCVException riscv_pmu_read_ctr(CPURISCVState *env, target_ulong *val,
+ bool upper_half, uint32_t ctr_idx);
#endif /* RISCV_PMU_H */
--
2.34.1
^ permalink raw reply related [flat|nested] 27+ messages in thread
* Re: [PATCH v7 10/11] target/riscv: More accurately model priv mode filtering.
2024-06-26 23:57 ` [PATCH v7 10/11] target/riscv: More accurately model priv mode filtering Atish Patra
@ 2024-07-01 19:37 ` Daniel Henrique Barboza
0 siblings, 0 replies; 27+ messages in thread
From: Daniel Henrique Barboza @ 2024-07-01 19:37 UTC (permalink / raw)
To: Atish Patra, qemu-riscv, qemu-devel
Cc: Rajnesh Kanwal, palmer, liwei1518, zhiwei_liu, bin.meng,
alistair.francis
On 6/26/24 8:57 PM, Atish Patra wrote:
> From: Rajnesh Kanwal <rkanwal@rivosinc.com>
>
> In the case of programmable counters configured to count
> instructions/cycles, we often end up with the counter not incrementing
> at all from the kernel's perspective.
>
> For example:
> - The kernel configures hpm3 to count instructions, sets the counter
> to -10000, and inhibits all modes except U-mode.
> - In QEMU we configure a timer to expire after ~10000 instructions.
> - The problem is that the kernel often doesn't schedule a U-mode task
> before we hit the timer callback in QEMU.
> - In the timer callback we inject the interrupt into the kernel; the
> kernel runs the handler and reads the hpmcounter3 value.
> - Given that QEMU maintains an individual count per privilege mode, and
> given that U-mode never ran, the U-mode counter didn't increment and
> QEMU returns the same value the kernel programmed when starting the
> counter.
> - The kernel checks for overflow using the previous and current values
> of the counter and, since the counter value shows no overflow,
> reprograms the counter. (Which is itself a problem: we have QEMU
> telling the kernel that counter3 overflowed, but the counter value
> returned by QEMU doesn't reflect that.)
>
> This change makes sure that the timer is reprogrammed from the handler
> if, based on the counter value, the counter didn't actually overflow.
>
> Second, this change makes sure that whenever the counter is read, its
> value is updated to reflect the latest count.
>
> Signed-off-by: Rajnesh Kanwal <rkanwal@rivosinc.com>
> ---
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
> target/riscv/csr.c | 5 ++++-
> target/riscv/pmu.c | 30 +++++++++++++++++++++++++++---
> target/riscv/pmu.h | 2 ++
> 3 files changed, 33 insertions(+), 4 deletions(-)
>
> diff --git a/target/riscv/csr.c b/target/riscv/csr.c
> index 150e02f080ec..91172b90e000 100644
> --- a/target/riscv/csr.c
> +++ b/target/riscv/csr.c
> @@ -970,6 +970,9 @@ static target_ulong riscv_pmu_ctr_get_fixed_counters_val(CPURISCVState *env,
> goto done;
> }
>
> + /* Update counter before reading. */
> + riscv_pmu_update_fixed_ctrs(env, env->priv, env->virt_enabled);
> +
> if (!(cfg_val & MCYCLECFG_BIT_MINH)) {
> curr_val += counter_arr[PRV_M];
> }
> @@ -1053,7 +1056,7 @@ static RISCVException write_mhpmcounterh(CPURISCVState *env, int csrno,
> return RISCV_EXCP_NONE;
> }
>
> -static RISCVException riscv_pmu_read_ctr(CPURISCVState *env, target_ulong *val,
> +RISCVException riscv_pmu_read_ctr(CPURISCVState *env, target_ulong *val,
> bool upper_half, uint32_t ctr_idx)
> {
> PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
> diff --git a/target/riscv/pmu.c b/target/riscv/pmu.c
> index 63420d9f3679..a4729f6c53bb 100644
> --- a/target/riscv/pmu.c
> +++ b/target/riscv/pmu.c
> @@ -425,6 +425,8 @@ static void pmu_timer_trigger_irq(RISCVCPU *cpu,
> target_ulong *mhpmevent_val;
> uint64_t of_bit_mask;
> int64_t irq_trigger_at;
> + uint64_t curr_ctr_val, curr_ctrh_val;
> + uint64_t ctr_val;
>
> if (evt_idx != RISCV_PMU_EVENT_HW_CPU_CYCLES &&
> evt_idx != RISCV_PMU_EVENT_HW_INSTRUCTIONS) {
> @@ -454,6 +456,26 @@ static void pmu_timer_trigger_irq(RISCVCPU *cpu,
> return;
> }
>
> + riscv_pmu_read_ctr(env, (target_ulong *)&curr_ctr_val, false, ctr_idx);
> + ctr_val = counter->mhpmcounter_val;
> + if (riscv_cpu_mxl(env) == MXL_RV32) {
> + riscv_pmu_read_ctr(env, (target_ulong *)&curr_ctrh_val, true, ctr_idx);
> + curr_ctr_val = curr_ctr_val | (curr_ctrh_val << 32);
> + ctr_val = ctr_val |
> + ((uint64_t)counter->mhpmcounterh_val << 32);
> + }
> +
> + /*
> + * We can not accommodate for inhibited modes when setting up timer. Check
> + * if the counter has actually overflowed or not by comparing current
> + * counter value (accommodated for inhibited modes) with software written
> + * counter value.
> + */
> + if (curr_ctr_val >= ctr_val) {
> + riscv_pmu_setup_timer(env, curr_ctr_val, ctr_idx);
> + return;
> + }
> +
> if (cpu->pmu_avail_ctrs & BIT(ctr_idx)) {
> /* Generate interrupt only if OF bit is clear */
> if (!(*mhpmevent_val & of_bit_mask)) {
> @@ -475,7 +497,7 @@ void riscv_pmu_timer_cb(void *priv)
>
> int riscv_pmu_setup_timer(CPURISCVState *env, uint64_t value, uint32_t ctr_idx)
> {
> - uint64_t overflow_delta, overflow_at;
> + uint64_t overflow_delta, overflow_at, curr_ns;
> int64_t overflow_ns, overflow_left = 0;
> RISCVCPU *cpu = env_archcpu(env);
> PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
> @@ -506,8 +528,10 @@ int riscv_pmu_setup_timer(CPURISCVState *env, uint64_t value, uint32_t ctr_idx)
> } else {
> return -1;
> }
> - overflow_at = (uint64_t)qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
> - overflow_ns;
> + curr_ns = (uint64_t)qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
> + overflow_at = curr_ns + overflow_ns;
> + if (overflow_at <= curr_ns)
> + overflow_at = UINT64_MAX;
>
> if (overflow_at > INT64_MAX) {
> overflow_left += overflow_at - INT64_MAX;
> diff --git a/target/riscv/pmu.h b/target/riscv/pmu.h
> index ca40cfeed647..3853d0e2629e 100644
> --- a/target/riscv/pmu.h
> +++ b/target/riscv/pmu.h
> @@ -36,5 +36,7 @@ int riscv_pmu_setup_timer(CPURISCVState *env, uint64_t value,
> uint32_t ctr_idx);
> void riscv_pmu_update_fixed_ctrs(CPURISCVState *env, target_ulong newpriv,
> bool new_virt);
> +RISCVException riscv_pmu_read_ctr(CPURISCVState *env, target_ulong *val,
> + bool upper_half, uint32_t ctr_idx);
>
> #endif /* RISCV_PMU_H */
>
^ permalink raw reply [flat|nested] 27+ messages in thread
* [PATCH v7 11/11] target/riscv: Do not setup pmu timer if OF is disabled
2024-06-26 23:57 [PATCH v7 00/11] Add RISC-V ISA extension smcntrpmf support Atish Patra
` (9 preceding siblings ...)
2024-06-26 23:57 ` [PATCH v7 10/11] target/riscv: More accurately model priv mode filtering Atish Patra
@ 2024-06-26 23:57 ` Atish Patra
2024-07-01 19:39 ` Daniel Henrique Barboza
10 siblings, 1 reply; 27+ messages in thread
From: Atish Patra @ 2024-06-26 23:57 UTC (permalink / raw)
To: qemu-riscv, qemu-devel
Cc: Rajnesh Kanwal, Atish Patra, palmer, liwei1518, zhiwei_liu,
bin.meng, dbarboza, alistair.francis
The timer setup function is invoked in both the hpmcounter write and
mcountinhibit write paths. If the OF bit is set, the LCOFI interrupt
is disabled. There is no benefit in setting up the QEMU timer until
the OF bit is cleared, indicating that interrupts can be fired again.
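The guard is an early-out before arming the QEMU timer: if the OF bit
in the counter's hpmevent CSR is already set, the interrupt would be
suppressed anyway, so there is nothing to arm. A standalone sketch of
that check (bit position and names assumed for the example, not the
QEMU code):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MHPMEVENT_BIT_OF (1ull << 63)   /* assumed OF position for the sketch */

    /* Arm the sampling timer only when an overflow interrupt could fire. */
    static bool should_setup_timer(uint64_t mhpmevent_val, bool ctr_valid,
                                   bool sscofpmf_enabled)
    {
        if (!ctr_valid || !sscofpmf_enabled) {
            return false;
        }
        return !(mhpmevent_val & MHPMEVENT_BIT_OF); /* skip while OF is set */
    }

    int main(void)
    {
        printf("OF clear: %d\n", should_setup_timer(0, true, true));                /* 1 */
        printf("OF set:   %d\n", should_setup_timer(MHPMEVENT_BIT_OF, true, true)); /* 0 */
        return 0;
    }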
Signed-off-by: Atish Patra <atishp@rivosinc.com>
---
target/riscv/pmu.c | 56 ++++++++++++++++++++++++++++++++++++++++++------------
1 file changed, 44 insertions(+), 12 deletions(-)
diff --git a/target/riscv/pmu.c b/target/riscv/pmu.c
index a4729f6c53bb..3cc0b3648cad 100644
--- a/target/riscv/pmu.c
+++ b/target/riscv/pmu.c
@@ -416,14 +416,49 @@ int riscv_pmu_update_event_map(CPURISCVState *env, uint64_t value,
return 0;
}
+static bool pmu_hpmevent_is_of_set(CPURISCVState *env, uint32_t ctr_idx)
+{
+ target_ulong mhpmevent_val;
+ uint64_t of_bit_mask;
+
+ if (riscv_cpu_mxl(env) == MXL_RV32) {
+ mhpmevent_val = env->mhpmeventh_val[ctr_idx];
+ of_bit_mask = MHPMEVENTH_BIT_OF;
+ } else {
+ mhpmevent_val = env->mhpmevent_val[ctr_idx];
+ of_bit_mask = MHPMEVENT_BIT_OF;
+ }
+
+ return get_field(mhpmevent_val, of_bit_mask);
+}
+
+static bool pmu_hpmevent_set_of_if_clear(CPURISCVState *env, uint32_t ctr_idx)
+{
+ target_ulong *mhpmevent_val;
+ uint64_t of_bit_mask;
+
+ if (riscv_cpu_mxl(env) == MXL_RV32) {
+ mhpmevent_val = &env->mhpmeventh_val[ctr_idx];
+ of_bit_mask = MHPMEVENTH_BIT_OF;
+ } else {
+ mhpmevent_val = &env->mhpmevent_val[ctr_idx];
+ of_bit_mask = MHPMEVENT_BIT_OF;
+ }
+
+ if (!get_field(*mhpmevent_val, of_bit_mask)) {
+ *mhpmevent_val |= of_bit_mask;
+ return true;
+ }
+
+ return false;
+}
+
static void pmu_timer_trigger_irq(RISCVCPU *cpu,
enum riscv_pmu_event_idx evt_idx)
{
uint32_t ctr_idx;
CPURISCVState *env = &cpu->env;
PMUCTRState *counter;
- target_ulong *mhpmevent_val;
- uint64_t of_bit_mask;
int64_t irq_trigger_at;
uint64_t curr_ctr_val, curr_ctrh_val;
uint64_t ctr_val;
@@ -439,12 +474,9 @@ static void pmu_timer_trigger_irq(RISCVCPU *cpu,
return;
}
- if (riscv_cpu_mxl(env) == MXL_RV32) {
- mhpmevent_val = &env->mhpmeventh_val[ctr_idx];
- of_bit_mask = MHPMEVENTH_BIT_OF;
- } else {
- mhpmevent_val = &env->mhpmevent_val[ctr_idx];
- of_bit_mask = MHPMEVENT_BIT_OF;
+ /* Generate interrupt only if OF bit is clear */
+ if (pmu_hpmevent_is_of_set(env, ctr_idx)) {
+ return;
}
counter = &env->pmu_ctrs[ctr_idx];
@@ -477,9 +509,7 @@ static void pmu_timer_trigger_irq(RISCVCPU *cpu,
}
if (cpu->pmu_avail_ctrs & BIT(ctr_idx)) {
- /* Generate interrupt only if OF bit is clear */
- if (!(*mhpmevent_val & of_bit_mask)) {
- *mhpmevent_val |= of_bit_mask;
+ if (pmu_hpmevent_set_of_if_clear(env, ctr_idx)) {
riscv_cpu_update_mip(env, MIP_LCOFIP, BOOL_TO_MASK(1));
}
}
@@ -502,7 +532,9 @@ int riscv_pmu_setup_timer(CPURISCVState *env, uint64_t value, uint32_t ctr_idx)
RISCVCPU *cpu = env_archcpu(env);
PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
- if (!riscv_pmu_counter_valid(cpu, ctr_idx) || !cpu->cfg.ext_sscofpmf) {
+ /* No need to setup a timer if LCOFI is disabled when OF is set */
+ if (!riscv_pmu_counter_valid(cpu, ctr_idx) || !cpu->cfg.ext_sscofpmf ||
+ pmu_hpmevent_is_of_set(env, ctr_idx)) {
return -1;
}
--
2.34.1
^ permalink raw reply related [flat|nested] 27+ messages in thread
* Re: [PATCH v7 11/11] target/riscv: Do not setup pmu timer if OF is disabled
2024-06-26 23:57 ` [PATCH v7 11/11] target/riscv: Do not setup pmu timer if OF is disabled Atish Patra
@ 2024-07-01 19:39 ` Daniel Henrique Barboza
0 siblings, 0 replies; 27+ messages in thread
From: Daniel Henrique Barboza @ 2024-07-01 19:39 UTC (permalink / raw)
To: Atish Patra, qemu-riscv, qemu-devel
Cc: Rajnesh Kanwal, palmer, liwei1518, zhiwei_liu, bin.meng,
alistair.francis
On 6/26/24 8:57 PM, Atish Patra wrote:
> The timer setup function is invoked in both the hpmcounter write and
> mcountinhibit write paths. If the OF bit is set, the LCOFI interrupt
> is disabled. There is no benefit in setting up the QEMU timer until
> the OF bit is cleared, indicating that interrupts can be fired again.
>
> Signed-off-by: Atish Patra <atishp@rivosinc.com>
> ---
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
> target/riscv/pmu.c | 56 ++++++++++++++++++++++++++++++++++++++++++------------
> 1 file changed, 44 insertions(+), 12 deletions(-)
>
> diff --git a/target/riscv/pmu.c b/target/riscv/pmu.c
> index a4729f6c53bb..3cc0b3648cad 100644
> --- a/target/riscv/pmu.c
> +++ b/target/riscv/pmu.c
> @@ -416,14 +416,49 @@ int riscv_pmu_update_event_map(CPURISCVState *env, uint64_t value,
> return 0;
> }
>
> +static bool pmu_hpmevent_is_of_set(CPURISCVState *env, uint32_t ctr_idx)
> +{
> + target_ulong mhpmevent_val;
> + uint64_t of_bit_mask;
> +
> + if (riscv_cpu_mxl(env) == MXL_RV32) {
> + mhpmevent_val = env->mhpmeventh_val[ctr_idx];
> + of_bit_mask = MHPMEVENTH_BIT_OF;
> + } else {
> + mhpmevent_val = env->mhpmevent_val[ctr_idx];
> + of_bit_mask = MHPMEVENT_BIT_OF;
> + }
> +
> + return get_field(mhpmevent_val, of_bit_mask);
> +}
> +
> +static bool pmu_hpmevent_set_of_if_clear(CPURISCVState *env, uint32_t ctr_idx)
> +{
> + target_ulong *mhpmevent_val;
> + uint64_t of_bit_mask;
> +
> + if (riscv_cpu_mxl(env) == MXL_RV32) {
> + mhpmevent_val = &env->mhpmeventh_val[ctr_idx];
> + of_bit_mask = MHPMEVENTH_BIT_OF;
> + } else {
> + mhpmevent_val = &env->mhpmevent_val[ctr_idx];
> + of_bit_mask = MHPMEVENT_BIT_OF;
> + }
> +
> + if (!get_field(*mhpmevent_val, of_bit_mask)) {
> + *mhpmevent_val |= of_bit_mask;
> + return true;
> + }
> +
> + return false;
> +}
> +
> static void pmu_timer_trigger_irq(RISCVCPU *cpu,
> enum riscv_pmu_event_idx evt_idx)
> {
> uint32_t ctr_idx;
> CPURISCVState *env = &cpu->env;
> PMUCTRState *counter;
> - target_ulong *mhpmevent_val;
> - uint64_t of_bit_mask;
> int64_t irq_trigger_at;
> uint64_t curr_ctr_val, curr_ctrh_val;
> uint64_t ctr_val;
> @@ -439,12 +474,9 @@ static void pmu_timer_trigger_irq(RISCVCPU *cpu,
> return;
> }
>
> - if (riscv_cpu_mxl(env) == MXL_RV32) {
> - mhpmevent_val = &env->mhpmeventh_val[ctr_idx];
> - of_bit_mask = MHPMEVENTH_BIT_OF;
> - } else {
> - mhpmevent_val = &env->mhpmevent_val[ctr_idx];
> - of_bit_mask = MHPMEVENT_BIT_OF;
> + /* Generate interrupt only if OF bit is clear */
> + if (pmu_hpmevent_is_of_set(env, ctr_idx)) {
> + return;
> }
>
> counter = &env->pmu_ctrs[ctr_idx];
> @@ -477,9 +509,7 @@ static void pmu_timer_trigger_irq(RISCVCPU *cpu,
> }
>
> if (cpu->pmu_avail_ctrs & BIT(ctr_idx)) {
> - /* Generate interrupt only if OF bit is clear */
> - if (!(*mhpmevent_val & of_bit_mask)) {
> - *mhpmevent_val |= of_bit_mask;
> + if (pmu_hpmevent_set_of_if_clear(env, ctr_idx)) {
> riscv_cpu_update_mip(env, MIP_LCOFIP, BOOL_TO_MASK(1));
> }
> }
> @@ -502,7 +532,9 @@ int riscv_pmu_setup_timer(CPURISCVState *env, uint64_t value, uint32_t ctr_idx)
> RISCVCPU *cpu = env_archcpu(env);
> PMUCTRState *counter = &env->pmu_ctrs[ctr_idx];
>
> - if (!riscv_pmu_counter_valid(cpu, ctr_idx) || !cpu->cfg.ext_sscofpmf) {
> + /* No need to setup a timer if LCOFI is disabled when OF is set */
> + if (!riscv_pmu_counter_valid(cpu, ctr_idx) || !cpu->cfg.ext_sscofpmf ||
> + pmu_hpmevent_is_of_set(env, ctr_idx)) {
> return -1;
> }
>
>
^ permalink raw reply [flat|nested] 27+ messages in thread