From: Richard Henderson <richard.henderson@linaro.org>
To: qemu-devel@nongnu.org
Cc: qemu-arm@nongnu.org, Peter Maydell <peter.maydell@linaro.org>
Subject: [PULL 17/20] target/arm: Support 64-bit event counters for FEAT_PMUv3p5
Date: Wed, 14 Sep 2022 12:52:14 +0100
Message-ID: <20220914115217.117532-19-richard.henderson@linaro.org>
In-Reply-To: <20220914115217.117532-1-richard.henderson@linaro.org>

From: Peter Maydell <peter.maydell@linaro.org>

With FEAT_PMUv3p5, the event counters are now 64 bits rather than 32
bits.  (Previously, only the cycle counter could be 64 bits; the other
event counters were always 32 bits.)  For any given event counter,
whether the overflow event is noted for overflow from bit 31 or from
bit 63 is controlled by a combination of PMCR.LP, MDCR_EL2.HLP and
MDCR_EL2.HPMN.
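
As a rough sketch of how that decision works (this mirrors the
pmevcntr_is_64_bit() helper the patch adds below; MDCR_HLP and PMCRLP
are the new bit definitions it introduces):

    /* Does event counter 'counter' overflow at bit 63 rather than bit 31? */
    static bool pmevcntr_is_64_bit(CPUARMState *env, int counter)
    {
        if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
            return false;                   /* pre-v3p5: always 32-bit */
        }
        if (arm_feature(env, ARM_FEATURE_EL2)) {
            /* Counters at or above MDCR_EL2.HPMN belong to EL2: HLP applies */
            int hpmn = env->cp15.mdcr_el2 & MDCR_HPMN;
            if (hpmn != 0 && counter >= hpmn) {
                return env->cp15.mdcr_el2 & MDCR_HLP;
            }
        }
        return env->cp15.c9_pmcr & PMCRLP;  /* otherwise PMCR.LP decides */
    }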

Implement the 64-bit event counter handling.  We choose to make our
counters always 64 bits, and mask out the top 32 bits on read or
write of PMXEVCNTR for CPUs which don't have FEAT_PMUv3p5.
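
A minimal sketch of that masking, as done in the pmevcntr_write() and
pmevcntr_read() hunks below:

    if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
        /* Before FEAT_PMUv3p5, top 32 bits of event counters are RES0 */
        value &= MAKE_64BIT_MASK(0, 32);
    }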

(Note that the changes to pmevcntr_op_start() and
pmevcntr_op_finish() bring their logic closer into line with that of
pmccntr_op_start() and pmccntr_op_finish(), which already had to cope
with the overflow being either at 32 or 64 bits.)
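
For reference, both paths now use the usual top-bit 1->0 transition test
for overflow, with the checked bit selected by the 64-bit decision above.
This is only a sketch; old_count and new_count are illustrative names,
not variables from the patch:

    uint64_t overflow_mask = pmevcntr_is_64_bit(env, counter) ?
        1ULL << 63 : 1ULL << 31;
    if (old_count & ~new_count & overflow_mask) {
        env->cp15.c9_pmovsr |= (1 << counter);  /* note the overflow */
        pmu_update_irq(env);
    }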

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20220822132358.3524971-10-peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/arm/cpu.h       |  1 +
 target/arm/internals.h |  3 +-
 target/arm/helper.c    | 62 ++++++++++++++++++++++++++++++++++++------
 3 files changed, 57 insertions(+), 9 deletions(-)

diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index 41e74df104..33cdbc0143 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -1334,6 +1334,7 @@ FIELD(CPTR_EL3, TTA, 20, 1)
 FIELD(CPTR_EL3, TAM, 30, 1)
 FIELD(CPTR_EL3, TCPAC, 31, 1)
 
+#define MDCR_HLP      (1U << 26)  /* MDCR_EL2 */
 #define MDCR_SCCD     (1U << 23)  /* MDCR_EL3 */
 #define MDCR_HCCD     (1U << 23)  /* MDCR_EL2 */
 #define MDCR_EPMAD    (1U << 21)
diff --git a/target/arm/internals.h b/target/arm/internals.h
index 83526166de..bf60cd5f84 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -1256,6 +1256,7 @@ enum MVEECIState {
 /* Definitions for the PMU registers */
 #define PMCRN_MASK  0xf800
 #define PMCRN_SHIFT 11
+#define PMCRLP  0x80
 #define PMCRLC  0x40
 #define PMCRDP  0x20
 #define PMCRX   0x10
@@ -1267,7 +1268,7 @@ enum MVEECIState {
  * Mask of PMCR bits writable by guest (not including WO bits like C, P,
  * which can be written as 1 to trigger behaviour but which stay RAZ).
  */
-#define PMCR_WRITABLE_MASK (PMCRLC | PMCRDP | PMCRX | PMCRD | PMCRE)
+#define PMCR_WRITABLE_MASK (PMCRLP | PMCRLC | PMCRDP | PMCRX | PMCRD | PMCRE)
 
 #define PMXEVTYPER_P          0x80000000
 #define PMXEVTYPER_U          0x40000000
diff --git a/target/arm/helper.c b/target/arm/helper.c
index 0d1f23de09..1a57d2e1d6 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -1084,7 +1084,8 @@ static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
  * We use these to decide whether we need to wrap a write to MDCR_EL2
  * or MDCR_EL3 in pmu_op_start()/pmu_op_finish() calls.
  */
-#define MDCR_EL2_PMU_ENABLE_BITS (MDCR_HPME | MDCR_HPMD | MDCR_HPMN | MDCR_HCCD)
+#define MDCR_EL2_PMU_ENABLE_BITS \
+    (MDCR_HPME | MDCR_HPMD | MDCR_HPMN | MDCR_HCCD | MDCR_HLP)
 #define MDCR_EL3_PMU_ENABLE_BITS (MDCR_SPME | MDCR_SCCD)
 
 /* Returns true if the counter (pass 31 for PMCCNTR) should count events using
@@ -1193,6 +1194,32 @@ static bool pmccntr_clockdiv_enabled(CPUARMState *env)
     return (env->cp15.c9_pmcr & (PMCRD | PMCRLC)) == PMCRD;
 }
 
+static bool pmevcntr_is_64_bit(CPUARMState *env, int counter)
+{
+    /* Return true if the specified event counter is configured to be 64 bit */
+
+    /* This isn't intended to be used with the cycle counter */
+    assert(counter < 31);
+
+    if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
+        return false;
+    }
+
+    if (arm_feature(env, ARM_FEATURE_EL2)) {
+        /*
+         * MDCR_EL2.HLP still applies even when EL2 is disabled in the
+         * current security state, so we don't use arm_mdcr_el2_eff() here.
+         */
+        bool hlp = env->cp15.mdcr_el2 & MDCR_HLP;
+        int hpmn = env->cp15.mdcr_el2 & MDCR_HPMN;
+
+        if (hpmn != 0 && counter >= hpmn) {
+            return hlp;
+        }
+    }
+    return env->cp15.c9_pmcr & PMCRLP;
+}
+
 /*
  * Ensure c15_ccnt is the guest-visible count so that operations such as
  * enabling/disabling the counter or filtering, modifying the count itself,
@@ -1269,9 +1296,11 @@ static void pmevcntr_op_start(CPUARMState *env, uint8_t counter)
     }
 
     if (pmu_counter_enabled(env, counter)) {
-        uint32_t new_pmevcntr = count - env->cp15.c14_pmevcntr_delta[counter];
+        uint64_t new_pmevcntr = count - env->cp15.c14_pmevcntr_delta[counter];
+        uint64_t overflow_mask = pmevcntr_is_64_bit(env, counter) ?
+            1ULL << 63 : 1ULL << 31;
 
-        if (env->cp15.c14_pmevcntr[counter] & ~new_pmevcntr & INT32_MIN) {
+        if (env->cp15.c14_pmevcntr[counter] & ~new_pmevcntr & overflow_mask) {
             env->cp15.c9_pmovsr |= (1 << counter);
             pmu_update_irq(env);
         }
@@ -1286,9 +1315,13 @@ static void pmevcntr_op_finish(CPUARMState *env, uint8_t counter)
 #ifndef CONFIG_USER_ONLY
         uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
         uint16_t event_idx = supported_event_map[event];
-        uint64_t delta = UINT32_MAX -
-            (uint32_t)env->cp15.c14_pmevcntr[counter] + 1;
-        int64_t overflow_in = pm_events[event_idx].ns_per_count(delta);
+        uint64_t delta = -(env->cp15.c14_pmevcntr[counter] + 1);
+        int64_t overflow_in;
+
+        if (!pmevcntr_is_64_bit(env, counter)) {
+            delta = (uint32_t)delta;
+        }
+        overflow_in = pm_events[event_idx].ns_per_count(delta);
 
         if (overflow_in > 0) {
             int64_t overflow_at;
@@ -1375,6 +1408,8 @@ static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
 {
     unsigned int i;
+    uint64_t overflow_mask, new_pmswinc;
+
     for (i = 0; i < pmu_num_counters(env); i++) {
         /* Increment a counter's count iff: */
         if ((value & (1 << i)) && /* counter's bit is set */
@@ -1388,9 +1423,12 @@ static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri,
              * Detect if this write causes an overflow since we can't predict
              * PMSWINC overflows like we can for other events
              */
-            uint32_t new_pmswinc = env->cp15.c14_pmevcntr[i] + 1;
+            new_pmswinc = env->cp15.c14_pmevcntr[i] + 1;
 
-            if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & INT32_MIN) {
+            overflow_mask = pmevcntr_is_64_bit(env, i) ?
+                1ULL << 63 : 1ULL << 31;
+
+            if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & overflow_mask) {
                 env->cp15.c9_pmovsr |= (1 << i);
                 pmu_update_irq(env);
             }
@@ -1597,6 +1635,10 @@ static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri)
 static void pmevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value, uint8_t counter)
 {
+    if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
+        /* Before FEAT_PMUv3p5, top 32 bits of event counters are RES0 */
+        value &= MAKE_64BIT_MASK(0, 32);
+    }
     if (counter < pmu_num_counters(env)) {
         pmevcntr_op_start(env, counter);
         env->cp15.c14_pmevcntr[counter] = value;
@@ -1616,6 +1658,10 @@ static uint64_t pmevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri,
         pmevcntr_op_start(env, counter);
         ret = env->cp15.c14_pmevcntr[counter];
         pmevcntr_op_finish(env, counter);
+        if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
+            /* Before FEAT_PMUv3p5, top 32 bits of event counters are RES0 */
+            ret &= MAKE_64BIT_MASK(0, 32);
+        }
         return ret;
     } else {
       /* We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
-- 
2.34.1




Thread overview: 28+ messages
2022-09-14 11:51 [PULL 00/20] target-arm.next patch queue Richard Henderson
2022-09-14 11:51 ` [PULL 01/20] target/arm: Add cortex-a35 Richard Henderson
2022-09-14 11:51 ` [PATCH] target/arm: Do alignment check when translation disabled Richard Henderson
2022-09-22 15:31   ` Peter Maydell
2022-09-28 15:52     ` Richard Henderson
2022-09-14 11:51 ` [PULL 02/20] hw/arm/bcm2835_property: Add support for RPI_FIRMWARE_FRAMEBUFFER_GET_NUM_DISPLAYS Richard Henderson
2022-09-14 11:52 ` [PULL 03/20] target/arm: Make cpregs 0, c0, c{3-15}, {0-7} correctly RAZ in v8 Richard Henderson
2022-09-14 11:52 ` [PULL 04/20] target/arm: Sort KVM reads of AArch32 ID registers into encoding order Richard Henderson
2022-09-14 11:52 ` [PULL 05/20] target/arm: Implement ID_MMFR5 Richard Henderson
2022-09-14 11:52 ` [PULL 06/20] target/arm: Implement ID_DFR1 Richard Henderson
2022-09-14 11:52 ` [PULL 07/20] target/arm: Advertise FEAT_ETS for '-cpu max' Richard Henderson
2022-09-14 11:52 ` [PULL 08/20] target/arm: Add missing space in comment Richard Henderson
2022-09-14 11:52 ` [PULL 09/20] target/arm: Don't corrupt high half of PMOVSR when cycle counter overflows Richard Henderson
2022-09-14 11:52 ` [PULL 10/20] target/arm: Correct value returned by pmu_counter_mask() Richard Henderson
2022-09-14 11:52 ` [PULL 11/20] target/arm: Don't mishandle count when enabling or disabling PMU counters Richard Henderson
2022-09-20 16:45   ` Thomas Huth
2022-09-23 10:50     ` Peter Maydell
2022-09-14 11:52 ` [PULL 12/20] target/arm: Ignore PMCR.D when PMCR.LC is set Richard Henderson
2022-09-14 11:52 ` [PULL 13/20] target/arm: Honour MDCR_EL2.HPMD in Secure EL2 Richard Henderson
2022-09-14 11:52 ` [PULL 14/20] target/arm: Detect overflow when calculating next PMU interrupt Richard Henderson
2022-09-14 11:52 ` [PULL 15/20] target/arm: Rename pmu_8_n feature test functions Richard Henderson
2022-09-14 11:52 ` [PULL 16/20] target/arm: Implement FEAT_PMUv3p5 cycle counter disable bits Richard Henderson
2022-09-14 11:52 ` Richard Henderson [this message]
2022-09-14 11:52 ` [PULL 18/20] target/arm: Report FEAT_PMUv3p5 for TCG '-cpu max' Richard Henderson
2022-09-14 11:52 ` [PULL 19/20] target/arm: Remove useless TARGET_BIG_ENDIAN check in armv7m_load_kernel() Richard Henderson
2022-09-14 11:52 ` [PULL 20/20] target/arm: Make boards pass base address to armv7m_load_kernel() Richard Henderson
2022-09-17 20:13 ` [PULL 00/20] target-arm.next patch queue Stefan Hajnoczi
2022-09-20 13:06   ` Peter Maydell
