* [Qemu-devel] [PATCH v4 0/3] target/arm: ARMv8.1-LOR
@ 2018-12-10 15:04 Richard Henderson
2018-12-10 15:04 ` [Qemu-devel] [PATCH v4 1/3] target/arm: Introduce arm_hcr_el2_eff Richard Henderson
From: Richard Henderson @ 2018-12-10 15:04 UTC
To: qemu-devel; +Cc: peter.maydell
Changes since v3:
* Removed the extra VI/VF/VSE change.
Changes since v2:
* Rebased on target-arm.next, which now includes 7 of the 10 patches.
* Fixes based on patch review.
r~
Richard Henderson (3):
target/arm: Introduce arm_hcr_el2_eff
target/arm: Use arm_hcr_el2_eff more places
target/arm: Implement the ARMv8.1-LOR extension
target/arm/cpu.h | 72 +++++------------
hw/intc/arm_gicv3_cpuif.c | 21 ++---
target/arm/cpu64.c | 1 +
target/arm/helper.c | 153 +++++++++++++++++++++++++++++++++----
target/arm/op_helper.c | 14 ++--
target/arm/translate-a64.c | 12 +++
6 files changed, 187 insertions(+), 86 deletions(-)
--
2.17.2
* [Qemu-devel] [PATCH v4 1/3] target/arm: Introduce arm_hcr_el2_eff
2018-12-10 15:04 [Qemu-devel] [PATCH v4 0/3] target/arm: ARMv8.1-LOR Richard Henderson
@ 2018-12-10 15:04 ` Richard Henderson
2018-12-10 15:05 ` [Qemu-devel] [PATCH v4 2/3] target/arm: Use arm_hcr_el2_eff more places Richard Henderson
From: Richard Henderson @ 2018-12-10 15:04 UTC
To: qemu-devel; +Cc: peter.maydell
Replace arm_hcr_el2_{fmo,imo,amo} with a more general routine
that also takes SCR_EL3.NS (aka arm_is_secure_below_el3) into
account, as documented for the plethora of bits in HCR_EL2.
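For illustration, the pattern the hunks below switch to is to fetch the
effective value once and then test individual bits, roughly (a sketch using
the names from the icc_dir_write hunk, not new code):

    uint64_t hcr_el2 = arm_hcr_el2_eff(env);   /* 0 when Secure below EL3 */
    bool route_fiq_to_el2 = hcr_el2 & HCR_FMO;
    bool route_irq_to_el2 = hcr_el2 & HCR_IMO;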
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
v3: Fix set of bits affected by just TGE.
Reorder the bits to ascending order.
Zap VF,VI,VSE when !TGE and ![FIA]MO.
v4: Revert VF,VI,VSE change.
---
target/arm/cpu.h | 67 +++++++++------------------------------
hw/intc/arm_gicv3_cpuif.c | 21 ++++++------
target/arm/helper.c | 66 ++++++++++++++++++++++++++++++++------
3 files changed, 83 insertions(+), 71 deletions(-)
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index 11ec2cce76..05ac883b6b 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -1729,6 +1729,14 @@ static inline bool arm_is_secure(CPUARMState *env)
}
#endif
+/**
+ * arm_hcr_el2_eff(): Return the effective value of HCR_EL2.
+ * E.g. when in secure state, fields in HCR_EL2 are suppressed,
+ * "for all purposes other than a direct read or write access of HCR_EL2."
+ * Not included here is HCR_RW.
+ */
+uint64_t arm_hcr_el2_eff(CPUARMState *env);
+
/* Return true if the specified exception level is running in AArch64 state. */
static inline bool arm_el_is_aa64(CPUARMState *env, int el)
{
@@ -2414,54 +2422,6 @@ bool write_cpustate_to_list(ARMCPU *cpu);
# define TARGET_VIRT_ADDR_SPACE_BITS 32
#endif
-/**
- * arm_hcr_el2_imo(): Return the effective value of HCR_EL2.IMO.
- * Depending on the values of HCR_EL2.E2H and TGE, this may be
- * "behaves as 1 for all purposes other than direct read/write" or
- * "behaves as 0 for all purposes other than direct read/write"
- */
-static inline bool arm_hcr_el2_imo(CPUARMState *env)
-{
- switch (env->cp15.hcr_el2 & (HCR_TGE | HCR_E2H)) {
- case HCR_TGE:
- return true;
- case HCR_TGE | HCR_E2H:
- return false;
- default:
- return env->cp15.hcr_el2 & HCR_IMO;
- }
-}
-
-/**
- * arm_hcr_el2_fmo(): Return the effective value of HCR_EL2.FMO.
- */
-static inline bool arm_hcr_el2_fmo(CPUARMState *env)
-{
- switch (env->cp15.hcr_el2 & (HCR_TGE | HCR_E2H)) {
- case HCR_TGE:
- return true;
- case HCR_TGE | HCR_E2H:
- return false;
- default:
- return env->cp15.hcr_el2 & HCR_FMO;
- }
-}
-
-/**
- * arm_hcr_el2_amo(): Return the effective value of HCR_EL2.AMO.
- */
-static inline bool arm_hcr_el2_amo(CPUARMState *env)
-{
- switch (env->cp15.hcr_el2 & (HCR_TGE | HCR_E2H)) {
- case HCR_TGE:
- return true;
- case HCR_TGE | HCR_E2H:
- return false;
- default:
- return env->cp15.hcr_el2 & HCR_AMO;
- }
-}
-
static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
unsigned int target_el)
{
@@ -2470,6 +2430,7 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
bool secure = arm_is_secure(env);
bool pstate_unmasked;
int8_t unmasked = 0;
+ uint64_t hcr_el2;
/* Don't take exceptions if they target a lower EL.
* This check should catch any exceptions that would not be taken but left
@@ -2479,6 +2440,8 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
return false;
}
+ hcr_el2 = arm_hcr_el2_eff(env);
+
switch (excp_idx) {
case EXCP_FIQ:
pstate_unmasked = !(env->daif & PSTATE_F);
@@ -2489,13 +2452,13 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
break;
case EXCP_VFIQ:
- if (secure || !arm_hcr_el2_fmo(env) || (env->cp15.hcr_el2 & HCR_TGE)) {
+ if (secure || !(hcr_el2 & HCR_FMO) || (hcr_el2 & HCR_TGE)) {
/* VFIQs are only taken when hypervized and non-secure. */
return false;
}
return !(env->daif & PSTATE_F);
case EXCP_VIRQ:
- if (secure || !arm_hcr_el2_imo(env) || (env->cp15.hcr_el2 & HCR_TGE)) {
+ if (secure || !(hcr_el2 & HCR_IMO) || (hcr_el2 & HCR_TGE)) {
/* VIRQs are only taken when hypervized and non-secure. */
return false;
}
@@ -2534,7 +2497,7 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
* to the CPSR.F setting otherwise we further assess the state
* below.
*/
- hcr = arm_hcr_el2_fmo(env);
+ hcr = hcr_el2 & HCR_FMO;
scr = (env->cp15.scr_el3 & SCR_FIQ);
/* When EL3 is 32-bit, the SCR.FW bit controls whether the
@@ -2551,7 +2514,7 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
* when setting the target EL, so it does not have a further
* affect here.
*/
- hcr = arm_hcr_el2_imo(env);
+ hcr = hcr_el2 & HCR_IMO;
scr = false;
break;
default:
diff --git a/hw/intc/arm_gicv3_cpuif.c b/hw/intc/arm_gicv3_cpuif.c
index 068a8e8e9b..cbad6037f1 100644
--- a/hw/intc/arm_gicv3_cpuif.c
+++ b/hw/intc/arm_gicv3_cpuif.c
@@ -85,8 +85,8 @@ static bool icv_access(CPUARMState *env, int hcr_flags)
* * access if NS EL1 and either IMO or FMO == 1:
* CTLR, DIR, PMR, RPR
*/
- bool flagmatch = ((hcr_flags & HCR_IMO) && arm_hcr_el2_imo(env)) ||
- ((hcr_flags & HCR_FMO) && arm_hcr_el2_fmo(env));
+ uint64_t hcr_el2 = arm_hcr_el2_eff(env);
+ bool flagmatch = hcr_el2 & hcr_flags & (HCR_IMO | HCR_FMO);
return flagmatch && arm_current_el(env) == 1
&& !arm_is_secure_below_el3(env);
@@ -1552,8 +1552,9 @@ static void icc_dir_write(CPUARMState *env, const ARMCPRegInfo *ri,
/* No need to include !IsSecure in route_*_to_el2 as it's only
* tested in cases where we know !IsSecure is true.
*/
- route_fiq_to_el2 = arm_hcr_el2_fmo(env);
- route_irq_to_el2 = arm_hcr_el2_imo(env);
+ uint64_t hcr_el2 = arm_hcr_el2_eff(env);
+ route_fiq_to_el2 = hcr_el2 & HCR_FMO;
+ route_irq_to_el2 = hcr_el2 & HCR_IMO;
switch (arm_current_el(env)) {
case 3:
@@ -1895,8 +1896,8 @@ static CPAccessResult gicv3_irqfiq_access(CPUARMState *env,
if ((env->cp15.scr_el3 & (SCR_FIQ | SCR_IRQ)) == (SCR_FIQ | SCR_IRQ)) {
switch (el) {
case 1:
- if (arm_is_secure_below_el3(env) ||
- (arm_hcr_el2_imo(env) == 0 && arm_hcr_el2_fmo(env) == 0)) {
+ /* Note that arm_hcr_el2_eff takes secure state into account. */
+ if ((arm_hcr_el2_eff(env) & (HCR_IMO | HCR_FMO)) == 0) {
r = CP_ACCESS_TRAP_EL3;
}
break;
@@ -1936,8 +1937,8 @@ static CPAccessResult gicv3_dir_access(CPUARMState *env,
static CPAccessResult gicv3_sgi_access(CPUARMState *env,
const ARMCPRegInfo *ri, bool isread)
{
- if ((arm_hcr_el2_imo(env) || arm_hcr_el2_fmo(env)) &&
- arm_current_el(env) == 1 && !arm_is_secure_below_el3(env)) {
+ if (arm_current_el(env) == 1 &&
+ (arm_hcr_el2_eff(env) & (HCR_IMO | HCR_FMO)) != 0) {
/* Takes priority over a possible EL3 trap */
return CP_ACCESS_TRAP_EL2;
}
@@ -1961,7 +1962,7 @@ static CPAccessResult gicv3_fiq_access(CPUARMState *env,
if (env->cp15.scr_el3 & SCR_FIQ) {
switch (el) {
case 1:
- if (arm_is_secure_below_el3(env) || !arm_hcr_el2_fmo(env)) {
+ if ((arm_hcr_el2_eff(env) & HCR_FMO) == 0) {
r = CP_ACCESS_TRAP_EL3;
}
break;
@@ -2000,7 +2001,7 @@ static CPAccessResult gicv3_irq_access(CPUARMState *env,
if (env->cp15.scr_el3 & SCR_IRQ) {
switch (el) {
case 1:
- if (arm_is_secure_below_el3(env) || !arm_hcr_el2_imo(env)) {
+ if ((arm_hcr_el2_eff(env) & HCR_IMO) == 0) {
r = CP_ACCESS_TRAP_EL3;
}
break;
diff --git a/target/arm/helper.c b/target/arm/helper.c
index 037cece133..95d59e07fb 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -1331,9 +1331,10 @@ static void csselr_write(CPUARMState *env, const ARMCPRegInfo *ri,
static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
CPUState *cs = ENV_GET_CPU(env);
+ uint64_t hcr_el2 = arm_hcr_el2_eff(env);
uint64_t ret = 0;
- if (arm_hcr_el2_imo(env)) {
+ if (hcr_el2 & HCR_IMO) {
if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
ret |= CPSR_I;
}
@@ -1343,7 +1344,7 @@ static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
}
}
- if (arm_hcr_el2_fmo(env)) {
+ if (hcr_el2 & HCR_FMO) {
if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) {
ret |= CPSR_F;
}
@@ -4008,6 +4009,51 @@ static void hcr_writelow(CPUARMState *env, const ARMCPRegInfo *ri,
hcr_write(env, NULL, value);
}
+/*
+ * Return the effective value of HCR_EL2.
+ * Bits that are not included here:
+ * RW (read from SCR_EL3.RW as needed)
+ */
+uint64_t arm_hcr_el2_eff(CPUARMState *env)
+{
+ uint64_t ret = env->cp15.hcr_el2;
+
+ if (arm_is_secure_below_el3(env)) {
+ /*
+ * "This register has no effect if EL2 is not enabled in the
+ * current Security state". This is ARMv8.4-SecEL2 speak for
+ * !(SCR_EL3.NS==1 || SCR_EL3.EEL2==1).
+ *
+ * Prior to that, the language was "In an implementation that
+ * includes EL3, when the value of SCR_EL3.NS is 0 the PE behaves
+ * as if this field is 0 for all purposes other than a direct
+ * read or write access of HCR_EL2". With lots of enumeration
+ * on a per-field basis. In current QEMU, this condition
+ * is arm_is_secure_below_el3.
+ *
+ * Since the v8.4 language applies to the entire register, and
+ * appears to be backward compatible, use that.
+ */
+ ret = 0;
+ } else if (ret & HCR_TGE) {
+ /* These bits are up-to-date as of ARMv8.4. */
+ if (ret & HCR_E2H) {
+ ret &= ~(HCR_VM | HCR_FMO | HCR_IMO | HCR_AMO |
+ HCR_BSU_MASK | HCR_DC | HCR_TWI | HCR_TWE |
+ HCR_TID0 | HCR_TID2 | HCR_TPCP | HCR_TPU |
+ HCR_TDZ | HCR_CD | HCR_ID | HCR_MIOCNCE);
+ } else {
+ ret |= HCR_FMO | HCR_IMO | HCR_AMO;
+ }
+ ret &= ~(HCR_SWIO | HCR_PTW | HCR_VF | HCR_VI | HCR_VSE |
+ HCR_FB | HCR_TID1 | HCR_TID3 | HCR_TSC | HCR_TACR |
+ HCR_TSW | HCR_TTLB | HCR_TVM | HCR_HCD | HCR_TRVM |
+ HCR_TLOR);
+ }
+
+ return ret;
+}
+
static const ARMCPRegInfo el2_cp_reginfo[] = {
{ .name = "HCR_EL2", .state = ARM_CP_STATE_AA64,
.type = ARM_CP_IO,
@@ -6526,12 +6572,13 @@ uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
uint32_t cur_el, bool secure)
{
CPUARMState *env = cs->env_ptr;
- int rw;
- int scr;
- int hcr;
+ bool rw;
+ bool scr;
+ bool hcr;
int target_el;
/* Is the highest EL AArch64? */
- int is64 = arm_feature(env, ARM_FEATURE_AARCH64);
+ bool is64 = arm_feature(env, ARM_FEATURE_AARCH64);
+ uint64_t hcr_el2;
if (arm_feature(env, ARM_FEATURE_EL3)) {
rw = ((env->cp15.scr_el3 & SCR_RW) == SCR_RW);
@@ -6543,18 +6590,19 @@ uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
rw = is64;
}
+ hcr_el2 = arm_hcr_el2_eff(env);
switch (excp_idx) {
case EXCP_IRQ:
scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ);
- hcr = arm_hcr_el2_imo(env);
+ hcr = hcr_el2 & HCR_IMO;
break;
case EXCP_FIQ:
scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ);
- hcr = arm_hcr_el2_fmo(env);
+ hcr = hcr_el2 & HCR_FMO;
break;
default:
scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA);
- hcr = arm_hcr_el2_amo(env);
+ hcr = hcr_el2 & HCR_AMO;
break;
};
--
2.17.2
* [Qemu-devel] [PATCH v4 2/3] target/arm: Use arm_hcr_el2_eff more places
2018-12-10 15:04 [Qemu-devel] [PATCH v4 0/3] target/arm: ARMv8.1-LOR Richard Henderson
2018-12-10 15:04 ` [Qemu-devel] [PATCH v4 1/3] target/arm: Introduce arm_hcr_el2_eff Richard Henderson
@ 2018-12-10 15:05 ` Richard Henderson
2018-12-10 15:05 ` [Qemu-devel] [PATCH v4 3/3] target/arm: Implement the ARMv8.1-LOR extension Richard Henderson
2018-12-11 17:38 ` [Qemu-devel] [PATCH v4 0/3] target/arm: ARMv8.1-LOR Peter Maydell
From: Richard Henderson @ 2018-12-10 15:05 UTC
To: qemu-devel; +Cc: peter.maydell
Since arm_hcr_el2_eff includes a check against
arm_is_secure_below_el3, we can often remove a
nearby check against secure state.
In some cases, move the call to arm_hcr_el2_eff
to the end of a short-circuit logical sequence.
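As a concrete example of both points, the raise_exception hunk below turns

    if ((env->cp15.hcr_el2 & HCR_TGE) &&
        target_el == 1 && !arm_is_secure(env)) {

into

    if (target_el == 1 && (arm_hcr_el2_eff(env) & HCR_TGE)) {

The explicit secure check disappears, and the function call now sits after
the cheap integer comparison so that the short-circuit skips it whenever
target_el != 1.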
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
v3: Do not change regime_translation_disabled.
---
target/arm/helper.c | 12 +++++-------
target/arm/op_helper.c | 14 ++++++--------
2 files changed, 11 insertions(+), 15 deletions(-)
diff --git a/target/arm/helper.c b/target/arm/helper.c
index 95d59e07fb..d6f8be9f4e 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -448,7 +448,7 @@ static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri,
int el = arm_current_el(env);
bool mdcr_el2_tdosa = (env->cp15.mdcr_el2 & MDCR_TDOSA) ||
(env->cp15.mdcr_el2 & MDCR_TDE) ||
- (env->cp15.hcr_el2 & HCR_TGE);
+ (arm_hcr_el2_eff(env) & HCR_TGE);
if (el < 2 && mdcr_el2_tdosa && !arm_is_secure_below_el3(env)) {
return CP_ACCESS_TRAP_EL2;
@@ -468,7 +468,7 @@ static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri,
int el = arm_current_el(env);
bool mdcr_el2_tdra = (env->cp15.mdcr_el2 & MDCR_TDRA) ||
(env->cp15.mdcr_el2 & MDCR_TDE) ||
- (env->cp15.hcr_el2 & HCR_TGE);
+ (arm_hcr_el2_eff(env) & HCR_TGE);
if (el < 2 && mdcr_el2_tdra && !arm_is_secure_below_el3(env)) {
return CP_ACCESS_TRAP_EL2;
@@ -488,7 +488,7 @@ static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri,
int el = arm_current_el(env);
bool mdcr_el2_tda = (env->cp15.mdcr_el2 & MDCR_TDA) ||
(env->cp15.mdcr_el2 & MDCR_TDE) ||
- (env->cp15.hcr_el2 & HCR_TGE);
+ (arm_hcr_el2_eff(env) & HCR_TGE);
if (el < 2 && mdcr_el2_tda && !arm_is_secure_below_el3(env)) {
return CP_ACCESS_TRAP_EL2;
@@ -4566,8 +4566,7 @@ int sve_exception_el(CPUARMState *env, int el)
if (disabled) {
/* route_to_el2 */
return (arm_feature(env, ARM_FEATURE_EL2)
- && !arm_is_secure(env)
- && (env->cp15.hcr_el2 & HCR_TGE) ? 2 : 1);
+ && (arm_hcr_el2_eff(env) & HCR_TGE) ? 2 : 1);
}
/* Check CPACR.FPEN. */
@@ -6216,9 +6215,8 @@ static int bad_mode_switch(CPUARMState *env, int mode, CPSRWriteType write_type)
* and CPS are treated as illegal mode changes.
*/
if (write_type == CPSRWriteByInstr &&
- (env->cp15.hcr_el2 & HCR_TGE) &&
(env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON &&
- !arm_is_secure_below_el3(env)) {
+ (arm_hcr_el2_eff(env) & HCR_TGE)) {
return 1;
}
return 0;
diff --git a/target/arm/op_helper.c b/target/arm/op_helper.c
index 0d6e89e474..ef72361a36 100644
--- a/target/arm/op_helper.c
+++ b/target/arm/op_helper.c
@@ -33,8 +33,7 @@ void raise_exception(CPUARMState *env, uint32_t excp,
{
CPUState *cs = CPU(arm_env_get_cpu(env));
- if ((env->cp15.hcr_el2 & HCR_TGE) &&
- target_el == 1 && !arm_is_secure(env)) {
+ if (target_el == 1 && (arm_hcr_el2_eff(env) & HCR_TGE)) {
/*
* Redirect NS EL1 exceptions to NS EL2. These are reported with
* their original syndrome register value, with the exception of
@@ -428,9 +427,9 @@ static inline int check_wfx_trap(CPUARMState *env, bool is_wfe)
* No need for ARM_FEATURE check as if HCR_EL2 doesn't exist the
* bits will be zero indicating no trap.
*/
- if (cur_el < 2 && !arm_is_secure(env)) {
- mask = (is_wfe) ? HCR_TWE : HCR_TWI;
- if (env->cp15.hcr_el2 & mask) {
+ if (cur_el < 2) {
+ mask = is_wfe ? HCR_TWE : HCR_TWI;
+ if (arm_hcr_el2_eff(env) & mask) {
return 2;
}
}
@@ -995,7 +994,7 @@ void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome)
exception_target_el(env));
}
- if (!secure && cur_el == 1 && (env->cp15.hcr_el2 & HCR_TSC)) {
+ if (cur_el == 1 && (arm_hcr_el2_eff(env) & HCR_TSC)) {
/* In NS EL1, HCR controlled routing to EL2 has priority over SMD.
* We also want an EL2 guest to be able to forbid its EL1 from
* making PSCI calls into QEMU's "firmware" via HCR.TSC.
@@ -1098,8 +1097,7 @@ void HELPER(exception_return)(CPUARMState *env)
goto illegal_return;
}
- if (new_el == 1 && (env->cp15.hcr_el2 & HCR_TGE)
- && !arm_is_secure_below_el3(env)) {
+ if (new_el == 1 && (arm_hcr_el2_eff(env) & HCR_TGE)) {
goto illegal_return;
}
--
2.17.2
* [Qemu-devel] [PATCH v4 3/3] target/arm: Implement the ARMv8.1-LOR extension
2018-12-10 15:04 [Qemu-devel] [PATCH v4 0/3] target/arm: ARMv8.1-LOR Richard Henderson
2018-12-10 15:04 ` [Qemu-devel] [PATCH v4 1/3] target/arm: Introduce arm_hcr_el2_eff Richard Henderson
2018-12-10 15:05 ` [Qemu-devel] [PATCH v4 2/3] target/arm: Use arm_hcr_el2_eff more places Richard Henderson
@ 2018-12-10 15:05 ` Richard Henderson
2018-12-11 17:38 ` [Qemu-devel] [PATCH v4 0/3] target/arm: ARMv8.1-LOR Peter Maydell
From: Richard Henderson @ 2018-12-10 15:05 UTC
To: qemu-devel; +Cc: peter.maydell
Provide a trivial implementation with zero limited ordering regions,
which causes the LDLAR and STLLR instructions to devolve into the
LDAR and STLR instructions from the base ARMv8.0 instruction set.
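In outline: ID_AA64MMFR1.LO advertises the feature, every LOR* register is
implemented as a constant-zero register (so LORID_EL1 reports zero regions
and zero descriptors), and LDLAR/STLLR fall through into the existing
LDAR/STLR decode paths. A minimal sketch of the feature test and its
advertisement for -cpu max, both taken from the hunks below:

    /* cpu.h: ARMv8.1-LOR is present iff ID_AA64MMFR1.LO != 0 */
    static inline bool isar_feature_aa64_lor(const ARMISARegisters *id)
    {
        return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, LO) != 0;
    }

    /* cpu64.c: advertise it on the "max" CPU */
    t = cpu->isar.id_aa64mmfr1;
    t = FIELD_DP64(t, ID_AA64MMFR1, LO, 1);
    cpu->isar.id_aa64mmfr1 = t;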
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
v2: Mark LORID_EL1 read-only.
Add TLOR access checks.
Conditionally unmask TLOR in hcr/scr_write.
v3: Fix isar_feature_aa64_lor.
Split out access_lor_ns.
Defer all {E2H,TGE} vs TLOR testing to arm_hcr_el2_eff.
---
target/arm/cpu.h | 5 +++
target/arm/cpu64.c | 1 +
target/arm/helper.c | 75 ++++++++++++++++++++++++++++++++++++++
target/arm/translate-a64.c | 12 ++++++
4 files changed, 93 insertions(+)
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index 05ac883b6b..c943f35dd9 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -3340,6 +3340,11 @@ static inline bool isar_feature_aa64_sve(const ARMISARegisters *id)
return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, SVE) != 0;
}
+static inline bool isar_feature_aa64_lor(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, LO) != 0;
+}
+
/*
* Forward to the above feature tests given an ARMCPU pointer.
*/
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
index 1a4289c9dd..1d57be0c91 100644
--- a/target/arm/cpu64.c
+++ b/target/arm/cpu64.c
@@ -326,6 +326,7 @@ static void aarch64_max_initfn(Object *obj)
t = cpu->isar.id_aa64mmfr1;
t = FIELD_DP64(t, ID_AA64MMFR1, HPDS, 1); /* HPD */
+ t = FIELD_DP64(t, ID_AA64MMFR1, LO, 1);
cpu->isar.id_aa64mmfr1 = t;
/* Replicate the same data to the 32-bit id registers. */
diff --git a/target/arm/helper.c b/target/arm/helper.c
index d6f8be9f4e..644599b29d 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -1281,6 +1281,7 @@ static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
/* Begin with base v8.0 state. */
uint32_t valid_mask = 0x3fff;
+ ARMCPU *cpu = arm_env_get_cpu(env);
if (arm_el_is_aa64(env, 3)) {
value |= SCR_FW | SCR_AW; /* these two bits are RES1. */
@@ -1303,6 +1304,9 @@ static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
valid_mask &= ~SCR_SMD;
}
}
+ if (cpu_isar_feature(aa64_lor, cpu)) {
+ valid_mask |= SCR_TLOR;
+ }
/* Clear all-context RES0 bits. */
value &= valid_mask;
@@ -3963,6 +3967,9 @@ static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
*/
valid_mask &= ~HCR_TSC;
}
+ if (cpu_isar_feature(aa64_lor, cpu)) {
+ valid_mask |= HCR_TLOR;
+ }
/* Clear RES0 bits. */
value &= valid_mask;
@@ -5018,6 +5025,42 @@ static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri)
return pfr0;
}
+/* Shared logic between LORID and the rest of the LOR* registers.
+ * Secure state has already been dealt with.
+ */
+static CPAccessResult access_lor_ns(CPUARMState *env)
+{
+ int el = arm_current_el(env);
+
+ if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TLOR)) {
+ return CP_ACCESS_TRAP_EL2;
+ }
+ if (el < 3 && (env->cp15.scr_el3 & SCR_TLOR)) {
+ return CP_ACCESS_TRAP_EL3;
+ }
+ return CP_ACCESS_OK;
+}
+
+static CPAccessResult access_lorid(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ if (arm_is_secure_below_el3(env)) {
+ /* Access ok in secure mode. */
+ return CP_ACCESS_OK;
+ }
+ return access_lor_ns(env);
+}
+
+static CPAccessResult access_lor_other(CPUARMState *env,
+ const ARMCPRegInfo *ri, bool isread)
+{
+ if (arm_is_secure_below_el3(env)) {
+ /* Access denied in secure mode. */
+ return CP_ACCESS_TRAP;
+ }
+ return access_lor_ns(env);
+}
+
void register_cp_regs_for_features(ARMCPU *cpu)
{
/* Register all the coprocessor registers based on feature bits */
@@ -5759,6 +5802,38 @@ void register_cp_regs_for_features(ARMCPU *cpu)
define_one_arm_cp_reg(cpu, &sctlr);
}
+ if (cpu_isar_feature(aa64_lor, cpu)) {
+ /*
+ * A trivial implementation of ARMv8.1-LOR leaves all of these
+ * registers fixed at 0, which indicates that there are zero
+ * supported Limited Ordering regions.
+ */
+ static const ARMCPRegInfo lor_reginfo[] = {
+ { .name = "LORSA_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 0,
+ .access = PL1_RW, .accessfn = access_lor_other,
+ .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "LOREA_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 1,
+ .access = PL1_RW, .accessfn = access_lor_other,
+ .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "LORN_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 2,
+ .access = PL1_RW, .accessfn = access_lor_other,
+ .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "LORC_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 3,
+ .access = PL1_RW, .accessfn = access_lor_other,
+ .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "LORID_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 7,
+ .access = PL1_R, .accessfn = access_lorid,
+ .type = ARM_CP_CONST, .resetvalue = 0 },
+ REGINFO_SENTINEL
+ };
+ define_arm_cp_regs(cpu, lor_reginfo);
+ }
+
if (cpu_isar_feature(aa64_sve, cpu)) {
define_one_arm_cp_reg(cpu, &zcr_el1_reginfo);
if (arm_feature(env, ARM_FEATURE_EL2)) {
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index fd36425f1a..e1da1e4d6f 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -2290,6 +2290,12 @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
}
return;
+ case 0x8: /* STLLR */
+ if (!dc_isar_feature(aa64_lor, s)) {
+ break;
+ }
+ /* StoreLORelease is the same as Store-Release for QEMU. */
+ /* fall through */
case 0x9: /* STLR */
/* Generate ISS for non-exclusive accesses including LASR. */
if (rn == 31) {
@@ -2301,6 +2307,12 @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
return;
+ case 0xc: /* LDLAR */
+ if (!dc_isar_feature(aa64_lor, s)) {
+ break;
+ }
+ /* LoadLOAcquire is the same as Load-Acquire for QEMU. */
+ /* fall through */
case 0xd: /* LDAR */
/* Generate ISS for non-exclusive accesses including LASR. */
if (rn == 31) {
--
2.17.2
* Re: [Qemu-devel] [PATCH v4 0/3] target/arm: ARMv8.1-LOR
2018-12-10 15:04 [Qemu-devel] [PATCH v4 0/3] target/arm: ARMv8.1-LOR Richard Henderson
2018-12-10 15:05 ` [Qemu-devel] [PATCH v4 3/3] target/arm: Implement the ARMv8.1-LOR extension Richard Henderson
@ 2018-12-11 17:38 ` Peter Maydell
From: Peter Maydell @ 2018-12-11 17:38 UTC
To: Richard Henderson; +Cc: QEMU Developers
On Mon, 10 Dec 2018 at 15:05, Richard Henderson
<richard.henderson@linaro.org> wrote:
>
> Changes since v3:
> * Removed the extra VI/VF/VSE change.
>
> Changes since v2:
> * Rebased on target-arm.next, which now includes 7 of the 10 patches.
> * Fixes based on patch review.
>
Applied to target-arm.next, thanks.
-- PMM