* [Qemu-devel] [PULL 01/45] target/arm: Forbid unprivileged mode for M Baseline
From: Peter Maydell @ 2018-08-14 18:17 UTC
To: qemu-devel
From: Julia Suvorova <jusual@mail.ru>
MSR handling is the only place where CONTROL.nPRIV is modified, so
restricting that write to cores with the Main Extension is enough to
keep Baseline cores permanently in privileged mode.
Signed-off-by: Julia Suvorova <jusual@mail.ru>
Message-id: 20180705222622.17139-1-jusual@mail.ru
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
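
For illustration only, the guest-visible effect boils down to the
following standalone mock (not the QEMU code; the mock_cpu type and the
msr_control() helper are invented for this sketch):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define R_V7M_CONTROL_NPRIV_MASK 1u   /* CONTROL.nPRIV is bit 0 */

struct mock_cpu {
    bool has_main_extension;          /* ARM_FEATURE_M_MAIN in QEMU */
    uint32_t control;
};

/* Mirrors the guarded write added below: only cores with the Main
 * Extension may change CONTROL.nPRIV via MSR. */
static void msr_control(struct mock_cpu *cpu, uint32_t val)
{
    if (cpu->has_main_extension) {
        cpu->control &= ~R_V7M_CONTROL_NPRIV_MASK;
        cpu->control |= val & R_V7M_CONTROL_NPRIV_MASK;
    }
    /* On Baseline the write is ignored and the core stays privileged. */
}

int main(void)
{
    struct mock_cpu baseline = { .has_main_extension = false };
    struct mock_cpu mainline = { .has_main_extension = true };

    msr_control(&baseline, 1);        /* guest tries to drop privilege */
    msr_control(&mainline, 1);

    printf("Baseline nPRIV = %u\n", (unsigned)(baseline.control & 1u));  /* 0 */
    printf("Mainline nPRIV = %u\n", (unsigned)(mainline.control & 1u));  /* 1 */
    return 0;
}
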
target/arm/helper.c | 12 ++++++++----
1 file changed, 8 insertions(+), 4 deletions(-)
diff --git a/target/arm/helper.c b/target/arm/helper.c
index 66afb08ee0f..a6d642f4ae5 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -10728,8 +10728,10 @@ void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
write_v7m_control_spsel_for_secstate(env,
val & R_V7M_CONTROL_SPSEL_MASK,
M_REG_NS);
- env->v7m.control[M_REG_NS] &= ~R_V7M_CONTROL_NPRIV_MASK;
- env->v7m.control[M_REG_NS] |= val & R_V7M_CONTROL_NPRIV_MASK;
+ if (arm_feature(env, ARM_FEATURE_M_MAIN)) {
+ env->v7m.control[M_REG_NS] &= ~R_V7M_CONTROL_NPRIV_MASK;
+ env->v7m.control[M_REG_NS] |= val & R_V7M_CONTROL_NPRIV_MASK;
+ }
return;
case 0x98: /* SP_NS */
{
@@ -10822,8 +10824,10 @@ void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
!arm_v7m_is_handler_mode(env)) {
write_v7m_control_spsel(env, (val & R_V7M_CONTROL_SPSEL_MASK) != 0);
}
- env->v7m.control[env->v7m.secure] &= ~R_V7M_CONTROL_NPRIV_MASK;
- env->v7m.control[env->v7m.secure] |= val & R_V7M_CONTROL_NPRIV_MASK;
+ if (arm_feature(env, ARM_FEATURE_M_MAIN)) {
+ env->v7m.control[env->v7m.secure] &= ~R_V7M_CONTROL_NPRIV_MASK;
+ env->v7m.control[env->v7m.secure] |= val & R_V7M_CONTROL_NPRIV_MASK;
+ }
break;
default:
bad_reg:
--
2.18.0
* [Qemu-devel] [PULL 02/45] nvic: Handle ARMv6-M SCS reserved registers
From: Peter Maydell @ 2018-08-14 18:17 UTC
To: qemu-devel
From: Julia Suvorova <jusual@mail.ru>
Handle the SCS reserved registers listed in the ARMv6-M ARM, section
D3.6.1. All reserved registers are RAZ/WI. ARM_FEATURE_M_MAIN is used
for the checks, because these registers are also reserved in ARMv8-M
Baseline.
Signed-off-by: Julia Suvorova <jusual@mail.ru>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
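
For illustration only, the RAZ/WI behaviour described above reduces to
the following standalone mock (not the NVIC code; mock_nvic, read_bfar()
and write_bfar() are invented for this sketch, using BFAR as one example
of a Main-Extension-only register):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct mock_nvic {
    bool has_main_extension;   /* ARM_FEATURE_M_MAIN in QEMU */
    uint32_t bfar;
};

static uint32_t read_bfar(const struct mock_nvic *s)
{
    if (!s->has_main_extension) {
        return 0;              /* RAZ: reserved on v6-M and v8-M Baseline */
    }
    return s->bfar;
}

static void write_bfar(struct mock_nvic *s, uint32_t value)
{
    if (!s->has_main_extension) {
        return;                /* WI: the write is ignored */
    }
    s->bfar = value;
}

int main(void)
{
    struct mock_nvic baseline = { .has_main_extension = false };

    write_bfar(&baseline, 0x20001000u);
    printf("BFAR on a Baseline core reads as 0x%08x\n",
           (unsigned)read_bfar(&baseline));   /* 0x00000000 */
    return 0;
}
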
hw/intc/armv7m_nvic.c | 51 +++++++++++++++++++++++++++++++++++++++++--
1 file changed, 49 insertions(+), 2 deletions(-)
diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c
index cd1e7f17299..33396ce2854 100644
--- a/hw/intc/armv7m_nvic.c
+++ b/hw/intc/armv7m_nvic.c
@@ -867,6 +867,9 @@ static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs)
}
return val;
case 0xd10: /* System Control. */
+ if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) {
+ goto bad_offset;
+ }
return cpu->env.v7m.scr[attrs.secure];
case 0xd14: /* Configuration Control. */
/* The BFHFNMIGN bit is the only non-banked bit; we
@@ -988,12 +991,21 @@ static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs)
}
return val;
case 0xd2c: /* Hard Fault Status. */
+ if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
+ goto bad_offset;
+ }
return cpu->env.v7m.hfsr;
case 0xd30: /* Debug Fault Status. */
return cpu->env.v7m.dfsr;
case 0xd34: /* MMFAR MemManage Fault Address */
+ if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
+ goto bad_offset;
+ }
return cpu->env.v7m.mmfar[attrs.secure];
case 0xd38: /* Bus Fault Address. */
+ if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
+ goto bad_offset;
+ }
return cpu->env.v7m.bfar;
case 0xd3c: /* Aux Fault Status. */
/* TODO: Implement fault status registers. */
@@ -1288,6 +1300,9 @@ static void nvic_writel(NVICState *s, uint32_t offset, uint32_t value,
}
break;
case 0xd10: /* System Control. */
+ if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) {
+ goto bad_offset;
+ }
/* We don't implement deep-sleep so these bits are RAZ/WI.
* The other bits in the register are banked.
* QEMU's implementation ignores SEVONPEND and SLEEPONEXIT, which
@@ -1389,15 +1404,24 @@ static void nvic_writel(NVICState *s, uint32_t offset, uint32_t value,
nvic_irq_update(s);
break;
case 0xd2c: /* Hard Fault Status. */
+ if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
+ goto bad_offset;
+ }
cpu->env.v7m.hfsr &= ~value; /* W1C */
break;
case 0xd30: /* Debug Fault Status. */
cpu->env.v7m.dfsr &= ~value; /* W1C */
break;
case 0xd34: /* Mem Manage Address. */
+ if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
+ goto bad_offset;
+ }
cpu->env.v7m.mmfar[attrs.secure] = value;
return;
case 0xd38: /* Bus Fault Address. */
+ if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
+ goto bad_offset;
+ }
cpu->env.v7m.bfar = value;
return;
case 0xd3c: /* Aux Fault Status. */
@@ -1627,6 +1651,11 @@ static void nvic_writel(NVICState *s, uint32_t offset, uint32_t value,
case 0xf00: /* Software Triggered Interrupt Register */
{
int excnum = (value & 0x1ff) + NVIC_FIRST_IRQ;
+
+ if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
+ goto bad_offset;
+ }
+
if (excnum < s->num_irq) {
armv7m_nvic_set_pending(s, excnum, false);
}
@@ -1771,7 +1800,13 @@ static MemTxResult nvic_sysreg_read(void *opaque, hwaddr addr,
}
}
break;
- case 0xd18 ... 0xd23: /* System Handler Priority (SHPR1, SHPR2, SHPR3) */
+ case 0xd18: /* System Handler Priority (SHPR1) */
+ if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_MAIN)) {
+ val = 0;
+ break;
+ }
+ /* fall through */
+ case 0xd1c ... 0xd23: /* System Handler Priority (SHPR2, SHPR3) */
val = 0;
for (i = 0; i < size; i++) {
unsigned hdlidx = (offset - 0xd14) + i;
@@ -1784,6 +1819,10 @@ static MemTxResult nvic_sysreg_read(void *opaque, hwaddr addr,
}
break;
case 0xd28 ... 0xd2b: /* Configurable Fault Status (CFSR) */
+ if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_MAIN)) {
+ val = 0;
+ break;
+ };
/* The BFSR bits [15:8] are shared between security states
* and we store them in the NS copy
*/
@@ -1876,7 +1915,12 @@ static MemTxResult nvic_sysreg_write(void *opaque, hwaddr addr,
}
nvic_irq_update(s);
return MEMTX_OK;
- case 0xd18 ... 0xd23: /* System Handler Priority (SHPR1, SHPR2, SHPR3) */
+ case 0xd18: /* System Handler Priority (SHPR1) */
+ if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_MAIN)) {
+ return MEMTX_OK;
+ }
+ /* fall through */
+ case 0xd1c ... 0xd23: /* System Handler Priority (SHPR2, SHPR3) */
for (i = 0; i < size; i++) {
unsigned hdlidx = (offset - 0xd14) + i;
int newprio = extract32(value, i * 8, 8);
@@ -1890,6 +1934,9 @@ static MemTxResult nvic_sysreg_write(void *opaque, hwaddr addr,
nvic_irq_update(s);
return MEMTX_OK;
case 0xd28 ... 0xd2b: /* Configurable Fault Status (CFSR) */
+ if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_MAIN)) {
+ return MEMTX_OK;
+ }
/* All bits are W1C, so construct 32 bit value with 0s in
* the parts not written by the access size
*/
--
2.18.0
* [Qemu-devel] [PULL 03/45] arm: Add ARMv6-M programmer's model support
From: Peter Maydell @ 2018-08-14 18:17 UTC
To: qemu-devel
From: Julia Suvorova <jusual@mail.ru>
Forbid stack alignment change (CCR).
Reserve the FAULTMASK and BASEPRI registers.
Report any fault as a HardFault: MemManage, BusFault and
UsageFault are disabled, so they are always escalated to HardFault (SHCSR).
Signed-off-by: Julia Suvorova <jusual@mail.ru>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-id: 20180718095628.26442-1-jusual@mail.ru
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
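
The escalation consequence can be illustrated with a standalone mock
(deliberately simplified and not the NVIC code, which also takes
priorities into account; the deliver() helper is invented for this
sketch):

#include <stdbool.h>
#include <stdio.h>

/* ARMv7-M architectural exception numbers */
enum exc {
    EXC_HARDFAULT = 3,
    EXC_MEMMANAGE = 4,
    EXC_BUSFAULT = 5,
    EXC_USAGEFAULT = 6,
};

/* A configurable fault whose vector is disabled is escalated to HardFault. */
static enum exc deliver(enum exc requested, bool vector_enabled)
{
    return vector_enabled ? requested : EXC_HARDFAULT;
}

int main(void)
{
    /* With SHCSR treated as reserved on Baseline, MemManage/BusFault/
     * UsageFault can never be enabled, so every such fault escalates. */
    printf("MemManage on Baseline -> exception %d\n",
           deliver(EXC_MEMMANAGE, false));   /* 3 (HardFault) */
    printf("MemManage enabled via SHCSR -> exception %d\n",
           deliver(EXC_MEMMANAGE, true));    /* 4 */
    return 0;
}
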
hw/intc/armv7m_nvic.c | 10 ++++++++++
target/arm/cpu.c | 4 ++++
target/arm/helper.c | 13 +++++++++++--
3 files changed, 25 insertions(+), 2 deletions(-)
diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c
index 33396ce2854..49e8e2b94d4 100644
--- a/hw/intc/armv7m_nvic.c
+++ b/hw/intc/armv7m_nvic.c
@@ -879,6 +879,9 @@ static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs)
val |= cpu->env.v7m.ccr[M_REG_NS] & R_V7M_CCR_BFHFNMIGN_MASK;
return val;
case 0xd24: /* System Handler Control and State (SHCSR) */
+ if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) {
+ goto bad_offset;
+ }
val = 0;
if (attrs.secure) {
if (s->sec_vectors[ARMV7M_EXCP_MEM].active) {
@@ -1312,6 +1315,10 @@ static void nvic_writel(NVICState *s, uint32_t offset, uint32_t value,
cpu->env.v7m.scr[attrs.secure] = value;
break;
case 0xd14: /* Configuration Control. */
+ if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
+ goto bad_offset;
+ }
+
/* Enforce RAZ/WI on reserved and must-RAZ/WI bits */
value &= (R_V7M_CCR_STKALIGN_MASK |
R_V7M_CCR_BFHFNMIGN_MASK |
@@ -1336,6 +1343,9 @@ static void nvic_writel(NVICState *s, uint32_t offset, uint32_t value,
cpu->env.v7m.ccr[attrs.secure] = value;
break;
case 0xd24: /* System Handler Control and State (SHCSR) */
+ if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) {
+ goto bad_offset;
+ }
if (attrs.secure) {
s->sec_vectors[ARMV7M_EXCP_MEM].active = (value & (1 << 0)) != 0;
/* Secure HardFault active bit cannot be written */
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index 64a8005a4b9..3848ef46aa9 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -231,6 +231,10 @@ static void arm_cpu_reset(CPUState *s)
env->v7m.ccr[M_REG_NS] |= R_V7M_CCR_NONBASETHRDENA_MASK;
env->v7m.ccr[M_REG_S] |= R_V7M_CCR_NONBASETHRDENA_MASK;
}
+ if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
+ env->v7m.ccr[M_REG_NS] |= R_V7M_CCR_UNALIGN_TRP_MASK;
+ env->v7m.ccr[M_REG_S] |= R_V7M_CCR_UNALIGN_TRP_MASK;
+ }
/* Unlike A/R profile, M profile defines the reset LR value */
env->regs[14] = 0xffffffff;
diff --git a/target/arm/helper.c b/target/arm/helper.c
index a6d642f4ae5..63a70065617 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -10710,13 +10710,13 @@ void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
env->v7m.primask[M_REG_NS] = val & 1;
return;
case 0x91: /* BASEPRI_NS */
- if (!env->v7m.secure) {
+ if (!env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_MAIN)) {
return;
}
env->v7m.basepri[M_REG_NS] = val & 0xff;
return;
case 0x93: /* FAULTMASK_NS */
- if (!env->v7m.secure) {
+ if (!env->v7m.secure || !arm_feature(env, ARM_FEATURE_M_MAIN)) {
return;
}
env->v7m.faultmask[M_REG_NS] = val & 1;
@@ -10800,9 +10800,15 @@ void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
env->v7m.primask[env->v7m.secure] = val & 1;
break;
case 17: /* BASEPRI */
+ if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
+ goto bad_reg;
+ }
env->v7m.basepri[env->v7m.secure] = val & 0xff;
break;
case 18: /* BASEPRI_MAX */
+ if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
+ goto bad_reg;
+ }
val &= 0xff;
if (val != 0 && (val < env->v7m.basepri[env->v7m.secure]
|| env->v7m.basepri[env->v7m.secure] == 0)) {
@@ -10810,6 +10816,9 @@ void HELPER(v7m_msr)(CPUARMState *env, uint32_t maskreg, uint32_t val)
}
break;
case 19: /* FAULTMASK */
+ if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
+ goto bad_reg;
+ }
env->v7m.faultmask[env->v7m.secure] = val & 1;
break;
case 20: /* CONTROL */
--
2.18.0
* [Qemu-devel] [PULL 04/45] nvic: Change NVIC to support ARMv6-M
From: Peter Maydell @ 2018-08-14 18:17 UTC
To: qemu-devel
From: Julia Suvorova <jusual@mail.ru>
The differences from the ARMv7-M NVIC are:
* ARMv6-M only supports up to 32 external interrupts
(already a configurable feature). The ICTR is reserved.
* The Interrupt Active Bit Register is reserved.
* ARMv6-M supports 4 priority levels, versus 256 in ARMv7-M
(see the short worked example below).
Signed-off-by: Julia Suvorova <jusual@mail.ru>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
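
The 4-level limit corresponds to num_prio_bits = 2 in the diff below.
The masking can be checked with a small standalone program; the
MAKE_64BIT_MASK macro is copied here so the sketch builds outside QEMU:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Same shape as QEMU's MAKE_64BIT_MASK(shift, length) */
#define MAKE_64BIT_MASK(shift, length) \
    (((~0ULL) >> (64 - (length))) << (shift))

int main(void)
{
    unsigned num_prio_bits = 2;   /* ARMv6-M; 8 for ARMv7-M */
    uint64_t mask = MAKE_64BIT_MASK(8 - num_prio_bits, num_prio_bits);

    printf("priority mask = 0x%02" PRIx64 "\n", mask);   /* 0xc0 */

    /* Every written priority collapses onto 0x00, 0x40, 0x80 or 0xc0,
     * i.e. exactly 4 usable levels. */
    for (unsigned prio = 0; prio < 0x100; prio += 0x30) {
        printf("set_prio(0x%02x) -> 0x%02" PRIx64 "\n",
               prio, (uint64_t)(prio & mask));
    }
    return 0;
}
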
include/hw/intc/armv7m_nvic.h | 1 +
hw/intc/armv7m_nvic.c | 21 ++++++++++++++++++---
2 files changed, 19 insertions(+), 3 deletions(-)
diff --git a/include/hw/intc/armv7m_nvic.h b/include/hw/intc/armv7m_nvic.h
index 8bc29112e3a..a472c9b8f05 100644
--- a/include/hw/intc/armv7m_nvic.h
+++ b/include/hw/intc/armv7m_nvic.h
@@ -57,6 +57,7 @@ typedef struct NVICState {
VecInfo sec_vectors[NVIC_INTERNAL_VECTORS];
/* The PRIGROUP field in AIRCR is banked */
uint32_t prigroup[M_REG_NUM_BANKS];
+ uint8_t num_prio_bits;
/* v8M NVIC_ITNS state (stored as a bool per bit) */
bool itns[NVIC_MAX_VECTORS];
diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c
index 49e8e2b94d4..351b69ab40b 100644
--- a/hw/intc/armv7m_nvic.c
+++ b/hw/intc/armv7m_nvic.c
@@ -420,6 +420,8 @@ static void set_prio(NVICState *s, unsigned irq, bool secure, uint8_t prio)
assert(irq > ARMV7M_EXCP_NMI); /* only use for configurable prios */
assert(irq < s->num_irq);
+ prio &= MAKE_64BIT_MASK(8 - s->num_prio_bits, s->num_prio_bits);
+
if (secure) {
assert(exc_is_banked(irq));
s->sec_vectors[irq].prio = prio;
@@ -779,6 +781,9 @@ static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs)
switch (offset) {
case 4: /* Interrupt Control Type. */
+ if (!arm_feature(&cpu->env, ARM_FEATURE_V7)) {
+ goto bad_offset;
+ }
return ((s->num_irq - NVIC_FIRST_IRQ) / 32) - 1;
case 0xc: /* CPPWR */
if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
@@ -1278,9 +1283,12 @@ static void nvic_writel(NVICState *s, uint32_t offset, uint32_t value,
"Setting VECTRESET when not in DEBUG mode "
"is UNPREDICTABLE\n");
}
- s->prigroup[attrs.secure] = extract32(value,
- R_V7M_AIRCR_PRIGROUP_SHIFT,
- R_V7M_AIRCR_PRIGROUP_LENGTH);
+ if (arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) {
+ s->prigroup[attrs.secure] =
+ extract32(value,
+ R_V7M_AIRCR_PRIGROUP_SHIFT,
+ R_V7M_AIRCR_PRIGROUP_LENGTH);
+ }
if (attrs.secure) {
/* These bits are only writable by secure */
cpu->env.v7m.aircr = value &
@@ -1791,6 +1799,11 @@ static MemTxResult nvic_sysreg_read(void *opaque, hwaddr addr,
break;
case 0x300 ... 0x33f: /* NVIC Active */
val = 0;
+
+ if (!arm_feature(&s->cpu->env, ARM_FEATURE_V7)) {
+ break;
+ }
+
startvec = 8 * (offset - 0x300) + NVIC_FIRST_IRQ; /* vector # */
for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
@@ -2260,6 +2273,8 @@ static void armv7m_nvic_realize(DeviceState *dev, Error **errp)
/* include space for internal exception vectors */
s->num_irq += NVIC_FIRST_IRQ;
+ s->num_prio_bits = arm_feature(&s->cpu->env, ARM_FEATURE_V7) ? 8 : 2;
+
object_property_set_bool(OBJECT(&s->systick[M_REG_NS]), true,
"realized", &err);
if (err != NULL) {
--
2.18.0
* [Qemu-devel] [PULL 05/45] accel/tcg: Pass read access type through to io_readx()
From: Peter Maydell @ 2018-08-14 18:17 UTC
To: qemu-devel
The io_readx() function needs to know whether the load it is
doing is an MMU_DATA_LOAD or an MMU_INST_FETCH, so that it
can pass the right value to the cpu_transaction_failed()
function. Plumb this information through from the softmmu
code.
This is currently not often going to give the wrong answer,
because usually instruction fetches go via get_page_addr_code().
However once we switch over to handling execution from non-RAM by
creating single-insn TBs, the path for an insn fetch to generate
a bus error will be through cpu_ld*_code() and io_readx(),
so without this change we will generate a d-side fault when we
should generate an i-side fault.
We also have to pass the access type via a CPU struct global
down to unassigned_mem_read(), for the benefit of the targets
which still use the cpu_unassigned_access() hook (m68k, mips,
sparc, xtensa).
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Tested-by: Cédric Le Goater <clg@kaod.org>
Message-id: 20180710160013.26559-2-peter.maydell@linaro.org
---
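
For illustration only, the reason the access type matters can be reduced
to a standalone mock (the enum mirrors QEMU's MMUAccessType; the
fault_kind() helper is invented for this sketch):

#include <stdio.h>

/* Local copy of the three access kinds, mirroring QEMU's MMUAccessType */
typedef enum { MMU_DATA_LOAD, MMU_DATA_STORE, MMU_INST_FETCH } MMUAccessType;

static const char *fault_kind(MMUAccessType type)
{
    /* A failed instruction fetch must be reported as an i-side fault;
     * everything else is a d-side fault. */
    return type == MMU_INST_FETCH ? "i-side fault" : "d-side fault";
}

int main(void)
{
    /* Before this patch io_readx() always reported MMU_DATA_LOAD, so a
     * bus error on a cpu_ld*_code() fetch would be mis-classified. */
    printf("MMU_DATA_LOAD  -> %s\n", fault_kind(MMU_DATA_LOAD));
    printf("MMU_INST_FETCH -> %s\n", fault_kind(MMU_INST_FETCH));
    return 0;
}
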
accel/tcg/softmmu_template.h | 11 +++++++----
include/qom/cpu.h | 6 ++++++
accel/tcg/cputlb.c | 5 +++--
memory.c | 3 ++-
4 files changed, 18 insertions(+), 7 deletions(-)
diff --git a/accel/tcg/softmmu_template.h b/accel/tcg/softmmu_template.h
index badbf148803..f060a693d41 100644
--- a/accel/tcg/softmmu_template.h
+++ b/accel/tcg/softmmu_template.h
@@ -99,11 +99,12 @@ static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
size_t mmu_idx, size_t index,
target_ulong addr,
uintptr_t retaddr,
- bool recheck)
+ bool recheck,
+ MMUAccessType access_type)
{
CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index];
return io_readx(env, iotlbentry, mmu_idx, addr, retaddr, recheck,
- DATA_SIZE);
+ access_type, DATA_SIZE);
}
#endif
@@ -140,7 +141,8 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
/* ??? Note that the io helpers always read data in the target
byte ordering. We should push the LE/BE request down into io. */
res = glue(io_read, SUFFIX)(env, mmu_idx, index, addr, retaddr,
- tlb_addr & TLB_RECHECK);
+ tlb_addr & TLB_RECHECK,
+ READ_ACCESS_TYPE);
res = TGT_LE(res);
return res;
}
@@ -207,7 +209,8 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
/* ??? Note that the io helpers always read data in the target
byte ordering. We should push the LE/BE request down into io. */
res = glue(io_read, SUFFIX)(env, mmu_idx, index, addr, retaddr,
- tlb_addr & TLB_RECHECK);
+ tlb_addr & TLB_RECHECK,
+ READ_ACCESS_TYPE);
res = TGT_BE(res);
return res;
}
diff --git a/include/qom/cpu.h b/include/qom/cpu.h
index bd796579ee4..ecf6ed556a9 100644
--- a/include/qom/cpu.h
+++ b/include/qom/cpu.h
@@ -386,6 +386,12 @@ struct CPUState {
*/
uintptr_t mem_io_pc;
vaddr mem_io_vaddr;
+ /*
+ * This is only needed for the legacy cpu_unassigned_access() hook;
+ * when all targets using it have been converted to use
+ * cpu_transaction_failed() instead it can be removed.
+ */
+ MMUAccessType mem_io_access_type;
int kvm_fd;
struct KVMState *kvm_state;
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 563fa30117e..51b1193044c 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -789,7 +789,7 @@ static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
int mmu_idx,
target_ulong addr, uintptr_t retaddr,
- bool recheck, int size)
+ bool recheck, MMUAccessType access_type, int size)
{
CPUState *cpu = ENV_GET_CPU(env);
hwaddr mr_offset;
@@ -831,6 +831,7 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
}
cpu->mem_io_vaddr = addr;
+ cpu->mem_io_access_type = access_type;
if (mr->global_locking && !qemu_mutex_iothread_locked()) {
qemu_mutex_lock_iothread();
@@ -843,7 +844,7 @@ static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
section->offset_within_address_space -
section->offset_within_region;
- cpu_transaction_failed(cpu, physaddr, addr, size, MMU_DATA_LOAD,
+ cpu_transaction_failed(cpu, physaddr, addr, size, access_type,
mmu_idx, iotlbentry->attrs, r, retaddr);
}
if (locked) {
diff --git a/memory.c b/memory.c
index e9cd4469688..2ea16e7bfb0 100644
--- a/memory.c
+++ b/memory.c
@@ -1249,7 +1249,8 @@ static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
if (current_cpu != NULL) {
- cpu_unassigned_access(current_cpu, addr, false, false, 0, size);
+ bool is_exec = current_cpu->mem_io_access_type == MMU_INST_FETCH;
+ cpu_unassigned_access(current_cpu, addr, false, is_exec, 0, size);
}
return 0;
}
--
2.18.0
* [Qemu-devel] [PULL 06/45] accel/tcg: Handle get_page_addr_code() returning -1 in hashtable lookups
From: Peter Maydell @ 2018-08-14 18:17 UTC
To: qemu-devel
When we support execution from non-RAM MMIO regions, get_page_addr_code()
will return -1 to indicate that there is no RAM at the requested address.
Handle this in the cpu-exec TB hashtable lookup code, treating it as
"no match found".
Note that the call to get_page_addr_code() in tb_lookup_cmp() needs
no changes -- a return of -1 will already correctly result in the
function returning false.
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Emilio G. Cota <cota@braap.org>
Tested-by: Cédric Le Goater <clg@kaod.org>
Message-id: 20180710160013.26559-3-peter.maydell@linaro.org
---
accel/tcg/cpu-exec.c | 3 +++
1 file changed, 3 insertions(+)
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index c738b7f7d6e..6bcb6d99bd7 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -332,6 +332,9 @@ TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
desc.trace_vcpu_dstate = *cpu->trace_dstate;
desc.pc = pc;
phys_pc = get_page_addr_code(desc.env, pc);
+ if (phys_pc == -1) {
+ return NULL;
+ }
desc.phys_page1 = phys_pc & TARGET_PAGE_MASK;
h = tb_hash_func(phys_pc, pc, flags, cf_mask, *cpu->trace_dstate);
return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
--
2.18.0
* [Qemu-devel] [PULL 07/45] accel/tcg: Handle get_page_addr_code() returning -1 in tb_check_watchpoint()
From: Peter Maydell @ 2018-08-14 18:17 UTC
To: qemu-devel
When we support execution from non-RAM MMIO regions, get_page_addr_code()
will return -1 to indicate that there is no RAM at the requested address.
Handle this in tb_check_watchpoint() -- if the exception happened for a
PC which doesn't correspond to RAM then there is no need to invalidate
any TBs, because the one-instruction TB will not have been cached.
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Tested-by: Cédric Le Goater <clg@kaod.org>
Message-id: 20180710160013.26559-4-peter.maydell@linaro.org
---
accel/tcg/translate-all.c | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index 1571987113b..ff7d0145f46 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -2121,7 +2121,9 @@ void tb_check_watchpoint(CPUState *cpu)
cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
addr = get_page_addr_code(env, pc);
- tb_invalidate_phys_range(addr, addr + 1);
+ if (addr != -1) {
+ tb_invalidate_phys_range(addr, addr + 1);
+ }
}
}
--
2.18.0
* [Qemu-devel] [PULL 08/45] accel/tcg: tb_gen_code(): Create single-insn TB for execution from non-RAM
From: Peter Maydell @ 2018-08-14 18:17 UTC
To: qemu-devel
If get_page_addr_code() returns -1, this indicates that there is no RAM
page we can read a full TB from. Instead we must create a TB which
contains a single instruction and which we do not cache, so it is
executed only once.
Since this means we can now have TBs which are not in any page list,
we also need to make tb_phys_invalidate() handle them (by not trying
to remove them from a nonexistent page list).
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Emilio G. Cota <cota@braap.org>
Tested-by: Cédric Le Goater <clg@kaod.org>
Message-id: 20180710160013.26559-5-peter.maydell@linaro.org
---
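
The "CF_NOCACHE | 1" in the diff below packs two things into cflags: the
no-cache flag and an instruction budget of one in the CF_COUNT_MASK low
bits. A standalone sketch of that encoding (the flag values here are
stand-ins; the real definitions are in QEMU's exec headers):

#include <stdint.h>
#include <stdio.h>

#define CF_COUNT_MASK 0x00007fffu   /* stand-in: low bits = max insns per TB */
#define CF_NOCACHE    0x00010000u   /* stand-in: TB is not cached */

int main(void)
{
    uint32_t cflags = 0x00000042u;   /* arbitrary previous insn budget */

    cflags &= ~CF_COUNT_MASK;        /* drop the old budget */
    cflags |= CF_NOCACHE | 1;        /* translate one insn, never cache it */

    printf("max insns = %u, cached = %s\n",
           (unsigned)(cflags & CF_COUNT_MASK),
           (cflags & CF_NOCACHE) ? "no" : "yes");
    return 0;
}
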
accel/tcg/translate-all.c | 19 ++++++++++++++++++-
1 file changed, 18 insertions(+), 1 deletion(-)
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index ff7d0145f46..898c3bb3d12 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -1493,7 +1493,7 @@ static void tb_phys_invalidate__locked(TranslationBlock *tb)
*/
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
- if (page_addr == -1) {
+ if (page_addr == -1 && tb->page_addr[0] != -1) {
page_lock_tb(tb);
do_tb_phys_invalidate(tb, true);
page_unlock_tb(tb);
@@ -1608,6 +1608,17 @@ tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
assert_memory_lock();
+ if (phys_pc == -1) {
+ /*
+ * If the TB is not associated with a physical RAM page then
+ * it must be a temporary one-insn TB, and we have nothing to do
+ * except fill in the page_addr[] fields.
+ */
+ assert(tb->cflags & CF_NOCACHE);
+ tb->page_addr[0] = tb->page_addr[1] = -1;
+ return tb;
+ }
+
/*
* Add the TB to the page list, acquiring first the pages's locks.
* We keep the locks held until after inserting the TB in the hash table,
@@ -1677,6 +1688,12 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
phys_pc = get_page_addr_code(env, pc);
+ if (phys_pc == -1) {
+ /* Generate a temporary TB with 1 insn in it */
+ cflags &= ~CF_COUNT_MASK;
+ cflags |= CF_NOCACHE | 1;
+ }
+
buffer_overflow:
tb = tb_alloc(pc);
if (unlikely(!tb)) {
--
2.18.0
* [Qemu-devel] [PULL 09/45] accel/tcg: Return -1 for execution from MMIO regions in get_page_addr_code()
From: Peter Maydell @ 2018-08-14 18:17 UTC
To: qemu-devel
Now that all the callers can handle get_page_addr_code() returning -1,
remove all the code which tries to handle execution from MMIO regions
or small-MMU-region RAM areas. This will mean that we can correctly
execute from these areas, rather than ending up either aborting QEMU
or delivering an incorrect guest exception.
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Tested-by: Cédric Le Goater <clg@kaod.org>
Tested-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-id: 20180710160013.26559-6-peter.maydell@linaro.org
---
accel/tcg/cputlb.c | 95 +++++-----------------------------------------
1 file changed, 10 insertions(+), 85 deletions(-)
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 51b1193044c..754795ff253 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -741,39 +741,6 @@ void tlb_set_page(CPUState *cpu, target_ulong vaddr,
prot, mmu_idx, size);
}
-static void report_bad_exec(CPUState *cpu, target_ulong addr)
-{
- /* Accidentally executing outside RAM or ROM is quite common for
- * several user-error situations, so report it in a way that
- * makes it clear that this isn't a QEMU bug and provide suggestions
- * about what a user could do to fix things.
- */
- error_report("Trying to execute code outside RAM or ROM at 0x"
- TARGET_FMT_lx, addr);
- error_printf("This usually means one of the following happened:\n\n"
- "(1) You told QEMU to execute a kernel for the wrong machine "
- "type, and it crashed on startup (eg trying to run a "
- "raspberry pi kernel on a versatilepb QEMU machine)\n"
- "(2) You didn't give QEMU a kernel or BIOS filename at all, "
- "and QEMU executed a ROM full of no-op instructions until "
- "it fell off the end\n"
- "(3) Your guest kernel has a bug and crashed by jumping "
- "off into nowhere\n\n"
- "This is almost always one of the first two, so check your "
- "command line and that you are using the right type of kernel "
- "for this machine.\n"
- "If you think option (3) is likely then you can try debugging "
- "your guest with the -d debug options; in particular "
- "-d guest_errors will cause the log to include a dump of the "
- "guest register state at this point.\n\n"
- "Execution cannot continue; stopping here.\n\n");
-
- /* Report also to the logs, with more detail including register dump */
- qemu_log_mask(LOG_GUEST_ERROR, "qemu: fatal: Trying to execute code "
- "outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
- log_cpu_state_mask(LOG_GUEST_ERROR, cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
-}
-
static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
ram_addr_t ram_addr;
@@ -963,7 +930,6 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
MemoryRegionSection *section;
CPUState *cpu = ENV_GET_CPU(env);
CPUIOTLBEntry *iotlbentry;
- hwaddr physaddr, mr_offset;
index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
mmu_idx = cpu_mmu_index(env, true);
@@ -977,65 +943,24 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
if (unlikely(env->tlb_table[mmu_idx][index].addr_code & TLB_RECHECK)) {
/*
* This is a TLB_RECHECK access, where the MMU protection
- * covers a smaller range than a target page, and we must
- * repeat the MMU check here. This tlb_fill() call might
- * longjump out if this access should cause a guest exception.
- */
- int index;
- target_ulong tlb_addr;
-
- tlb_fill(cpu, addr, 0, MMU_INST_FETCH, mmu_idx, 0);
-
- index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
- tlb_addr = env->tlb_table[mmu_idx][index].addr_code;
- if (!(tlb_addr & ~(TARGET_PAGE_MASK | TLB_RECHECK))) {
- /* RAM access. We can't handle this, so for now just stop */
- cpu_abort(cpu, "Unable to handle guest executing from RAM within "
- "a small MPU region at 0x" TARGET_FMT_lx, addr);
- }
- /*
- * Fall through to handle IO accesses (which will almost certainly
- * also result in failure)
+ * covers a smaller range than a target page. Return -1 to
+ * indicate that we cannot simply execute from RAM here;
+ * we will perform the necessary repeat of the MMU check
+ * when the "execute a single insn" code performs the
+ * load of the guest insn.
*/
+ return -1;
}
iotlbentry = &env->iotlb[mmu_idx][index];
section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
mr = section->mr;
if (memory_region_is_unassigned(mr)) {
- qemu_mutex_lock_iothread();
- if (memory_region_request_mmio_ptr(mr, addr)) {
- qemu_mutex_unlock_iothread();
- /* A MemoryRegion is potentially added so re-run the
- * get_page_addr_code.
- */
- return get_page_addr_code(env, addr);
- }
- qemu_mutex_unlock_iothread();
-
- /* Give the new-style cpu_transaction_failed() hook first chance
- * to handle this.
- * This is not the ideal place to detect and generate CPU
- * exceptions for instruction fetch failure (for instance
- * we don't know the length of the access that the CPU would
- * use, and it would be better to go ahead and try the access
- * and use the MemTXResult it produced). However it is the
- * simplest place we have currently available for the check.
+ /*
+ * Not guest RAM, so there is no ram_addr_t for it. Return -1,
+ * and we will execute a single insn from this device.
*/
- mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
- physaddr = mr_offset +
- section->offset_within_address_space -
- section->offset_within_region;
- cpu_transaction_failed(cpu, physaddr, addr, 0, MMU_INST_FETCH, mmu_idx,
- iotlbentry->attrs, MEMTX_DECODE_ERROR, 0);
-
- cpu_unassigned_access(cpu, addr, false, true, 0, 4);
- /* The CPU's unassigned access hook might have longjumped out
- * with an exception. If it didn't (or there was no hook) then
- * we can't proceed further.
- */
- report_bad_exec(cpu, addr);
- exit(1);
+ return -1;
}
p = (void *)((uintptr_t)addr + env->tlb_table[mmu_idx][index].addend);
return qemu_ram_addr_from_host_nofail(p);
--
2.18.0
* [Qemu-devel] [PULL 10/45] target/arm: Allow execution from small regions
From: Peter Maydell @ 2018-08-14 18:17 UTC
To: qemu-devel
Now that we have full support for small regions, including execution,
we can remove the workarounds where we marked all small regions as
non-executable for the M-profile MPU and SAU.
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Tested-by: Cédric Le Goater <clg@kaod.org>
Tested-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-id: 20180710160013.26559-7-peter.maydell@linaro.org
---
target/arm/helper.c | 23 -----------------------
1 file changed, 23 deletions(-)
diff --git a/target/arm/helper.c b/target/arm/helper.c
index 63a70065617..02c1c4d8404 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -9795,17 +9795,6 @@ static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
fi->type = ARMFault_Permission;
fi->level = 1;
- /*
- * Core QEMU code can't handle execution from small pages yet, so
- * don't try it. This way we'll get an MPU exception, rather than
- * eventually causing QEMU to exit in get_page_addr_code().
- */
- if (*page_size < TARGET_PAGE_SIZE && (*prot & PAGE_EXEC)) {
- qemu_log_mask(LOG_UNIMP,
- "MPU: No support for execution from regions "
- "smaller than 1K\n");
- *prot &= ~PAGE_EXEC;
- }
return !(*prot & (1 << access_type));
}
@@ -10056,18 +10045,6 @@ static bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
fi->type = ARMFault_Permission;
fi->level = 1;
- /*
- * Core QEMU code can't handle execution from small pages yet, so
- * don't try it. This means any attempted execution will generate
- * an MPU exception, rather than eventually causing QEMU to exit in
- * get_page_addr_code().
- */
- if (*is_subpage && (*prot & PAGE_EXEC)) {
- qemu_log_mask(LOG_UNIMP,
- "MPU: No support for execution from regions "
- "smaller than 1K\n");
- *prot &= ~PAGE_EXEC;
- }
return !(*prot & (1 << access_type));
}
--
2.18.0
* [Qemu-devel] [PULL 11/45] accel/tcg: Check whether TLB entry is RAM consistently with how we set it up
From: Peter Maydell @ 2018-08-14 18:17 UTC
To: qemu-devel
We set up TLB entries in tlb_set_page_with_attrs(), where we have
some logic for determining whether the TLB entry is considered
to be RAM-backed, and thus has a valid addend field. When we
look at the TLB entry in get_page_addr_code(), we use different
logic for determining whether to treat the page as RAM-backed
and use the addend field. This is confusing, and in fact buggy,
because the code in tlb_set_page_with_attrs() correctly decides
that rom_device memory regions not in romd mode are not RAM-backed,
but the code in get_page_addr_code() thinks they are RAM-backed.
This typically results in a "Bad ram pointer" assertion if the
guest tries to execute from such a memory region.
Fix this by making get_page_addr_code() just look at the
TLB_MMIO bit in the code_address field of the TLB, which
tlb_set_page_with_attrs() sets if and only if the addend
field is not valid for code execution.
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Tested-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-id: 20180713150945.12348-1-peter.maydell@linaro.org
---
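
The invariant this change relies on can be sketched standalone: the
writer of the TLB entry sets TLB_MMIO in addr_code exactly when the
addend is unusable for code fetches, so the reader only needs to test
that bit (the flag values below are stand-ins for the real TLB_*
definitions in QEMU's headers):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TLB_MMIO    (1u << 5)   /* stand-in: entry is not RAM-backed */
#define TLB_RECHECK (1u << 6)   /* stand-in: entry covers less than a page */

/* Mirrors the new check in get_page_addr_code(): only a plain RAM entry
 * may be executed from directly via the addend. */
static bool can_exec_directly(uint32_t addr_code)
{
    return !(addr_code & (TLB_RECHECK | TLB_MMIO));
}

int main(void)
{
    printf("plain RAM            -> %d\n", can_exec_directly(0));           /* 1 */
    printf("rom_device, romd off -> %d\n", can_exec_directly(TLB_MMIO));    /* 0 */
    printf("small MPU region     -> %d\n", can_exec_directly(TLB_RECHECK)); /* 0 */
    return 0;
}
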
include/exec/exec-all.h | 2 --
accel/tcg/cputlb.c | 29 ++++++++---------------------
exec.c | 6 ------
3 files changed, 8 insertions(+), 29 deletions(-)
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index da73e3bfed2..5f781255826 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -502,8 +502,6 @@ hwaddr memory_region_section_get_iotlb(CPUState *cpu,
hwaddr paddr, hwaddr xlat,
int prot,
target_ulong *address);
-bool memory_region_is_unassigned(MemoryRegion *mr);
-
#endif
/* vl.c */
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 754795ff253..f4702ce91f6 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -926,10 +926,6 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
{
int mmu_idx, index;
void *p;
- MemoryRegion *mr;
- MemoryRegionSection *section;
- CPUState *cpu = ENV_GET_CPU(env);
- CPUIOTLBEntry *iotlbentry;
index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
mmu_idx = cpu_mmu_index(env, true);
@@ -940,28 +936,19 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
assert(tlb_hit(env->tlb_table[mmu_idx][index].addr_code, addr));
}
- if (unlikely(env->tlb_table[mmu_idx][index].addr_code & TLB_RECHECK)) {
+ if (unlikely(env->tlb_table[mmu_idx][index].addr_code &
+ (TLB_RECHECK | TLB_MMIO))) {
/*
- * This is a TLB_RECHECK access, where the MMU protection
- * covers a smaller range than a target page. Return -1 to
- * indicate that we cannot simply execute from RAM here;
- * we will perform the necessary repeat of the MMU check
- * when the "execute a single insn" code performs the
- * load of the guest insn.
+ * Return -1 if we can't translate and execute from an entire
+ * page of RAM here, which will cause us to execute by loading
+ * and translating one insn at a time, without caching:
+ * - TLB_RECHECK: means the MMU protection covers a smaller range
+ * than a target page, so we must redo the MMU check every insn
+ * - TLB_MMIO: region is not backed by RAM
*/
return -1;
}
- iotlbentry = &env->iotlb[mmu_idx][index];
- section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
- mr = section->mr;
- if (memory_region_is_unassigned(mr)) {
- /*
- * Not guest RAM, so there is no ram_addr_t for it. Return -1,
- * and we will execute a single insn from this device.
- */
- return -1;
- }
p = (void *)((uintptr_t)addr + env->tlb_table[mmu_idx][index].addend);
return qemu_ram_addr_from_host_nofail(p);
}
diff --git a/exec.c b/exec.c
index 4f5df07b6a2..e7be0761c28 100644
--- a/exec.c
+++ b/exec.c
@@ -402,12 +402,6 @@ static MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr addr)
}
}
-bool memory_region_is_unassigned(MemoryRegion *mr)
-{
- return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
- && mr != &io_mem_watch;
-}
-
/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
hwaddr addr,
--
2.18.0
* [Qemu-devel] [PULL 12/45] intc/arm_gic: Refactor operations on the distributor
From: Peter Maydell @ 2018-08-14 18:17 UTC
To: qemu-devel
From: Luc Michel <luc.michel@greensocs.com>
In preparation for the virtualization extensions implementation,
rename the functions and macros that act on the GIC distributor to
make that fact explicit. This will be useful to differentiate them
from the ones that will act on the virtual interfaces.
Signed-off-by: Luc Michel <luc.michel@greensocs.com>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: Sai Pavan Boddu <sai.pavan.boddu@xilinx.com>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20180727095421.386-2-luc.michel@greensocs.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
hw/intc/gic_internal.h | 51 ++++++------
hw/intc/arm_gic.c | 163 +++++++++++++++++++++------------------
hw/intc/arm_gic_common.c | 6 +-
hw/intc/arm_gic_kvm.c | 23 +++---
4 files changed, 127 insertions(+), 116 deletions(-)
diff --git a/hw/intc/gic_internal.h b/hw/intc/gic_internal.h
index 7fe87b13de0..6f8d242904b 100644
--- a/hw/intc/gic_internal.h
+++ b/hw/intc/gic_internal.h
@@ -27,30 +27,31 @@
#define GIC_BASE_IRQ 0
-#define GIC_SET_ENABLED(irq, cm) s->irq_state[irq].enabled |= (cm)
-#define GIC_CLEAR_ENABLED(irq, cm) s->irq_state[irq].enabled &= ~(cm)
-#define GIC_TEST_ENABLED(irq, cm) ((s->irq_state[irq].enabled & (cm)) != 0)
-#define GIC_SET_PENDING(irq, cm) s->irq_state[irq].pending |= (cm)
-#define GIC_CLEAR_PENDING(irq, cm) s->irq_state[irq].pending &= ~(cm)
-#define GIC_SET_ACTIVE(irq, cm) s->irq_state[irq].active |= (cm)
-#define GIC_CLEAR_ACTIVE(irq, cm) s->irq_state[irq].active &= ~(cm)
-#define GIC_TEST_ACTIVE(irq, cm) ((s->irq_state[irq].active & (cm)) != 0)
-#define GIC_SET_MODEL(irq) s->irq_state[irq].model = true
-#define GIC_CLEAR_MODEL(irq) s->irq_state[irq].model = false
-#define GIC_TEST_MODEL(irq) s->irq_state[irq].model
-#define GIC_SET_LEVEL(irq, cm) s->irq_state[irq].level |= (cm)
-#define GIC_CLEAR_LEVEL(irq, cm) s->irq_state[irq].level &= ~(cm)
-#define GIC_TEST_LEVEL(irq, cm) ((s->irq_state[irq].level & (cm)) != 0)
-#define GIC_SET_EDGE_TRIGGER(irq) s->irq_state[irq].edge_trigger = true
-#define GIC_CLEAR_EDGE_TRIGGER(irq) s->irq_state[irq].edge_trigger = false
-#define GIC_TEST_EDGE_TRIGGER(irq) (s->irq_state[irq].edge_trigger)
-#define GIC_GET_PRIORITY(irq, cpu) (((irq) < GIC_INTERNAL) ? \
+#define GIC_DIST_SET_ENABLED(irq, cm) (s->irq_state[irq].enabled |= (cm))
+#define GIC_DIST_CLEAR_ENABLED(irq, cm) (s->irq_state[irq].enabled &= ~(cm))
+#define GIC_DIST_TEST_ENABLED(irq, cm) ((s->irq_state[irq].enabled & (cm)) != 0)
+#define GIC_DIST_SET_PENDING(irq, cm) (s->irq_state[irq].pending |= (cm))
+#define GIC_DIST_CLEAR_PENDING(irq, cm) (s->irq_state[irq].pending &= ~(cm))
+#define GIC_DIST_SET_ACTIVE(irq, cm) (s->irq_state[irq].active |= (cm))
+#define GIC_DIST_CLEAR_ACTIVE(irq, cm) (s->irq_state[irq].active &= ~(cm))
+#define GIC_DIST_TEST_ACTIVE(irq, cm) ((s->irq_state[irq].active & (cm)) != 0)
+#define GIC_DIST_SET_MODEL(irq) (s->irq_state[irq].model = true)
+#define GIC_DIST_CLEAR_MODEL(irq) (s->irq_state[irq].model = false)
+#define GIC_DIST_TEST_MODEL(irq) (s->irq_state[irq].model)
+#define GIC_DIST_SET_LEVEL(irq, cm) (s->irq_state[irq].level |= (cm))
+#define GIC_DIST_CLEAR_LEVEL(irq, cm) (s->irq_state[irq].level &= ~(cm))
+#define GIC_DIST_TEST_LEVEL(irq, cm) ((s->irq_state[irq].level & (cm)) != 0)
+#define GIC_DIST_SET_EDGE_TRIGGER(irq) (s->irq_state[irq].edge_trigger = true)
+#define GIC_DIST_CLEAR_EDGE_TRIGGER(irq) \
+ (s->irq_state[irq].edge_trigger = false)
+#define GIC_DIST_TEST_EDGE_TRIGGER(irq) (s->irq_state[irq].edge_trigger)
+#define GIC_DIST_GET_PRIORITY(irq, cpu) (((irq) < GIC_INTERNAL) ? \
s->priority1[irq][cpu] : \
s->priority2[(irq) - GIC_INTERNAL])
-#define GIC_TARGET(irq) s->irq_target[irq]
-#define GIC_CLEAR_GROUP(irq, cm) (s->irq_state[irq].group &= ~(cm))
-#define GIC_SET_GROUP(irq, cm) (s->irq_state[irq].group |= (cm))
-#define GIC_TEST_GROUP(irq, cm) ((s->irq_state[irq].group & (cm)) != 0)
+#define GIC_DIST_TARGET(irq) (s->irq_target[irq])
+#define GIC_DIST_CLEAR_GROUP(irq, cm) (s->irq_state[irq].group &= ~(cm))
+#define GIC_DIST_SET_GROUP(irq, cm) (s->irq_state[irq].group |= (cm))
+#define GIC_DIST_TEST_GROUP(irq, cm) ((s->irq_state[irq].group & (cm)) != 0)
#define GICD_CTLR_EN_GRP0 (1U << 0)
#define GICD_CTLR_EN_GRP1 (1U << 1)
@@ -79,8 +80,8 @@ uint32_t gic_acknowledge_irq(GICState *s, int cpu, MemTxAttrs attrs);
void gic_complete_irq(GICState *s, int cpu, int irq, MemTxAttrs attrs);
void gic_update(GICState *s);
void gic_init_irqs_and_distributor(GICState *s);
-void gic_set_priority(GICState *s, int cpu, int irq, uint8_t val,
- MemTxAttrs attrs);
+void gic_dist_set_priority(GICState *s, int cpu, int irq, uint8_t val,
+ MemTxAttrs attrs);
static inline bool gic_test_pending(GICState *s, int irq, int cm)
{
@@ -93,7 +94,7 @@ static inline bool gic_test_pending(GICState *s, int irq, int cm)
* GICD_ISPENDR to set the state pending.
*/
return (s->irq_state[irq].pending & cm) ||
- (!GIC_TEST_EDGE_TRIGGER(irq) && GIC_TEST_LEVEL(irq, cm));
+ (!GIC_DIST_TEST_EDGE_TRIGGER(irq) && GIC_DIST_TEST_LEVEL(irq, cm));
}
}
diff --git a/hw/intc/arm_gic.c b/hw/intc/arm_gic.c
index 34dc84ae813..9286236d86b 100644
--- a/hw/intc/arm_gic.c
+++ b/hw/intc/arm_gic.c
@@ -92,11 +92,12 @@ void gic_update(GICState *s)
best_prio = 0x100;
best_irq = 1023;
for (irq = 0; irq < s->num_irq; irq++) {
- if (GIC_TEST_ENABLED(irq, cm) && gic_test_pending(s, irq, cm) &&
- (!GIC_TEST_ACTIVE(irq, cm)) &&
- (irq < GIC_INTERNAL || GIC_TARGET(irq) & cm)) {
- if (GIC_GET_PRIORITY(irq, cpu) < best_prio) {
- best_prio = GIC_GET_PRIORITY(irq, cpu);
+ if (GIC_DIST_TEST_ENABLED(irq, cm) &&
+ gic_test_pending(s, irq, cm) &&
+ (!GIC_DIST_TEST_ACTIVE(irq, cm)) &&
+ (irq < GIC_INTERNAL || GIC_DIST_TARGET(irq) & cm)) {
+ if (GIC_DIST_GET_PRIORITY(irq, cpu) < best_prio) {
+ best_prio = GIC_DIST_GET_PRIORITY(irq, cpu);
best_irq = irq;
}
}
@@ -112,7 +113,7 @@ void gic_update(GICState *s)
if (best_prio < s->priority_mask[cpu]) {
s->current_pending[cpu] = best_irq;
if (best_prio < s->running_priority[cpu]) {
- int group = GIC_TEST_GROUP(best_irq, cm);
+ int group = GIC_DIST_TEST_GROUP(best_irq, cm);
if (extract32(s->ctlr, group, 1) &&
extract32(s->cpu_ctlr[cpu], group, 1)) {
@@ -145,7 +146,7 @@ void gic_set_pending_private(GICState *s, int cpu, int irq)
}
DPRINTF("Set %d pending cpu %d\n", irq, cpu);
- GIC_SET_PENDING(irq, cm);
+ GIC_DIST_SET_PENDING(irq, cm);
gic_update(s);
}
@@ -153,13 +154,13 @@ static void gic_set_irq_11mpcore(GICState *s, int irq, int level,
int cm, int target)
{
if (level) {
- GIC_SET_LEVEL(irq, cm);
- if (GIC_TEST_EDGE_TRIGGER(irq) || GIC_TEST_ENABLED(irq, cm)) {
+ GIC_DIST_SET_LEVEL(irq, cm);
+ if (GIC_DIST_TEST_EDGE_TRIGGER(irq) || GIC_DIST_TEST_ENABLED(irq, cm)) {
DPRINTF("Set %d pending mask %x\n", irq, target);
- GIC_SET_PENDING(irq, target);
+ GIC_DIST_SET_PENDING(irq, target);
}
} else {
- GIC_CLEAR_LEVEL(irq, cm);
+ GIC_DIST_CLEAR_LEVEL(irq, cm);
}
}
@@ -167,13 +168,13 @@ static void gic_set_irq_generic(GICState *s, int irq, int level,
int cm, int target)
{
if (level) {
- GIC_SET_LEVEL(irq, cm);
+ GIC_DIST_SET_LEVEL(irq, cm);
DPRINTF("Set %d pending mask %x\n", irq, target);
- if (GIC_TEST_EDGE_TRIGGER(irq)) {
- GIC_SET_PENDING(irq, target);
+ if (GIC_DIST_TEST_EDGE_TRIGGER(irq)) {
+ GIC_DIST_SET_PENDING(irq, target);
}
} else {
- GIC_CLEAR_LEVEL(irq, cm);
+ GIC_DIST_CLEAR_LEVEL(irq, cm);
}
}
@@ -192,7 +193,7 @@ static void gic_set_irq(void *opaque, int irq, int level)
/* The first external input line is internal interrupt 32. */
cm = ALL_CPU_MASK;
irq += GIC_INTERNAL;
- target = GIC_TARGET(irq);
+ target = GIC_DIST_TARGET(irq);
} else {
int cpu;
irq -= (s->num_irq - GIC_INTERNAL);
@@ -204,7 +205,7 @@ static void gic_set_irq(void *opaque, int irq, int level)
assert(irq >= GIC_NR_SGIS);
- if (level == GIC_TEST_LEVEL(irq, cm)) {
+ if (level == GIC_DIST_TEST_LEVEL(irq, cm)) {
return;
}
@@ -224,7 +225,7 @@ static uint16_t gic_get_current_pending_irq(GICState *s, int cpu,
uint16_t pending_irq = s->current_pending[cpu];
if (pending_irq < GIC_MAXIRQ && gic_has_groups(s)) {
- int group = GIC_TEST_GROUP(pending_irq, (1 << cpu));
+ int group = GIC_DIST_TEST_GROUP(pending_irq, (1 << cpu));
/* On a GIC without the security extensions, reading this register
* behaves in the same way as a secure access to a GIC with them.
*/
@@ -255,7 +256,7 @@ static int gic_get_group_priority(GICState *s, int cpu, int irq)
if (gic_has_groups(s) &&
!(s->cpu_ctlr[cpu] & GICC_CTLR_CBPR) &&
- GIC_TEST_GROUP(irq, (1 << cpu))) {
+ GIC_DIST_TEST_GROUP(irq, (1 << cpu))) {
bpr = s->abpr[cpu] - 1;
assert(bpr >= 0);
} else {
@@ -268,7 +269,7 @@ static int gic_get_group_priority(GICState *s, int cpu, int irq)
*/
mask = ~0U << ((bpr & 7) + 1);
- return GIC_GET_PRIORITY(irq, cpu) & mask;
+ return GIC_DIST_GET_PRIORITY(irq, cpu) & mask;
}
static void gic_activate_irq(GICState *s, int cpu, int irq)
@@ -281,14 +282,14 @@ static void gic_activate_irq(GICState *s, int cpu, int irq)
int regno = preemption_level / 32;
int bitno = preemption_level % 32;
- if (gic_has_groups(s) && GIC_TEST_GROUP(irq, (1 << cpu))) {
+ if (gic_has_groups(s) && GIC_DIST_TEST_GROUP(irq, (1 << cpu))) {
s->nsapr[regno][cpu] |= (1 << bitno);
} else {
s->apr[regno][cpu] |= (1 << bitno);
}
s->running_priority[cpu] = prio;
- GIC_SET_ACTIVE(irq, 1 << cpu);
+ GIC_DIST_SET_ACTIVE(irq, 1 << cpu);
}
static int gic_get_prio_from_apr_bits(GICState *s, int cpu)
@@ -357,7 +358,7 @@ uint32_t gic_acknowledge_irq(GICState *s, int cpu, MemTxAttrs attrs)
return irq;
}
- if (GIC_GET_PRIORITY(irq, cpu) >= s->running_priority[cpu]) {
+ if (GIC_DIST_GET_PRIORITY(irq, cpu) >= s->running_priority[cpu]) {
DPRINTF("ACK, pending interrupt (%d) has insufficient priority\n", irq);
return 1023;
}
@@ -366,7 +367,8 @@ uint32_t gic_acknowledge_irq(GICState *s, int cpu, MemTxAttrs attrs)
/* Clear pending flags for both level and edge triggered interrupts.
* Level triggered IRQs will be reasserted once they become inactive.
*/
- GIC_CLEAR_PENDING(irq, GIC_TEST_MODEL(irq) ? ALL_CPU_MASK : cm);
+ GIC_DIST_CLEAR_PENDING(irq, GIC_DIST_TEST_MODEL(irq) ? ALL_CPU_MASK
+ : cm);
ret = irq;
} else {
if (irq < GIC_NR_SGIS) {
@@ -378,7 +380,9 @@ uint32_t gic_acknowledge_irq(GICState *s, int cpu, MemTxAttrs attrs)
src = ctz32(s->sgi_pending[irq][cpu]);
s->sgi_pending[irq][cpu] &= ~(1 << src);
if (s->sgi_pending[irq][cpu] == 0) {
- GIC_CLEAR_PENDING(irq, GIC_TEST_MODEL(irq) ? ALL_CPU_MASK : cm);
+ GIC_DIST_CLEAR_PENDING(irq,
+ GIC_DIST_TEST_MODEL(irq) ? ALL_CPU_MASK
+ : cm);
}
ret = irq | ((src & 0x7) << 10);
} else {
@@ -386,7 +390,8 @@ uint32_t gic_acknowledge_irq(GICState *s, int cpu, MemTxAttrs attrs)
* interrupts. (level triggered interrupts with an active line
* remain pending, see gic_test_pending)
*/
- GIC_CLEAR_PENDING(irq, GIC_TEST_MODEL(irq) ? ALL_CPU_MASK : cm);
+ GIC_DIST_CLEAR_PENDING(irq, GIC_DIST_TEST_MODEL(irq) ? ALL_CPU_MASK
+ : cm);
ret = irq;
}
}
@@ -397,11 +402,11 @@ uint32_t gic_acknowledge_irq(GICState *s, int cpu, MemTxAttrs attrs)
return ret;
}
-void gic_set_priority(GICState *s, int cpu, int irq, uint8_t val,
+void gic_dist_set_priority(GICState *s, int cpu, int irq, uint8_t val,
MemTxAttrs attrs)
{
if (s->security_extn && !attrs.secure) {
- if (!GIC_TEST_GROUP(irq, (1 << cpu))) {
+ if (!GIC_DIST_TEST_GROUP(irq, (1 << cpu))) {
return; /* Ignore Non-secure access of Group0 IRQ */
}
val = 0x80 | (val >> 1); /* Non-secure view */
@@ -414,13 +419,13 @@ void gic_set_priority(GICState *s, int cpu, int irq, uint8_t val,
}
}
-static uint32_t gic_get_priority(GICState *s, int cpu, int irq,
+static uint32_t gic_dist_get_priority(GICState *s, int cpu, int irq,
MemTxAttrs attrs)
{
- uint32_t prio = GIC_GET_PRIORITY(irq, cpu);
+ uint32_t prio = GIC_DIST_GET_PRIORITY(irq, cpu);
if (s->security_extn && !attrs.secure) {
- if (!GIC_TEST_GROUP(irq, (1 << cpu))) {
+ if (!GIC_DIST_TEST_GROUP(irq, (1 << cpu))) {
return 0; /* Non-secure access cannot read priority of Group0 IRQ */
}
prio = (prio << 1) & 0xff; /* Non-secure view */
@@ -557,7 +562,7 @@ static void gic_deactivate_irq(GICState *s, int cpu, int irq, MemTxAttrs attrs)
return;
}
- group = gic_has_groups(s) && GIC_TEST_GROUP(irq, cm);
+ group = gic_has_groups(s) && GIC_DIST_TEST_GROUP(irq, cm);
if (!gic_eoi_split(s, cpu, attrs)) {
/* This is UNPREDICTABLE; we choose to ignore it */
@@ -571,7 +576,7 @@ static void gic_deactivate_irq(GICState *s, int cpu, int irq, MemTxAttrs attrs)
return;
}
- GIC_CLEAR_ACTIVE(irq, cm);
+ GIC_DIST_CLEAR_ACTIVE(irq, cm);
}
void gic_complete_irq(GICState *s, int cpu, int irq, MemTxAttrs attrs)
@@ -598,14 +603,15 @@ void gic_complete_irq(GICState *s, int cpu, int irq, MemTxAttrs attrs)
if (s->revision == REV_11MPCORE) {
/* Mark level triggered interrupts as pending if they are still
raised. */
- if (!GIC_TEST_EDGE_TRIGGER(irq) && GIC_TEST_ENABLED(irq, cm)
- && GIC_TEST_LEVEL(irq, cm) && (GIC_TARGET(irq) & cm) != 0) {
+ if (!GIC_DIST_TEST_EDGE_TRIGGER(irq) && GIC_DIST_TEST_ENABLED(irq, cm)
+ && GIC_DIST_TEST_LEVEL(irq, cm)
+ && (GIC_DIST_TARGET(irq) & cm) != 0) {
DPRINTF("Set %d pending mask %x\n", irq, cm);
- GIC_SET_PENDING(irq, cm);
+ GIC_DIST_SET_PENDING(irq, cm);
}
}
- group = gic_has_groups(s) && GIC_TEST_GROUP(irq, cm);
+ group = gic_has_groups(s) && GIC_DIST_TEST_GROUP(irq, cm);
if (s->security_extn && !attrs.secure && !group) {
DPRINTF("Non-secure EOI for Group0 interrupt %d ignored\n", irq);
@@ -621,7 +627,7 @@ void gic_complete_irq(GICState *s, int cpu, int irq, MemTxAttrs attrs)
/* In GICv2 the guest can choose to split priority-drop and deactivate */
if (!gic_eoi_split(s, cpu, attrs)) {
- GIC_CLEAR_ACTIVE(irq, cm);
+ GIC_DIST_CLEAR_ACTIVE(irq, cm);
}
gic_update(s);
}
@@ -669,7 +675,7 @@ static uint32_t gic_dist_readb(void *opaque, hwaddr offset, MemTxAttrs attrs)
goto bad_reg;
}
for (i = 0; i < 8; i++) {
- if (GIC_TEST_GROUP(irq + i, cm)) {
+ if (GIC_DIST_TEST_GROUP(irq + i, cm)) {
res |= (1 << i);
}
}
@@ -689,11 +695,11 @@ static uint32_t gic_dist_readb(void *opaque, hwaddr offset, MemTxAttrs attrs)
res = 0;
for (i = 0; i < 8; i++) {
if (s->security_extn && !attrs.secure &&
- !GIC_TEST_GROUP(irq + i, 1 << cpu)) {
+ !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) {
continue; /* Ignore Non-secure access of Group0 IRQ */
}
- if (GIC_TEST_ENABLED(irq + i, cm)) {
+ if (GIC_DIST_TEST_ENABLED(irq + i, cm)) {
res |= (1 << i);
}
}
@@ -710,7 +716,7 @@ static uint32_t gic_dist_readb(void *opaque, hwaddr offset, MemTxAttrs attrs)
mask = (irq < GIC_INTERNAL) ? cm : ALL_CPU_MASK;
for (i = 0; i < 8; i++) {
if (s->security_extn && !attrs.secure &&
- !GIC_TEST_GROUP(irq + i, 1 << cpu)) {
+ !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) {
continue; /* Ignore Non-secure access of Group0 IRQ */
}
@@ -727,11 +733,11 @@ static uint32_t gic_dist_readb(void *opaque, hwaddr offset, MemTxAttrs attrs)
mask = (irq < GIC_INTERNAL) ? cm : ALL_CPU_MASK;
for (i = 0; i < 8; i++) {
if (s->security_extn && !attrs.secure &&
- !GIC_TEST_GROUP(irq + i, 1 << cpu)) {
+ !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) {
continue; /* Ignore Non-secure access of Group0 IRQ */
}
- if (GIC_TEST_ACTIVE(irq + i, mask)) {
+ if (GIC_DIST_TEST_ACTIVE(irq + i, mask)) {
res |= (1 << i);
}
}
@@ -740,7 +746,7 @@ static uint32_t gic_dist_readb(void *opaque, hwaddr offset, MemTxAttrs attrs)
irq = (offset - 0x400) + GIC_BASE_IRQ;
if (irq >= s->num_irq)
goto bad_reg;
- res = gic_get_priority(s, cpu, irq, attrs);
+ res = gic_dist_get_priority(s, cpu, irq, attrs);
} else if (offset < 0xc00) {
/* Interrupt CPU Target. */
if (s->num_cpu == 1 && s->revision != REV_11MPCORE) {
@@ -756,7 +762,7 @@ static uint32_t gic_dist_readb(void *opaque, hwaddr offset, MemTxAttrs attrs)
} else if (irq < GIC_INTERNAL) {
res = cm;
} else {
- res = GIC_TARGET(irq);
+ res = GIC_DIST_TARGET(irq);
}
}
} else if (offset < 0xf00) {
@@ -767,14 +773,16 @@ static uint32_t gic_dist_readb(void *opaque, hwaddr offset, MemTxAttrs attrs)
res = 0;
for (i = 0; i < 4; i++) {
if (s->security_extn && !attrs.secure &&
- !GIC_TEST_GROUP(irq + i, 1 << cpu)) {
+ !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) {
continue; /* Ignore Non-secure access of Group0 IRQ */
}
- if (GIC_TEST_MODEL(irq + i))
+ if (GIC_DIST_TEST_MODEL(irq + i)) {
res |= (1 << (i * 2));
- if (GIC_TEST_EDGE_TRIGGER(irq + i))
+ }
+ if (GIC_DIST_TEST_EDGE_TRIGGER(irq + i)) {
res |= (2 << (i * 2));
+ }
}
} else if (offset < 0xf10) {
goto bad_reg;
@@ -792,7 +800,7 @@ static uint32_t gic_dist_readb(void *opaque, hwaddr offset, MemTxAttrs attrs)
}
if (s->security_extn && !attrs.secure &&
- !GIC_TEST_GROUP(irq, 1 << cpu)) {
+ !GIC_DIST_TEST_GROUP(irq, 1 << cpu)) {
res = 0; /* Ignore Non-secure access of Group0 IRQ */
} else {
res = s->sgi_pending[irq][cpu];
@@ -888,10 +896,10 @@ static void gic_dist_writeb(void *opaque, hwaddr offset,
int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK;
if (value & (1 << i)) {
/* Group1 (Non-secure) */
- GIC_SET_GROUP(irq + i, cm);
+ GIC_DIST_SET_GROUP(irq + i, cm);
} else {
/* Group0 (Secure) */
- GIC_CLEAR_GROUP(irq + i, cm);
+ GIC_DIST_CLEAR_GROUP(irq + i, cm);
}
}
}
@@ -910,25 +918,26 @@ static void gic_dist_writeb(void *opaque, hwaddr offset,
for (i = 0; i < 8; i++) {
if (value & (1 << i)) {
int mask =
- (irq < GIC_INTERNAL) ? (1 << cpu) : GIC_TARGET(irq + i);
+ (irq < GIC_INTERNAL) ? (1 << cpu)
+ : GIC_DIST_TARGET(irq + i);
int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK;
if (s->security_extn && !attrs.secure &&
- !GIC_TEST_GROUP(irq + i, 1 << cpu)) {
+ !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) {
continue; /* Ignore Non-secure access of Group0 IRQ */
}
- if (!GIC_TEST_ENABLED(irq + i, cm)) {
+ if (!GIC_DIST_TEST_ENABLED(irq + i, cm)) {
DPRINTF("Enabled IRQ %d\n", irq + i);
trace_gic_enable_irq(irq + i);
}
- GIC_SET_ENABLED(irq + i, cm);
+ GIC_DIST_SET_ENABLED(irq + i, cm);
/* If a raised level triggered IRQ enabled then mark
is as pending. */
- if (GIC_TEST_LEVEL(irq + i, mask)
- && !GIC_TEST_EDGE_TRIGGER(irq + i)) {
+ if (GIC_DIST_TEST_LEVEL(irq + i, mask)
+ && !GIC_DIST_TEST_EDGE_TRIGGER(irq + i)) {
DPRINTF("Set %d pending mask %x\n", irq + i, mask);
- GIC_SET_PENDING(irq + i, mask);
+ GIC_DIST_SET_PENDING(irq + i, mask);
}
}
}
@@ -946,15 +955,15 @@ static void gic_dist_writeb(void *opaque, hwaddr offset,
int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK;
if (s->security_extn && !attrs.secure &&
- !GIC_TEST_GROUP(irq + i, 1 << cpu)) {
+ !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) {
continue; /* Ignore Non-secure access of Group0 IRQ */
}
- if (GIC_TEST_ENABLED(irq + i, cm)) {
+ if (GIC_DIST_TEST_ENABLED(irq + i, cm)) {
DPRINTF("Disabled IRQ %d\n", irq + i);
trace_gic_disable_irq(irq + i);
}
- GIC_CLEAR_ENABLED(irq + i, cm);
+ GIC_DIST_CLEAR_ENABLED(irq + i, cm);
}
}
} else if (offset < 0x280) {
@@ -969,11 +978,11 @@ static void gic_dist_writeb(void *opaque, hwaddr offset,
for (i = 0; i < 8; i++) {
if (value & (1 << i)) {
if (s->security_extn && !attrs.secure &&
- !GIC_TEST_GROUP(irq + i, 1 << cpu)) {
+ !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) {
continue; /* Ignore Non-secure access of Group0 IRQ */
}
- GIC_SET_PENDING(irq + i, GIC_TARGET(irq + i));
+ GIC_DIST_SET_PENDING(irq + i, GIC_DIST_TARGET(irq + i));
}
}
} else if (offset < 0x300) {
@@ -987,7 +996,7 @@ static void gic_dist_writeb(void *opaque, hwaddr offset,
for (i = 0; i < 8; i++) {
if (s->security_extn && !attrs.secure &&
- !GIC_TEST_GROUP(irq + i, 1 << cpu)) {
+ !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) {
continue; /* Ignore Non-secure access of Group0 IRQ */
}
@@ -995,7 +1004,7 @@ static void gic_dist_writeb(void *opaque, hwaddr offset,
for per-CPU interrupts. It's unclear whether this is the
corect behavior. */
if (value & (1 << i)) {
- GIC_CLEAR_PENDING(irq + i, ALL_CPU_MASK);
+ GIC_DIST_CLEAR_PENDING(irq + i, ALL_CPU_MASK);
}
}
} else if (offset < 0x400) {
@@ -1006,7 +1015,7 @@ static void gic_dist_writeb(void *opaque, hwaddr offset,
irq = (offset - 0x400) + GIC_BASE_IRQ;
if (irq >= s->num_irq)
goto bad_reg;
- gic_set_priority(s, cpu, irq, value, attrs);
+ gic_dist_set_priority(s, cpu, irq, value, attrs);
} else if (offset < 0xc00) {
/* Interrupt CPU Target. RAZ/WI on uniprocessor GICs, with the
* annoying exception of the 11MPCore's GIC.
@@ -1032,21 +1041,21 @@ static void gic_dist_writeb(void *opaque, hwaddr offset,
value |= 0xaa;
for (i = 0; i < 4; i++) {
if (s->security_extn && !attrs.secure &&
- !GIC_TEST_GROUP(irq + i, 1 << cpu)) {
+ !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) {
continue; /* Ignore Non-secure access of Group0 IRQ */
}
if (s->revision == REV_11MPCORE) {
if (value & (1 << (i * 2))) {
- GIC_SET_MODEL(irq + i);
+ GIC_DIST_SET_MODEL(irq + i);
} else {
- GIC_CLEAR_MODEL(irq + i);
+ GIC_DIST_CLEAR_MODEL(irq + i);
}
}
if (value & (2 << (i * 2))) {
- GIC_SET_EDGE_TRIGGER(irq + i);
+ GIC_DIST_SET_EDGE_TRIGGER(irq + i);
} else {
- GIC_CLEAR_EDGE_TRIGGER(irq + i);
+ GIC_DIST_CLEAR_EDGE_TRIGGER(irq + i);
}
}
} else if (offset < 0xf10) {
@@ -1060,10 +1069,10 @@ static void gic_dist_writeb(void *opaque, hwaddr offset,
irq = (offset - 0xf10);
if (!s->security_extn || attrs.secure ||
- GIC_TEST_GROUP(irq, 1 << cpu)) {
+ GIC_DIST_TEST_GROUP(irq, 1 << cpu)) {
s->sgi_pending[irq][cpu] &= ~value;
if (s->sgi_pending[irq][cpu] == 0) {
- GIC_CLEAR_PENDING(irq, 1 << cpu);
+ GIC_DIST_CLEAR_PENDING(irq, 1 << cpu);
}
}
} else if (offset < 0xf30) {
@@ -1074,8 +1083,8 @@ static void gic_dist_writeb(void *opaque, hwaddr offset,
irq = (offset - 0xf20);
if (!s->security_extn || attrs.secure ||
- GIC_TEST_GROUP(irq, 1 << cpu)) {
- GIC_SET_PENDING(irq, 1 << cpu);
+ GIC_DIST_TEST_GROUP(irq, 1 << cpu)) {
+ GIC_DIST_SET_PENDING(irq, 1 << cpu);
s->sgi_pending[irq][cpu] |= value;
}
} else {
@@ -1122,7 +1131,7 @@ static void gic_dist_writel(void *opaque, hwaddr offset,
mask = ALL_CPU_MASK;
break;
}
- GIC_SET_PENDING(irq, mask);
+ GIC_DIST_SET_PENDING(irq, mask);
target_cpu = ctz32(mask);
while (target_cpu < GIC_NCPU) {
s->sgi_pending[irq][target_cpu] |= (1 << cpu);
diff --git a/hw/intc/arm_gic_common.c b/hw/intc/arm_gic_common.c
index aee50a20e02..295ee9cc5e5 100644
--- a/hw/intc/arm_gic_common.c
+++ b/hw/intc/arm_gic_common.c
@@ -204,8 +204,8 @@ static void arm_gic_common_reset(DeviceState *dev)
}
}
for (i = 0; i < GIC_NR_SGIS; i++) {
- GIC_SET_ENABLED(i, ALL_CPU_MASK);
- GIC_SET_EDGE_TRIGGER(i);
+ GIC_DIST_SET_ENABLED(i, ALL_CPU_MASK);
+ GIC_DIST_SET_EDGE_TRIGGER(i);
}
for (i = 0; i < ARRAY_SIZE(s->priority2); i++) {
@@ -222,7 +222,7 @@ static void arm_gic_common_reset(DeviceState *dev)
}
if (s->security_extn && s->irq_reset_nonsecure) {
for (i = 0; i < GIC_MAXIRQ; i++) {
- GIC_SET_GROUP(i, ALL_CPU_MASK);
+ GIC_DIST_SET_GROUP(i, ALL_CPU_MASK);
}
}
diff --git a/hw/intc/arm_gic_kvm.c b/hw/intc/arm_gic_kvm.c
index 86665080bdc..4b611c8d6df 100644
--- a/hw/intc/arm_gic_kvm.c
+++ b/hw/intc/arm_gic_kvm.c
@@ -140,10 +140,10 @@ static void translate_group(GICState *s, int irq, int cpu,
int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK;
if (to_kernel) {
- *field = GIC_TEST_GROUP(irq, cm);
+ *field = GIC_DIST_TEST_GROUP(irq, cm);
} else {
if (*field & 1) {
- GIC_SET_GROUP(irq, cm);
+ GIC_DIST_SET_GROUP(irq, cm);
}
}
}
@@ -154,10 +154,10 @@ static void translate_enabled(GICState *s, int irq, int cpu,
int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK;
if (to_kernel) {
- *field = GIC_TEST_ENABLED(irq, cm);
+ *field = GIC_DIST_TEST_ENABLED(irq, cm);
} else {
if (*field & 1) {
- GIC_SET_ENABLED(irq, cm);
+ GIC_DIST_SET_ENABLED(irq, cm);
}
}
}
@@ -171,7 +171,7 @@ static void translate_pending(GICState *s, int irq, int cpu,
*field = gic_test_pending(s, irq, cm);
} else {
if (*field & 1) {
- GIC_SET_PENDING(irq, cm);
+ GIC_DIST_SET_PENDING(irq, cm);
/* TODO: Capture is level-line is held high in the kernel */
}
}
@@ -183,10 +183,10 @@ static void translate_active(GICState *s, int irq, int cpu,
int cm = (irq < GIC_INTERNAL) ? (1 << cpu) : ALL_CPU_MASK;
if (to_kernel) {
- *field = GIC_TEST_ACTIVE(irq, cm);
+ *field = GIC_DIST_TEST_ACTIVE(irq, cm);
} else {
if (*field & 1) {
- GIC_SET_ACTIVE(irq, cm);
+ GIC_DIST_SET_ACTIVE(irq, cm);
}
}
}
@@ -195,10 +195,10 @@ static void translate_trigger(GICState *s, int irq, int cpu,
uint32_t *field, bool to_kernel)
{
if (to_kernel) {
- *field = (GIC_TEST_EDGE_TRIGGER(irq)) ? 0x2 : 0x0;
+ *field = (GIC_DIST_TEST_EDGE_TRIGGER(irq)) ? 0x2 : 0x0;
} else {
if (*field & 0x2) {
- GIC_SET_EDGE_TRIGGER(irq);
+ GIC_DIST_SET_EDGE_TRIGGER(irq);
}
}
}
@@ -207,9 +207,10 @@ static void translate_priority(GICState *s, int irq, int cpu,
uint32_t *field, bool to_kernel)
{
if (to_kernel) {
- *field = GIC_GET_PRIORITY(irq, cpu) & 0xff;
+ *field = GIC_DIST_GET_PRIORITY(irq, cpu) & 0xff;
} else {
- gic_set_priority(s, cpu, irq, *field & 0xff, MEMTXATTRS_UNSPECIFIED);
+ gic_dist_set_priority(s, cpu, irq,
+ *field & 0xff, MEMTXATTRS_UNSPECIFIED);
}
}
--
2.18.0
^ permalink raw reply related [flat|nested] 47+ messages in thread
* [Qemu-devel] [PULL 13/45] intc/arm_gic: Implement GICD_ISACTIVERn and GICD_ICACTIVERn registers
2018-08-14 18:17 [Qemu-devel] [PULL 00/45] target-arm queue Peter Maydell
` (11 preceding siblings ...)
2018-08-14 18:17 ` [Qemu-devel] [PULL 12/45] intc/arm_gic: Refactor operations on the distributor Peter Maydell
@ 2018-08-14 18:17 ` Peter Maydell
2018-08-14 18:17 ` [Qemu-devel] [PULL 14/45] intc/arm_gic: Remove some dead code and put some functions static Peter Maydell
` (32 subsequent siblings)
45 siblings, 0 replies; 47+ messages in thread
From: Peter Maydell @ 2018-08-14 18:17 UTC (permalink / raw)
To: qemu-devel
From: Luc Michel <luc.michel@greensocs.com>
Implement GICD_ISACTIVERn and GICD_ICACTIVERn registers in the GICv2.
These registers allow the guest to set or clear the active state of an
IRQ in the distributor.
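As an editorial aside, here is a minimal, hypothetical sketch (not part
of the patch) of how a byte write into the GICD_ISACTIVERn range decodes
into IRQ numbers, mirroring the offset arithmetic used in the
gic_dist_writeb() hunks below; the names are local to the example and
GIC_BASE_IRQ is omitted for simplicity:

#include <stdio.h>

int main(void)
{
    unsigned offset = 0x304;  /* a byte inside the GICD_ISACTIVERn range */
    unsigned value  = 0x81;   /* bits 0 and 7 set by the guest */
    unsigned base_irq = (offset - 0x300) * 8;  /* 8 IRQs per byte */
    int i;

    for (i = 0; i < 8; i++) {
        if (value & (1u << i)) {
            printf("set active state of IRQ %u\n", base_irq + i);
        }
    }
    return 0;
}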
Signed-off-by: Luc Michel <luc.michel@greensocs.com>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20180727095421.386-3-luc.michel@greensocs.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
hw/intc/arm_gic.c | 61 +++++++++++++++++++++++++++++++++++++++++++----
1 file changed, 57 insertions(+), 4 deletions(-)
diff --git a/hw/intc/arm_gic.c b/hw/intc/arm_gic.c
index 9286236d86b..53b749d2167 100644
--- a/hw/intc/arm_gic.c
+++ b/hw/intc/arm_gic.c
@@ -725,8 +725,16 @@ static uint32_t gic_dist_readb(void *opaque, hwaddr offset, MemTxAttrs attrs)
}
}
} else if (offset < 0x400) {
- /* Interrupt Active. */
- irq = (offset - 0x300) * 8 + GIC_BASE_IRQ;
+ /* Interrupt Set/Clear Active. */
+ if (offset < 0x380) {
+ irq = (offset - 0x300) * 8;
+ } else if (s->revision == 2) {
+ irq = (offset - 0x380) * 8;
+ } else {
+ goto bad_reg;
+ }
+
+ irq += GIC_BASE_IRQ;
if (irq >= s->num_irq)
goto bad_reg;
res = 0;
@@ -1007,9 +1015,54 @@ static void gic_dist_writeb(void *opaque, hwaddr offset,
GIC_DIST_CLEAR_PENDING(irq + i, ALL_CPU_MASK);
}
}
+ } else if (offset < 0x380) {
+ /* Interrupt Set Active. */
+ if (s->revision != 2) {
+ goto bad_reg;
+ }
+
+ irq = (offset - 0x300) * 8 + GIC_BASE_IRQ;
+ if (irq >= s->num_irq) {
+ goto bad_reg;
+ }
+
+ /* This register is banked per-cpu for PPIs */
+ int cm = irq < GIC_INTERNAL ? (1 << cpu) : ALL_CPU_MASK;
+
+ for (i = 0; i < 8; i++) {
+ if (s->security_extn && !attrs.secure &&
+ !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) {
+ continue; /* Ignore Non-secure access of Group0 IRQ */
+ }
+
+ if (value & (1 << i)) {
+ GIC_DIST_SET_ACTIVE(irq + i, cm);
+ }
+ }
} else if (offset < 0x400) {
- /* Interrupt Active. */
- goto bad_reg;
+ /* Interrupt Clear Active. */
+ if (s->revision != 2) {
+ goto bad_reg;
+ }
+
+ irq = (offset - 0x380) * 8 + GIC_BASE_IRQ;
+ if (irq >= s->num_irq) {
+ goto bad_reg;
+ }
+
+ /* This register is banked per-cpu for PPIs */
+ int cm = irq < GIC_INTERNAL ? (1 << cpu) : ALL_CPU_MASK;
+
+ for (i = 0; i < 8; i++) {
+ if (s->security_extn && !attrs.secure &&
+ !GIC_DIST_TEST_GROUP(irq + i, 1 << cpu)) {
+ continue; /* Ignore Non-secure access of Group0 IRQ */
+ }
+
+ if (value & (1 << i)) {
+ GIC_DIST_CLEAR_ACTIVE(irq + i, cm);
+ }
+ }
} else if (offset < 0x800) {
/* Interrupt Priority. */
irq = (offset - 0x400) + GIC_BASE_IRQ;
--
2.18.0
^ permalink raw reply related [flat|nested] 47+ messages in thread
* [Qemu-devel] [PULL 14/45] intc/arm_gic: Remove some dead code and put some functions static
2018-08-14 18:17 [Qemu-devel] [PULL 00/45] target-arm queue Peter Maydell
` (12 preceding siblings ...)
2018-08-14 18:17 ` [Qemu-devel] [PULL 13/45] intc/arm_gic: Implement GICD_ISACTIVERn and GICD_ICACTIVERn registers Peter Maydell
@ 2018-08-14 18:17 ` Peter Maydell
2018-08-14 18:17 ` [Qemu-devel] [PULL 15/45] vmstate.h: Provide VMSTATE_UINT16_SUB_ARRAY Peter Maydell
` (31 subsequent siblings)
45 siblings, 0 replies; 47+ messages in thread
From: Peter Maydell @ 2018-08-14 18:17 UTC (permalink / raw)
To: qemu-devel
From: Luc Michel <luc.michel@greensocs.com>
Some functions are now only used in arm_gic.c, so make them static.
Some of them were only used by the NVIC implementation and are no
longer used at all, so remove them.
Signed-off-by: Luc Michel <luc.michel@greensocs.com>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20180727095421.386-4-luc.michel@greensocs.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
hw/intc/gic_internal.h | 4 ----
hw/intc/arm_gic.c | 23 ++---------------------
2 files changed, 2 insertions(+), 25 deletions(-)
diff --git a/hw/intc/gic_internal.h b/hw/intc/gic_internal.h
index 6f8d242904b..a2075a94dbb 100644
--- a/hw/intc/gic_internal.h
+++ b/hw/intc/gic_internal.h
@@ -75,11 +75,7 @@
/* The special cases for the revision property: */
#define REV_11MPCORE 0
-void gic_set_pending_private(GICState *s, int cpu, int irq);
uint32_t gic_acknowledge_irq(GICState *s, int cpu, MemTxAttrs attrs);
-void gic_complete_irq(GICState *s, int cpu, int irq, MemTxAttrs attrs);
-void gic_update(GICState *s);
-void gic_init_irqs_and_distributor(GICState *s);
void gic_dist_set_priority(GICState *s, int cpu, int irq, uint8_t val,
MemTxAttrs attrs);
diff --git a/hw/intc/arm_gic.c b/hw/intc/arm_gic.c
index 53b749d2167..b8eba6e5944 100644
--- a/hw/intc/arm_gic.c
+++ b/hw/intc/arm_gic.c
@@ -71,7 +71,7 @@ static inline bool gic_has_groups(GICState *s)
/* TODO: Many places that call this routine could be optimized. */
/* Update interrupt status after enabled or pending bits have been changed. */
-void gic_update(GICState *s)
+static void gic_update(GICState *s)
{
int best_irq;
int best_prio;
@@ -137,19 +137,6 @@ void gic_update(GICState *s)
}
}
-void gic_set_pending_private(GICState *s, int cpu, int irq)
-{
- int cm = 1 << cpu;
-
- if (gic_test_pending(s, irq, cm)) {
- return;
- }
-
- DPRINTF("Set %d pending cpu %d\n", irq, cpu);
- GIC_DIST_SET_PENDING(irq, cm);
- gic_update(s);
-}
-
static void gic_set_irq_11mpcore(GICState *s, int irq, int level,
int cm, int target)
{
@@ -579,7 +566,7 @@ static void gic_deactivate_irq(GICState *s, int cpu, int irq, MemTxAttrs attrs)
GIC_DIST_CLEAR_ACTIVE(irq, cm);
}
-void gic_complete_irq(GICState *s, int cpu, int irq, MemTxAttrs attrs)
+static void gic_complete_irq(GICState *s, int cpu, int irq, MemTxAttrs attrs)
{
int cm = 1 << cpu;
int group;
@@ -1488,12 +1475,6 @@ static const MemoryRegionOps gic_cpu_ops = {
.endianness = DEVICE_NATIVE_ENDIAN,
};
-/* This function is used by nvic model */
-void gic_init_irqs_and_distributor(GICState *s)
-{
- gic_init_irqs_and_mmio(s, gic_set_irq, gic_ops);
-}
-
static void arm_gic_realize(DeviceState *dev, Error **errp)
{
/* Device instance realize function for the GIC sysbus device */
--
2.18.0
^ permalink raw reply related [flat|nested] 47+ messages in thread
* [Qemu-devel] [PULL 15/45] vmstate.h: Provide VMSTATE_UINT16_SUB_ARRAY
2018-08-14 18:17 [Qemu-devel] [PULL 00/45] target-arm queue Peter Maydell
` (13 preceding siblings ...)
2018-08-14 18:17 ` [Qemu-devel] [PULL 14/45] intc/arm_gic: Remove some dead code and put some functions static Peter Maydell
@ 2018-08-14 18:17 ` Peter Maydell
2018-08-14 18:17 ` [Qemu-devel] [PULL 16/45] intc/arm_gic: Add the virtualization extensions to the GIC state Peter Maydell
` (30 subsequent siblings)
45 siblings, 0 replies; 47+ messages in thread
From: Peter Maydell @ 2018-08-14 18:17 UTC (permalink / raw)
To: qemu-devel
From: Luc Michel <luc.michel@greensocs.com>
Provide a VMSTATE_UINT16_SUB_ARRAY macro to save a uint16_t sub-array in
a VMState.
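A brief usage sketch may help; it is illustrative only (ExampleState and
vmstate_example are made up for the example), but it mirrors how the
macro is used later in this series for GICState's priority_mask,
running_priority and current_pending arrays:

#include "migration/vmstate.h"

typedef struct ExampleState {
    uint16_t priority_mask[16];
} ExampleState;

/* Migrate only elements [0, 8) of the uint16_t array field */
static const VMStateDescription vmstate_example = {
    .name = "example",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT16_SUB_ARRAY(priority_mask, ExampleState, 0, 8),
        VMSTATE_END_OF_LIST()
    }
};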
Signed-off-by: Luc Michel <luc.michel@greensocs.com>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-id: 20180727095421.386-5-luc.michel@greensocs.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
include/migration/vmstate.h | 3 +++
1 file changed, 3 insertions(+)
diff --git a/include/migration/vmstate.h b/include/migration/vmstate.h
index 42b946ce902..2b501d04669 100644
--- a/include/migration/vmstate.h
+++ b/include/migration/vmstate.h
@@ -923,6 +923,9 @@ extern const VMStateInfo vmstate_info_qtailq;
#define VMSTATE_UINT16_ARRAY(_f, _s, _n) \
VMSTATE_UINT16_ARRAY_V(_f, _s, _n, 0)
+#define VMSTATE_UINT16_SUB_ARRAY(_f, _s, _start, _num) \
+ VMSTATE_SUB_ARRAY(_f, _s, _start, _num, 0, vmstate_info_uint16, uint16_t)
+
#define VMSTATE_UINT16_2DARRAY(_f, _s, _n1, _n2) \
VMSTATE_UINT16_2DARRAY_V(_f, _s, _n1, _n2, 0)
--
2.18.0
^ permalink raw reply related [flat|nested] 47+ messages in thread
* [Qemu-devel] [PULL 16/45] intc/arm_gic: Add the virtualization extensions to the GIC state
2018-08-14 18:17 [Qemu-devel] [PULL 00/45] target-arm queue Peter Maydell
` (14 preceding siblings ...)
2018-08-14 18:17 ` [Qemu-devel] [PULL 15/45] vmstate.h: Provide VMSTATE_UINT16_SUB_ARRAY Peter Maydell
@ 2018-08-14 18:17 ` Peter Maydell
2018-08-14 18:17 ` [Qemu-devel] [PULL 17/45] intc/arm_gic: Add virtual interface register definitions Peter Maydell
` (29 subsequent siblings)
45 siblings, 0 replies; 47+ messages in thread
From: Peter Maydell @ 2018-08-14 18:17 UTC (permalink / raw)
To: qemu-devel
From: Luc Michel <luc.michel@greensocs.com>
Add the necessary parts of the virtualization extensions state to the
GIC state. We choose to increase the size of the CPU interfaces state to
add space for the vCPU interfaces (the GIC_NCPU_VCPU macro). This way,
we'll be able to reuse most of the CPU interface code for the vCPUs.
The only exception is the APR value, which is stored in h_apr in the
virtual interface state for vCPUs. This is due to some complications
with the GIC VMState, for which we don't want to break backward
compatibility. APRs being stored in 2D arrays, increasing the second
dimension would lead to some ugly VMState description. To avoid
that, we keep it in h_apr for vCPUs.
The vCPUs are numbered from GIC_NCPU to (GIC_NCPU * 2) - 1. The
`gic_is_vcpu` function helps determine whether a given CPU id
corresponds to a physical CPU or a virtual one.
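As a quick illustration of the numbering scheme (a standalone sketch,
not part of the patch, with names local to the example): physical CPU i
uses index i in the per-CPU state arrays, while its vCPU interface state
lives at index GIC_NCPU + i; the reverse mapping is added later in this
series as gic_get_vcpu_real_id().

#include <assert.h>
#include <stdbool.h>

#define GIC_NCPU 8

static bool is_vcpu(int cpu)
{
    return cpu >= GIC_NCPU;   /* same test as the patch's gic_is_vcpu() */
}

int main(void)
{
    assert(!is_vcpu(3));            /* physical CPU 3 */
    assert(is_vcpu(GIC_NCPU + 3));  /* vCPU interface of CPU 3 */
    return 0;
}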
For the in-kernel KVM VGIC, since the exposed VGIC does not implement
the virtualization extensions, we report an error if the corresponding
property is set to true.
Signed-off-by: Luc Michel <luc.michel@greensocs.com>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20180727095421.386-6-luc.michel@greensocs.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
hw/intc/gic_internal.h | 5 ++
include/hw/intc/arm_gic_common.h | 43 +++++++--
hw/intc/arm_gic.c | 2 +-
hw/intc/arm_gic_common.c | 148 ++++++++++++++++++++++++++-----
hw/intc/arm_gic_kvm.c | 8 +-
5 files changed, 173 insertions(+), 33 deletions(-)
diff --git a/hw/intc/gic_internal.h b/hw/intc/gic_internal.h
index a2075a94dbb..c85427c8e31 100644
--- a/hw/intc/gic_internal.h
+++ b/hw/intc/gic_internal.h
@@ -94,4 +94,9 @@ static inline bool gic_test_pending(GICState *s, int irq, int cm)
}
}
+static inline bool gic_is_vcpu(int cpu)
+{
+ return cpu >= GIC_NCPU;
+}
+
#endif /* QEMU_ARM_GIC_INTERNAL_H */
diff --git a/include/hw/intc/arm_gic_common.h b/include/hw/intc/arm_gic_common.h
index af3ca18e2f0..b5585fec451 100644
--- a/include/hw/intc/arm_gic_common.h
+++ b/include/hw/intc/arm_gic_common.h
@@ -30,6 +30,8 @@
#define GIC_NR_SGIS 16
/* Maximum number of possible CPU interfaces, determined by GIC architecture */
#define GIC_NCPU 8
+/* Maximum number of possible CPU interfaces with their respective vCPU */
+#define GIC_NCPU_VCPU (GIC_NCPU * 2)
#define MAX_NR_GROUP_PRIO 128
#define GIC_NR_APRS (MAX_NR_GROUP_PRIO / 32)
@@ -37,6 +39,17 @@
#define GIC_MIN_BPR 0
#define GIC_MIN_ABPR (GIC_MIN_BPR + 1)
+/* Architectural maximum number of list registers in the virtual interface */
+#define GIC_MAX_LR 64
+
+/* Only 32 priority levels and 32 preemption levels in the vCPU interfaces */
+#define GIC_VIRT_MAX_GROUP_PRIO_BITS 5
+#define GIC_VIRT_MAX_NR_GROUP_PRIO (1 << GIC_VIRT_MAX_GROUP_PRIO_BITS)
+#define GIC_VIRT_NR_APRS (GIC_VIRT_MAX_NR_GROUP_PRIO / 32)
+
+#define GIC_VIRT_MIN_BPR 2
+#define GIC_VIRT_MIN_ABPR (GIC_VIRT_MIN_BPR + 1)
+
typedef struct gic_irq_state {
/* The enable bits are only banked for per-cpu interrupts. */
uint8_t enabled;
@@ -57,6 +70,8 @@ typedef struct GICState {
qemu_irq parent_fiq[GIC_NCPU];
qemu_irq parent_virq[GIC_NCPU];
qemu_irq parent_vfiq[GIC_NCPU];
+ qemu_irq maintenance_irq[GIC_NCPU];
+
/* GICD_CTLR; for a GIC with the security extensions the NS banked version
* of this register is just an alias of bit 1 of the S banked version.
*/
@@ -64,7 +79,7 @@ typedef struct GICState {
/* GICC_CTLR; again, the NS banked version is just aliases of bits of
* the S banked register, so our state only needs to store the S version.
*/
- uint32_t cpu_ctlr[GIC_NCPU];
+ uint32_t cpu_ctlr[GIC_NCPU_VCPU];
gic_irq_state irq_state[GIC_MAXIRQ];
uint8_t irq_target[GIC_MAXIRQ];
@@ -78,9 +93,9 @@ typedef struct GICState {
*/
uint8_t sgi_pending[GIC_NR_SGIS][GIC_NCPU];
- uint16_t priority_mask[GIC_NCPU];
- uint16_t running_priority[GIC_NCPU];
- uint16_t current_pending[GIC_NCPU];
+ uint16_t priority_mask[GIC_NCPU_VCPU];
+ uint16_t running_priority[GIC_NCPU_VCPU];
+ uint16_t current_pending[GIC_NCPU_VCPU];
/* If we present the GICv2 without security extensions to a guest,
* the guest can configure the GICC_CTLR to configure group 1 binary point
@@ -88,8 +103,8 @@ typedef struct GICState {
* For a GIC with Security Extensions we use use bpr for the
* secure copy and abpr as storage for the non-secure copy of the register.
*/
- uint8_t bpr[GIC_NCPU];
- uint8_t abpr[GIC_NCPU];
+ uint8_t bpr[GIC_NCPU_VCPU];
+ uint8_t abpr[GIC_NCPU_VCPU];
/* The APR is implementation defined, so we choose a layout identical to
* the KVM ABI layout for QEMU's implementation of the gic:
@@ -100,6 +115,15 @@ typedef struct GICState {
uint32_t apr[GIC_NR_APRS][GIC_NCPU];
uint32_t nsapr[GIC_NR_APRS][GIC_NCPU];
+ /* Virtual interface control registers */
+ uint32_t h_hcr[GIC_NCPU];
+ uint32_t h_misr[GIC_NCPU];
+ uint32_t h_lr[GIC_MAX_LR][GIC_NCPU];
+ uint32_t h_apr[GIC_NCPU];
+
+ /* Number of LRs implemented in this GIC instance */
+ uint32_t num_lrs;
+
uint32_t num_cpu;
MemoryRegion iomem; /* Distributor */
@@ -108,9 +132,13 @@ typedef struct GICState {
*/
struct GICState *backref[GIC_NCPU];
MemoryRegion cpuiomem[GIC_NCPU + 1]; /* CPU interfaces */
+ MemoryRegion vifaceiomem[GIC_NCPU + 1]; /* Virtual interfaces */
+ MemoryRegion vcpuiomem; /* vCPU interface */
+
uint32_t num_irq;
uint32_t revision;
bool security_extn;
+ bool virt_extn;
bool irq_reset_nonsecure; /* configure IRQs as group 1 (NS) on reset? */
int dev_fd; /* kvm device fd if backed by kvm vgic support */
Error *migration_blocker;
@@ -134,6 +162,7 @@ typedef struct ARMGICCommonClass {
} ARMGICCommonClass;
void gic_init_irqs_and_mmio(GICState *s, qemu_irq_handler handler,
- const MemoryRegionOps *ops);
+ const MemoryRegionOps *ops,
+ const MemoryRegionOps *virt_ops);
#endif
diff --git a/hw/intc/arm_gic.c b/hw/intc/arm_gic.c
index b8eba6e5944..5231579985f 100644
--- a/hw/intc/arm_gic.c
+++ b/hw/intc/arm_gic.c
@@ -1497,7 +1497,7 @@ static void arm_gic_realize(DeviceState *dev, Error **errp)
}
/* This creates distributor and main CPU interface (s->cpuiomem[0]) */
- gic_init_irqs_and_mmio(s, gic_set_irq, gic_ops);
+ gic_init_irqs_and_mmio(s, gic_set_irq, gic_ops, NULL);
/* Extra core-specific regions for the CPU interfaces. This is
* necessary for "franken-GIC" implementations, for example on
diff --git a/hw/intc/arm_gic_common.c b/hw/intc/arm_gic_common.c
index 295ee9cc5e5..547dc41185e 100644
--- a/hw/intc/arm_gic_common.c
+++ b/hw/intc/arm_gic_common.c
@@ -46,6 +46,13 @@ static int gic_post_load(void *opaque, int version_id)
return 0;
}
+static bool gic_virt_state_needed(void *opaque)
+{
+ GICState *s = (GICState *)opaque;
+
+ return s->virt_extn;
+}
+
static const VMStateDescription vmstate_gic_irq_state = {
.name = "arm_gic_irq_state",
.version_id = 1,
@@ -62,6 +69,30 @@ static const VMStateDescription vmstate_gic_irq_state = {
}
};
+static const VMStateDescription vmstate_gic_virt_state = {
+ .name = "arm_gic_virt_state",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = gic_virt_state_needed,
+ .fields = (VMStateField[]) {
+ /* Virtual interface */
+ VMSTATE_UINT32_ARRAY(h_hcr, GICState, GIC_NCPU),
+ VMSTATE_UINT32_ARRAY(h_misr, GICState, GIC_NCPU),
+ VMSTATE_UINT32_2DARRAY(h_lr, GICState, GIC_MAX_LR, GIC_NCPU),
+ VMSTATE_UINT32_ARRAY(h_apr, GICState, GIC_NCPU),
+
+ /* Virtual CPU interfaces */
+ VMSTATE_UINT32_SUB_ARRAY(cpu_ctlr, GICState, GIC_NCPU, GIC_NCPU),
+ VMSTATE_UINT16_SUB_ARRAY(priority_mask, GICState, GIC_NCPU, GIC_NCPU),
+ VMSTATE_UINT16_SUB_ARRAY(running_priority, GICState, GIC_NCPU, GIC_NCPU),
+ VMSTATE_UINT16_SUB_ARRAY(current_pending, GICState, GIC_NCPU, GIC_NCPU),
+ VMSTATE_UINT8_SUB_ARRAY(bpr, GICState, GIC_NCPU, GIC_NCPU),
+ VMSTATE_UINT8_SUB_ARRAY(abpr, GICState, GIC_NCPU, GIC_NCPU),
+
+ VMSTATE_END_OF_LIST()
+ }
+};
+
static const VMStateDescription vmstate_gic = {
.name = "arm_gic",
.version_id = 12,
@@ -70,26 +101,31 @@ static const VMStateDescription vmstate_gic = {
.post_load = gic_post_load,
.fields = (VMStateField[]) {
VMSTATE_UINT32(ctlr, GICState),
- VMSTATE_UINT32_ARRAY(cpu_ctlr, GICState, GIC_NCPU),
+ VMSTATE_UINT32_SUB_ARRAY(cpu_ctlr, GICState, 0, GIC_NCPU),
VMSTATE_STRUCT_ARRAY(irq_state, GICState, GIC_MAXIRQ, 1,
vmstate_gic_irq_state, gic_irq_state),
VMSTATE_UINT8_ARRAY(irq_target, GICState, GIC_MAXIRQ),
VMSTATE_UINT8_2DARRAY(priority1, GICState, GIC_INTERNAL, GIC_NCPU),
VMSTATE_UINT8_ARRAY(priority2, GICState, GIC_MAXIRQ - GIC_INTERNAL),
VMSTATE_UINT8_2DARRAY(sgi_pending, GICState, GIC_NR_SGIS, GIC_NCPU),
- VMSTATE_UINT16_ARRAY(priority_mask, GICState, GIC_NCPU),
- VMSTATE_UINT16_ARRAY(running_priority, GICState, GIC_NCPU),
- VMSTATE_UINT16_ARRAY(current_pending, GICState, GIC_NCPU),
- VMSTATE_UINT8_ARRAY(bpr, GICState, GIC_NCPU),
- VMSTATE_UINT8_ARRAY(abpr, GICState, GIC_NCPU),
+ VMSTATE_UINT16_SUB_ARRAY(priority_mask, GICState, 0, GIC_NCPU),
+ VMSTATE_UINT16_SUB_ARRAY(running_priority, GICState, 0, GIC_NCPU),
+ VMSTATE_UINT16_SUB_ARRAY(current_pending, GICState, 0, GIC_NCPU),
+ VMSTATE_UINT8_SUB_ARRAY(bpr, GICState, 0, GIC_NCPU),
+ VMSTATE_UINT8_SUB_ARRAY(abpr, GICState, 0, GIC_NCPU),
VMSTATE_UINT32_2DARRAY(apr, GICState, GIC_NR_APRS, GIC_NCPU),
VMSTATE_UINT32_2DARRAY(nsapr, GICState, GIC_NR_APRS, GIC_NCPU),
VMSTATE_END_OF_LIST()
+ },
+ .subsections = (const VMStateDescription * []) {
+ &vmstate_gic_virt_state,
+ NULL
}
};
void gic_init_irqs_and_mmio(GICState *s, qemu_irq_handler handler,
- const MemoryRegionOps *ops)
+ const MemoryRegionOps *ops,
+ const MemoryRegionOps *virt_ops)
{
SysBusDevice *sbd = SYS_BUS_DEVICE(s);
int i = s->num_irq - GIC_INTERNAL;
@@ -116,6 +152,11 @@ void gic_init_irqs_and_mmio(GICState *s, qemu_irq_handler handler,
for (i = 0; i < s->num_cpu; i++) {
sysbus_init_irq(sbd, &s->parent_vfiq[i]);
}
+ if (s->virt_extn) {
+ for (i = 0; i < s->num_cpu; i++) {
+ sysbus_init_irq(sbd, &s->maintenance_irq[i]);
+ }
+ }
/* Distributor */
memory_region_init_io(&s->iomem, OBJECT(s), ops, s, "gic_dist", 0x1000);
@@ -127,6 +168,17 @@ void gic_init_irqs_and_mmio(GICState *s, qemu_irq_handler handler,
memory_region_init_io(&s->cpuiomem[0], OBJECT(s), ops ? &ops[1] : NULL,
s, "gic_cpu", s->revision == 2 ? 0x2000 : 0x100);
sysbus_init_mmio(sbd, &s->cpuiomem[0]);
+
+ if (s->virt_extn) {
+ memory_region_init_io(&s->vifaceiomem[0], OBJECT(s), virt_ops,
+ s, "gic_viface", 0x1000);
+ sysbus_init_mmio(sbd, &s->vifaceiomem[0]);
+
+ memory_region_init_io(&s->vcpuiomem, OBJECT(s),
+ virt_ops ? &virt_ops[1] : NULL,
+ s, "gic_vcpu", 0x2000);
+ sysbus_init_mmio(sbd, &s->vcpuiomem);
+ }
}
static void arm_gic_common_realize(DeviceState *dev, Error **errp)
@@ -163,6 +215,48 @@ static void arm_gic_common_realize(DeviceState *dev, Error **errp)
"the security extensions");
return;
}
+
+ if (s->virt_extn) {
+ if (s->revision != 2) {
+ error_setg(errp, "GIC virtualization extensions are only "
+ "supported by revision 2");
+ return;
+ }
+
+ /* For now, set the number of implemented LRs to 4, as found in most
+ * real GICv2. This could be promoted as a QOM property if we need to
+ * emulate a variant with another num_lrs.
+ */
+ s->num_lrs = 4;
+ }
+}
+
+static inline void arm_gic_common_reset_irq_state(GICState *s, int first_cpu,
+ int resetprio)
+{
+ int i, j;
+
+ for (i = first_cpu; i < first_cpu + s->num_cpu; i++) {
+ if (s->revision == REV_11MPCORE) {
+ s->priority_mask[i] = 0xf0;
+ } else {
+ s->priority_mask[i] = resetprio;
+ }
+ s->current_pending[i] = 1023;
+ s->running_priority[i] = 0x100;
+ s->cpu_ctlr[i] = 0;
+ s->bpr[i] = gic_is_vcpu(i) ? GIC_VIRT_MIN_BPR : GIC_MIN_BPR;
+ s->abpr[i] = gic_is_vcpu(i) ? GIC_VIRT_MIN_ABPR : GIC_MIN_ABPR;
+
+ if (!gic_is_vcpu(i)) {
+ for (j = 0; j < GIC_INTERNAL; j++) {
+ s->priority1[j][i] = resetprio;
+ }
+ for (j = 0; j < GIC_NR_SGIS; j++) {
+ s->sgi_pending[j][i] = 0;
+ }
+ }
+ }
}
static void arm_gic_common_reset(DeviceState *dev)
@@ -185,24 +279,15 @@ static void arm_gic_common_reset(DeviceState *dev)
}
memset(s->irq_state, 0, GIC_MAXIRQ * sizeof(gic_irq_state));
- for (i = 0 ; i < s->num_cpu; i++) {
- if (s->revision == REV_11MPCORE) {
- s->priority_mask[i] = 0xf0;
- } else {
- s->priority_mask[i] = resetprio;
- }
- s->current_pending[i] = 1023;
- s->running_priority[i] = 0x100;
- s->cpu_ctlr[i] = 0;
- s->bpr[i] = GIC_MIN_BPR;
- s->abpr[i] = GIC_MIN_ABPR;
- for (j = 0; j < GIC_INTERNAL; j++) {
- s->priority1[j][i] = resetprio;
- }
- for (j = 0; j < GIC_NR_SGIS; j++) {
- s->sgi_pending[j][i] = 0;
- }
+ arm_gic_common_reset_irq_state(s, 0, resetprio);
+
+ if (s->virt_extn) {
+ /* vCPU states are stored at indexes GIC_NCPU .. GIC_NCPU+num_cpu.
+ * The exposed vCPU interface does not have security extensions.
+ */
+ arm_gic_common_reset_irq_state(s, GIC_NCPU, 0);
}
+
for (i = 0; i < GIC_NR_SGIS; i++) {
GIC_DIST_SET_ENABLED(i, ALL_CPU_MASK);
GIC_DIST_SET_EDGE_TRIGGER(i);
@@ -226,6 +311,19 @@ static void arm_gic_common_reset(DeviceState *dev)
}
}
+ if (s->virt_extn) {
+ for (i = 0; i < s->num_lrs; i++) {
+ for (j = 0; j < s->num_cpu; j++) {
+ s->h_lr[i][j] = 0;
+ }
+ }
+
+ for (i = 0; i < s->num_cpu; i++) {
+ s->h_hcr[i] = 0;
+ s->h_misr[i] = 0;
+ }
+ }
+
s->ctlr = 0;
}
@@ -255,6 +353,8 @@ static Property arm_gic_common_properties[] = {
DEFINE_PROP_UINT32("revision", GICState, revision, 1),
/* True if the GIC should implement the security extensions */
DEFINE_PROP_BOOL("has-security-extensions", GICState, security_extn, 0),
+ /* True if the GIC should implement the virtualization extensions */
+ DEFINE_PROP_BOOL("has-virtualization-extensions", GICState, virt_extn, 0),
DEFINE_PROP_END_OF_LIST(),
};
diff --git a/hw/intc/arm_gic_kvm.c b/hw/intc/arm_gic_kvm.c
index 4b611c8d6df..a611e8ee125 100644
--- a/hw/intc/arm_gic_kvm.c
+++ b/hw/intc/arm_gic_kvm.c
@@ -511,6 +511,12 @@ static void kvm_arm_gic_realize(DeviceState *dev, Error **errp)
return;
}
+ if (s->virt_extn) {
+ error_setg(errp, "the in-kernel VGIC does not implement the "
+ "virtualization extensions");
+ return;
+ }
+
if (!kvm_arm_gic_can_save_restore(s)) {
error_setg(&s->migration_blocker, "This operating system kernel does "
"not support vGICv2 migration");
@@ -522,7 +528,7 @@ static void kvm_arm_gic_realize(DeviceState *dev, Error **errp)
}
}
- gic_init_irqs_and_mmio(s, kvm_arm_gicv2_set_irq, NULL);
+ gic_init_irqs_and_mmio(s, kvm_arm_gicv2_set_irq, NULL, NULL);
for (i = 0; i < s->num_irq - GIC_INTERNAL; i++) {
qemu_irq irq = qdev_get_gpio_in(dev, i);
--
2.18.0
^ permalink raw reply related [flat|nested] 47+ messages in thread
* [Qemu-devel] [PULL 17/45] intc/arm_gic: Add virtual interface register definitions
2018-08-14 18:17 [Qemu-devel] [PULL 00/45] target-arm queue Peter Maydell
` (15 preceding siblings ...)
2018-08-14 18:17 ` [Qemu-devel] [PULL 16/45] intc/arm_gic: Add the virtualization extensions to the GIC state Peter Maydell
@ 2018-08-14 18:17 ` Peter Maydell
2018-08-14 18:17 ` [Qemu-devel] [PULL 18/45] intc/arm_gic: Add virtualization extensions helper macros and functions Peter Maydell
` (28 subsequent siblings)
45 siblings, 0 replies; 47+ messages in thread
From: Peter Maydell @ 2018-08-14 18:17 UTC (permalink / raw)
To: qemu-devel
From: Luc Michel <luc.michel@greensocs.com>
Add the register definitions for the virtual interface of the GICv2.
Signed-off-by: Luc Michel <luc.michel@greensocs.com>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20180727095421.386-7-luc.michel@greensocs.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
hw/intc/gic_internal.h | 65 ++++++++++++++++++++++++++++++++++++++++++
1 file changed, 65 insertions(+)
diff --git a/hw/intc/gic_internal.h b/hw/intc/gic_internal.h
index c85427c8e31..1aa888a5762 100644
--- a/hw/intc/gic_internal.h
+++ b/hw/intc/gic_internal.h
@@ -21,6 +21,7 @@
#ifndef QEMU_ARM_GIC_INTERNAL_H
#define QEMU_ARM_GIC_INTERNAL_H
+#include "hw/registerfields.h"
#include "hw/intc/arm_gic.h"
#define ALL_CPU_MASK ((unsigned)(((1 << GIC_NCPU) - 1)))
@@ -64,6 +65,70 @@
#define GICC_CTLR_EOIMODE (1U << 9)
#define GICC_CTLR_EOIMODE_NS (1U << 10)
+REG32(GICH_HCR, 0x0)
+ FIELD(GICH_HCR, EN, 0, 1)
+ FIELD(GICH_HCR, UIE, 1, 1)
+ FIELD(GICH_HCR, LRENPIE, 2, 1)
+ FIELD(GICH_HCR, NPIE, 3, 1)
+ FIELD(GICH_HCR, VGRP0EIE, 4, 1)
+ FIELD(GICH_HCR, VGRP0DIE, 5, 1)
+ FIELD(GICH_HCR, VGRP1EIE, 6, 1)
+ FIELD(GICH_HCR, VGRP1DIE, 7, 1)
+ FIELD(GICH_HCR, EOICount, 27, 5)
+
+#define GICH_HCR_MASK \
+ (R_GICH_HCR_EN_MASK | R_GICH_HCR_UIE_MASK | \
+ R_GICH_HCR_LRENPIE_MASK | R_GICH_HCR_NPIE_MASK | \
+ R_GICH_HCR_VGRP0EIE_MASK | R_GICH_HCR_VGRP0DIE_MASK | \
+ R_GICH_HCR_VGRP1EIE_MASK | R_GICH_HCR_VGRP1DIE_MASK | \
+ R_GICH_HCR_EOICount_MASK)
+
+REG32(GICH_VTR, 0x4)
+ FIELD(GICH_VTR, ListRegs, 0, 6)
+ FIELD(GICH_VTR, PREbits, 26, 3)
+ FIELD(GICH_VTR, PRIbits, 29, 3)
+
+REG32(GICH_VMCR, 0x8)
+ FIELD(GICH_VMCR, VMCCtlr, 0, 10)
+ FIELD(GICH_VMCR, VMABP, 18, 3)
+ FIELD(GICH_VMCR, VMBP, 21, 3)
+ FIELD(GICH_VMCR, VMPriMask, 27, 5)
+
+REG32(GICH_MISR, 0x10)
+ FIELD(GICH_MISR, EOI, 0, 1)
+ FIELD(GICH_MISR, U, 1, 1)
+ FIELD(GICH_MISR, LRENP, 2, 1)
+ FIELD(GICH_MISR, NP, 3, 1)
+ FIELD(GICH_MISR, VGrp0E, 4, 1)
+ FIELD(GICH_MISR, VGrp0D, 5, 1)
+ FIELD(GICH_MISR, VGrp1E, 6, 1)
+ FIELD(GICH_MISR, VGrp1D, 7, 1)
+
+REG32(GICH_EISR0, 0x20)
+REG32(GICH_EISR1, 0x24)
+REG32(GICH_ELRSR0, 0x30)
+REG32(GICH_ELRSR1, 0x34)
+REG32(GICH_APR, 0xf0)
+
+REG32(GICH_LR0, 0x100)
+ FIELD(GICH_LR0, VirtualID, 0, 10)
+ FIELD(GICH_LR0, PhysicalID, 10, 10)
+ FIELD(GICH_LR0, CPUID, 10, 3)
+ FIELD(GICH_LR0, EOI, 19, 1)
+ FIELD(GICH_LR0, Priority, 23, 5)
+ FIELD(GICH_LR0, State, 28, 2)
+ FIELD(GICH_LR0, Grp1, 30, 1)
+ FIELD(GICH_LR0, HW, 31, 1)
+
+/* Last LR register */
+REG32(GICH_LR63, 0x1fc)
+
+#define GICH_LR_MASK \
+ (R_GICH_LR0_VirtualID_MASK | R_GICH_LR0_PhysicalID_MASK | \
+ R_GICH_LR0_CPUID_MASK | R_GICH_LR0_EOI_MASK | \
+ R_GICH_LR0_Priority_MASK | R_GICH_LR0_State_MASK | \
+ R_GICH_LR0_Grp1_MASK | R_GICH_LR0_HW_MASK)
+
/* Valid bits for GICC_CTLR for GICv1, v1 with security extensions,
* GICv2 and GICv2 with security extensions:
*/
--
2.18.0
^ permalink raw reply related [flat|nested] 47+ messages in thread
* [Qemu-devel] [PULL 18/45] intc/arm_gic: Add virtualization extensions helper macros and functions
2018-08-14 18:17 [Qemu-devel] [PULL 00/45] target-arm queue Peter Maydell
` (16 preceding siblings ...)
2018-08-14 18:17 ` [Qemu-devel] [PULL 17/45] intc/arm_gic: Add virtual interface register definitions Peter Maydell
@ 2018-08-14 18:17 ` Peter Maydell
2018-08-14 18:17 ` [Qemu-devel] [PULL 19/45] intc/arm_gic: Refactor secure/ns access check in the CPU interface Peter Maydell
` (27 subsequent siblings)
45 siblings, 0 replies; 47+ messages in thread
From: Peter Maydell @ 2018-08-14 18:17 UTC (permalink / raw)
To: qemu-devel
From: Luc Michel <luc.michel@greensocs.com>
Add some helper macros and functions related to the virtualization
extensions to gic_internal.h.
The GICH_LR_* macros help extract specific fields of a list register
value. The only tricky one is the priority field, as only its most
significant bits are stored. The stored value must be shifted left
accordingly to obtain the correct priority value.
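A small worked example (standalone sketch, not part of the patch): the
LR priority field occupies bits [27:23], i.e. only the 5 most
significant bits of the 8-bit priority, so GICH_LR_PRIORITY() shifts the
field left by 3 to recover the effective priority.

#include <stdio.h>

int main(void)
{
    unsigned lr_priority_field = 0x1f;          /* 5-bit field read from an LR */
    unsigned priority = lr_priority_field << 3; /* as done by GICH_LR_PRIORITY */

    printf("effective priority: 0x%02x\n", priority);  /* prints 0xf8 */
    return 0;
}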
gic_is_vcpu() and gic_get_vcpu_real_id() help with (v)CPU id
manipulation, abstracting the fact that vCPU ids are in the range
[GIC_NCPU, GIC_NCPU + num_cpu).
gic_lr_* and gic_virq_is_valid() help with the list registers.
gic_get_lr_entry() returns the LR entry for a given (vCPU, irq) pair.
It is meant to be used in contexts where we know for sure that the
entry exists, so we assert that the entry is actually found, and the
caller can avoid the NULL check on the returned pointer.
Signed-off-by: Luc Michel <luc.michel@greensocs.com>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20180727095421.386-8-luc.michel@greensocs.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
hw/intc/gic_internal.h | 74 ++++++++++++++++++++++++++++++++++++++++++
hw/intc/arm_gic.c | 5 +++
2 files changed, 79 insertions(+)
diff --git a/hw/intc/gic_internal.h b/hw/intc/gic_internal.h
index 1aa888a5762..cc5acc5d419 100644
--- a/hw/intc/gic_internal.h
+++ b/hw/intc/gic_internal.h
@@ -129,6 +129,20 @@ REG32(GICH_LR63, 0x1fc)
R_GICH_LR0_Priority_MASK | R_GICH_LR0_State_MASK | \
R_GICH_LR0_Grp1_MASK | R_GICH_LR0_HW_MASK)
+#define GICH_LR_STATE_INVALID 0
+#define GICH_LR_STATE_PENDING 1
+#define GICH_LR_STATE_ACTIVE 2
+#define GICH_LR_STATE_ACTIVE_PENDING 3
+
+#define GICH_LR_VIRT_ID(entry) (FIELD_EX32(entry, GICH_LR0, VirtualID))
+#define GICH_LR_PHYS_ID(entry) (FIELD_EX32(entry, GICH_LR0, PhysicalID))
+#define GICH_LR_CPUID(entry) (FIELD_EX32(entry, GICH_LR0, CPUID))
+#define GICH_LR_EOI(entry) (FIELD_EX32(entry, GICH_LR0, EOI))
+#define GICH_LR_PRIORITY(entry) (FIELD_EX32(entry, GICH_LR0, Priority) << 3)
+#define GICH_LR_STATE(entry) (FIELD_EX32(entry, GICH_LR0, State))
+#define GICH_LR_GROUP(entry) (FIELD_EX32(entry, GICH_LR0, Grp1))
+#define GICH_LR_HW(entry) (FIELD_EX32(entry, GICH_LR0, HW))
+
/* Valid bits for GICC_CTLR for GICv1, v1 with security extensions,
* GICv2 and GICv2 with security extensions:
*/
@@ -164,4 +178,64 @@ static inline bool gic_is_vcpu(int cpu)
return cpu >= GIC_NCPU;
}
+static inline int gic_get_vcpu_real_id(int cpu)
+{
+ return (cpu >= GIC_NCPU) ? (cpu - GIC_NCPU) : cpu;
+}
+
+/* Return true if the given vIRQ state exists in a LR and is either active or
+ * pending and active.
+ *
+ * This function is used to check that a guest's `end of interrupt' or
+ * `interrupts deactivation' request is valid, and matches with a LR of an
+ * already acknowledged vIRQ (i.e. has the active bit set in its state).
+ */
+static inline bool gic_virq_is_valid(GICState *s, int irq, int vcpu)
+{
+ int cpu = gic_get_vcpu_real_id(vcpu);
+ int lr_idx;
+
+ for (lr_idx = 0; lr_idx < s->num_lrs; lr_idx++) {
+ uint32_t *entry = &s->h_lr[lr_idx][cpu];
+
+ if ((GICH_LR_VIRT_ID(*entry) == irq) &&
+ (GICH_LR_STATE(*entry) & GICH_LR_STATE_ACTIVE)) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/* Return a pointer on the LR entry matching the given vIRQ.
+ *
+ * This function is used to retrieve an LR for which we know for sure that the
+ * corresponding vIRQ exists in the current context (i.e. its current state is
+ * not `invalid'):
+ * - Either the corresponding vIRQ has been validated with gic_virq_is_valid()
+ * so it is `active' or `active and pending',
+ * - Or it was pending and has been selected by gic_get_best_virq(). It is now
+ * `pending', `active' or `active and pending', depending on what the guest
+ * already did with this vIRQ.
+ *
+ * Having multiple LRs with the same VirtualID leads to UNPREDICTABLE
+ * behaviour in the GIC. We choose to return the first one that matches.
+ */
+static inline uint32_t *gic_get_lr_entry(GICState *s, int irq, int vcpu)
+{
+ int cpu = gic_get_vcpu_real_id(vcpu);
+ int lr_idx;
+
+ for (lr_idx = 0; lr_idx < s->num_lrs; lr_idx++) {
+ uint32_t *entry = &s->h_lr[lr_idx][cpu];
+
+ if ((GICH_LR_VIRT_ID(*entry) == irq) &&
+ (GICH_LR_STATE(*entry) != GICH_LR_STATE_INVALID)) {
+ return entry;
+ }
+ }
+
+ g_assert_not_reached();
+}
+
#endif /* QEMU_ARM_GIC_INTERNAL_H */
diff --git a/hw/intc/arm_gic.c b/hw/intc/arm_gic.c
index 5231579985f..41141fee532 100644
--- a/hw/intc/arm_gic.c
+++ b/hw/intc/arm_gic.c
@@ -61,6 +61,11 @@ static inline int gic_get_current_cpu(GICState *s)
return 0;
}
+static inline int gic_get_current_vcpu(GICState *s)
+{
+ return gic_get_current_cpu(s) + GIC_NCPU;
+}
+
/* Return true if this GIC config has interrupt groups, which is
* true if we're a GICv2, or a GICv1 with the security extensions.
*/
--
2.18.0
^ permalink raw reply related [flat|nested] 47+ messages in thread
* [Qemu-devel] [PULL 19/45] intc/arm_gic: Refactor secure/ns access check in the CPU interface
2018-08-14 18:17 [Qemu-devel] [PULL 00/45] target-arm queue Peter Maydell
` (17 preceding siblings ...)
2018-08-14 18:17 ` [Qemu-devel] [PULL 18/45] intc/arm_gic: Add virtualization extensions helper macros and functions Peter Maydell
@ 2018-08-14 18:17 ` Peter Maydell
2018-08-14 18:17 ` [Qemu-devel] [PULL 20/45] intc/arm_gic: Add virtualization enabled IRQ helper functions Peter Maydell
` (26 subsequent siblings)
45 siblings, 0 replies; 47+ messages in thread
From: Peter Maydell @ 2018-08-14 18:17 UTC (permalink / raw)
To: qemu-devel
From: Luc Michel <luc.michel@greensocs.com>
An access to the CPU interface is non-secure if the current GIC instance
implements the security extensions, and the memory access is actually
non-secure. Until then, it was checked with tests such as
if (s->security_extn && !attrs.secure) { ... }
in various places of the CPU interface code.
With the implementation of the virtualization extensions, those tests
must be updated to take into account whether we are in a vCPU interface
or not. This is because the exposed vCPU interface does not implement
security extensions.
This commit replaces all those tests with a call to the
gic_cpu_ns_access() function to check if the current access to the CPU
interface is non-secure. This function takes into account whether the
current CPU is a vCPU or not.
Note that this function is used only in the (v)CPU interface code path.
The distributor code path is left unchanged, as the distributor is not
exposed to vCPUs at all.
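For illustration, a standalone sketch (not part of the patch, using a
simplified stand-in for the real function signature) of the behaviour
described above: only a physical CPU interface on a GIC with the
security extensions can see a non-secure access; for a vCPU interface
the check is always false.

#include <assert.h>
#include <stdbool.h>

static bool ns_access(bool is_vcpu, bool security_extn, bool attrs_secure)
{
    /* mirrors the logic of gic_cpu_ns_access() in the patch below */
    return !is_vcpu && security_extn && !attrs_secure;
}

int main(void)
{
    assert(ns_access(false, true, false));   /* physical CPU, NS transaction */
    assert(!ns_access(false, true, true));   /* physical CPU, secure transaction */
    assert(!ns_access(true, true, false));   /* vCPU: never treated as NS */
    return 0;
}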
Signed-off-by: Luc Michel <luc.michel@greensocs.com>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-id: 20180727095421.386-9-luc.michel@greensocs.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
hw/intc/arm_gic.c | 39 ++++++++++++++++++++++-----------------
1 file changed, 22 insertions(+), 17 deletions(-)
diff --git a/hw/intc/arm_gic.c b/hw/intc/arm_gic.c
index 41141fee532..94d5982e2ac 100644
--- a/hw/intc/arm_gic.c
+++ b/hw/intc/arm_gic.c
@@ -74,6 +74,11 @@ static inline bool gic_has_groups(GICState *s)
return s->revision == 2 || s->security_extn;
}
+static inline bool gic_cpu_ns_access(GICState *s, int cpu, MemTxAttrs attrs)
+{
+ return !gic_is_vcpu(cpu) && s->security_extn && !attrs.secure;
+}
+
/* TODO: Many places that call this routine could be optimized. */
/* Update interrupt status after enabled or pending bits have been changed. */
static void gic_update(GICState *s)
@@ -221,7 +226,7 @@ static uint16_t gic_get_current_pending_irq(GICState *s, int cpu,
/* On a GIC without the security extensions, reading this register
* behaves in the same way as a secure access to a GIC with them.
*/
- bool secure = !s->security_extn || attrs.secure;
+ bool secure = !gic_cpu_ns_access(s, cpu, attrs);
if (group == 0 && !secure) {
/* Group0 interrupts hidden from Non-secure access */
@@ -428,7 +433,7 @@ static uint32_t gic_dist_get_priority(GICState *s, int cpu, int irq,
static void gic_set_priority_mask(GICState *s, int cpu, uint8_t pmask,
MemTxAttrs attrs)
{
- if (s->security_extn && !attrs.secure) {
+ if (gic_cpu_ns_access(s, cpu, attrs)) {
if (s->priority_mask[cpu] & 0x80) {
/* Priority Mask in upper half */
pmask = 0x80 | (pmask >> 1);
@@ -444,7 +449,7 @@ static uint32_t gic_get_priority_mask(GICState *s, int cpu, MemTxAttrs attrs)
{
uint32_t pmask = s->priority_mask[cpu];
- if (s->security_extn && !attrs.secure) {
+ if (gic_cpu_ns_access(s, cpu, attrs)) {
if (pmask & 0x80) {
/* Priority Mask in upper half, return Non-secure view */
pmask = (pmask << 1) & 0xff;
@@ -460,7 +465,7 @@ static uint32_t gic_get_cpu_control(GICState *s, int cpu, MemTxAttrs attrs)
{
uint32_t ret = s->cpu_ctlr[cpu];
- if (s->security_extn && !attrs.secure) {
+ if (gic_cpu_ns_access(s, cpu, attrs)) {
/* Construct the NS banked view of GICC_CTLR from the correct
* bits of the S banked view. We don't need to move the bypass
* control bits because we don't implement that (IMPDEF) part
@@ -476,7 +481,7 @@ static void gic_set_cpu_control(GICState *s, int cpu, uint32_t value,
{
uint32_t mask;
- if (s->security_extn && !attrs.secure) {
+ if (gic_cpu_ns_access(s, cpu, attrs)) {
/* The NS view can only write certain bits in the register;
* the rest are unchanged
*/
@@ -507,7 +512,7 @@ static uint8_t gic_get_running_priority(GICState *s, int cpu, MemTxAttrs attrs)
return 0xff;
}
- if (s->security_extn && !attrs.secure) {
+ if (gic_cpu_ns_access(s, cpu, attrs)) {
if (s->running_priority[cpu] & 0x80) {
/* Running priority in upper half of range: return the Non-secure
* view of the priority.
@@ -531,7 +536,7 @@ static bool gic_eoi_split(GICState *s, int cpu, MemTxAttrs attrs)
/* Before GICv2 prio-drop and deactivate are not separable */
return false;
}
- if (s->security_extn && !attrs.secure) {
+ if (gic_cpu_ns_access(s, cpu, attrs)) {
return s->cpu_ctlr[cpu] & GICC_CTLR_EOIMODE_NS;
}
return s->cpu_ctlr[cpu] & GICC_CTLR_EOIMODE;
@@ -563,7 +568,7 @@ static void gic_deactivate_irq(GICState *s, int cpu, int irq, MemTxAttrs attrs)
return;
}
- if (s->security_extn && !attrs.secure && !group) {
+ if (gic_cpu_ns_access(s, cpu, attrs) && !group) {
DPRINTF("Non-secure DI for Group0 interrupt %d ignored\n", irq);
return;
}
@@ -605,7 +610,7 @@ static void gic_complete_irq(GICState *s, int cpu, int irq, MemTxAttrs attrs)
group = gic_has_groups(s) && GIC_DIST_TEST_GROUP(irq, cm);
- if (s->security_extn && !attrs.secure && !group) {
+ if (gic_cpu_ns_access(s, cpu, attrs) && !group) {
DPRINTF("Non-secure EOI for Group0 interrupt %d ignored\n", irq);
return;
}
@@ -1281,7 +1286,7 @@ static MemTxResult gic_cpu_read(GICState *s, int cpu, int offset,
*data = gic_get_priority_mask(s, cpu, attrs);
break;
case 0x08: /* Binary Point */
- if (s->security_extn && !attrs.secure) {
+ if (gic_cpu_ns_access(s, cpu, attrs)) {
if (s->cpu_ctlr[cpu] & GICC_CTLR_CBPR) {
/* NS view of BPR when CBPR is 1 */
*data = MIN(s->bpr[cpu] + 1, 7);
@@ -1308,7 +1313,7 @@ static MemTxResult gic_cpu_read(GICState *s, int cpu, int offset,
* With security extensions, secure access: ABPR (alias of NS BPR)
* With security extensions, nonsecure access: RAZ/WI
*/
- if (!gic_has_groups(s) || (s->security_extn && !attrs.secure)) {
+ if (!gic_has_groups(s) || (gic_cpu_ns_access(s, cpu, attrs))) {
*data = 0;
} else {
*data = s->abpr[cpu];
@@ -1320,7 +1325,7 @@ static MemTxResult gic_cpu_read(GICState *s, int cpu, int offset,
if (regno >= GIC_NR_APRS || s->revision != 2) {
*data = 0;
- } else if (s->security_extn && !attrs.secure) {
+ } else if (gic_cpu_ns_access(s, cpu, attrs)) {
/* NS view of GICC_APR<n> is the top half of GIC_NSAPR<n> */
*data = gic_apr_ns_view(s, regno, cpu);
} else {
@@ -1333,7 +1338,7 @@ static MemTxResult gic_cpu_read(GICState *s, int cpu, int offset,
int regno = (offset - 0xe0) / 4;
if (regno >= GIC_NR_APRS || s->revision != 2 || !gic_has_groups(s) ||
- (s->security_extn && !attrs.secure)) {
+ gic_cpu_ns_access(s, cpu, attrs)) {
*data = 0;
} else {
*data = s->nsapr[regno][cpu];
@@ -1360,7 +1365,7 @@ static MemTxResult gic_cpu_write(GICState *s, int cpu, int offset,
gic_set_priority_mask(s, cpu, value, attrs);
break;
case 0x08: /* Binary Point */
- if (s->security_extn && !attrs.secure) {
+ if (gic_cpu_ns_access(s, cpu, attrs)) {
if (s->cpu_ctlr[cpu] & GICC_CTLR_CBPR) {
/* WI when CBPR is 1 */
return MEMTX_OK;
@@ -1375,7 +1380,7 @@ static MemTxResult gic_cpu_write(GICState *s, int cpu, int offset,
gic_complete_irq(s, cpu, value & 0x3ff, attrs);
return MEMTX_OK;
case 0x1c: /* Aliased Binary Point */
- if (!gic_has_groups(s) || (s->security_extn && !attrs.secure)) {
+ if (!gic_has_groups(s) || (gic_cpu_ns_access(s, cpu, attrs))) {
/* unimplemented, or NS access: RAZ/WI */
return MEMTX_OK;
} else {
@@ -1389,7 +1394,7 @@ static MemTxResult gic_cpu_write(GICState *s, int cpu, int offset,
if (regno >= GIC_NR_APRS || s->revision != 2) {
return MEMTX_OK;
}
- if (s->security_extn && !attrs.secure) {
+ if (gic_cpu_ns_access(s, cpu, attrs)) {
/* NS view of GICC_APR<n> is the top half of GIC_NSAPR<n> */
gic_apr_write_ns_view(s, regno, cpu, value);
} else {
@@ -1404,7 +1409,7 @@ static MemTxResult gic_cpu_write(GICState *s, int cpu, int offset,
if (regno >= GIC_NR_APRS || s->revision != 2) {
return MEMTX_OK;
}
- if (!gic_has_groups(s) || (s->security_extn && !attrs.secure)) {
+ if (!gic_has_groups(s) || (gic_cpu_ns_access(s, cpu, attrs))) {
return MEMTX_OK;
}
s->nsapr[regno][cpu] = value;
--
2.18.0
^ permalink raw reply related [flat|nested] 47+ messages in thread
* [Qemu-devel] [PULL 20/45] intc/arm_gic: Add virtualization enabled IRQ helper functions
2018-08-14 18:17 [Qemu-devel] [PULL 00/45] target-arm queue Peter Maydell
` (18 preceding siblings ...)
2018-08-14 18:17 ` [Qemu-devel] [PULL 19/45] intc/arm_gic: Refactor secure/ns access check in the CPU interface Peter Maydell
@ 2018-08-14 18:17 ` Peter Maydell
2018-08-14 18:17 ` [Qemu-devel] [PULL 21/45] intc/arm_gic: Implement virtualization extensions in gic_(activate_irq|drop_prio) Peter Maydell
` (25 subsequent siblings)
45 siblings, 0 replies; 47+ messages in thread
From: Peter Maydell @ 2018-08-14 18:17 UTC (permalink / raw)
To: qemu-devel
From: Luc Michel <luc.michel@greensocs.com>
Add some helper functions to gic_internal.h to get or change the state
of an IRQ. When the current CPU is not a vCPU, the call is forwarded to
the GIC distributor. Otherwise, it acts on the list register matching
the IRQ in the current CPU's virtual interface.
gic_clear_active can have a side effect on the distributor, even in the
vCPU case, when the corresponding LR has the HW field set.
Use those functions in the CPU interface code path to prepare for the
vCPU interface implementation.
Signed-off-by: Luc Michel <luc.michel@greensocs.com>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-id: 20180727095421.386-10-luc.michel@greensocs.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
hw/intc/gic_internal.h | 83 ++++++++++++++++++++++++++++++++++++++++++
hw/intc/arm_gic.c | 32 +++++++---------
2 files changed, 97 insertions(+), 18 deletions(-)
diff --git a/hw/intc/gic_internal.h b/hw/intc/gic_internal.h
index cc5acc5d419..45c2af0bf59 100644
--- a/hw/intc/gic_internal.h
+++ b/hw/intc/gic_internal.h
@@ -143,6 +143,13 @@ REG32(GICH_LR63, 0x1fc)
#define GICH_LR_GROUP(entry) (FIELD_EX32(entry, GICH_LR0, Grp1))
#define GICH_LR_HW(entry) (FIELD_EX32(entry, GICH_LR0, HW))
+#define GICH_LR_CLEAR_PENDING(entry) \
+ ((entry) &= ~(GICH_LR_STATE_PENDING << R_GICH_LR0_State_SHIFT))
+#define GICH_LR_SET_ACTIVE(entry) \
+ ((entry) |= (GICH_LR_STATE_ACTIVE << R_GICH_LR0_State_SHIFT))
+#define GICH_LR_CLEAR_ACTIVE(entry) \
+ ((entry) &= ~(GICH_LR_STATE_ACTIVE << R_GICH_LR0_State_SHIFT))
+
/* Valid bits for GICC_CTLR for GICv1, v1 with security extensions,
* GICv2 and GICv2 with security extensions:
*/
@@ -238,4 +245,80 @@ static inline uint32_t *gic_get_lr_entry(GICState *s, int irq, int vcpu)
g_assert_not_reached();
}
+static inline bool gic_test_group(GICState *s, int irq, int cpu)
+{
+ if (gic_is_vcpu(cpu)) {
+ uint32_t *entry = gic_get_lr_entry(s, irq, cpu);
+ return GICH_LR_GROUP(*entry);
+ } else {
+ return GIC_DIST_TEST_GROUP(irq, 1 << cpu);
+ }
+}
+
+static inline void gic_clear_pending(GICState *s, int irq, int cpu)
+{
+ if (gic_is_vcpu(cpu)) {
+ uint32_t *entry = gic_get_lr_entry(s, irq, cpu);
+ GICH_LR_CLEAR_PENDING(*entry);
+ } else {
+ /* Clear pending state for both level and edge triggered
+ * interrupts. (level triggered interrupts with an active line
+ * remain pending, see gic_test_pending)
+ */
+ GIC_DIST_CLEAR_PENDING(irq, GIC_DIST_TEST_MODEL(irq) ? ALL_CPU_MASK
+ : (1 << cpu));
+ }
+}
+
+static inline void gic_set_active(GICState *s, int irq, int cpu)
+{
+ if (gic_is_vcpu(cpu)) {
+ uint32_t *entry = gic_get_lr_entry(s, irq, cpu);
+ GICH_LR_SET_ACTIVE(*entry);
+ } else {
+ GIC_DIST_SET_ACTIVE(irq, 1 << cpu);
+ }
+}
+
+static inline void gic_clear_active(GICState *s, int irq, int cpu)
+{
+ if (gic_is_vcpu(cpu)) {
+ uint32_t *entry = gic_get_lr_entry(s, irq, cpu);
+ GICH_LR_CLEAR_ACTIVE(*entry);
+
+ if (GICH_LR_HW(*entry)) {
+ /* Hardware interrupt. We must forward the deactivation request to
+ * the distributor.
+ */
+ int phys_irq = GICH_LR_PHYS_ID(*entry);
+ int rcpu = gic_get_vcpu_real_id(cpu);
+
+ if (phys_irq < GIC_NR_SGIS || phys_irq >= GIC_MAXIRQ) {
+ /* UNPREDICTABLE behaviour, we choose to ignore the request */
+ return;
+ }
+
+ /* This is equivalent to a NS write to DIR on the physical CPU
+ * interface. Hence group0 interrupt deactivation is ignored if
+ * the GIC is secure.
+ */
+ if (!s->security_extn || GIC_DIST_TEST_GROUP(phys_irq, 1 << rcpu)) {
+ GIC_DIST_CLEAR_ACTIVE(phys_irq, 1 << rcpu);
+ }
+ }
+ } else {
+ GIC_DIST_CLEAR_ACTIVE(irq, 1 << cpu);
+ }
+}
+
+static inline int gic_get_priority(GICState *s, int irq, int cpu)
+{
+ if (gic_is_vcpu(cpu)) {
+ uint32_t *entry = gic_get_lr_entry(s, irq, cpu);
+ return GICH_LR_PRIORITY(*entry);
+ } else {
+ return GIC_DIST_GET_PRIORITY(irq, cpu);
+ }
+}
+
#endif /* QEMU_ARM_GIC_INTERNAL_H */
diff --git a/hw/intc/arm_gic.c b/hw/intc/arm_gic.c
index 94d5982e2ac..26ed7ea58ab 100644
--- a/hw/intc/arm_gic.c
+++ b/hw/intc/arm_gic.c
@@ -222,7 +222,8 @@ static uint16_t gic_get_current_pending_irq(GICState *s, int cpu,
uint16_t pending_irq = s->current_pending[cpu];
if (pending_irq < GIC_MAXIRQ && gic_has_groups(s)) {
- int group = GIC_DIST_TEST_GROUP(pending_irq, (1 << cpu));
+ int group = gic_test_group(s, pending_irq, cpu);
+
/* On a GIC without the security extensions, reading this register
* behaves in the same way as a secure access to a GIC with them.
*/
@@ -253,7 +254,7 @@ static int gic_get_group_priority(GICState *s, int cpu, int irq)
if (gic_has_groups(s) &&
!(s->cpu_ctlr[cpu] & GICC_CTLR_CBPR) &&
- GIC_DIST_TEST_GROUP(irq, (1 << cpu))) {
+ gic_test_group(s, irq, cpu)) {
bpr = s->abpr[cpu] - 1;
assert(bpr >= 0);
} else {
@@ -266,7 +267,7 @@ static int gic_get_group_priority(GICState *s, int cpu, int irq)
*/
mask = ~0U << ((bpr & 7) + 1);
- return GIC_DIST_GET_PRIORITY(irq, cpu) & mask;
+ return gic_get_priority(s, irq, cpu) & mask;
}
static void gic_activate_irq(GICState *s, int cpu, int irq)
@@ -279,14 +280,14 @@ static void gic_activate_irq(GICState *s, int cpu, int irq)
int regno = preemption_level / 32;
int bitno = preemption_level % 32;
- if (gic_has_groups(s) && GIC_DIST_TEST_GROUP(irq, (1 << cpu))) {
+ if (gic_has_groups(s) && gic_test_group(s, irq, cpu)) {
s->nsapr[regno][cpu] |= (1 << bitno);
} else {
s->apr[regno][cpu] |= (1 << bitno);
}
s->running_priority[cpu] = prio;
- GIC_DIST_SET_ACTIVE(irq, 1 << cpu);
+ gic_set_active(s, irq, cpu);
}
static int gic_get_prio_from_apr_bits(GICState *s, int cpu)
@@ -355,7 +356,7 @@ uint32_t gic_acknowledge_irq(GICState *s, int cpu, MemTxAttrs attrs)
return irq;
}
- if (GIC_DIST_GET_PRIORITY(irq, cpu) >= s->running_priority[cpu]) {
+ if (gic_get_priority(s, irq, cpu) >= s->running_priority[cpu]) {
DPRINTF("ACK, pending interrupt (%d) has insufficient priority\n", irq);
return 1023;
}
@@ -364,8 +365,7 @@ uint32_t gic_acknowledge_irq(GICState *s, int cpu, MemTxAttrs attrs)
/* Clear pending flags for both level and edge triggered interrupts.
* Level triggered IRQs will be reasserted once they become inactive.
*/
- GIC_DIST_CLEAR_PENDING(irq, GIC_DIST_TEST_MODEL(irq) ? ALL_CPU_MASK
- : cm);
+ gic_clear_pending(s, irq, cpu);
ret = irq;
} else {
if (irq < GIC_NR_SGIS) {
@@ -377,9 +377,7 @@ uint32_t gic_acknowledge_irq(GICState *s, int cpu, MemTxAttrs attrs)
src = ctz32(s->sgi_pending[irq][cpu]);
s->sgi_pending[irq][cpu] &= ~(1 << src);
if (s->sgi_pending[irq][cpu] == 0) {
- GIC_DIST_CLEAR_PENDING(irq,
- GIC_DIST_TEST_MODEL(irq) ? ALL_CPU_MASK
- : cm);
+ gic_clear_pending(s, irq, cpu);
}
ret = irq | ((src & 0x7) << 10);
} else {
@@ -387,8 +385,7 @@ uint32_t gic_acknowledge_irq(GICState *s, int cpu, MemTxAttrs attrs)
* interrupts. (level triggered interrupts with an active line
* remain pending, see gic_test_pending)
*/
- GIC_DIST_CLEAR_PENDING(irq, GIC_DIST_TEST_MODEL(irq) ? ALL_CPU_MASK
- : cm);
+ gic_clear_pending(s, irq, cpu);
ret = irq;
}
}
@@ -544,7 +541,6 @@ static bool gic_eoi_split(GICState *s, int cpu, MemTxAttrs attrs)
static void gic_deactivate_irq(GICState *s, int cpu, int irq, MemTxAttrs attrs)
{
- int cm = 1 << cpu;
int group;
if (irq >= s->num_irq) {
@@ -559,7 +555,7 @@ static void gic_deactivate_irq(GICState *s, int cpu, int irq, MemTxAttrs attrs)
return;
}
- group = gic_has_groups(s) && GIC_DIST_TEST_GROUP(irq, cm);
+ group = gic_has_groups(s) && gic_test_group(s, irq, cpu);
if (!gic_eoi_split(s, cpu, attrs)) {
/* This is UNPREDICTABLE; we choose to ignore it */
@@ -573,7 +569,7 @@ static void gic_deactivate_irq(GICState *s, int cpu, int irq, MemTxAttrs attrs)
return;
}
- GIC_DIST_CLEAR_ACTIVE(irq, cm);
+ gic_clear_active(s, irq, cpu);
}
static void gic_complete_irq(GICState *s, int cpu, int irq, MemTxAttrs attrs)
@@ -608,7 +604,7 @@ static void gic_complete_irq(GICState *s, int cpu, int irq, MemTxAttrs attrs)
}
}
- group = gic_has_groups(s) && GIC_DIST_TEST_GROUP(irq, cm);
+ group = gic_has_groups(s) && gic_test_group(s, irq, cpu);
if (gic_cpu_ns_access(s, cpu, attrs) && !group) {
DPRINTF("Non-secure EOI for Group0 interrupt %d ignored\n", irq);
@@ -624,7 +620,7 @@ static void gic_complete_irq(GICState *s, int cpu, int irq, MemTxAttrs attrs)
/* In GICv2 the guest can choose to split priority-drop and deactivate */
if (!gic_eoi_split(s, cpu, attrs)) {
- GIC_DIST_CLEAR_ACTIVE(irq, cm);
+ gic_clear_active(s, irq, cpu);
}
gic_update(s);
}
--
2.18.0
^ permalink raw reply related [flat|nested] 47+ messages in thread
* [Qemu-devel] [PULL 21/45] intc/arm_gic: Implement virtualization extensions in gic_(activate_irq|drop_prio)
2018-08-14 18:17 [Qemu-devel] [PULL 00/45] target-arm queue Peter Maydell
` (19 preceding siblings ...)
2018-08-14 18:17 ` [Qemu-devel] [PULL 20/45] intc/arm_gic: Add virtualization enabled IRQ helper functions Peter Maydell
@ 2018-08-14 18:17 ` Peter Maydell
2018-08-14 18:17 ` [Qemu-devel] [PULL 22/45] intc/arm_gic: Implement virtualization extensions in gic_acknowledge_irq Peter Maydell
` (24 subsequent siblings)
45 siblings, 0 replies; 47+ messages in thread
From: Peter Maydell @ 2018-08-14 18:17 UTC (permalink / raw)
To: qemu-devel
From: Luc Michel <luc.michel@greensocs.com>
Implement virtualization extensions in gic_activate_irq() and
gic_drop_prio(), and in gic_get_prio_from_apr_bits(), which is called by
gic_drop_prio().
When the current CPU is a vCPU:
- GIC_VIRT_MIN_BPR and GIC_VIRT_NR_APRS are used instead of their non-virt
counterparts,
- the vCPU APR is stored in the virtual interface, in h_apr.
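As an illustration of the first point, here is a minimal standalone sketch of
the arithmetic the patch relies on to pick the APR bit for a given group
priority. The GIC_MIN_BPR == 0 and GIC_VIRT_MIN_BPR == 2 values are assumptions
taken from gic_internal.h, and the real code additionally selects the APR word
(regno); this is a sketch, not the patch itself.

    #include <stdbool.h>

    /* Sketch only: APR bit index (preemption level) for a group priority. */
    static inline int apr_bit_for_prio(int prio, bool is_vcpu)
    {
        int min_bpr = is_vcpu ? 2 /* GIC_VIRT_MIN_BPR */ : 0 /* GIC_MIN_BPR */;

        return prio >> (min_bpr + 1);
    }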
Signed-off-by: Luc Michel <luc.michel@greensocs.com>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20180727095421.386-11-luc.michel@greensocs.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
hw/intc/arm_gic.c | 50 +++++++++++++++++++++++++++++++++++------------
1 file changed, 38 insertions(+), 12 deletions(-)
diff --git a/hw/intc/arm_gic.c b/hw/intc/arm_gic.c
index 26ed7ea58ab..de73dc9f54b 100644
--- a/hw/intc/arm_gic.c
+++ b/hw/intc/arm_gic.c
@@ -276,16 +276,23 @@ static void gic_activate_irq(GICState *s, int cpu, int irq)
* and update the running priority.
*/
int prio = gic_get_group_priority(s, cpu, irq);
- int preemption_level = prio >> (GIC_MIN_BPR + 1);
+ int min_bpr = gic_is_vcpu(cpu) ? GIC_VIRT_MIN_BPR : GIC_MIN_BPR;
+ int preemption_level = prio >> (min_bpr + 1);
int regno = preemption_level / 32;
int bitno = preemption_level % 32;
+ uint32_t *papr = NULL;
- if (gic_has_groups(s) && gic_test_group(s, irq, cpu)) {
- s->nsapr[regno][cpu] |= (1 << bitno);
+ if (gic_is_vcpu(cpu)) {
+ assert(regno == 0);
+ papr = &s->h_apr[gic_get_vcpu_real_id(cpu)];
+ } else if (gic_has_groups(s) && gic_test_group(s, irq, cpu)) {
+ papr = &s->nsapr[regno][cpu];
} else {
- s->apr[regno][cpu] |= (1 << bitno);
+ papr = &s->apr[regno][cpu];
}
+ *papr |= (1 << bitno);
+
s->running_priority[cpu] = prio;
gic_set_active(s, irq, cpu);
}
@@ -296,6 +303,16 @@ static int gic_get_prio_from_apr_bits(GICState *s, int cpu)
* on the set bits in the Active Priority Registers.
*/
int i;
+
+ if (gic_is_vcpu(cpu)) {
+ uint32_t apr = s->h_apr[gic_get_vcpu_real_id(cpu)];
+ if (apr) {
+ return ctz32(apr) << (GIC_VIRT_MIN_BPR + 1);
+ } else {
+ return 0x100;
+ }
+ }
+
for (i = 0; i < GIC_NR_APRS; i++) {
uint32_t apr = s->apr[i][cpu] | s->nsapr[i][cpu];
if (!apr) {
@@ -324,16 +341,25 @@ static void gic_drop_prio(GICState *s, int cpu, int group)
* running priority will be wrong, so interrupts that should preempt
* might not do so, and interrupts that should not preempt might do so.
*/
- int i;
+ if (gic_is_vcpu(cpu)) {
+ int rcpu = gic_get_vcpu_real_id(cpu);
- for (i = 0; i < GIC_NR_APRS; i++) {
- uint32_t *papr = group ? &s->nsapr[i][cpu] : &s->apr[i][cpu];
- if (!*papr) {
- continue;
+ if (s->h_apr[rcpu]) {
+ /* Clear lowest set bit */
+ s->h_apr[rcpu] &= s->h_apr[rcpu] - 1;
+ }
+ } else {
+ int i;
+
+ for (i = 0; i < GIC_NR_APRS; i++) {
+ uint32_t *papr = group ? &s->nsapr[i][cpu] : &s->apr[i][cpu];
+ if (!*papr) {
+ continue;
+ }
+ /* Clear lowest set bit */
+ *papr &= *papr - 1;
+ break;
}
- /* Clear lowest set bit */
- *papr &= *papr - 1;
- break;
}
s->running_priority[cpu] = gic_get_prio_from_apr_bits(s, cpu);
--
2.18.0
^ permalink raw reply related [flat|nested] 47+ messages in thread
* [Qemu-devel] [PULL 22/45] intc/arm_gic: Implement virtualization extensions in gic_acknowledge_irq
2018-08-14 18:17 [Qemu-devel] [PULL 00/45] target-arm queue Peter Maydell
` (20 preceding siblings ...)
2018-08-14 18:17 ` [Qemu-devel] [PULL 21/45] intc/arm_gic: Implement virtualization extensions in gic_(activate_irq|drop_prio) Peter Maydell
@ 2018-08-14 18:17 ` Peter Maydell
2018-08-14 18:17 ` [Qemu-devel] [PULL 23/45] intc/arm_gic: Implement virtualization extensions in gic_(deactivate|complete_irq) Peter Maydell
` (23 subsequent siblings)
45 siblings, 0 replies; 47+ messages in thread
From: Peter Maydell @ 2018-08-14 18:17 UTC (permalink / raw)
To: qemu-devel
From: Luc Michel <luc.michel@greensocs.com>
Implement virtualization extensions in the gic_acknowledge_irq()
function. This function changes the state of the highest priority IRQ
from pending to active.
When the current CPU is a vCPU, modifying the state of an IRQ modifies
the corresponding LR entry. However, if we clear the pending flag before
setting the active one, we lose track of the LR entry as it becomes
invalid. The next call to gic_get_lr_entry() will fail.
To overcome this issue, we call gic_activate_irq() before
gic_clear_pending(). This does not change the general behaviour of
gic_acknowledge_irq().
We also move the SGI case into gic_clear_pending_sgi() to enhance code
readability, as the virtualization extensions support adds an if-else
level.
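To make the ordering constraint concrete, here is a small self-contained toy
model of the two GICH_LR state bits (invalid = 0b00, pending = 0b01,
active = 0b10, pending+active = 0b11). The bit positions and names are
invented for the example; the real layout lives in gic_internal.h.

    #include <stdio.h>
    #include <stdint.h>

    #define LR_PENDING 0x1u
    #define LR_ACTIVE  0x2u

    static const char *describe(uint32_t state)
    {
        return state ? "still valid (entry can be looked up)"
                     : "invalid (gic_get_lr_entry() would fail)";
    }

    int main(void)
    {
        uint32_t lr = LR_PENDING;

        /* Order used by this patch: activate first, then clear pending. */
        lr |= LR_ACTIVE;
        lr &= ~LR_PENDING;
        printf("activate then clear-pending: %s\n", describe(lr));

        /* Opposite order: the entry transiently becomes invalid. */
        lr = LR_PENDING;
        lr &= ~LR_PENDING;
        printf("clear-pending first:         %s\n", describe(lr));
        return 0;
    }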
Signed-off-by: Luc Michel <luc.michel@greensocs.com>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20180727095421.386-12-luc.michel@greensocs.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
hw/intc/arm_gic.c | 52 ++++++++++++++++++++++++++++++-----------------
1 file changed, 33 insertions(+), 19 deletions(-)
diff --git a/hw/intc/arm_gic.c b/hw/intc/arm_gic.c
index de73dc9f54b..d80acde989f 100644
--- a/hw/intc/arm_gic.c
+++ b/hw/intc/arm_gic.c
@@ -365,17 +365,44 @@ static void gic_drop_prio(GICState *s, int cpu, int group)
s->running_priority[cpu] = gic_get_prio_from_apr_bits(s, cpu);
}
+static inline uint32_t gic_clear_pending_sgi(GICState *s, int irq, int cpu)
+{
+ int src;
+ uint32_t ret;
+
+ if (!gic_is_vcpu(cpu)) {
+ /* Lookup the source CPU for the SGI and clear this in the
+ * sgi_pending map. Return the src and clear the overall pending
+ * state on this CPU if the SGI is not pending from any CPUs.
+ */
+ assert(s->sgi_pending[irq][cpu] != 0);
+ src = ctz32(s->sgi_pending[irq][cpu]);
+ s->sgi_pending[irq][cpu] &= ~(1 << src);
+ if (s->sgi_pending[irq][cpu] == 0) {
+ gic_clear_pending(s, irq, cpu);
+ }
+ ret = irq | ((src & 0x7) << 10);
+ } else {
+ uint32_t *lr_entry = gic_get_lr_entry(s, irq, cpu);
+ src = GICH_LR_CPUID(*lr_entry);
+
+ gic_clear_pending(s, irq, cpu);
+ ret = irq | (src << 10);
+ }
+
+ return ret;
+}
+
uint32_t gic_acknowledge_irq(GICState *s, int cpu, MemTxAttrs attrs)
{
- int ret, irq, src;
- int cm = 1 << cpu;
+ int ret, irq;
/* gic_get_current_pending_irq() will return 1022 or 1023 appropriately
* for the case where this GIC supports grouping and the pending interrupt
* is in the wrong group.
*/
irq = gic_get_current_pending_irq(s, cpu, attrs);
- trace_gic_acknowledge_irq(cpu, irq);
+ trace_gic_acknowledge_irq(gic_get_vcpu_real_id(cpu), irq);
if (irq >= GIC_MAXIRQ) {
DPRINTF("ACK, no pending interrupt or it is hidden: %d\n", irq);
@@ -387,6 +414,8 @@ uint32_t gic_acknowledge_irq(GICState *s, int cpu, MemTxAttrs attrs)
return 1023;
}
+ gic_activate_irq(s, cpu, irq);
+
if (s->revision == REV_11MPCORE) {
/* Clear pending flags for both level and edge triggered interrupts.
* Level triggered IRQs will be reasserted once they become inactive.
@@ -395,28 +424,13 @@ uint32_t gic_acknowledge_irq(GICState *s, int cpu, MemTxAttrs attrs)
ret = irq;
} else {
if (irq < GIC_NR_SGIS) {
- /* Lookup the source CPU for the SGI and clear this in the
- * sgi_pending map. Return the src and clear the overall pending
- * state on this CPU if the SGI is not pending from any CPUs.
- */
- assert(s->sgi_pending[irq][cpu] != 0);
- src = ctz32(s->sgi_pending[irq][cpu]);
- s->sgi_pending[irq][cpu] &= ~(1 << src);
- if (s->sgi_pending[irq][cpu] == 0) {
- gic_clear_pending(s, irq, cpu);
- }
- ret = irq | ((src & 0x7) << 10);
+ ret = gic_clear_pending_sgi(s, irq, cpu);
} else {
- /* Clear pending state for both level and edge triggered
- * interrupts. (level triggered interrupts with an active line
- * remain pending, see gic_test_pending)
- */
gic_clear_pending(s, irq, cpu);
ret = irq;
}
}
- gic_activate_irq(s, cpu, irq);
gic_update(s);
DPRINTF("ACK %d\n", irq);
return ret;
--
2.18.0
^ permalink raw reply related [flat|nested] 47+ messages in thread
* [Qemu-devel] [PULL 23/45] intc/arm_gic: Implement virtualization extensions in gic_(deactivate|complete_irq)
2018-08-14 18:17 [Qemu-devel] [PULL 00/45] target-arm queue Peter Maydell
` (21 preceding siblings ...)
2018-08-14 18:17 ` [Qemu-devel] [PULL 22/45] intc/arm_gic: Implement virtualization extensions in gic_acknowledge_irq Peter Maydell
@ 2018-08-14 18:17 ` Peter Maydell
2018-08-14 18:17 ` [Qemu-devel] [PULL 24/45] intc/arm_gic: Implement virtualization extensions in gic_cpu_(read|write) Peter Maydell
` (22 subsequent siblings)
45 siblings, 0 replies; 47+ messages in thread
From: Peter Maydell @ 2018-08-14 18:17 UTC (permalink / raw)
To: qemu-devel
From: Luc Michel <luc.michel@greensocs.com>
Implement virtualization extensions in the gic_deactivate_irq() and
gic_complete_irq() functions.
When the guest writes an invalid vIRQ to V_EOIR or V_DIR, since the
GICv2 specification is not entirely clear here, we adopt the behaviour
observed on real hardware:
* When V_CTRL.EOIMode is false (EOI split is disabled):
- In case of an invalid vIRQ write to V_EOIR:
-> If some bits are set in H_APR, an invalid vIRQ write to V_EOIR
triggers a priority drop, and increments V_HCR.EOICount.
-> If V_APR is already cleared, nothing happens.
- An invalid vIRQ write to V_DIR is ignored.
* When V_CTRL.EOIMode is true:
- In case of an invalid vIRQ write to V_EOIR:
-> If some bits are set in H_APR, an invalid vIRQ write to V_EOIR
triggers a priority drop.
-> If V_APR is already cleared, nothing happens.
- An invalid vIRQ write to V_DIR increments V_HCR.EOICount.
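The list above can be condensed into a small self-contained model. The
parameter and field names below are invented for the example and do not
correspond to QEMU state fields; treat it as a summary of the observed
behaviour rather than the implementation.

    #include <stdbool.h>
    #include <stdio.h>

    struct effect {
        bool priority_drop;
        bool eoicount_incremented;
    };

    static struct effect invalid_virq_write(bool to_eoir, bool eoimode,
                                            bool apr_nonzero)
    {
        struct effect e = { false, false };

        if (to_eoir) {
            if (apr_nonzero) {
                e.priority_drop = true;            /* drop in both modes */
                e.eoicount_incremented = !eoimode; /* EOICount only without split */
            }                                      /* APR clear: nothing happens */
        } else {                                   /* write to V_DIR */
            e.eoicount_incremented = eoimode;      /* ignored when EOIMode false */
        }
        return e;
    }

    int main(void)
    {
        struct effect e = invalid_virq_write(true, false, true);
        printf("V_EOIR, EOIMode=0, APR!=0: drop=%d eoicount=%d\n",
               e.priority_drop, e.eoicount_incremented);
        return 0;
    }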
Signed-off-by: Luc Michel <luc.michel@greensocs.com>
Message-id: 20180727095421.386-13-luc.michel@greensocs.com
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
hw/intc/arm_gic.c | 51 +++++++++++++++++++++++++++++++++++++++++++----
1 file changed, 47 insertions(+), 4 deletions(-)
diff --git a/hw/intc/arm_gic.c b/hw/intc/arm_gic.c
index d80acde989f..3cddf658269 100644
--- a/hw/intc/arm_gic.c
+++ b/hw/intc/arm_gic.c
@@ -583,20 +583,19 @@ static void gic_deactivate_irq(GICState *s, int cpu, int irq, MemTxAttrs attrs)
{
int group;
- if (irq >= s->num_irq) {
+ if (irq >= GIC_MAXIRQ || (!gic_is_vcpu(cpu) && irq >= s->num_irq)) {
/*
* This handles two cases:
* 1. If software writes the ID of a spurious interrupt [ie 1023]
* to the GICC_DIR, the GIC ignores that write.
* 2. If software writes the number of a non-existent interrupt
* this must be a subcase of "value written is not an active interrupt"
- * and so this is UNPREDICTABLE. We choose to ignore it.
+ * and so this is UNPREDICTABLE. We choose to ignore it. For vCPUs,
+ * all IRQs potentially exist, so this limit does not apply.
*/
return;
}
- group = gic_has_groups(s) && gic_test_group(s, irq, cpu);
-
if (!gic_eoi_split(s, cpu, attrs)) {
/* This is UNPREDICTABLE; we choose to ignore it */
qemu_log_mask(LOG_GUEST_ERROR,
@@ -604,6 +603,17 @@ static void gic_deactivate_irq(GICState *s, int cpu, int irq, MemTxAttrs attrs)
return;
}
+ if (gic_is_vcpu(cpu) && !gic_virq_is_valid(s, irq, cpu)) {
+ /* This vIRQ does not have an LR entry which is either active or
+ * pending and active. Increment EOICount and ignore the write.
+ */
+ int rcpu = gic_get_vcpu_real_id(cpu);
+ s->h_hcr[rcpu] += 1 << R_GICH_HCR_EOICount_SHIFT;
+ return;
+ }
+
+ group = gic_has_groups(s) && gic_test_group(s, irq, cpu);
+
if (gic_cpu_ns_access(s, cpu, attrs) && !group) {
DPRINTF("Non-secure DI for Group0 interrupt %d ignored\n", irq);
return;
@@ -618,6 +628,39 @@ static void gic_complete_irq(GICState *s, int cpu, int irq, MemTxAttrs attrs)
int group;
DPRINTF("EOI %d\n", irq);
+ if (gic_is_vcpu(cpu)) {
+ /* The call to gic_drop_prio() will clear a bit in GICH_APR iff the
+ * running prio is < 0x100.
+ */
+ bool prio_drop = s->running_priority[cpu] < 0x100;
+
+ if (irq >= GIC_MAXIRQ) {
+ /* Ignore spurious interrupt */
+ return;
+ }
+
+ gic_drop_prio(s, cpu, 0);
+
+ if (!gic_eoi_split(s, cpu, attrs)) {
+ bool valid = gic_virq_is_valid(s, irq, cpu);
+ if (prio_drop && !valid) {
+ /* We are in a situation where:
+ * - V_CTRL.EOIMode is false (no EOI split),
+ * - The call to gic_drop_prio() cleared a bit in GICH_APR,
+ * - This vIRQ does not have an LR entry which is either
+ * active or pending and active.
+ * In that case, we must increment EOICount.
+ */
+ int rcpu = gic_get_vcpu_real_id(cpu);
+ s->h_hcr[rcpu] += 1 << R_GICH_HCR_EOICount_SHIFT;
+ } else if (valid) {
+ gic_clear_active(s, irq, cpu);
+ }
+ }
+
+ return;
+ }
+
if (irq >= s->num_irq) {
/* This handles two cases:
* 1. If software writes the ID of a spurious interrupt [ie 1023]
--
2.18.0
^ permalink raw reply related [flat|nested] 47+ messages in thread
* [Qemu-devel] [PULL 24/45] intc/arm_gic: Implement virtualization extensions in gic_cpu_(read|write)
2018-08-14 18:17 [Qemu-devel] [PULL 00/45] target-arm queue Peter Maydell
` (22 preceding siblings ...)
2018-08-14 18:17 ` [Qemu-devel] [PULL 23/45] intc/arm_gic: Implement virtualization extensions in gic_(deactivate|complete_irq) Peter Maydell
@ 2018-08-14 18:17 ` Peter Maydell
2018-08-14 18:17 ` [Qemu-devel] [PULL 25/45] intc/arm_gic: Wire the vCPU interface Peter Maydell
` (21 subsequent siblings)
45 siblings, 0 replies; 47+ messages in thread
From: Peter Maydell @ 2018-08-14 18:17 UTC (permalink / raw)
To: qemu-devel
From: Luc Michel <luc.michel@greensocs.com>
Implement virtualization extensions in the gic_cpu_read() and
gic_cpu_write() functions. Those are the last bits missing to fully
support virtualization extensions in the CPU interface path.
Signed-off-by: Luc Michel <luc.michel@greensocs.com>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20180727095421.386-14-luc.michel@greensocs.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
hw/intc/arm_gic.c | 20 +++++++++++++++-----
1 file changed, 15 insertions(+), 5 deletions(-)
diff --git a/hw/intc/arm_gic.c b/hw/intc/arm_gic.c
index 3cddf658269..0e1b23047e3 100644
--- a/hw/intc/arm_gic.c
+++ b/hw/intc/arm_gic.c
@@ -1401,9 +1401,12 @@ static MemTxResult gic_cpu_read(GICState *s, int cpu, int offset,
case 0xd0: case 0xd4: case 0xd8: case 0xdc:
{
int regno = (offset - 0xd0) / 4;
+ int nr_aprs = gic_is_vcpu(cpu) ? GIC_VIRT_NR_APRS : GIC_NR_APRS;
- if (regno >= GIC_NR_APRS || s->revision != 2) {
+ if (regno >= nr_aprs || s->revision != 2) {
*data = 0;
+ } else if (gic_is_vcpu(cpu)) {
+ *data = s->h_apr[gic_get_vcpu_real_id(cpu)];
} else if (gic_cpu_ns_access(s, cpu, attrs)) {
/* NS view of GICC_APR<n> is the top half of GIC_NSAPR<n> */
*data = gic_apr_ns_view(s, regno, cpu);
@@ -1417,7 +1420,7 @@ static MemTxResult gic_cpu_read(GICState *s, int cpu, int offset,
int regno = (offset - 0xe0) / 4;
if (regno >= GIC_NR_APRS || s->revision != 2 || !gic_has_groups(s) ||
- gic_cpu_ns_access(s, cpu, attrs)) {
+ gic_cpu_ns_access(s, cpu, attrs) || gic_is_vcpu(cpu)) {
*data = 0;
} else {
*data = s->nsapr[regno][cpu];
@@ -1452,7 +1455,8 @@ static MemTxResult gic_cpu_write(GICState *s, int cpu, int offset,
s->abpr[cpu] = MAX(value & 0x7, GIC_MIN_ABPR);
}
} else {
- s->bpr[cpu] = MAX(value & 0x7, GIC_MIN_BPR);
+ int min_bpr = gic_is_vcpu(cpu) ? GIC_VIRT_MIN_BPR : GIC_MIN_BPR;
+ s->bpr[cpu] = MAX(value & 0x7, min_bpr);
}
break;
case 0x10: /* End Of Interrupt */
@@ -1469,11 +1473,14 @@ static MemTxResult gic_cpu_write(GICState *s, int cpu, int offset,
case 0xd0: case 0xd4: case 0xd8: case 0xdc:
{
int regno = (offset - 0xd0) / 4;
+ int nr_aprs = gic_is_vcpu(cpu) ? GIC_VIRT_NR_APRS : GIC_NR_APRS;
- if (regno >= GIC_NR_APRS || s->revision != 2) {
+ if (regno >= nr_aprs || s->revision != 2) {
return MEMTX_OK;
}
- if (gic_cpu_ns_access(s, cpu, attrs)) {
+ if (gic_is_vcpu(cpu)) {
+ s->h_apr[gic_get_vcpu_real_id(cpu)] = value;
+ } else if (gic_cpu_ns_access(s, cpu, attrs)) {
/* NS view of GICC_APR<n> is the top half of GIC_NSAPR<n> */
gic_apr_write_ns_view(s, regno, cpu, value);
} else {
@@ -1488,6 +1495,9 @@ static MemTxResult gic_cpu_write(GICState *s, int cpu, int offset,
if (regno >= GIC_NR_APRS || s->revision != 2) {
return MEMTX_OK;
}
+ if (gic_is_vcpu(cpu)) {
+ return MEMTX_OK;
+ }
if (!gic_has_groups(s) || (gic_cpu_ns_access(s, cpu, attrs))) {
return MEMTX_OK;
}
--
2.18.0
^ permalink raw reply related [flat|nested] 47+ messages in thread
* [Qemu-devel] [PULL 25/45] intc/arm_gic: Wire the vCPU interface
2018-08-14 18:17 [Qemu-devel] [PULL 00/45] target-arm queue Peter Maydell
` (23 preceding siblings ...)
2018-08-14 18:17 ` [Qemu-devel] [PULL 24/45] intc/arm_gic: Implement virtualization extensions in gic_cpu_(read|write) Peter Maydell
@ 2018-08-14 18:17 ` Peter Maydell
2018-08-14 18:17 ` [Qemu-devel] [PULL 26/45] intc/arm_gic: Implement the virtual interface registers Peter Maydell
` (20 subsequent siblings)
45 siblings, 0 replies; 47+ messages in thread
From: Peter Maydell @ 2018-08-14 18:17 UTC (permalink / raw)
To: qemu-devel
From: Luc Michel <luc.michel@greensocs.com>
Add the read/write functions to handle accesses to the vCPU interface.
Those accesses are forwarded to the real CPU interface, with the CPU id
being converted to the corresponding vCPU id (vCPU id = CPU id +
GIC_NCPU).
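For reference, a sketch of the id convention described above. GIC_NCPU and the
two helpers follow gic_internal.h as introduced earlier in this series, but the
definitions here are written from memory and gic_cpu_to_vcpu() is a name
invented for the example.

    #include <stdbool.h>

    #define GIC_NCPU 8

    static inline bool gic_is_vcpu(int cpu)
    {
        return cpu >= GIC_NCPU;
    }

    static inline int gic_get_vcpu_real_id(int cpu)
    {
        return gic_is_vcpu(cpu) ? cpu - GIC_NCPU : cpu;
    }

    /* Forwarding an access from the vCPU interface region: the physical CPU
     * id is simply shifted into the vCPU range. */
    static inline int gic_cpu_to_vcpu(int cpu)
    {
        return cpu + GIC_NCPU;
    }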
Signed-off-by: Luc Michel <luc.michel@greensocs.com>
Message-id: 20180727095421.386-15-luc.michel@greensocs.com
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
hw/intc/arm_gic.c | 37 +++++++++++++++++++++++++++++++++++--
1 file changed, 35 insertions(+), 2 deletions(-)
diff --git a/hw/intc/arm_gic.c b/hw/intc/arm_gic.c
index 0e1b23047e3..7ee2e6bcbb3 100644
--- a/hw/intc/arm_gic.c
+++ b/hw/intc/arm_gic.c
@@ -1555,6 +1555,23 @@ static MemTxResult gic_do_cpu_write(void *opaque, hwaddr addr,
return gic_cpu_write(s, id, addr, value, attrs);
}
+static MemTxResult gic_thisvcpu_read(void *opaque, hwaddr addr, uint64_t *data,
+ unsigned size, MemTxAttrs attrs)
+{
+ GICState *s = (GICState *)opaque;
+
+ return gic_cpu_read(s, gic_get_current_vcpu(s), addr, data, attrs);
+}
+
+static MemTxResult gic_thisvcpu_write(void *opaque, hwaddr addr,
+ uint64_t value, unsigned size,
+ MemTxAttrs attrs)
+{
+ GICState *s = (GICState *)opaque;
+
+ return gic_cpu_write(s, gic_get_current_vcpu(s), addr, value, attrs);
+}
+
static const MemoryRegionOps gic_ops[2] = {
{
.read_with_attrs = gic_dist_read,
@@ -1574,6 +1591,19 @@ static const MemoryRegionOps gic_cpu_ops = {
.endianness = DEVICE_NATIVE_ENDIAN,
};
+static const MemoryRegionOps gic_virt_ops[2] = {
+ {
+ .read_with_attrs = NULL,
+ .write_with_attrs = NULL,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+ },
+ {
+ .read_with_attrs = gic_thisvcpu_read,
+ .write_with_attrs = gic_thisvcpu_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+ }
+};
+
static void arm_gic_realize(DeviceState *dev, Error **errp)
{
/* Device instance realize function for the GIC sysbus device */
@@ -1595,8 +1625,11 @@ static void arm_gic_realize(DeviceState *dev, Error **errp)
return;
}
- /* This creates distributor and main CPU interface (s->cpuiomem[0]) */
- gic_init_irqs_and_mmio(s, gic_set_irq, gic_ops, NULL);
+ /* This creates distributor, main CPU interface (s->cpuiomem[0]) and if
+ * enabled, virtualization extensions related interfaces (main virtual
+ * interface (s->vifaceiomem[0]) and virtual CPU interface).
+ */
+ gic_init_irqs_and_mmio(s, gic_set_irq, gic_ops, gic_virt_ops);
/* Extra core-specific regions for the CPU interfaces. This is
* necessary for "franken-GIC" implementations, for example on
--
2.18.0
^ permalink raw reply related [flat|nested] 47+ messages in thread
* [Qemu-devel] [PULL 26/45] intc/arm_gic: Implement the virtual interface registers
2018-08-14 18:17 [Qemu-devel] [PULL 00/45] target-arm queue Peter Maydell
` (24 preceding siblings ...)
2018-08-14 18:17 ` [Qemu-devel] [PULL 25/45] intc/arm_gic: Wire the vCPU interface Peter Maydell
@ 2018-08-14 18:17 ` Peter Maydell
2018-08-14 18:17 ` [Qemu-devel] [PULL 27/45] intc/arm_gic: Implement gic_update_virt() function Peter Maydell
` (19 subsequent siblings)
45 siblings, 0 replies; 47+ messages in thread
From: Peter Maydell @ 2018-08-14 18:17 UTC (permalink / raw)
To: qemu-devel
From: Luc Michel <luc.michel@greensocs.com>
Implement the read and write functions for the virtual interface of the
virtualization extensions in the GICv2.
One mirror region per CPU is also created, which maps to that specific
CPU id. This is required by the GIC architecture specification.
Signed-off-by: Luc Michel <luc.michel@greensocs.com>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20180727095421.386-16-luc.michel@greensocs.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
hw/intc/arm_gic.c | 235 +++++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 233 insertions(+), 2 deletions(-)
diff --git a/hw/intc/arm_gic.c b/hw/intc/arm_gic.c
index 7ee2e6bcbb3..60631964876 100644
--- a/hw/intc/arm_gic.c
+++ b/hw/intc/arm_gic.c
@@ -147,6 +147,24 @@ static void gic_update(GICState *s)
}
}
+/* Return true if this LR is empty, i.e. the corresponding bit
+ * in ELRSR is set.
+ */
+static inline bool gic_lr_entry_is_free(uint32_t entry)
+{
+ return (GICH_LR_STATE(entry) == GICH_LR_STATE_INVALID)
+ && (GICH_LR_HW(entry) || !GICH_LR_EOI(entry));
+}
+
+/* Return true if this LR should trigger an EOI maintenance interrupt, i.e. the
+ * corrsponding bit in EISR is set.
+ */
+static inline bool gic_lr_entry_is_eoi(uint32_t entry)
+{
+ return (GICH_LR_STATE(entry) == GICH_LR_STATE_INVALID)
+ && !GICH_LR_HW(entry) && GICH_LR_EOI(entry);
+}
+
static void gic_set_irq_11mpcore(GICState *s, int irq, int level,
int cm, int target)
{
@@ -1572,6 +1590,200 @@ static MemTxResult gic_thisvcpu_write(void *opaque, hwaddr addr,
return gic_cpu_write(s, gic_get_current_vcpu(s), addr, value, attrs);
}
+static uint32_t gic_compute_eisr(GICState *s, int cpu, int lr_start)
+{
+ int lr_idx;
+ uint32_t ret = 0;
+
+ for (lr_idx = lr_start; lr_idx < s->num_lrs; lr_idx++) {
+ uint32_t *entry = &s->h_lr[lr_idx][cpu];
+ ret = deposit32(ret, lr_idx - lr_start, 1,
+ gic_lr_entry_is_eoi(*entry));
+ }
+
+ return ret;
+}
+
+static uint32_t gic_compute_elrsr(GICState *s, int cpu, int lr_start)
+{
+ int lr_idx;
+ uint32_t ret = 0;
+
+ for (lr_idx = lr_start; lr_idx < s->num_lrs; lr_idx++) {
+ uint32_t *entry = &s->h_lr[lr_idx][cpu];
+ ret = deposit32(ret, lr_idx - lr_start, 1,
+ gic_lr_entry_is_free(*entry));
+ }
+
+ return ret;
+}
+
+static void gic_vmcr_write(GICState *s, uint32_t value, MemTxAttrs attrs)
+{
+ int vcpu = gic_get_current_vcpu(s);
+ uint32_t ctlr;
+ uint32_t abpr;
+ uint32_t bpr;
+ uint32_t prio_mask;
+
+ ctlr = FIELD_EX32(value, GICH_VMCR, VMCCtlr);
+ abpr = FIELD_EX32(value, GICH_VMCR, VMABP);
+ bpr = FIELD_EX32(value, GICH_VMCR, VMBP);
+ prio_mask = FIELD_EX32(value, GICH_VMCR, VMPriMask) << 3;
+
+ gic_set_cpu_control(s, vcpu, ctlr, attrs);
+ s->abpr[vcpu] = MAX(abpr, GIC_VIRT_MIN_ABPR);
+ s->bpr[vcpu] = MAX(bpr, GIC_VIRT_MIN_BPR);
+ gic_set_priority_mask(s, vcpu, prio_mask, attrs);
+}
+
+static MemTxResult gic_hyp_read(void *opaque, int cpu, hwaddr addr,
+ uint64_t *data, MemTxAttrs attrs)
+{
+ GICState *s = ARM_GIC(opaque);
+ int vcpu = cpu + GIC_NCPU;
+
+ switch (addr) {
+ case A_GICH_HCR: /* Hypervisor Control */
+ *data = s->h_hcr[cpu];
+ break;
+
+ case A_GICH_VTR: /* VGIC Type */
+ *data = FIELD_DP32(0, GICH_VTR, ListRegs, s->num_lrs - 1);
+ *data = FIELD_DP32(*data, GICH_VTR, PREbits,
+ GIC_VIRT_MAX_GROUP_PRIO_BITS - 1);
+ *data = FIELD_DP32(*data, GICH_VTR, PRIbits,
+ (7 - GIC_VIRT_MIN_BPR) - 1);
+ break;
+
+ case A_GICH_VMCR: /* Virtual Machine Control */
+ *data = FIELD_DP32(0, GICH_VMCR, VMCCtlr,
+ extract32(s->cpu_ctlr[vcpu], 0, 10));
+ *data = FIELD_DP32(*data, GICH_VMCR, VMABP, s->abpr[vcpu]);
+ *data = FIELD_DP32(*data, GICH_VMCR, VMBP, s->bpr[vcpu]);
+ *data = FIELD_DP32(*data, GICH_VMCR, VMPriMask,
+ extract32(s->priority_mask[vcpu], 3, 5));
+ break;
+
+ case A_GICH_MISR: /* Maintenance Interrupt Status */
+ *data = s->h_misr[cpu];
+ break;
+
+ case A_GICH_EISR0: /* End of Interrupt Status 0 and 1 */
+ case A_GICH_EISR1:
+ *data = gic_compute_eisr(s, cpu, (addr - A_GICH_EISR0) * 8);
+ break;
+
+ case A_GICH_ELRSR0: /* Empty List Status 0 and 1 */
+ case A_GICH_ELRSR1:
+ *data = gic_compute_elrsr(s, cpu, (addr - A_GICH_ELRSR0) * 8);
+ break;
+
+ case A_GICH_APR: /* Active Priorities */
+ *data = s->h_apr[cpu];
+ break;
+
+ case A_GICH_LR0 ... A_GICH_LR63: /* List Registers */
+ {
+ int lr_idx = (addr - A_GICH_LR0) / 4;
+
+ if (lr_idx > s->num_lrs) {
+ *data = 0;
+ } else {
+ *data = s->h_lr[lr_idx][cpu];
+ }
+ break;
+ }
+
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "gic_hyp_read: Bad offset %" HWADDR_PRIx "\n", addr);
+ return MEMTX_OK;
+ }
+
+ return MEMTX_OK;
+}
+
+static MemTxResult gic_hyp_write(void *opaque, int cpu, hwaddr addr,
+ uint64_t value, MemTxAttrs attrs)
+{
+ GICState *s = ARM_GIC(opaque);
+ int vcpu = cpu + GIC_NCPU;
+
+ switch (addr) {
+ case A_GICH_HCR: /* Hypervisor Control */
+ s->h_hcr[cpu] = value & GICH_HCR_MASK;
+ break;
+
+ case A_GICH_VMCR: /* Virtual Machine Control */
+ gic_vmcr_write(s, value, attrs);
+ break;
+
+ case A_GICH_APR: /* Active Priorities */
+ s->h_apr[cpu] = value;
+ s->running_priority[vcpu] = gic_get_prio_from_apr_bits(s, vcpu);
+ break;
+
+ case A_GICH_LR0 ... A_GICH_LR63: /* List Registers */
+ {
+ int lr_idx = (addr - A_GICH_LR0) / 4;
+
+ if (lr_idx > s->num_lrs) {
+ return MEMTX_OK;
+ }
+
+ s->h_lr[lr_idx][cpu] = value & GICH_LR_MASK;
+ break;
+ }
+
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "gic_hyp_write: Bad offset %" HWADDR_PRIx "\n", addr);
+ return MEMTX_OK;
+ }
+
+ return MEMTX_OK;
+}
+
+static MemTxResult gic_thiscpu_hyp_read(void *opaque, hwaddr addr, uint64_t *data,
+ unsigned size, MemTxAttrs attrs)
+{
+ GICState *s = (GICState *)opaque;
+
+ return gic_hyp_read(s, gic_get_current_cpu(s), addr, data, attrs);
+}
+
+static MemTxResult gic_thiscpu_hyp_write(void *opaque, hwaddr addr,
+ uint64_t value, unsigned size,
+ MemTxAttrs attrs)
+{
+ GICState *s = (GICState *)opaque;
+
+ return gic_hyp_write(s, gic_get_current_cpu(s), addr, value, attrs);
+}
+
+static MemTxResult gic_do_hyp_read(void *opaque, hwaddr addr, uint64_t *data,
+ unsigned size, MemTxAttrs attrs)
+{
+ GICState **backref = (GICState **)opaque;
+ GICState *s = *backref;
+ int id = (backref - s->backref);
+
+ return gic_hyp_read(s, id, addr, data, attrs);
+}
+
+static MemTxResult gic_do_hyp_write(void *opaque, hwaddr addr,
+ uint64_t value, unsigned size,
+ MemTxAttrs attrs)
+{
+ GICState **backref = (GICState **)opaque;
+ GICState *s = *backref;
+ int id = (backref - s->backref);
+
+ return gic_hyp_write(s, id + GIC_NCPU, addr, value, attrs);
+
+}
+
static const MemoryRegionOps gic_ops[2] = {
{
.read_with_attrs = gic_dist_read,
@@ -1593,8 +1805,8 @@ static const MemoryRegionOps gic_cpu_ops = {
static const MemoryRegionOps gic_virt_ops[2] = {
{
- .read_with_attrs = NULL,
- .write_with_attrs = NULL,
+ .read_with_attrs = gic_thiscpu_hyp_read,
+ .write_with_attrs = gic_thiscpu_hyp_write,
.endianness = DEVICE_NATIVE_ENDIAN,
},
{
@@ -1604,6 +1816,12 @@ static const MemoryRegionOps gic_virt_ops[2] = {
}
};
+static const MemoryRegionOps gic_viface_ops = {
+ .read_with_attrs = gic_do_hyp_read,
+ .write_with_attrs = gic_do_hyp_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
static void arm_gic_realize(DeviceState *dev, Error **errp)
{
/* Device instance realize function for the GIC sysbus device */
@@ -1645,6 +1863,19 @@ static void arm_gic_realize(DeviceState *dev, Error **errp)
&s->backref[i], "gic_cpu", 0x100);
sysbus_init_mmio(sbd, &s->cpuiomem[i+1]);
}
+
+ /* Extra core-specific regions for virtual interfaces. This is required by
+ * the GICv2 specification.
+ */
+ if (s->virt_extn) {
+ for (i = 0; i < s->num_cpu; i++) {
+ memory_region_init_io(&s->vifaceiomem[i + 1], OBJECT(s),
+ &gic_viface_ops, &s->backref[i],
+ "gic_viface", 0x1000);
+ sysbus_init_mmio(sbd, &s->vifaceiomem[i + 1]);
+ }
+ }
+
}
static void arm_gic_class_init(ObjectClass *klass, void *data)
--
2.18.0
^ permalink raw reply related [flat|nested] 47+ messages in thread
* [Qemu-devel] [PULL 27/45] intc/arm_gic: Implement gic_update_virt() function
2018-08-14 18:17 [Qemu-devel] [PULL 00/45] target-arm queue Peter Maydell
` (25 preceding siblings ...)
2018-08-14 18:17 ` [Qemu-devel] [PULL 26/45] intc/arm_gic: Implement the virtual interface registers Peter Maydell
@ 2018-08-14 18:17 ` Peter Maydell
2018-08-14 18:17 ` [Qemu-devel] [PULL 28/45] intc/arm_gic: Implement maintenance interrupt generation Peter Maydell
` (18 subsequent siblings)
45 siblings, 0 replies; 47+ messages in thread
From: Peter Maydell @ 2018-08-14 18:17 UTC (permalink / raw)
To: qemu-devel
From: Luc Michel <luc.michel@greensocs.com>
Add the gic_update_virt() function to update the vCPU interface states
and raise vIRQ and vFIQ as needed. This commit renames gic_update() to
gic_update_internal() and generalizes it to handle both cases, with a
`virt' parameter to track whether we are updating the CPU or vCPU
interfaces.
The main difference between CPU and vCPU is the way we select the best
IRQ. This part has been split into the gic_get_best_(v)irq functions.
For the virt case, the LRs are iterated to find the best candidate.
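A simplified, self-contained sketch of that selection follows: among pending
list registers, keep the entry with the lowest (most urgent) priority value.
The struct layout is invented for the example; the real code reads the fields
through the GICH_LR_* accessors.

    #include <stdio.h>

    #define LR_STATE_PENDING 1u

    struct toy_lr {
        unsigned state;
        unsigned prio;
        unsigned virt_id;
    };

    static unsigned best_virq(const struct toy_lr *lrs, int num_lrs)
    {
        unsigned best_irq = 1023;   /* spurious id: nothing to deliver */
        unsigned best_prio = 0x100; /* worse than any 8-bit priority */

        for (int i = 0; i < num_lrs; i++) {
            if (lrs[i].state == LR_STATE_PENDING && lrs[i].prio < best_prio) {
                best_prio = lrs[i].prio;
                best_irq = lrs[i].virt_id;
            }
        }
        return best_irq;
    }

    int main(void)
    {
        struct toy_lr lrs[] = {
            { LR_STATE_PENDING, 0xa0, 42 },
            { LR_STATE_PENDING, 0x20, 27 }, /* lower value wins */
            { 0,                0x00, 99 }, /* not pending, ignored */
        };
        printf("best vIRQ: %u\n", best_virq(lrs, 3)); /* prints 27 */
        return 0;
    }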
Signed-off-by: Luc Michel <luc.michel@greensocs.com>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20180727095421.386-17-luc.michel@greensocs.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
hw/intc/arm_gic.c | 175 +++++++++++++++++++++++++++++++++++-----------
1 file changed, 136 insertions(+), 39 deletions(-)
diff --git a/hw/intc/arm_gic.c b/hw/intc/arm_gic.c
index 60631964876..6b97e197966 100644
--- a/hw/intc/arm_gic.c
+++ b/hw/intc/arm_gic.c
@@ -79,74 +79,149 @@ static inline bool gic_cpu_ns_access(GICState *s, int cpu, MemTxAttrs attrs)
return !gic_is_vcpu(cpu) && s->security_extn && !attrs.secure;
}
+static inline void gic_get_best_irq(GICState *s, int cpu,
+ int *best_irq, int *best_prio, int *group)
+{
+ int irq;
+ int cm = 1 << cpu;
+
+ *best_irq = 1023;
+ *best_prio = 0x100;
+
+ for (irq = 0; irq < s->num_irq; irq++) {
+ if (GIC_DIST_TEST_ENABLED(irq, cm) && gic_test_pending(s, irq, cm) &&
+ (!GIC_DIST_TEST_ACTIVE(irq, cm)) &&
+ (irq < GIC_INTERNAL || GIC_DIST_TARGET(irq) & cm)) {
+ if (GIC_DIST_GET_PRIORITY(irq, cpu) < *best_prio) {
+ *best_prio = GIC_DIST_GET_PRIORITY(irq, cpu);
+ *best_irq = irq;
+ }
+ }
+ }
+
+ if (*best_irq < 1023) {
+ *group = GIC_DIST_TEST_GROUP(*best_irq, cm);
+ }
+}
+
+static inline void gic_get_best_virq(GICState *s, int cpu,
+ int *best_irq, int *best_prio, int *group)
+{
+ int lr_idx = 0;
+
+ *best_irq = 1023;
+ *best_prio = 0x100;
+
+ for (lr_idx = 0; lr_idx < s->num_lrs; lr_idx++) {
+ uint32_t lr_entry = s->h_lr[lr_idx][cpu];
+ int state = GICH_LR_STATE(lr_entry);
+
+ if (state == GICH_LR_STATE_PENDING) {
+ int prio = GICH_LR_PRIORITY(lr_entry);
+
+ if (prio < *best_prio) {
+ *best_prio = prio;
+ *best_irq = GICH_LR_VIRT_ID(lr_entry);
+ *group = GICH_LR_GROUP(lr_entry);
+ }
+ }
+ }
+}
+
+/* Return true if IRQ signaling is enabled for the given cpu and at least one
+ * of the given groups:
+ * - in the non-virt case, the distributor must be enabled for one of the
+ * given groups
+ * - in the virt case, the virtual interface must be enabled.
+ * - in all cases, the (v)CPU interface must be enabled for one of the given
+ * groups.
+ */
+static inline bool gic_irq_signaling_enabled(GICState *s, int cpu, bool virt,
+ int group_mask)
+{
+ if (!virt && !(s->ctlr & group_mask)) {
+ return false;
+ }
+
+ if (virt && !(s->h_hcr[cpu] & R_GICH_HCR_EN_MASK)) {
+ return false;
+ }
+
+ if (!(s->cpu_ctlr[cpu] & group_mask)) {
+ return false;
+ }
+
+ return true;
+}
+
/* TODO: Many places that call this routine could be optimized. */
/* Update interrupt status after enabled or pending bits have been changed. */
-static void gic_update(GICState *s)
+static inline void gic_update_internal(GICState *s, bool virt)
{
int best_irq;
int best_prio;
- int irq;
int irq_level, fiq_level;
- int cpu;
- int cm;
+ int cpu, cpu_iface;
+ int group = 0;
+ qemu_irq *irq_lines = virt ? s->parent_virq : s->parent_irq;
+ qemu_irq *fiq_lines = virt ? s->parent_vfiq : s->parent_fiq;
for (cpu = 0; cpu < s->num_cpu; cpu++) {
- cm = 1 << cpu;
- s->current_pending[cpu] = 1023;
- if (!(s->ctlr & (GICD_CTLR_EN_GRP0 | GICD_CTLR_EN_GRP1))
- || !(s->cpu_ctlr[cpu] & (GICC_CTLR_EN_GRP0 | GICC_CTLR_EN_GRP1))) {
- qemu_irq_lower(s->parent_irq[cpu]);
- qemu_irq_lower(s->parent_fiq[cpu]);
+ cpu_iface = virt ? (cpu + GIC_NCPU) : cpu;
+
+ s->current_pending[cpu_iface] = 1023;
+ if (!gic_irq_signaling_enabled(s, cpu, virt,
+ GICD_CTLR_EN_GRP0 | GICD_CTLR_EN_GRP1)) {
+ qemu_irq_lower(irq_lines[cpu]);
+ qemu_irq_lower(fiq_lines[cpu]);
continue;
}
- best_prio = 0x100;
- best_irq = 1023;
- for (irq = 0; irq < s->num_irq; irq++) {
- if (GIC_DIST_TEST_ENABLED(irq, cm) &&
- gic_test_pending(s, irq, cm) &&
- (!GIC_DIST_TEST_ACTIVE(irq, cm)) &&
- (irq < GIC_INTERNAL || GIC_DIST_TARGET(irq) & cm)) {
- if (GIC_DIST_GET_PRIORITY(irq, cpu) < best_prio) {
- best_prio = GIC_DIST_GET_PRIORITY(irq, cpu);
- best_irq = irq;
- }
- }
+
+ if (virt) {
+ gic_get_best_virq(s, cpu, &best_irq, &best_prio, &group);
+ } else {
+ gic_get_best_irq(s, cpu, &best_irq, &best_prio, &group);
}
if (best_irq != 1023) {
trace_gic_update_bestirq(cpu, best_irq, best_prio,
- s->priority_mask[cpu], s->running_priority[cpu]);
+ s->priority_mask[cpu_iface], s->running_priority[cpu_iface]);
}
irq_level = fiq_level = 0;
- if (best_prio < s->priority_mask[cpu]) {
- s->current_pending[cpu] = best_irq;
- if (best_prio < s->running_priority[cpu]) {
- int group = GIC_DIST_TEST_GROUP(best_irq, cm);
-
- if (extract32(s->ctlr, group, 1) &&
- extract32(s->cpu_ctlr[cpu], group, 1)) {
- if (group == 0 && s->cpu_ctlr[cpu] & GICC_CTLR_FIQ_EN) {
+ if (best_prio < s->priority_mask[cpu_iface]) {
+ s->current_pending[cpu_iface] = best_irq;
+ if (best_prio < s->running_priority[cpu_iface]) {
+ if (gic_irq_signaling_enabled(s, cpu, virt, 1 << group)) {
+ if (group == 0 &&
+ s->cpu_ctlr[cpu_iface] & GICC_CTLR_FIQ_EN) {
DPRINTF("Raised pending FIQ %d (cpu %d)\n",
- best_irq, cpu);
+ best_irq, cpu_iface);
fiq_level = 1;
- trace_gic_update_set_irq(cpu, "fiq", fiq_level);
+ trace_gic_update_set_irq(cpu, virt ? "vfiq" : "fiq",
+ fiq_level);
} else {
DPRINTF("Raised pending IRQ %d (cpu %d)\n",
- best_irq, cpu);
+ best_irq, cpu_iface);
irq_level = 1;
- trace_gic_update_set_irq(cpu, "irq", irq_level);
+ trace_gic_update_set_irq(cpu, virt ? "virq" : "irq",
+ irq_level);
}
}
}
}
- qemu_set_irq(s->parent_irq[cpu], irq_level);
- qemu_set_irq(s->parent_fiq[cpu], fiq_level);
+ qemu_set_irq(irq_lines[cpu], irq_level);
+ qemu_set_irq(fiq_lines[cpu], fiq_level);
}
}
+static void gic_update(GICState *s)
+{
+ gic_update_internal(s, false);
+}
+
/* Return true if this LR is empty, i.e. the corresponding bit
* in ELRSR is set.
*/
@@ -165,6 +240,11 @@ static inline bool gic_lr_entry_is_eoi(uint32_t entry)
&& !GICH_LR_HW(entry) && GICH_LR_EOI(entry);
}
+static void gic_update_virt(GICState *s)
+{
+ gic_update_internal(s, true);
+}
+
static void gic_set_irq_11mpcore(GICState *s, int irq, int level,
int cm, int target)
{
@@ -449,7 +529,11 @@ uint32_t gic_acknowledge_irq(GICState *s, int cpu, MemTxAttrs attrs)
}
}
- gic_update(s);
+ if (gic_is_vcpu(cpu)) {
+ gic_update_virt(s);
+ } else {
+ gic_update(s);
+ }
DPRINTF("ACK %d\n", irq);
return ret;
}
@@ -627,6 +711,11 @@ static void gic_deactivate_irq(GICState *s, int cpu, int irq, MemTxAttrs attrs)
*/
int rcpu = gic_get_vcpu_real_id(cpu);
s->h_hcr[rcpu] += 1 << R_GICH_HCR_EOICount_SHIFT;
+
+ /* Update the virtual interface in case a maintenance interrupt should
+ * be raised.
+ */
+ gic_update_virt(s);
return;
}
@@ -676,6 +765,7 @@ static void gic_complete_irq(GICState *s, int cpu, int irq, MemTxAttrs attrs)
}
}
+ gic_update_virt(s);
return;
}
@@ -1531,7 +1621,13 @@ static MemTxResult gic_cpu_write(GICState *s, int cpu, int offset,
"gic_cpu_write: Bad offset %x\n", (int)offset);
return MEMTX_OK;
}
- gic_update(s);
+
+ if (gic_is_vcpu(cpu)) {
+ gic_update_virt(s);
+ } else {
+ gic_update(s);
+ }
+
return MEMTX_OK;
}
@@ -1742,6 +1838,7 @@ static MemTxResult gic_hyp_write(void *opaque, int cpu, hwaddr addr,
return MEMTX_OK;
}
+ gic_update_virt(s);
return MEMTX_OK;
}
--
2.18.0
^ permalink raw reply related [flat|nested] 47+ messages in thread
* [Qemu-devel] [PULL 28/45] intc/arm_gic: Implement maintenance interrupt generation
2018-08-14 18:17 [Qemu-devel] [PULL 00/45] target-arm queue Peter Maydell
` (26 preceding siblings ...)
2018-08-14 18:17 ` [Qemu-devel] [PULL 27/45] intc/arm_gic: Implement gic_update_virt() function Peter Maydell
@ 2018-08-14 18:17 ` Peter Maydell
2018-08-14 18:17 ` [Qemu-devel] [PULL 29/45] intc/arm_gic: Improve traces Peter Maydell
` (17 subsequent siblings)
45 siblings, 0 replies; 47+ messages in thread
From: Peter Maydell @ 2018-08-14 18:17 UTC (permalink / raw)
To: qemu-devel
From: Luc Michel <luc.michel@greensocs.com>
Implement the maintenance interrupt generation that is part of the GICv2
virtualization extensions.
Signed-off-by: Luc Michel <luc.michel@greensocs.com>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20180727095421.386-18-luc.michel@greensocs.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
hw/intc/arm_gic.c | 97 +++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 97 insertions(+)
diff --git a/hw/intc/arm_gic.c b/hw/intc/arm_gic.c
index 6b97e197966..6ff7da3e5d3 100644
--- a/hw/intc/arm_gic.c
+++ b/hw/intc/arm_gic.c
@@ -240,9 +240,106 @@ static inline bool gic_lr_entry_is_eoi(uint32_t entry)
&& !GICH_LR_HW(entry) && GICH_LR_EOI(entry);
}
+static inline void gic_extract_lr_info(GICState *s, int cpu,
+ int *num_eoi, int *num_valid, int *num_pending)
+{
+ int lr_idx;
+
+ *num_eoi = 0;
+ *num_valid = 0;
+ *num_pending = 0;
+
+ for (lr_idx = 0; lr_idx < s->num_lrs; lr_idx++) {
+ uint32_t *entry = &s->h_lr[lr_idx][cpu];
+
+ if (gic_lr_entry_is_eoi(*entry)) {
+ (*num_eoi)++;
+ }
+
+ if (GICH_LR_STATE(*entry) != GICH_LR_STATE_INVALID) {
+ (*num_valid)++;
+ }
+
+ if (GICH_LR_STATE(*entry) == GICH_LR_STATE_PENDING) {
+ (*num_pending)++;
+ }
+ }
+}
+
+static void gic_compute_misr(GICState *s, int cpu)
+{
+ uint32_t value = 0;
+ int vcpu = cpu + GIC_NCPU;
+
+ int num_eoi, num_valid, num_pending;
+
+ gic_extract_lr_info(s, cpu, &num_eoi, &num_valid, &num_pending);
+
+ /* EOI */
+ if (num_eoi) {
+ value |= R_GICH_MISR_EOI_MASK;
+ }
+
+ /* U: true if only 0 or 1 LR entry is valid */
+ if ((s->h_hcr[cpu] & R_GICH_HCR_UIE_MASK) && (num_valid < 2)) {
+ value |= R_GICH_MISR_U_MASK;
+ }
+
+ /* LRENP: EOICount is not 0 */
+ if ((s->h_hcr[cpu] & R_GICH_HCR_LRENPIE_MASK) &&
+ ((s->h_hcr[cpu] & R_GICH_HCR_EOICount_MASK) != 0)) {
+ value |= R_GICH_MISR_LRENP_MASK;
+ }
+
+ /* NP: no pending interrupts */
+ if ((s->h_hcr[cpu] & R_GICH_HCR_NPIE_MASK) && (num_pending == 0)) {
+ value |= R_GICH_MISR_NP_MASK;
+ }
+
+ /* VGrp0E: group0 virq signaling enabled */
+ if ((s->h_hcr[cpu] & R_GICH_HCR_VGRP0EIE_MASK) &&
+ (s->cpu_ctlr[vcpu] & GICC_CTLR_EN_GRP0)) {
+ value |= R_GICH_MISR_VGrp0E_MASK;
+ }
+
+ /* VGrp0D: group0 virq signaling disabled */
+ if ((s->h_hcr[cpu] & R_GICH_HCR_VGRP0DIE_MASK) &&
+ !(s->cpu_ctlr[vcpu] & GICC_CTLR_EN_GRP0)) {
+ value |= R_GICH_MISR_VGrp0D_MASK;
+ }
+
+ /* VGrp1E: group1 virq signaling enabled */
+ if ((s->h_hcr[cpu] & R_GICH_HCR_VGRP1EIE_MASK) &&
+ (s->cpu_ctlr[vcpu] & GICC_CTLR_EN_GRP1)) {
+ value |= R_GICH_MISR_VGrp1E_MASK;
+ }
+
+ /* VGrp1D: group1 virq signaling disabled */
+ if ((s->h_hcr[cpu] & R_GICH_HCR_VGRP1DIE_MASK) &&
+ !(s->cpu_ctlr[vcpu] & GICC_CTLR_EN_GRP1)) {
+ value |= R_GICH_MISR_VGrp1D_MASK;
+ }
+
+ s->h_misr[cpu] = value;
+}
+
+static void gic_update_maintenance(GICState *s)
+{
+ int cpu = 0;
+ int maint_level;
+
+ for (cpu = 0; cpu < s->num_cpu; cpu++) {
+ gic_compute_misr(s, cpu);
+ maint_level = (s->h_hcr[cpu] & R_GICH_HCR_EN_MASK) && s->h_misr[cpu];
+
+ qemu_set_irq(s->maintenance_irq[cpu], maint_level);
+ }
+}
+
static void gic_update_virt(GICState *s)
{
gic_update_internal(s, true);
+ gic_update_maintenance(s);
}
static void gic_set_irq_11mpcore(GICState *s, int irq, int level,
--
2.18.0
^ permalink raw reply related [flat|nested] 47+ messages in thread
* [Qemu-devel] [PULL 29/45] intc/arm_gic: Improve traces
2018-08-14 18:17 [Qemu-devel] [PULL 00/45] target-arm queue Peter Maydell
` (27 preceding siblings ...)
2018-08-14 18:17 ` [Qemu-devel] [PULL 28/45] intc/arm_gic: Implement maintenance interrupt generation Peter Maydell
@ 2018-08-14 18:17 ` Peter Maydell
2018-08-14 18:18 ` [Qemu-devel] [PULL 30/45] xlnx-zynqmp: Improve GIC wiring and MMIO mapping Peter Maydell
` (16 subsequent siblings)
45 siblings, 0 replies; 47+ messages in thread
From: Peter Maydell @ 2018-08-14 18:17 UTC (permalink / raw)
To: qemu-devel
From: Luc Michel <luc.michel@greensocs.com>
Add some traces to the ARM GIC to catch register accesses (distributor,
(v)cpu interface and virtual interface), and to take into account
virtualization extensions (print `vcpu` instead of `cpu` when needed).
Also add some virtualization extensions specific traces: LR updating
and maintenance IRQ generation.
Signed-off-by: Luc Michel <luc.michel@greensocs.com>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20180727095421.386-19-luc.michel@greensocs.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
hw/intc/arm_gic.c | 31 +++++++++++++++++++++++++------
hw/intc/trace-events | 12 ++++++++++--
2 files changed, 35 insertions(+), 8 deletions(-)
diff --git a/hw/intc/arm_gic.c b/hw/intc/arm_gic.c
index 6ff7da3e5d3..c1b35fc1ee2 100644
--- a/hw/intc/arm_gic.c
+++ b/hw/intc/arm_gic.c
@@ -184,8 +184,10 @@ static inline void gic_update_internal(GICState *s, bool virt)
}
if (best_irq != 1023) {
- trace_gic_update_bestirq(cpu, best_irq, best_prio,
- s->priority_mask[cpu_iface], s->running_priority[cpu_iface]);
+ trace_gic_update_bestirq(virt ? "vcpu" : "cpu", cpu,
+ best_irq, best_prio,
+ s->priority_mask[cpu_iface],
+ s->running_priority[cpu_iface]);
}
irq_level = fiq_level = 0;
@@ -332,6 +334,7 @@ static void gic_update_maintenance(GICState *s)
gic_compute_misr(s, cpu);
maint_level = (s->h_hcr[cpu] & R_GICH_HCR_EN_MASK) && s->h_misr[cpu];
+ trace_gic_update_maintenance_irq(cpu, maint_level);
qemu_set_irq(s->maintenance_irq[cpu], maint_level);
}
}
@@ -597,7 +600,8 @@ uint32_t gic_acknowledge_irq(GICState *s, int cpu, MemTxAttrs attrs)
* is in the wrong group.
*/
irq = gic_get_current_pending_irq(s, cpu, attrs);
- trace_gic_acknowledge_irq(gic_get_vcpu_real_id(cpu), irq);
+ trace_gic_acknowledge_irq(gic_is_vcpu(cpu) ? "vcpu" : "cpu",
+ gic_get_vcpu_real_id(cpu), irq);
if (irq >= GIC_MAXIRQ) {
DPRINTF("ACK, no pending interrupt or it is hidden: %d\n", irq);
@@ -1130,20 +1134,23 @@ static MemTxResult gic_dist_read(void *opaque, hwaddr offset, uint64_t *data,
switch (size) {
case 1:
*data = gic_dist_readb(opaque, offset, attrs);
- return MEMTX_OK;
+ break;
case 2:
*data = gic_dist_readb(opaque, offset, attrs);
*data |= gic_dist_readb(opaque, offset + 1, attrs) << 8;
- return MEMTX_OK;
+ break;
case 4:
*data = gic_dist_readb(opaque, offset, attrs);
*data |= gic_dist_readb(opaque, offset + 1, attrs) << 8;
*data |= gic_dist_readb(opaque, offset + 2, attrs) << 16;
*data |= gic_dist_readb(opaque, offset + 3, attrs) << 24;
- return MEMTX_OK;
+ break;
default:
return MEMTX_ERROR;
}
+
+ trace_gic_dist_read(offset, size, *data);
+ return MEMTX_OK;
}
static void gic_dist_writeb(void *opaque, hwaddr offset,
@@ -1482,6 +1489,8 @@ static void gic_dist_writel(void *opaque, hwaddr offset,
static MemTxResult gic_dist_write(void *opaque, hwaddr offset, uint64_t data,
unsigned size, MemTxAttrs attrs)
{
+ trace_gic_dist_write(offset, size, data);
+
switch (size) {
case 1:
gic_dist_writeb(opaque, offset, data, attrs);
@@ -1638,12 +1647,18 @@ static MemTxResult gic_cpu_read(GICState *s, int cpu, int offset,
*data = 0;
break;
}
+
+ trace_gic_cpu_read(gic_is_vcpu(cpu) ? "vcpu" : "cpu",
+ gic_get_vcpu_real_id(cpu), offset, *data);
return MEMTX_OK;
}
static MemTxResult gic_cpu_write(GICState *s, int cpu, int offset,
uint32_t value, MemTxAttrs attrs)
{
+ trace_gic_cpu_write(gic_is_vcpu(cpu) ? "vcpu" : "cpu",
+ gic_get_vcpu_real_id(cpu), offset, value);
+
switch (offset) {
case 0x00: /* Control */
gic_set_cpu_control(s, cpu, value, attrs);
@@ -1894,6 +1909,7 @@ static MemTxResult gic_hyp_read(void *opaque, int cpu, hwaddr addr,
return MEMTX_OK;
}
+ trace_gic_hyp_read(addr, *data);
return MEMTX_OK;
}
@@ -1903,6 +1919,8 @@ static MemTxResult gic_hyp_write(void *opaque, int cpu, hwaddr addr,
GICState *s = ARM_GIC(opaque);
int vcpu = cpu + GIC_NCPU;
+ trace_gic_hyp_write(addr, value);
+
switch (addr) {
case A_GICH_HCR: /* Hypervisor Control */
s->h_hcr[cpu] = value & GICH_HCR_MASK;
@@ -1926,6 +1944,7 @@ static MemTxResult gic_hyp_write(void *opaque, int cpu, hwaddr addr,
}
s->h_lr[lr_idx][cpu] = value & GICH_LR_MASK;
+ trace_gic_lr_entry(cpu, lr_idx, s->h_lr[lr_idx][cpu]);
break;
}
diff --git a/hw/intc/trace-events b/hw/intc/trace-events
index 5fb18e65c97..81c7c399f7d 100644
--- a/hw/intc/trace-events
+++ b/hw/intc/trace-events
@@ -92,9 +92,17 @@ aspeed_vic_write(uint64_t offset, unsigned size, uint32_t data) "To 0x%" PRIx64
gic_enable_irq(int irq) "irq %d enabled"
gic_disable_irq(int irq) "irq %d disabled"
gic_set_irq(int irq, int level, int cpumask, int target) "irq %d level %d cpumask 0x%x target 0x%x"
-gic_update_bestirq(int cpu, int irq, int prio, int priority_mask, int running_priority) "cpu %d irq %d priority %d cpu priority mask %d cpu running priority %d"
+gic_update_bestirq(const char *s, int cpu, int irq, int prio, int priority_mask, int running_priority) "%s %d irq %d priority %d cpu priority mask %d cpu running priority %d"
gic_update_set_irq(int cpu, const char *name, int level) "cpu[%d]: %s = %d"
-gic_acknowledge_irq(int cpu, int irq) "cpu %d acknowledged irq %d"
+gic_acknowledge_irq(const char *s, int cpu, int irq) "%s %d acknowledged irq %d"
+gic_cpu_write(const char *s, int cpu, int addr, uint32_t val) "%s %d iface write at 0x%08x 0x%08" PRIx32
+gic_cpu_read(const char *s, int cpu, int addr, uint32_t val) "%s %d iface read at 0x%08x: 0x%08" PRIx32
+gic_hyp_read(int addr, uint32_t val) "hyp read at 0x%08x: 0x%08" PRIx32
+gic_hyp_write(int addr, uint32_t val) "hyp write at 0x%08x: 0x%08" PRIx32
+gic_dist_read(int addr, unsigned int size, uint32_t val) "dist read at 0x%08x size %u: 0x%08" PRIx32
+gic_dist_write(int addr, unsigned int size, uint32_t val) "dist write at 0x%08x size %u: 0x%08" PRIx32
+gic_lr_entry(int cpu, int entry, uint32_t val) "cpu %d: new lr entry %d: 0x%08" PRIx32
+gic_update_maintenance_irq(int cpu, int val) "cpu %d: maintenance = %d"
# hw/intc/arm_gicv3_cpuif.c
gicv3_icc_pmr_read(uint32_t cpu, uint64_t val) "GICv3 ICC_PMR read cpu 0x%x value 0x%" PRIx64
--
2.18.0
^ permalink raw reply related [flat|nested] 47+ messages in thread
* [Qemu-devel] [PULL 30/45] xlnx-zynqmp: Improve GIC wiring and MMIO mapping
2018-08-14 18:17 [Qemu-devel] [PULL 00/45] target-arm queue Peter Maydell
` (28 preceding siblings ...)
2018-08-14 18:17 ` [Qemu-devel] [PULL 29/45] intc/arm_gic: Improve traces Peter Maydell
@ 2018-08-14 18:18 ` Peter Maydell
2018-08-14 18:18 ` [Qemu-devel] [PULL 31/45] arm/virt: Add support for GICv2 virtualization extensions Peter Maydell
` (15 subsequent siblings)
45 siblings, 0 replies; 47+ messages in thread
From: Peter Maydell @ 2018-08-14 18:18 UTC (permalink / raw)
To: qemu-devel
From: Luc Michel <luc.michel@greensocs.com>
This commit improves the way the GIC is realized and connected in the
ZynqMP SoC. The security extensions are enabled only if requested in the
machine state. The same goes for the virtualization extensions.
All the GIC to APU CPU(s) IRQ lines are now connected, including FIQ,
vIRQ and vFIQ. The missing CPU to GIC timers IRQ connections are also
added (HYP and SEC timers).
The GIC maintenance IRQs are back-wired to the correct GIC PPIs.
Finally, the MMIO mappings are reworked to take into account the ZynqMP
specifics. The GIC (v)CPU interface is aliased 16 times:
* for the first 0x1000 bytes from 0xf9010000 to 0xf901f000
* for the second 0x1000 bytes from 0xf9020000 to 0xf902f000
The virtual interface and virtual CPU interface are mapped
only when virtualization extensions are requested. The
XlnxZynqMPGICRegion struct has been enhanced to capture all
this information.
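For readers following the reworked mapping loop, here is a tiny standalone
sketch of how the 4 KiB aliases tile one 64 KiB under-decoded window. The base
address below is only an example; the real bases and offsets come from the
xlnx_zynqmp_gic_regions table in the diff.

    #include <stdio.h>

    #define REGION_SIZE 0x1000u                  /* XLNX_ZYNQMP_GIC_REGION_SIZE */
    #define NUM_ALIASES (0x10000u / REGION_SIZE) /* 16 aliases per window */

    int main(void)
    {
        unsigned base = 0xf9020000u;             /* example base only */

        for (unsigned j = 0; j < NUM_ALIASES; j++) {
            printf("alias %2u -> 0x%08x\n", j, base + j * REGION_SIZE);
        }
        return 0;
    }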
Signed-off-by: Luc Michel <luc.michel@greensocs.com>
Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
Message-id: 20180727095421.386-20-luc.michel@greensocs.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
include/hw/arm/xlnx-zynqmp.h | 4 +-
hw/arm/xlnx-zynqmp.c | 92 ++++++++++++++++++++++++++++++++----
2 files changed, 86 insertions(+), 10 deletions(-)
diff --git a/include/hw/arm/xlnx-zynqmp.h b/include/hw/arm/xlnx-zynqmp.h
index 82b6ec2486b..98f925ab84a 100644
--- a/include/hw/arm/xlnx-zynqmp.h
+++ b/include/hw/arm/xlnx-zynqmp.h
@@ -53,7 +53,7 @@
#define XLNX_ZYNQMP_OCM_RAM_0_ADDRESS 0xFFFC0000
#define XLNX_ZYNQMP_OCM_RAM_SIZE 0x10000
-#define XLNX_ZYNQMP_GIC_REGIONS 2
+#define XLNX_ZYNQMP_GIC_REGIONS 6
/* ZynqMP maps the ARM GIC regions (GICC, GICD ...) at consecutive 64k offsets
* and under-decodes the 64k region. This mirrors the 4k regions to every 4k
@@ -62,7 +62,7 @@
*/
#define XLNX_ZYNQMP_GIC_REGION_SIZE 0x1000
-#define XLNX_ZYNQMP_GIC_ALIASES (0x10000 / XLNX_ZYNQMP_GIC_REGION_SIZE - 1)
+#define XLNX_ZYNQMP_GIC_ALIASES (0x10000 / XLNX_ZYNQMP_GIC_REGION_SIZE)
#define XLNX_ZYNQMP_MAX_LOW_RAM_SIZE 0x80000000ull
diff --git a/hw/arm/xlnx-zynqmp.c b/hw/arm/xlnx-zynqmp.c
index 8de4868eb95..c1950403505 100644
--- a/hw/arm/xlnx-zynqmp.c
+++ b/hw/arm/xlnx-zynqmp.c
@@ -29,12 +29,17 @@
#define ARM_PHYS_TIMER_PPI 30
#define ARM_VIRT_TIMER_PPI 27
+#define ARM_HYP_TIMER_PPI 26
+#define ARM_SEC_TIMER_PPI 29
+#define GIC_MAINTENANCE_PPI 25
#define GEM_REVISION 0x40070106
#define GIC_BASE_ADDR 0xf9000000
#define GIC_DIST_ADDR 0xf9010000
#define GIC_CPU_ADDR 0xf9020000
+#define GIC_VIFACE_ADDR 0xf9040000
+#define GIC_VCPU_ADDR 0xf9060000
#define SATA_INTR 133
#define SATA_ADDR 0xFD0C0000
@@ -111,11 +116,54 @@ static const int adma_ch_intr[XLNX_ZYNQMP_NUM_ADMA_CH] = {
typedef struct XlnxZynqMPGICRegion {
int region_index;
uint32_t address;
+ uint32_t offset;
+ bool virt;
} XlnxZynqMPGICRegion;
static const XlnxZynqMPGICRegion xlnx_zynqmp_gic_regions[] = {
- { .region_index = 0, .address = GIC_DIST_ADDR, },
- { .region_index = 1, .address = GIC_CPU_ADDR, },
+ /* Distributor */
+ {
+ .region_index = 0,
+ .address = GIC_DIST_ADDR,
+ .offset = 0,
+ .virt = false
+ },
+
+ /* CPU interface */
+ {
+ .region_index = 1,
+ .address = GIC_CPU_ADDR,
+ .offset = 0,
+ .virt = false
+ },
+ {
+ .region_index = 1,
+ .address = GIC_CPU_ADDR + 0x10000,
+ .offset = 0x1000,
+ .virt = false
+ },
+
+ /* Virtual interface */
+ {
+ .region_index = 2,
+ .address = GIC_VIFACE_ADDR,
+ .offset = 0,
+ .virt = true
+ },
+
+ /* Virtual CPU interface */
+ {
+ .region_index = 3,
+ .address = GIC_VCPU_ADDR,
+ .offset = 0,
+ .virt = true
+ },
+ {
+ .region_index = 3,
+ .address = GIC_VCPU_ADDR + 0x10000,
+ .offset = 0x1000,
+ .virt = true
+ },
};
static inline int arm_gic_ppi_index(int cpu_nr, int ppi_index)
@@ -281,6 +329,9 @@ static void xlnx_zynqmp_realize(DeviceState *dev, Error **errp)
qdev_prop_set_uint32(DEVICE(&s->gic), "num-irq", GIC_NUM_SPI_INTR + 32);
qdev_prop_set_uint32(DEVICE(&s->gic), "revision", 2);
qdev_prop_set_uint32(DEVICE(&s->gic), "num-cpu", num_apus);
+ qdev_prop_set_bit(DEVICE(&s->gic), "has-security-extensions", s->secure);
+ qdev_prop_set_bit(DEVICE(&s->gic),
+ "has-virtualization-extensions", s->virt);
/* Realize APUs before realizing the GIC. KVM requires this. */
for (i = 0; i < num_apus; i++) {
@@ -325,19 +376,23 @@ static void xlnx_zynqmp_realize(DeviceState *dev, Error **errp)
for (i = 0; i < XLNX_ZYNQMP_GIC_REGIONS; i++) {
SysBusDevice *gic = SYS_BUS_DEVICE(&s->gic);
const XlnxZynqMPGICRegion *r = &xlnx_zynqmp_gic_regions[i];
- MemoryRegion *mr = sysbus_mmio_get_region(gic, r->region_index);
+ MemoryRegion *mr;
uint32_t addr = r->address;
int j;
- sysbus_mmio_map(gic, r->region_index, addr);
+ if (r->virt && !s->virt) {
+ continue;
+ }
+ mr = sysbus_mmio_get_region(gic, r->region_index);
for (j = 0; j < XLNX_ZYNQMP_GIC_ALIASES; j++) {
MemoryRegion *alias = &s->gic_mr[i][j];
- addr += XLNX_ZYNQMP_GIC_REGION_SIZE;
memory_region_init_alias(alias, OBJECT(s), "zynqmp-gic-alias", mr,
- 0, XLNX_ZYNQMP_GIC_REGION_SIZE);
+ r->offset, XLNX_ZYNQMP_GIC_REGION_SIZE);
memory_region_add_subregion(system_memory, addr, alias);
+
+ addr += XLNX_ZYNQMP_GIC_REGION_SIZE;
}
}
@@ -347,12 +402,33 @@ static void xlnx_zynqmp_realize(DeviceState *dev, Error **errp)
sysbus_connect_irq(SYS_BUS_DEVICE(&s->gic), i,
qdev_get_gpio_in(DEVICE(&s->apu_cpu[i]),
ARM_CPU_IRQ));
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->gic), i + num_apus,
+ qdev_get_gpio_in(DEVICE(&s->apu_cpu[i]),
+ ARM_CPU_FIQ));
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->gic), i + num_apus * 2,
+ qdev_get_gpio_in(DEVICE(&s->apu_cpu[i]),
+ ARM_CPU_VIRQ));
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->gic), i + num_apus * 3,
+ qdev_get_gpio_in(DEVICE(&s->apu_cpu[i]),
+ ARM_CPU_VFIQ));
irq = qdev_get_gpio_in(DEVICE(&s->gic),
arm_gic_ppi_index(i, ARM_PHYS_TIMER_PPI));
- qdev_connect_gpio_out(DEVICE(&s->apu_cpu[i]), 0, irq);
+ qdev_connect_gpio_out(DEVICE(&s->apu_cpu[i]), GTIMER_PHYS, irq);
irq = qdev_get_gpio_in(DEVICE(&s->gic),
arm_gic_ppi_index(i, ARM_VIRT_TIMER_PPI));
- qdev_connect_gpio_out(DEVICE(&s->apu_cpu[i]), 1, irq);
+ qdev_connect_gpio_out(DEVICE(&s->apu_cpu[i]), GTIMER_VIRT, irq);
+ irq = qdev_get_gpio_in(DEVICE(&s->gic),
+ arm_gic_ppi_index(i, ARM_HYP_TIMER_PPI));
+ qdev_connect_gpio_out(DEVICE(&s->apu_cpu[i]), GTIMER_HYP, irq);
+ irq = qdev_get_gpio_in(DEVICE(&s->gic),
+ arm_gic_ppi_index(i, ARM_SEC_TIMER_PPI));
+ qdev_connect_gpio_out(DEVICE(&s->apu_cpu[i]), GTIMER_SEC, irq);
+
+ if (s->virt) {
+ irq = qdev_get_gpio_in(DEVICE(&s->gic),
+ arm_gic_ppi_index(i, GIC_MAINTENANCE_PPI));
+ sysbus_connect_irq(SYS_BUS_DEVICE(&s->gic), i + num_apus * 4, irq);
+ }
}
if (s->has_rpu) {
--
2.18.0
^ permalink raw reply related [flat|nested] 47+ messages in thread
* [Qemu-devel] [PULL 31/45] arm/virt: Add support for GICv2 virtualization extensions
2018-08-14 18:17 [Qemu-devel] [PULL 00/45] target-arm queue Peter Maydell
` (29 preceding siblings ...)
2018-08-14 18:18 ` [Qemu-devel] [PULL 30/45] xlnx-zynqmp: Improve GIC wiring and MMIO mapping Peter Maydell
@ 2018-08-14 18:18 ` Peter Maydell
2018-08-14 18:18 ` [Qemu-devel] [PULL 32/45] arm: Fix return code of arm_load_elf Peter Maydell
` (14 subsequent siblings)
45 siblings, 0 replies; 47+ messages in thread
From: Peter Maydell @ 2018-08-14 18:18 UTC (permalink / raw)
To: qemu-devel
From: Luc Michel <luc.michel@greensocs.com>
Add support for GICv2 virtualization extensions by mapping the necessary
I/O regions and connecting the maintenance IRQ lines.
Declare those additions in the device tree and in the ACPI tables.
Signed-off-by: Luc Michel <luc.michel@greensocs.com>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20180727095421.386-21-luc.michel@greensocs.com
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
include/hw/arm/virt.h | 4 +++-
hw/arm/virt-acpi-build.c | 6 +++--
hw/arm/virt.c | 52 +++++++++++++++++++++++++++++++++-------
3 files changed, 50 insertions(+), 12 deletions(-)
diff --git a/include/hw/arm/virt.h b/include/hw/arm/virt.h
index 9a870ccb6a5..4cc57a7ef62 100644
--- a/include/hw/arm/virt.h
+++ b/include/hw/arm/virt.h
@@ -42,7 +42,7 @@
#define NUM_VIRTIO_TRANSPORTS 32
#define NUM_SMMU_IRQS 4
-#define ARCH_GICV3_MAINT_IRQ 9
+#define ARCH_GIC_MAINT_IRQ 9
#define ARCH_TIMER_VIRT_IRQ 11
#define ARCH_TIMER_S_EL1_IRQ 13
@@ -60,6 +60,8 @@ enum {
VIRT_GIC_DIST,
VIRT_GIC_CPU,
VIRT_GIC_V2M,
+ VIRT_GIC_HYP,
+ VIRT_GIC_VCPU,
VIRT_GIC_ITS,
VIRT_GIC_REDIST,
VIRT_GIC_REDIST2,
diff --git a/hw/arm/virt-acpi-build.c b/hw/arm/virt-acpi-build.c
index 6ea47e25883..ce31abd62c2 100644
--- a/hw/arm/virt-acpi-build.c
+++ b/hw/arm/virt-acpi-build.c
@@ -659,6 +659,8 @@ build_madt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
gicc->length = sizeof(*gicc);
if (vms->gic_version == 2) {
gicc->base_address = cpu_to_le64(memmap[VIRT_GIC_CPU].base);
+ gicc->gich_base_address = cpu_to_le64(memmap[VIRT_GIC_HYP].base);
+ gicc->gicv_base_address = cpu_to_le64(memmap[VIRT_GIC_VCPU].base);
}
gicc->cpu_interface_number = cpu_to_le32(i);
gicc->arm_mpidr = cpu_to_le64(armcpu->mp_affinity);
@@ -668,8 +670,8 @@ build_madt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
if (arm_feature(&armcpu->env, ARM_FEATURE_PMU)) {
gicc->performance_interrupt = cpu_to_le32(PPI(VIRTUAL_PMU_IRQ));
}
- if (vms->virt && vms->gic_version == 3) {
- gicc->vgic_interrupt = cpu_to_le32(PPI(ARCH_GICV3_MAINT_IRQ));
+ if (vms->virt) {
+ gicc->vgic_interrupt = cpu_to_le32(PPI(ARCH_GIC_MAINT_IRQ));
}
}
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index 281ddcdf6e2..0807be985c0 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -131,6 +131,8 @@ static const MemMapEntry a15memmap[] = {
[VIRT_GIC_DIST] = { 0x08000000, 0x00010000 },
[VIRT_GIC_CPU] = { 0x08010000, 0x00010000 },
[VIRT_GIC_V2M] = { 0x08020000, 0x00001000 },
+ [VIRT_GIC_HYP] = { 0x08030000, 0x00010000 },
+ [VIRT_GIC_VCPU] = { 0x08040000, 0x00010000 },
/* The space in between here is reserved for GICv3 CPU/vCPU/HYP */
[VIRT_GIC_ITS] = { 0x08080000, 0x00020000 },
/* This redistributor space allows up to 2*64kB*123 CPUs */
@@ -440,18 +442,33 @@ static void fdt_add_gic_node(VirtMachineState *vms)
if (vms->virt) {
qemu_fdt_setprop_cells(vms->fdt, nodename, "interrupts",
- GIC_FDT_IRQ_TYPE_PPI, ARCH_GICV3_MAINT_IRQ,
+ GIC_FDT_IRQ_TYPE_PPI, ARCH_GIC_MAINT_IRQ,
GIC_FDT_IRQ_FLAGS_LEVEL_HI);
}
} else {
/* 'cortex-a15-gic' means 'GIC v2' */
qemu_fdt_setprop_string(vms->fdt, nodename, "compatible",
"arm,cortex-a15-gic");
- qemu_fdt_setprop_sized_cells(vms->fdt, nodename, "reg",
- 2, vms->memmap[VIRT_GIC_DIST].base,
- 2, vms->memmap[VIRT_GIC_DIST].size,
- 2, vms->memmap[VIRT_GIC_CPU].base,
- 2, vms->memmap[VIRT_GIC_CPU].size);
+ if (!vms->virt) {
+ qemu_fdt_setprop_sized_cells(vms->fdt, nodename, "reg",
+ 2, vms->memmap[VIRT_GIC_DIST].base,
+ 2, vms->memmap[VIRT_GIC_DIST].size,
+ 2, vms->memmap[VIRT_GIC_CPU].base,
+ 2, vms->memmap[VIRT_GIC_CPU].size);
+ } else {
+ qemu_fdt_setprop_sized_cells(vms->fdt, nodename, "reg",
+ 2, vms->memmap[VIRT_GIC_DIST].base,
+ 2, vms->memmap[VIRT_GIC_DIST].size,
+ 2, vms->memmap[VIRT_GIC_CPU].base,
+ 2, vms->memmap[VIRT_GIC_CPU].size,
+ 2, vms->memmap[VIRT_GIC_HYP].base,
+ 2, vms->memmap[VIRT_GIC_HYP].size,
+ 2, vms->memmap[VIRT_GIC_VCPU].base,
+ 2, vms->memmap[VIRT_GIC_VCPU].size);
+ qemu_fdt_setprop_cells(vms->fdt, nodename, "interrupts",
+ GIC_FDT_IRQ_TYPE_PPI, ARCH_GIC_MAINT_IRQ,
+ GIC_FDT_IRQ_FLAGS_LEVEL_HI);
+ }
}
qemu_fdt_setprop_cell(vms->fdt, nodename, "phandle", vms->gic_phandle);
@@ -573,6 +590,11 @@ static void create_gic(VirtMachineState *vms, qemu_irq *pic)
qdev_prop_set_uint32(gicdev, "redist-region-count[1]",
MIN(smp_cpus - redist0_count, redist1_capacity));
}
+ } else {
+ if (!kvm_irqchip_in_kernel()) {
+ qdev_prop_set_bit(gicdev, "has-virtualization-extensions",
+ vms->virt);
+ }
}
qdev_init_nofail(gicdev);
gicbusdev = SYS_BUS_DEVICE(gicdev);
@@ -584,6 +606,10 @@ static void create_gic(VirtMachineState *vms, qemu_irq *pic)
}
} else {
sysbus_mmio_map(gicbusdev, 1, vms->memmap[VIRT_GIC_CPU].base);
+ if (vms->virt) {
+ sysbus_mmio_map(gicbusdev, 2, vms->memmap[VIRT_GIC_HYP].base);
+ sysbus_mmio_map(gicbusdev, 3, vms->memmap[VIRT_GIC_VCPU].base);
+ }
}
/* Wire the outputs from each CPU's generic timer and the GICv3
@@ -610,9 +636,17 @@ static void create_gic(VirtMachineState *vms, qemu_irq *pic)
ppibase + timer_irq[irq]));
}
- qdev_connect_gpio_out_named(cpudev, "gicv3-maintenance-interrupt", 0,
- qdev_get_gpio_in(gicdev, ppibase
- + ARCH_GICV3_MAINT_IRQ));
+ if (type == 3) {
+ qemu_irq irq = qdev_get_gpio_in(gicdev,
+ ppibase + ARCH_GIC_MAINT_IRQ);
+ qdev_connect_gpio_out_named(cpudev, "gicv3-maintenance-interrupt",
+ 0, irq);
+ } else if (vms->virt) {
+ qemu_irq irq = qdev_get_gpio_in(gicdev,
+ ppibase + ARCH_GIC_MAINT_IRQ);
+ sysbus_connect_irq(gicbusdev, i + 4 * smp_cpus, irq);
+ }
+
qdev_connect_gpio_out_named(cpudev, "pmu-interrupt", 0,
qdev_get_gpio_in(gicdev, ppibase
+ VIRTUAL_PMU_IRQ));
--
2.18.0
^ permalink raw reply related [flat|nested] 47+ messages in thread
* [Qemu-devel] [PULL 32/45] arm: Fix return code of arm_load_elf
2018-08-14 18:17 [Qemu-devel] [PULL 00/45] target-arm queue Peter Maydell
` (30 preceding siblings ...)
2018-08-14 18:18 ` [Qemu-devel] [PULL 31/45] arm/virt: Add support for GICv2 virtualization extensions Peter Maydell
@ 2018-08-14 18:18 ` Peter Maydell
2018-08-14 18:18 ` [Qemu-devel] [PULL 33/45] target/arm: Mask virtual interrupts if HCR_EL2.TGE is set Peter Maydell
` (13 subsequent siblings)
45 siblings, 0 replies; 47+ messages in thread
From: Peter Maydell @ 2018-08-14 18:18 UTC (permalink / raw)
To: qemu-devel
From: Adam Lackorzynski <adam@l4re.org>
Use an int64_t as a return type to restore
the negative check for arm_load_as.
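As a minimal standalone sketch (hypothetical names, not QEMU code), this is the pattern the change restores: a -1 error value stored in an unsigned return type can never satisfy a "< 0" check, while a signed return type can.
#include <stdint.h>
#include <stdio.h>
/* Hypothetical loaders returning an image size, or -1 on failure. */
static uint64_t load_image_unsigned(void)
{
    return -1;                          /* wraps to UINT64_MAX */
}
static int64_t load_image_signed(void)
{
    return -1;                          /* stays negative */
}
int main(void)
{
    if (load_image_unsigned() < 0) {    /* never true: value is unsigned */
        puts("unsigned: error detected");
    }
    if (load_image_signed() < 0) {      /* true: error detected */
        puts("signed: error detected");
    }
    return 0;
}
The diff below makes the corresponding change to arm_load_elf()'s return type.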
Signed-off-by: Adam Lackorzynski <adam@l4re.org>
Message-id: 20180730173712.GG4987@os.inf.tu-dresden.de
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
hw/arm/boot.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/hw/arm/boot.c b/hw/arm/boot.c
index e09201cc97c..ca9467e583f 100644
--- a/hw/arm/boot.c
+++ b/hw/arm/boot.c
@@ -818,9 +818,9 @@ static int do_arm_linux_init(Object *obj, void *opaque)
return 0;
}
-static uint64_t arm_load_elf(struct arm_boot_info *info, uint64_t *pentry,
- uint64_t *lowaddr, uint64_t *highaddr,
- int elf_machine, AddressSpace *as)
+static int64_t arm_load_elf(struct arm_boot_info *info, uint64_t *pentry,
+ uint64_t *lowaddr, uint64_t *highaddr,
+ int elf_machine, AddressSpace *as)
{
bool elf_is64;
union {
@@ -829,7 +829,7 @@ static uint64_t arm_load_elf(struct arm_boot_info *info, uint64_t *pentry,
} elf_header;
int data_swab = 0;
bool big_endian;
- uint64_t ret = -1;
+ int64_t ret = -1;
Error *err = NULL;
--
2.18.0
^ permalink raw reply related [flat|nested] 47+ messages in thread
* [Qemu-devel] [PULL 33/45] target/arm: Mask virtual interrupts if HCR_EL2.TGE is set
2018-08-14 18:17 [Qemu-devel] [PULL 00/45] target-arm queue Peter Maydell
` (31 preceding siblings ...)
2018-08-14 18:18 ` [Qemu-devel] [PULL 32/45] arm: Fix return code of arm_load_elf Peter Maydell
@ 2018-08-14 18:18 ` Peter Maydell
2018-08-14 18:18 ` [Qemu-devel] [PULL 34/45] target/arm: Honour HCR_EL2.TGE and MDCR_EL2.TDE in debug register access checks Peter Maydell
` (12 subsequent siblings)
45 siblings, 0 replies; 47+ messages in thread
From: Peter Maydell @ 2018-08-14 18:18 UTC (permalink / raw)
To: qemu-devel
If the "trap general exceptions" bit HCR_EL2.TGE is set, we
must mask all virtual interrupts (as per DDI0487C.a D1.14.3).
Implement this in arm_excp_unmasked().
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20180724115950.17316-2-peter.maydell@linaro.org
---
target/arm/cpu.h | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index e310ffc29d2..efb2a8d3f3d 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -2261,13 +2261,15 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
break;
case EXCP_VFIQ:
- if (secure || !(env->cp15.hcr_el2 & HCR_FMO)) {
+ if (secure || !(env->cp15.hcr_el2 & HCR_FMO)
+ || (env->cp15.hcr_el2 & HCR_TGE)) {
/* VFIQs are only taken when hypervized and non-secure. */
return false;
}
return !(env->daif & PSTATE_F);
case EXCP_VIRQ:
- if (secure || !(env->cp15.hcr_el2 & HCR_IMO)) {
+ if (secure || !(env->cp15.hcr_el2 & HCR_IMO)
+ || (env->cp15.hcr_el2 & HCR_TGE)) {
/* VIRQs are only taken when hypervized and non-secure. */
return false;
}
--
2.18.0
^ permalink raw reply related [flat|nested] 47+ messages in thread
* [Qemu-devel] [PULL 34/45] target/arm: Honour HCR_EL2.TGE and MDCR_EL2.TDE in debug register access checks
2018-08-14 18:17 [Qemu-devel] [PULL 00/45] target-arm queue Peter Maydell
` (32 preceding siblings ...)
2018-08-14 18:18 ` [Qemu-devel] [PULL 33/45] target/arm: Mask virtual interrupts if HCR_EL2.TGE is set Peter Maydell
@ 2018-08-14 18:18 ` Peter Maydell
2018-08-14 18:18 ` [Qemu-devel] [PULL 35/45] target/arm: Honour HCR_EL2.TGE when raising synchronous exceptions Peter Maydell
` (11 subsequent siblings)
45 siblings, 0 replies; 47+ messages in thread
From: Peter Maydell @ 2018-08-14 18:18 UTC (permalink / raw)
To: qemu-devel
Some debug registers can be trapped via MDCR_EL2 bits TDRA, TDOSA,
and TDA, which we implement in the functions access_tdra(),
access_tdosa() and access_tda(). If MDCR_EL2.TDE or HCR_EL2.TGE
is 1, the TDRA, TDOSA and TDA bits should behave as if they were 1.
Implement this by having the access functions check MDCR_EL2.TDE
and HCR_EL2.TGE.
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20180724115950.17316-3-peter.maydell@linaro.org
---
target/arm/helper.c | 18 ++++++++++++------
1 file changed, 12 insertions(+), 6 deletions(-)
diff --git a/target/arm/helper.c b/target/arm/helper.c
index 02c1c4d8404..3cd43cf7018 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -444,9 +444,11 @@ static CPAccessResult access_tdosa(CPUARMState *env, const ARMCPRegInfo *ri,
bool isread)
{
int el = arm_current_el(env);
+ bool mdcr_el2_tdosa = (env->cp15.mdcr_el2 & MDCR_TDOSA) ||
+ (env->cp15.mdcr_el2 & MDCR_TDE) ||
+ (env->cp15.hcr_el2 & HCR_TGE);
- if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TDOSA)
- && !arm_is_secure_below_el3(env)) {
+ if (el < 2 && mdcr_el2_tdosa && !arm_is_secure_below_el3(env)) {
return CP_ACCESS_TRAP_EL2;
}
if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDOSA)) {
@@ -462,9 +464,11 @@ static CPAccessResult access_tdra(CPUARMState *env, const ARMCPRegInfo *ri,
bool isread)
{
int el = arm_current_el(env);
+ bool mdcr_el2_tdra = (env->cp15.mdcr_el2 & MDCR_TDRA) ||
+ (env->cp15.mdcr_el2 & MDCR_TDE) ||
+ (env->cp15.hcr_el2 & HCR_TGE);
- if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TDRA)
- && !arm_is_secure_below_el3(env)) {
+ if (el < 2 && mdcr_el2_tdra && !arm_is_secure_below_el3(env)) {
return CP_ACCESS_TRAP_EL2;
}
if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
@@ -480,9 +484,11 @@ static CPAccessResult access_tda(CPUARMState *env, const ARMCPRegInfo *ri,
bool isread)
{
int el = arm_current_el(env);
+ bool mdcr_el2_tda = (env->cp15.mdcr_el2 & MDCR_TDA) ||
+ (env->cp15.mdcr_el2 & MDCR_TDE) ||
+ (env->cp15.hcr_el2 & HCR_TGE);
- if (el < 2 && (env->cp15.mdcr_el2 & MDCR_TDA)
- && !arm_is_secure_below_el3(env)) {
+ if (el < 2 && mdcr_el2_tda && !arm_is_secure_below_el3(env)) {
return CP_ACCESS_TRAP_EL2;
}
if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TDA)) {
--
2.18.0
^ permalink raw reply related [flat|nested] 47+ messages in thread
* [Qemu-devel] [PULL 35/45] target/arm: Honour HCR_EL2.TGE when raising synchronous exceptions
2018-08-14 18:17 [Qemu-devel] [PULL 00/45] target-arm queue Peter Maydell
` (33 preceding siblings ...)
2018-08-14 18:18 ` [Qemu-devel] [PULL 34/45] target/arm: Honour HCR_EL2.TGE and MDCR_EL2.TDE in debug register access checks Peter Maydell
@ 2018-08-14 18:18 ` Peter Maydell
2018-08-14 18:18 ` [Qemu-devel] [PULL 36/45] target/arm: Provide accessor functions for HCR_EL2.{IMO, FMO, AMO} Peter Maydell
` (10 subsequent siblings)
45 siblings, 0 replies; 47+ messages in thread
From: Peter Maydell @ 2018-08-14 18:18 UTC (permalink / raw)
To: qemu-devel
When we raise a synchronous exception, if HCR_EL2.TGE is set then
exceptions targeting NS EL1 must be redirected to EL2. Implement
this in raise_exception() -- all synchronous exceptions go through
this function.
(Asynchronous exceptions go via arm_cpu_exec_interrupt(), which
already honours HCR_EL2.TGE when it determines the target EL
in arm_phys_excp_target_el().)
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20180724115950.17316-4-peter.maydell@linaro.org
---
target/arm/op_helper.c | 14 ++++++++++++++
1 file changed, 14 insertions(+)
diff --git a/target/arm/op_helper.c b/target/arm/op_helper.c
index f728f25e4bf..d550978b5b9 100644
--- a/target/arm/op_helper.c
+++ b/target/arm/op_helper.c
@@ -33,6 +33,20 @@ static void raise_exception(CPUARMState *env, uint32_t excp,
{
CPUState *cs = CPU(arm_env_get_cpu(env));
+ if ((env->cp15.hcr_el2 & HCR_TGE) &&
+ target_el == 1 && !arm_is_secure(env)) {
+ /*
+ * Redirect NS EL1 exceptions to NS EL2. These are reported with
+ * their original syndrome register value, with the exception of
+ * SIMD/FP access traps, which are reported as uncategorized
+ * (see DDI0487C.a D1.10.4)
+ */
+ target_el = 2;
+ if (syndrome >> ARM_EL_EC_SHIFT == EC_ADVSIMDFPACCESSTRAP) {
+ syndrome = syn_uncategorized();
+ }
+ }
+
assert(!excp_is_internal(excp));
cs->exception_index = excp;
env->exception.syndrome = syndrome;
--
2.18.0
^ permalink raw reply related [flat|nested] 47+ messages in thread
* [Qemu-devel] [PULL 36/45] target/arm: Provide accessor functions for HCR_EL2.{IMO, FMO, AMO}
2018-08-14 18:17 [Qemu-devel] [PULL 00/45] target-arm queue Peter Maydell
` (34 preceding siblings ...)
2018-08-14 18:18 ` [Qemu-devel] [PULL 35/45] target/arm: Honour HCR_EL2.TGE when raising synchronous exceptions Peter Maydell
@ 2018-08-14 18:18 ` Peter Maydell
2018-08-14 18:18 ` [Qemu-devel] [PULL 37/45] target/arm: Treat SCTLR_EL1.M as if it were zero when HCR_EL2.TGE is set Peter Maydell
` (9 subsequent siblings)
45 siblings, 0 replies; 47+ messages in thread
From: Peter Maydell @ 2018-08-14 18:18 UTC (permalink / raw)
To: qemu-devel
The IMO, FMO and AMO bits in HCR_EL2 are defined to "behave as
1 for all purposes other than direct reads" if HCR_EL2.TGE
is set and HCR_EL2.E2H is 0, and to "behave as 0 for all
purposes other than direct reads" if HCR_EL2.TGE is set
and HCR_EL2.E2H is 1.
To avoid having to check E2H and TGE everywhere where we test IMO and
FMO, provide accessors arm_hcr_el2_imo(), arm_hcr_el2_fmo() and
arm_hcr_el2_amo(). We don't implement ARMv8.1-VHE yet, so the E2H
case will never be true, but we include the logic to save effort when
we eventually do get to that.
(Note that in several of these callsites the change doesn't
actually make a difference as either the callsite is handling
TGE specially anyway, or the CPU can't get into that situation
with TGE set; we change everywhere for consistency.)
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20180724115950.17316-5-peter.maydell@linaro.org
---
target/arm/cpu.h | 64 +++++++++++++++++++++++++++++++++++----
hw/intc/arm_gicv3_cpuif.c | 19 ++++++------
target/arm/helper.c | 6 ++--
3 files changed, 71 insertions(+), 18 deletions(-)
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index efb2a8d3f3d..4289c33ef4c 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -1229,6 +1229,12 @@ static inline void xpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
#define HCR_RW (1ULL << 31)
#define HCR_CD (1ULL << 32)
#define HCR_ID (1ULL << 33)
+#define HCR_E2H (1ULL << 34)
+/*
+ * When we actually implement ARMv8.1-VHE we should add HCR_E2H to
+ * HCR_MASK and then clear it again if the feature bit is not set in
+ * hcr_write().
+ */
#define HCR_MASK ((1ULL << 34) - 1)
#define SCR_NS (1U << 0)
@@ -2234,6 +2240,54 @@ bool write_cpustate_to_list(ARMCPU *cpu);
# define TARGET_VIRT_ADDR_SPACE_BITS 32
#endif
+/**
+ * arm_hcr_el2_imo(): Return the effective value of HCR_EL2.IMO.
+ * Depending on the values of HCR_EL2.E2H and TGE, this may be
+ * "behaves as 1 for all purposes other than direct read/write" or
+ * "behaves as 0 for all purposes other than direct read/write"
+ */
+static inline bool arm_hcr_el2_imo(CPUARMState *env)
+{
+ switch (env->cp15.hcr_el2 & (HCR_TGE | HCR_E2H)) {
+ case HCR_TGE:
+ return true;
+ case HCR_TGE | HCR_E2H:
+ return false;
+ default:
+ return env->cp15.hcr_el2 & HCR_IMO;
+ }
+}
+
+/**
+ * arm_hcr_el2_fmo(): Return the effective value of HCR_EL2.FMO.
+ */
+static inline bool arm_hcr_el2_fmo(CPUARMState *env)
+{
+ switch (env->cp15.hcr_el2 & (HCR_TGE | HCR_E2H)) {
+ case HCR_TGE:
+ return true;
+ case HCR_TGE | HCR_E2H:
+ return false;
+ default:
+ return env->cp15.hcr_el2 & HCR_FMO;
+ }
+}
+
+/**
+ * arm_hcr_el2_amo(): Return the effective value of HCR_EL2.AMO.
+ */
+static inline bool arm_hcr_el2_amo(CPUARMState *env)
+{
+ switch (env->cp15.hcr_el2 & (HCR_TGE | HCR_E2H)) {
+ case HCR_TGE:
+ return true;
+ case HCR_TGE | HCR_E2H:
+ return false;
+ default:
+ return env->cp15.hcr_el2 & HCR_AMO;
+ }
+}
+
static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
unsigned int target_el)
{
@@ -2261,15 +2315,13 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
break;
case EXCP_VFIQ:
- if (secure || !(env->cp15.hcr_el2 & HCR_FMO)
- || (env->cp15.hcr_el2 & HCR_TGE)) {
+ if (secure || !arm_hcr_el2_fmo(env) || (env->cp15.hcr_el2 & HCR_TGE)) {
/* VFIQs are only taken when hypervized and non-secure. */
return false;
}
return !(env->daif & PSTATE_F);
case EXCP_VIRQ:
- if (secure || !(env->cp15.hcr_el2 & HCR_IMO)
- || (env->cp15.hcr_el2 & HCR_TGE)) {
+ if (secure || !arm_hcr_el2_imo(env) || (env->cp15.hcr_el2 & HCR_TGE)) {
/* VIRQs are only taken when hypervized and non-secure. */
return false;
}
@@ -2308,7 +2360,7 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
* to the CPSR.F setting otherwise we further assess the state
* below.
*/
- hcr = (env->cp15.hcr_el2 & HCR_FMO);
+ hcr = arm_hcr_el2_fmo(env);
scr = (env->cp15.scr_el3 & SCR_FIQ);
/* When EL3 is 32-bit, the SCR.FW bit controls whether the
@@ -2325,7 +2377,7 @@ static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
* when setting the target EL, so it does not have a further
* affect here.
*/
- hcr = (env->cp15.hcr_el2 & HCR_IMO);
+ hcr = arm_hcr_el2_imo(env);
scr = false;
break;
default:
diff --git a/hw/intc/arm_gicv3_cpuif.c b/hw/intc/arm_gicv3_cpuif.c
index 2a60568d82c..068a8e8e9b9 100644
--- a/hw/intc/arm_gicv3_cpuif.c
+++ b/hw/intc/arm_gicv3_cpuif.c
@@ -85,7 +85,10 @@ static bool icv_access(CPUARMState *env, int hcr_flags)
* * access if NS EL1 and either IMO or FMO == 1:
* CTLR, DIR, PMR, RPR
*/
- return (env->cp15.hcr_el2 & hcr_flags) && arm_current_el(env) == 1
+ bool flagmatch = ((hcr_flags & HCR_IMO) && arm_hcr_el2_imo(env)) ||
+ ((hcr_flags & HCR_FMO) && arm_hcr_el2_fmo(env));
+
+ return flagmatch && arm_current_el(env) == 1
&& !arm_is_secure_below_el3(env);
}
@@ -1549,8 +1552,8 @@ static void icc_dir_write(CPUARMState *env, const ARMCPRegInfo *ri,
/* No need to include !IsSecure in route_*_to_el2 as it's only
* tested in cases where we know !IsSecure is true.
*/
- route_fiq_to_el2 = env->cp15.hcr_el2 & HCR_FMO;
- route_irq_to_el2 = env->cp15.hcr_el2 & HCR_IMO;
+ route_fiq_to_el2 = arm_hcr_el2_fmo(env);
+ route_irq_to_el2 = arm_hcr_el2_imo(env);
switch (arm_current_el(env)) {
case 3:
@@ -1893,7 +1896,7 @@ static CPAccessResult gicv3_irqfiq_access(CPUARMState *env,
switch (el) {
case 1:
if (arm_is_secure_below_el3(env) ||
- ((env->cp15.hcr_el2 & (HCR_IMO | HCR_FMO)) == 0)) {
+ (arm_hcr_el2_imo(env) == 0 && arm_hcr_el2_fmo(env) == 0)) {
r = CP_ACCESS_TRAP_EL3;
}
break;
@@ -1933,7 +1936,7 @@ static CPAccessResult gicv3_dir_access(CPUARMState *env,
static CPAccessResult gicv3_sgi_access(CPUARMState *env,
const ARMCPRegInfo *ri, bool isread)
{
- if ((env->cp15.hcr_el2 & (HCR_IMO | HCR_FMO)) &&
+ if ((arm_hcr_el2_imo(env) || arm_hcr_el2_fmo(env)) &&
arm_current_el(env) == 1 && !arm_is_secure_below_el3(env)) {
/* Takes priority over a possible EL3 trap */
return CP_ACCESS_TRAP_EL2;
@@ -1958,8 +1961,7 @@ static CPAccessResult gicv3_fiq_access(CPUARMState *env,
if (env->cp15.scr_el3 & SCR_FIQ) {
switch (el) {
case 1:
- if (arm_is_secure_below_el3(env) ||
- ((env->cp15.hcr_el2 & HCR_FMO) == 0)) {
+ if (arm_is_secure_below_el3(env) || !arm_hcr_el2_fmo(env)) {
r = CP_ACCESS_TRAP_EL3;
}
break;
@@ -1998,8 +2000,7 @@ static CPAccessResult gicv3_irq_access(CPUARMState *env,
if (env->cp15.scr_el3 & SCR_IRQ) {
switch (el) {
case 1:
- if (arm_is_secure_below_el3(env) ||
- ((env->cp15.hcr_el2 & HCR_IMO) == 0)) {
+ if (arm_is_secure_below_el3(env) || !arm_hcr_el2_imo(env)) {
r = CP_ACCESS_TRAP_EL3;
}
break;
diff --git a/target/arm/helper.c b/target/arm/helper.c
index 3cd43cf7018..7b438e43a90 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -6336,15 +6336,15 @@ uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
switch (excp_idx) {
case EXCP_IRQ:
scr = ((env->cp15.scr_el3 & SCR_IRQ) == SCR_IRQ);
- hcr = ((env->cp15.hcr_el2 & HCR_IMO) == HCR_IMO);
+ hcr = arm_hcr_el2_imo(env);
break;
case EXCP_FIQ:
scr = ((env->cp15.scr_el3 & SCR_FIQ) == SCR_FIQ);
- hcr = ((env->cp15.hcr_el2 & HCR_FMO) == HCR_FMO);
+ hcr = arm_hcr_el2_fmo(env);
break;
default:
scr = ((env->cp15.scr_el3 & SCR_EA) == SCR_EA);
- hcr = ((env->cp15.hcr_el2 & HCR_AMO) == HCR_AMO);
+ hcr = arm_hcr_el2_amo(env);
break;
};
--
2.18.0
^ permalink raw reply related [flat|nested] 47+ messages in thread
* [Qemu-devel] [PULL 37/45] target/arm: Treat SCTLR_EL1.M as if it were zero when HCR_EL2.TGE is set
2018-08-14 18:17 [Qemu-devel] [PULL 00/45] target-arm queue Peter Maydell
` (35 preceding siblings ...)
2018-08-14 18:18 ` [Qemu-devel] [PULL 36/45] target/arm: Provide accessor functions for HCR_EL2.{IMO, FMO, AMO} Peter Maydell
@ 2018-08-14 18:18 ` Peter Maydell
2018-08-14 18:18 ` [Qemu-devel] [PULL 38/45] target/arm: Improve exception-taken logging Peter Maydell
` (8 subsequent siblings)
45 siblings, 0 replies; 47+ messages in thread
From: Peter Maydell @ 2018-08-14 18:18 UTC (permalink / raw)
To: qemu-devel
One of the required effects of setting HCR_EL2.TGE is that when
SCR_EL3.NS is 1 then SCTLR_EL1.M must behave as if it is zero for
all purposes except direct reads. That is, it effectively disables
the MMU for the NS EL0/EL1 translation regime.
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20180724115950.17316-6-peter.maydell@linaro.org
---
target/arm/helper.c | 8 ++++++++
1 file changed, 8 insertions(+)
diff --git a/target/arm/helper.c b/target/arm/helper.c
index 7b438e43a90..62f63e4e5b9 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -8396,6 +8396,14 @@ static inline bool regime_translation_disabled(CPUARMState *env,
if (mmu_idx == ARMMMUIdx_S2NS) {
return (env->cp15.hcr_el2 & HCR_VM) == 0;
}
+
+ if (env->cp15.hcr_el2 & HCR_TGE) {
+ /* TGE means that NS EL0/1 act as if SCTLR_EL1.M is zero */
+ if (!regime_is_secure(env, mmu_idx) && regime_el(env, mmu_idx) == 1) {
+ return true;
+ }
+ }
+
return (regime_sctlr(env, mmu_idx) & SCTLR_M) == 0;
}
--
2.18.0
^ permalink raw reply related [flat|nested] 47+ messages in thread
* [Qemu-devel] [PULL 38/45] target/arm: Improve exception-taken logging
2018-08-14 18:17 [Qemu-devel] [PULL 00/45] target-arm queue Peter Maydell
` (36 preceding siblings ...)
2018-08-14 18:18 ` [Qemu-devel] [PULL 37/45] target/arm: Treat SCTLR_EL1.M as if it were zero when HCR_EL2.TGE is set Peter Maydell
@ 2018-08-14 18:18 ` Peter Maydell
2018-08-14 18:18 ` [Qemu-devel] [PULL 39/45] target/arm: Initialize exc_secure correctly in do_v7m_exception_exit() Peter Maydell
` (7 subsequent siblings)
45 siblings, 0 replies; 47+ messages in thread
From: Peter Maydell @ 2018-08-14 18:18 UTC (permalink / raw)
To: qemu-devel
Improve the exception-taken logging by logging in
v7m_exception_taken() the exception we're going to take
and whether it is secure/nonsecure.
This requires us to move logging at many callsites from after the
call to before it, so that the logging appears in a sensible order.
(This will make tail-chaining produce more useful logs; for the
current callers of v7m_exception_taken() we know which exception
we're going to take, so custom log messages at the callsite sufficed;
for tail-chaining only v7m_exception_taken() knows the exception
number that we're going to tail-chain to.)
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-id: 20180720145647.8810-2-peter.maydell@linaro.org
---
target/arm/helper.c | 17 +++++++++++------
1 file changed, 11 insertions(+), 6 deletions(-)
diff --git a/target/arm/helper.c b/target/arm/helper.c
index 62f63e4e5b9..1b60c8d66c9 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -6840,6 +6840,8 @@ static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
bool push_failed = false;
armv7m_nvic_get_pending_irq_info(env->nvic, &exc, &targets_secure);
+ qemu_log_mask(CPU_LOG_INT, "...taking pending %s exception %d\n",
+ targets_secure ? "secure" : "nonsecure", exc);
if (arm_feature(env, ARM_FEATURE_V8)) {
if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
@@ -6913,12 +6915,15 @@ static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain,
* we might now want to take a different exception which
* targets a different security state, so try again from the top.
*/
+ qemu_log_mask(CPU_LOG_INT,
+ "...derived exception on callee-saves register stacking");
v7m_exception_taken(cpu, lr, true, true);
return;
}
if (!arm_v7m_load_vector(cpu, exc, targets_secure, &addr)) {
/* Vector load failed: derived exception */
+ qemu_log_mask(CPU_LOG_INT, "...derived exception on vector table load");
v7m_exception_taken(cpu, lr, true, true);
return;
}
@@ -7129,9 +7134,9 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
if (sfault) {
env->v7m.sfsr |= R_V7M_SFSR_INVER_MASK;
armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
- v7m_exception_taken(cpu, excret, true, false);
qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
"stackframe: failed EXC_RETURN.ES validity check\n");
+ v7m_exception_taken(cpu, excret, true, false);
return;
}
@@ -7141,9 +7146,9 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
*/
env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
- v7m_exception_taken(cpu, excret, true, false);
qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
"stackframe: failed exception return integrity check\n");
+ v7m_exception_taken(cpu, excret, true, false);
return;
}
@@ -7198,10 +7203,10 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
/* Take a SecureFault on the current stack */
env->v7m.sfsr |= R_V7M_SFSR_INVIS_MASK;
armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
- v7m_exception_taken(cpu, excret, true, false);
qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
"stackframe: failed exception return integrity "
"signature check\n");
+ v7m_exception_taken(cpu, excret, true, false);
return;
}
@@ -7234,6 +7239,7 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
/* v7m_stack_read() pended a fault, so take it (as a tail
* chained exception on the same stack frame)
*/
+ qemu_log_mask(CPU_LOG_INT, "...derived exception on unstacking\n");
v7m_exception_taken(cpu, excret, true, false);
return;
}
@@ -7270,10 +7276,10 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
env->v7m.secure);
env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
- v7m_exception_taken(cpu, excret, true, false);
qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
"stackframe: failed exception return integrity "
"check\n");
+ v7m_exception_taken(cpu, excret, true, false);
return;
}
}
@@ -7309,9 +7315,9 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, false);
env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
ignore_stackfaults = v7m_push_stack(cpu);
- v7m_exception_taken(cpu, excret, false, ignore_stackfaults);
qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on new stackframe: "
"failed exception return integrity check\n");
+ v7m_exception_taken(cpu, excret, false, ignore_stackfaults);
return;
}
@@ -7727,7 +7733,6 @@ void arm_v7m_cpu_do_interrupt(CPUState *cs)
ignore_stackfaults = v7m_push_stack(cpu);
v7m_exception_taken(cpu, lr, false, ignore_stackfaults);
- qemu_log_mask(CPU_LOG_INT, "... as %d\n", env->v7m.exception);
}
/* Function used to synchronize QEMU's AArch64 register set with AArch32
--
2.18.0
^ permalink raw reply related [flat|nested] 47+ messages in thread
* [Qemu-devel] [PULL 39/45] target/arm: Initialize exc_secure correctly in do_v7m_exception_exit()
2018-08-14 18:17 [Qemu-devel] [PULL 00/45] target-arm queue Peter Maydell
` (37 preceding siblings ...)
2018-08-14 18:18 ` [Qemu-devel] [PULL 38/45] target/arm: Improve exception-taken logging Peter Maydell
@ 2018-08-14 18:18 ` Peter Maydell
2018-08-14 18:18 ` [Qemu-devel] [PULL 40/45] target/arm: Restore M-profile CONTROL.SPSEL before any tailchaining Peter Maydell
` (6 subsequent siblings)
45 siblings, 0 replies; 47+ messages in thread
From: Peter Maydell @ 2018-08-14 18:18 UTC (permalink / raw)
To: qemu-devel
In do_v7m_exception_exit(), we use the exc_secure variable to track
whether the exception we're returning from is secure or non-secure.
Unfortunately the statement initializing this was accidentally
inside an "if (env->v7m.exception != ARMV7M_EXCP_NMI)" conditional,
which meant that we were using the wrong value for NMI handlers.
Move the initialization out to the right place.
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-id: 20180720145647.8810-3-peter.maydell@linaro.org
---
target/arm/helper.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/target/arm/helper.c b/target/arm/helper.c
index 1b60c8d66c9..9d908f1c663 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -7052,6 +7052,7 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
/* For all other purposes, treat ES as 0 (R_HXSR) */
excret &= ~R_V7M_EXCRET_ES_MASK;
}
+ exc_secure = excret & R_V7M_EXCRET_ES_MASK;
}
if (env->v7m.exception != ARMV7M_EXCP_NMI) {
@@ -7062,7 +7063,6 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
* which security state's faultmask to clear. (v8M ARM ARM R_KBNF.)
*/
if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
- exc_secure = excret & R_V7M_EXCRET_ES_MASK;
if (armv7m_nvic_raw_execution_priority(env->nvic) >= 0) {
env->v7m.faultmask[exc_secure] = 0;
}
--
2.18.0
^ permalink raw reply related [flat|nested] 47+ messages in thread
* [Qemu-devel] [PULL 40/45] target/arm: Restore M-profile CONTROL.SPSEL before any tailchaining
2018-08-14 18:17 [Qemu-devel] [PULL 00/45] target-arm queue Peter Maydell
` (38 preceding siblings ...)
2018-08-14 18:18 ` [Qemu-devel] [PULL 39/45] target/arm: Initialize exc_secure correctly in do_v7m_exception_exit() Peter Maydell
@ 2018-08-14 18:18 ` Peter Maydell
2018-08-14 18:18 ` [Qemu-devel] [PULL 41/45] target/arm: Implement tailchaining for M profile cores Peter Maydell
` (5 subsequent siblings)
45 siblings, 0 replies; 47+ messages in thread
From: Peter Maydell @ 2018-08-14 18:18 UTC (permalink / raw)
To: qemu-devel
On exception return for M-profile, we must restore the CONTROL.SPSEL
bit from the EXCRET value before we do any kind of tailchaining,
including for the derived exceptions on integrity check failures.
Otherwise we will give the guest an incorrect EXCRET.SPSEL value on
exception entry for the tailchained exception.
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20180720145647.8810-4-peter.maydell@linaro.org
---
target/arm/helper.c | 16 ++++++++++------
1 file changed, 10 insertions(+), 6 deletions(-)
diff --git a/target/arm/helper.c b/target/arm/helper.c
index 9d908f1c663..559065131a4 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -7131,6 +7131,16 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
}
}
+ /*
+ * Set CONTROL.SPSEL from excret.SPSEL. Since we're still in
+ * Handler mode (and will be until we write the new XPSR.Interrupt
+ * field) this does not switch around the current stack pointer.
+ * We must do this before we do any kind of tailchaining, including
+ * for the derived exceptions on integrity check failures, or we will
+ * give the guest an incorrect EXCRET.SPSEL value on exception entry.
+ */
+ write_v7m_control_spsel_for_secstate(env, return_to_sp_process, exc_secure);
+
if (sfault) {
env->v7m.sfsr |= R_V7M_SFSR_INVER_MASK;
armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
@@ -7152,12 +7162,6 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
return;
}
- /* Set CONTROL.SPSEL from excret.SPSEL. Since we're still in
- * Handler mode (and will be until we write the new XPSR.Interrupt
- * field) this does not switch around the current stack pointer.
- */
- write_v7m_control_spsel_for_secstate(env, return_to_sp_process, exc_secure);
-
switch_v7m_security_state(env, return_to_secure);
{
--
2.18.0
^ permalink raw reply related [flat|nested] 47+ messages in thread
* [Qemu-devel] [PULL 41/45] target/arm: Implement tailchaining for M profile cores
2018-08-14 18:17 [Qemu-devel] [PULL 00/45] target-arm queue Peter Maydell
` (39 preceding siblings ...)
2018-08-14 18:18 ` [Qemu-devel] [PULL 40/45] target/arm: Restore M-profile CONTROL.SPSEL before any tailchaining Peter Maydell
@ 2018-08-14 18:18 ` Peter Maydell
2018-08-14 18:18 ` [Qemu-devel] [PULL 42/45] target/arm: Fix sign of sve_cmpeq_ppzw/sve_cmpne_ppzw Peter Maydell
` (4 subsequent siblings)
45 siblings, 0 replies; 47+ messages in thread
From: Peter Maydell @ 2018-08-14 18:18 UTC (permalink / raw)
To: qemu-devel
Tailchaining is an optimization in handling of exception return
for M-profile cores: if we are about to pop the exception stack
for an exception return, but there is a pending exception which
is higher priority than the priority we are returning to, then
instead of unstacking and then immediately taking the exception
and stacking registers again, we can chain to the pending
exception without unstacking and stacking.
For v6M and v7M it is IMPDEF whether tailchaining happens for pending
exceptions; for v8M this is architecturally required. Implement it
in QEMU for all M-profile cores, since in practice v6M and v7M
hardware implementations generally do have it.
(We were already doing tailchaining for derived exceptions which
happened during exception return, like the validity checks and
stack access failures; these have always been required to be
tailchained for all versions of the architecture.)
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20180720145647.8810-5-peter.maydell@linaro.org
---
target/arm/helper.c | 16 ++++++++++++++++
1 file changed, 16 insertions(+)
diff --git a/target/arm/helper.c b/target/arm/helper.c
index 559065131a4..8b07bf214ec 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -7162,6 +7162,22 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
return;
}
+ /*
+ * Tailchaining: if there is currently a pending exception that
+ * is high enough priority to preempt execution at the level we're
+ * about to return to, then just directly take that exception now,
+ * avoiding an unstack-and-then-stack. Note that now we have
+ * deactivated the previous exception by calling armv7m_nvic_complete_irq()
+ * our current execution priority is already the execution priority we are
+ * returning to -- none of the state we would unstack or set based on
+ * the EXCRET value affects it.
+ */
+ if (armv7m_nvic_can_take_pending_exception(env->nvic)) {
+ qemu_log_mask(CPU_LOG_INT, "...tailchaining to pending exception\n");
+ v7m_exception_taken(cpu, excret, true, false);
+ return;
+ }
+
switch_v7m_security_state(env, return_to_secure);
{
--
2.18.0
^ permalink raw reply related [flat|nested] 47+ messages in thread
* [Qemu-devel] [PULL 42/45] target/arm: Fix sign of sve_cmpeq_ppzw/sve_cmpne_ppzw
2018-08-14 18:17 [Qemu-devel] [PULL 00/45] target-arm queue Peter Maydell
` (40 preceding siblings ...)
2018-08-14 18:18 ` [Qemu-devel] [PULL 41/45] target/arm: Implement tailchaining for M profile cores Peter Maydell
@ 2018-08-14 18:18 ` Peter Maydell
2018-08-14 18:18 ` [Qemu-devel] [PULL 43/45] target/arm: Fix typo in do_sat_addsub_64 Peter Maydell
` (3 subsequent siblings)
45 siblings, 0 replies; 47+ messages in thread
From: Peter Maydell @ 2018-08-14 18:18 UTC (permalink / raw)
To: qemu-devel
From: Richard Henderson <richard.henderson@linaro.org>
The normal vector element is sign-extended before
comparing with the wide vector element.
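A minimal sketch (made-up element values, not the SVE helper itself) of why the narrow element must be sign-extended before the widened comparison:
#include <stdint.h>
#include <stdio.h>
int main(void)
{
    uint64_t wide = UINT64_C(0xffffffffffffffff);   /* -1 as a 64-bit element */
    uint8_t narrow = 0xff;                          /* -1 as an 8-bit element */
    /* Zero-extension compares 0xff against 2^64 - 1: not equal. */
    printf("zero-extended: %d\n", (uint64_t)narrow == wide);
    /* Sign-extension (int8_t -> int64_t) compares -1 against -1: equal. */
    printf("sign-extended: %d\n", (uint64_t)(int64_t)(int8_t)narrow == wide);
    return 0;
}
Hence the macro instantiations below switch the narrow TYPE from uintN_t to intN_t for the == and != cases.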
Reported-by: Laurent Desnogues <laurent.desnogues@gmail.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Laurent Desnogues <laurent.desnogues@gmail.com>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Tested-by: Alex Bennée <alex.bennee@linaro.org>
Tested-by: Laurent Desnogues <laurent.desnogues@gmail.com>
Message-id: 20180801123111.3595-2-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/sve_helper.c | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
index 54795c91945..9bd0694d55e 100644
--- a/target/arm/sve_helper.c
+++ b/target/arm/sve_helper.c
@@ -2436,13 +2436,13 @@ uint32_t HELPER(NAME)(void *vd, void *vn, void *vm, void *vg, uint32_t desc) \
#define DO_CMP_PPZW_S(NAME, TYPE, TYPEW, OP) \
DO_CMP_PPZW(NAME, TYPE, TYPEW, OP, H1_4, 0x1111111111111111ull)
-DO_CMP_PPZW_B(sve_cmpeq_ppzw_b, uint8_t, uint64_t, ==)
-DO_CMP_PPZW_H(sve_cmpeq_ppzw_h, uint16_t, uint64_t, ==)
-DO_CMP_PPZW_S(sve_cmpeq_ppzw_s, uint32_t, uint64_t, ==)
+DO_CMP_PPZW_B(sve_cmpeq_ppzw_b, int8_t, uint64_t, ==)
+DO_CMP_PPZW_H(sve_cmpeq_ppzw_h, int16_t, uint64_t, ==)
+DO_CMP_PPZW_S(sve_cmpeq_ppzw_s, int32_t, uint64_t, ==)
-DO_CMP_PPZW_B(sve_cmpne_ppzw_b, uint8_t, uint64_t, !=)
-DO_CMP_PPZW_H(sve_cmpne_ppzw_h, uint16_t, uint64_t, !=)
-DO_CMP_PPZW_S(sve_cmpne_ppzw_s, uint32_t, uint64_t, !=)
+DO_CMP_PPZW_B(sve_cmpne_ppzw_b, int8_t, uint64_t, !=)
+DO_CMP_PPZW_H(sve_cmpne_ppzw_h, int16_t, uint64_t, !=)
+DO_CMP_PPZW_S(sve_cmpne_ppzw_s, int32_t, uint64_t, !=)
DO_CMP_PPZW_B(sve_cmpgt_ppzw_b, int8_t, int64_t, >)
DO_CMP_PPZW_H(sve_cmpgt_ppzw_h, int16_t, int64_t, >)
--
2.18.0
^ permalink raw reply related [flat|nested] 47+ messages in thread
* [Qemu-devel] [PULL 43/45] target/arm: Fix typo in do_sat_addsub_64
2018-08-14 18:17 [Qemu-devel] [PULL 00/45] target-arm queue Peter Maydell
` (41 preceding siblings ...)
2018-08-14 18:18 ` [Qemu-devel] [PULL 42/45] target/arm: Fix sign of sve_cmpeq_ppzw/sve_cmpne_ppzw Peter Maydell
@ 2018-08-14 18:18 ` Peter Maydell
2018-08-14 18:18 ` [Qemu-devel] [PULL 44/45] target/arm: Reorganize SVE WHILE Peter Maydell
` (2 subsequent siblings)
45 siblings, 0 replies; 47+ messages in thread
From: Peter Maydell @ 2018-08-14 18:18 UTC (permalink / raw)
To: qemu-devel
From: Richard Henderson <richard.henderson@linaro.org>
Used the wrong temporary in the computation of subtractive overflow.
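For reference, a plain-C sketch of the check the corrected TCG sequence implements: for r = a - b, signed overflow occurs exactly when the operands' signs differ (a ^ b) and the result's sign differs from a's (a ^ r), so the second XOR has to use the actual difference rather than a stale temporary.
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
/* Signed overflow of a - b: operands differ in sign AND the result's
 * sign differs from a's sign. */
static bool sub_overflows(int64_t a, int64_t b)
{
    uint64_t r = (uint64_t)a - (uint64_t)b;
    return ((((uint64_t)a ^ (uint64_t)b) & ((uint64_t)a ^ r)) >> 63) != 0;
}
int main(void)
{
    printf("%d\n", sub_overflows(INT64_MIN, 1));   /* 1: wraps past INT64_MIN */
    printf("%d\n", sub_overflows(10, 3));          /* 0: no overflow */
    return 0;
}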
Reported-by: Laurent Desnogues <laurent.desnogues@gmail.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Laurent Desnogues <laurent.desnogues@gmail.com>
Tested-by: Alex Bennée <alex.bennee@linaro.org>
Tested-by: Laurent Desnogues <laurent.desnogues@gmail.com>
Message-id: 20180801123111.3595-3-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/translate-sve.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index 374051cd20a..9dd4c38bab7 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -1625,7 +1625,7 @@ static void do_sat_addsub_64(TCGv_i64 reg, TCGv_i64 val, bool u, bool d)
/* Detect signed overflow for subtraction. */
tcg_gen_xor_i64(t0, reg, val);
tcg_gen_sub_i64(t1, reg, val);
- tcg_gen_xor_i64(reg, reg, t0);
+ tcg_gen_xor_i64(reg, reg, t1);
tcg_gen_and_i64(t0, t0, reg);
/* Bound the result. */
--
2.18.0
^ permalink raw reply related [flat|nested] 47+ messages in thread
* [Qemu-devel] [PULL 44/45] target/arm: Reorganize SVE WHILE
2018-08-14 18:17 [Qemu-devel] [PULL 00/45] target-arm queue Peter Maydell
` (42 preceding siblings ...)
2018-08-14 18:18 ` [Qemu-devel] [PULL 43/45] target/arm: Fix typo in do_sat_addsub_64 Peter Maydell
@ 2018-08-14 18:18 ` Peter Maydell
2018-08-14 18:18 ` [Qemu-devel] [PULL 45/45] target/arm: Fix typo in helper_sve_movz_d Peter Maydell
2018-08-15 12:29 ` [Qemu-devel] [PULL 00/45] target-arm queue Peter Maydell
45 siblings, 0 replies; 47+ messages in thread
From: Peter Maydell @ 2018-08-14 18:18 UTC (permalink / raw)
To: qemu-devel
From: Richard Henderson <richard.henderson@linaro.org>
The pseudocode for this operation is an increment + compare loop,
so comparing <= the maximum integer produces an all-true predicate.
Rather than bounding the count in both the inline code and the helper, pass the
helper the number of predicate bits to set instead of the number
of predicate elements to set.
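A rough C model of the reorganized count computation (simplified to unsigned 64-bit operands only; names are illustrative, not the translator's) showing the max-integer special case and the element-to-bit scaling now done before calling the helper:
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
/* vsz_bytes: vector length in bytes; esz: log2 element size (0=B .. 3=D). */
static uint64_t while_pred_bits(uint64_t op0, uint64_t op1, bool eq,
                                unsigned vsz_bytes, unsigned esz)
{
    uint64_t max_elems = vsz_bytes >> esz;
    uint64_t count = op1 - op0;
    if (eq) {
        count += 1;
        if (op1 == UINT64_MAX) {
            /* The +1 above overflowed; the increment+compare loop in the
             * pseudocode is all-true here, so use the full vector. */
            count = max_elems;
        }
    }
    if (count > max_elems) {
        count = max_elems;              /* bound to the vector length */
    }
    if (eq ? op0 > op1 : op0 >= op1) {
        count = 0;                      /* condition false from the start */
    }
    return count << esz;                /* scale elements to predicate bits */
}
int main(void)
{
    /* 256-bit vector, WHILELE of doublewords 0..2: 3 elements, 24 bits. */
    printf("%llu\n", (unsigned long long)while_pred_bits(0, 2, true, 32, 3));
    return 0;
}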
Reported-by: Laurent Desnogues <laurent.desnogues@gmail.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Laurent Desnogues <laurent.desnogues@gmail.com>
Tested-by: Alex Bennée <alex.bennee@linaro.org>
Tested-by: Laurent Desnogues <laurent.desnogues@gmail.com>
Message-id: 20180801123111.3595-4-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/sve_helper.c | 5 ----
target/arm/translate-sve.c | 49 +++++++++++++++++++++++++-------------
2 files changed, 32 insertions(+), 22 deletions(-)
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
index 9bd0694d55e..87594a8adb4 100644
--- a/target/arm/sve_helper.c
+++ b/target/arm/sve_helper.c
@@ -2846,11 +2846,6 @@ uint32_t HELPER(sve_while)(void *vd, uint32_t count, uint32_t pred_desc)
return flags;
}
- /* Scale from predicate element count to bits. */
- count <<= esz;
- /* Bound to the bits in the predicate. */
- count = MIN(count, oprsz * 8);
-
/* Set all of the requested bits. */
for (i = 0; i < count / 64; ++i) {
d->p[i] = esz_mask;
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index 9dd4c38bab7..89efc80ee70 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -3173,19 +3173,19 @@ static bool trans_CTERM(DisasContext *s, arg_CTERM *a, uint32_t insn)
static bool trans_WHILE(DisasContext *s, arg_WHILE *a, uint32_t insn)
{
- if (!sve_access_check(s)) {
- return true;
- }
-
- TCGv_i64 op0 = read_cpu_reg(s, a->rn, 1);
- TCGv_i64 op1 = read_cpu_reg(s, a->rm, 1);
- TCGv_i64 t0 = tcg_temp_new_i64();
- TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i64 op0, op1, t0, t1, tmax;
TCGv_i32 t2, t3;
TCGv_ptr ptr;
unsigned desc, vsz = vec_full_reg_size(s);
TCGCond cond;
+ if (!sve_access_check(s)) {
+ return true;
+ }
+
+ op0 = read_cpu_reg(s, a->rn, 1);
+ op1 = read_cpu_reg(s, a->rm, 1);
+
if (!a->sf) {
if (a->u) {
tcg_gen_ext32u_i64(op0, op0);
@@ -3198,32 +3198,47 @@ static bool trans_WHILE(DisasContext *s, arg_WHILE *a, uint32_t insn)
/* For the helper, compress the different conditions into a computation
* of how many iterations for which the condition is true.
- *
- * This is slightly complicated by 0 <= UINT64_MAX, which is nominally
- * 2**64 iterations, overflowing to 0. Of course, predicate registers
- * aren't that large, so any value >= predicate size is sufficient.
*/
+ t0 = tcg_temp_new_i64();
+ t1 = tcg_temp_new_i64();
tcg_gen_sub_i64(t0, op1, op0);
- /* t0 = MIN(op1 - op0, vsz). */
- tcg_gen_movi_i64(t1, vsz);
- tcg_gen_umin_i64(t0, t0, t1);
+ tmax = tcg_const_i64(vsz >> a->esz);
if (a->eq) {
/* Equality means one more iteration. */
tcg_gen_addi_i64(t0, t0, 1);
+
+ /* If op1 is max (un)signed integer (and the only time the addition
+ * above could overflow), then we produce an all-true predicate by
+ * setting the count to the vector length. This is because the
+ * pseudocode is described as an increment + compare loop, and the
+ * max integer would always compare true.
+ */
+ tcg_gen_movi_i64(t1, (a->sf
+ ? (a->u ? UINT64_MAX : INT64_MAX)
+ : (a->u ? UINT32_MAX : INT32_MAX)));
+ tcg_gen_movcond_i64(TCG_COND_EQ, t0, op1, t1, tmax, t0);
}
- /* t0 = (condition true ? t0 : 0). */
+ /* Bound to the maximum. */
+ tcg_gen_umin_i64(t0, t0, tmax);
+ tcg_temp_free_i64(tmax);
+
+ /* Set the count to zero if the condition is false. */
cond = (a->u
? (a->eq ? TCG_COND_LEU : TCG_COND_LTU)
: (a->eq ? TCG_COND_LE : TCG_COND_LT));
tcg_gen_movi_i64(t1, 0);
tcg_gen_movcond_i64(cond, t0, op0, op1, t0, t1);
+ tcg_temp_free_i64(t1);
+ /* Since we're bounded, pass as a 32-bit type. */
t2 = tcg_temp_new_i32();
tcg_gen_extrl_i64_i32(t2, t0);
tcg_temp_free_i64(t0);
- tcg_temp_free_i64(t1);
+
+ /* Scale elements to bits. */
+ tcg_gen_shli_i32(t2, t2, a->esz);
desc = (vsz / 8) - 2;
desc = deposit32(desc, SIMD_DATA_SHIFT, 2, a->esz);
--
2.18.0
^ permalink raw reply related [flat|nested] 47+ messages in thread
* [Qemu-devel] [PULL 45/45] target/arm: Fix typo in helper_sve_movz_d
2018-08-14 18:17 [Qemu-devel] [PULL 00/45] target-arm queue Peter Maydell
` (43 preceding siblings ...)
2018-08-14 18:18 ` [Qemu-devel] [PULL 44/45] target/arm: Reorganize SVE WHILE Peter Maydell
@ 2018-08-14 18:18 ` Peter Maydell
2018-08-15 12:29 ` [Qemu-devel] [PULL 00/45] target-arm queue Peter Maydell
45 siblings, 0 replies; 47+ messages in thread
From: Peter Maydell @ 2018-08-14 18:18 UTC (permalink / raw)
To: qemu-devel
From: Richard Henderson <richard.henderson@linaro.org>
Reported-by: Laurent Desnogues <laurent.desnogues@gmail.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Laurent Desnogues <laurent.desnogues@gmail.com>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Tested-by: Alex Bennée <alex.bennee@linaro.org>
Tested-by: Laurent Desnogues <laurent.desnogues@gmail.com>
Message-id: 20180801123111.3595-5-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/sve_helper.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
index 87594a8adb4..c3cbec9cf5f 100644
--- a/target/arm/sve_helper.c
+++ b/target/arm/sve_helper.c
@@ -1042,7 +1042,7 @@ void HELPER(sve_movz_d)(void *vd, void *vn, void *vg, uint32_t desc)
uint64_t *d = vd, *n = vn;
uint8_t *pg = vg;
for (i = 0; i < opr_sz; i += 1) {
- d[i] = n[1] & -(uint64_t)(pg[H1(i)] & 1);
+ d[i] = n[i] & -(uint64_t)(pg[H1(i)] & 1);
}
}
--
2.18.0
^ permalink raw reply related [flat|nested] 47+ messages in thread
* Re: [Qemu-devel] [PULL 00/45] target-arm queue
2018-08-14 18:17 [Qemu-devel] [PULL 00/45] target-arm queue Peter Maydell
` (44 preceding siblings ...)
2018-08-14 18:18 ` [Qemu-devel] [PULL 45/45] target/arm: Fix typo in helper_sve_movz_d Peter Maydell
@ 2018-08-15 12:29 ` Peter Maydell
45 siblings, 0 replies; 47+ messages in thread
From: Peter Maydell @ 2018-08-15 12:29 UTC (permalink / raw)
To: QEMU Developers
On 14 August 2018 at 19:17, Peter Maydell <peter.maydell@linaro.org> wrote:
> First pullreq of the 3.1 release cycle, with lots of
> Arm related patches accumulated during freeze. Most
> notable here is Luc's GICv2 virtualization support and
> my execute-from-MMIO patches.
>
> I stopped looking at my to-review queue towards the
> end of freeze, since 45 patches is already pushing what
> I consider a reasonable sized pullreq; once this goes into
> master I'll start working through it again.
>
> thanks
> -- PMM
>
> The following changes since commit 38441756b70eec5807b5f60dad11a93a91199866:
>
> Update version for v3.0.0 release (2018-08-14 16:38:43 +0100)
>
> are available in the Git repository at:
>
> git://git.linaro.org/people/pmaydell/qemu-arm.git tags/pull-target-arm-20180814
>
> for you to fetch changes up to 054e7adf4e64e4acb3b033348ebf7cc871baa34f:
>
> target/arm: Fix typo in helper_sve_movz_d (2018-08-14 17:17:22 +0100)
>
Applied, thanks.
-- PMM
^ permalink raw reply [flat|nested] 47+ messages in thread