From: "Alex Bennée" <alex.bennee@linaro.org>
To: qemu-devel@nongnu.org
Cc: "Alex Bennée" <alex.bennee@linaro.org>,
qemu-arm@nongnu.org,
"Richard Henderson" <richard.henderson@linaro.org>,
"Claudio Fontana" <cfontana@suse.de>,
"Peter Maydell" <peter.maydell@linaro.org>
Subject: [PATCH v16 25/99] target/arm: tcg: fix comment style before move to cpu-mmu
Date: Fri, 4 Jun 2021 16:51:58 +0100 [thread overview]
Message-ID: <20210604155312.15902-26-alex.bennee@linaro.org> (raw)
In-Reply-To: <20210604155312.15902-1-alex.bennee@linaro.org>
From: Claudio Fontana <cfontana@suse.de>
Before exporting some functionality from helper.c into a new module,
fix the comment style of those functions.
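
For reference, the style change applied by every hunk below is of this
shape (an illustrative sketch only; the diff itself is the authoritative
change):

    /* Old style: comment text begins on the same line as the
     * opening marker.
     */

    /*
     * New style: the opening marker sits on a line of its own,
     * matching QEMU's preferred multi-line comment layout.
     */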
Signed-off-by: Claudio Fontana <cfontana@suse.de>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
---
target/arm/tcg/helper.c | 152 ++++++++++++++++++++++++++--------------
1 file changed, 101 insertions(+), 51 deletions(-)
diff --git a/target/arm/tcg/helper.c b/target/arm/tcg/helper.c
index a66c1f0b9e..2a5022032c 100644
--- a/target/arm/tcg/helper.c
+++ b/target/arm/tcg/helper.c
@@ -10477,7 +10477,8 @@ static inline bool regime_translation_disabled(CPUARMState *env,
return false;
case 0:
default:
- /* HFNMIENA set and ENABLE clear is UNPREDICTABLE, but
+ /*
+ * HFNMIENA set and ENABLE clear is UNPREDICTABLE, but
* we warned about that in armv7m_nvic.c when the guest set it.
*/
return true;
@@ -10531,7 +10532,8 @@ static inline uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx,
#endif /* !CONFIG_USER_ONLY */
-/* Convert a possible stage1+2 MMU index into the appropriate
+/*
+ * Convert a possible stage1+2 MMU index into the appropriate
* stage 1 MMU index
*/
static inline ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx)
@@ -10602,7 +10604,8 @@ static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx)
}
}
-/* Translate section/page access permissions to page
+/*
+ * Translate section/page access permissions to page
* R/W protection flags
*
* @env: CPUARMState
@@ -10658,7 +10661,8 @@ static inline int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx,
}
}
-/* Translate section/page access permissions to page
+/*
+ * Translate section/page access permissions to page
* R/W protection flags.
*
* @ap: The 2-bit simple AP (AP[2:1])
@@ -10686,7 +10690,8 @@ simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap)
return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx));
}
-/* Translate S2 section/page access permissions to protection flags
+/*
+ * Translate S2 section/page access permissions to protection flags
*
* @env: CPUARMState
* @s2ap: The 2-bit stage2 access permissions (S2AP)
@@ -10734,7 +10739,8 @@ static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0)
return prot;
}
-/* Translate section/page access permissions to protection flags
+/*
+ * Translate section/page access permissions to protection flags
*
* @env: CPUARMState
* @mmu_idx: MMU index indicating required translation regime
@@ -10771,7 +10777,8 @@ static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64,
return prot_rw;
}
- /* TODO have_wxn should be replaced with
+ /*
+ * TODO have_wxn should be replaced with
* ARM_FEATURE_V8 || (ARM_FEATURE_V7 && ARM_FEATURE_EL2)
* when ARM_FEATURE_EL2 starts getting set. For now we assume all LPAE
* compatible processors have EL2, which is required for [U]WXN.
@@ -11043,7 +11050,8 @@ static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
phys_addr = (desc & 0xfffff000) | (address & 0xfff);
*page_size = 0x1000;
} else {
- /* UNPREDICTABLE in ARMv5; we choose to take a
+ /*
+ * UNPREDICTABLE in ARMv5; we choose to take a
* page translation fault.
*/
fi->type = ARMFault_Translation;
@@ -11109,7 +11117,8 @@ static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
}
type = (desc & 3);
if (type == 0 || (type == 3 && !cpu_isar_feature(aa32_pxn, cpu))) {
- /* Section translation fault, or attempt to use the encoding
+ /*
+ * Section translation fault, or attempt to use the encoding
* which is Reserved on implementations without PXN.
*/
fi->type = ARMFault_Translation;
@@ -11214,7 +11223,8 @@ static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
}
}
if (ns) {
- /* The NS bit will (as required by the architecture) have no effect if
+ /*
+ * The NS bit will (as required by the architecture) have no effect if
* the CPU doesn't support TZ or this is a non-secure translation
* regime, because the attribute will already be non-secure.
*/
@@ -11296,7 +11306,8 @@ static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
return true;
}
-/* Translate from the 4-bit stage 2 representation of
+/*
+ * Translate from the 4-bit stage 2 representation of
* memory attributes (without cache-allocation hints) to
* the 8-bit representation of the stage 1 MAIR registers
* (which includes allocation hints).
@@ -11585,7 +11596,8 @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
stride = 9;
}
- /* Note that QEMU ignores shareability and cacheability attributes,
+ /*
+ * Note that QEMU ignores shareability and cacheability attributes,
* so we don't need to do anything with the SH, ORGN, IRGN fields
* in the TTBCR. Similarly, TTBCR:A1 selects whether we get the
* ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently
@@ -11594,19 +11606,22 @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
*/
ttbr = regime_ttbr(env, mmu_idx, param.select);
- /* Here we should have set up all the parameters for the translation:
+ /*
+ * Here we should have set up all the parameters for the translation:
* inputsize, ttbr, epd, stride, tbi
*/
if (param.epd) {
- /* Translation table walk disabled => Translation fault on TLB miss
+ /*
+ * Translation table walk disabled => Translation fault on TLB miss
* Note: This is always 0 on 64-bit EL2 and EL3.
*/
goto do_fault;
}
if (mmu_idx != ARMMMUIdx_Stage2 && mmu_idx != ARMMMUIdx_Stage2_S) {
- /* The starting level depends on the virtual address size (which can
+ /*
+ * The starting level depends on the virtual address size (which can
* be up to 48 bits) and the translation granule size. It indicates
* the number of strides (stride bits at a time) needed to
* consume the bits of the input address. In the pseudocode this is:
@@ -11619,7 +11634,8 @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
*/
level = 4 - (inputsize - 4) / stride;
} else {
- /* For stage 2 translations the starting level is specified by the
+ /*
+ * For stage 2 translations the starting level is specified by the
* VTCR_EL2.SL0 field (whose interpretation depends on the page size)
*/
uint32_t sl0 = extract32(tcr->raw_tcr, 6, 2);
@@ -11659,7 +11675,8 @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
*/
descaddr &= ~indexmask;
- /* The address field in the descriptor goes up to bit 39 for ARMv7
+ /*
+ * The address field in the descriptor goes up to bit 39 for ARMv7
* but up to bit 47 for ARMv8, but we use the descaddrmask
* up to bit 39 for AArch32, because we don't need other bits in that case
* to construct next descriptor address (anyway they should be all zeroes).
@@ -11667,7 +11684,8 @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
descaddrmask = ((1ull << (aarch64 ? 48 : 40)) - 1) &
~indexmask_grainsize;
- /* Secure accesses start with the page table in secure memory and
+ /*
+ * Secure accesses start with the page table in secure memory and
* can be downgraded to non-secure at any step. Non-secure accesses
* remain non-secure. We implement this by just ORing in the NSTable/NS
* bits at each step.
@@ -11693,7 +11711,8 @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
descaddr = descriptor & descaddrmask;
if ((descriptor & 2) && (level < 3)) {
- /* Table entry. The top five bits are attributes which may
+ /*
+ * Table entry. The top five bits are attributes which may
* propagate down through lower levels of the table (and
* which are all arranged so that 0 means "no effect", so
* we can gather them up by ORing in the bits at each level).
@@ -11703,7 +11722,8 @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
indexmask = indexmask_grainsize;
continue;
}
- /* Block entry at level 1 or 2, or page entry at level 3.
+ /*
+ * Block entry at level 1 or 2, or page entry at level 3.
* These are basically the same thing, although the number
* of bits we pull in from the vaddr varies.
*/
@@ -11725,15 +11745,17 @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
break;
}
attrs |= extract32(tableattrs, 0, 2) << 11; /* XN, PXN */
- /* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
+ /*
+ * The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
* means "force PL1 access only", which means forcing AP[1] to 0.
*/
attrs &= ~(extract32(tableattrs, 2, 1) << 4); /* !APT[0] => AP[1] */
attrs |= extract32(tableattrs, 3, 1) << 5; /* APT[1] => AP[2] */
break;
}
- /* Here descaddr is the final physical address, and attributes
- * are all in attrs.
+ /*
+ * Here descaddr is the final physical address,
+ * and attributes are all in attrs.
*/
fault_type = ARMFault_AccessFlag;
if ((attrs & (1 << 8)) == 0) {
@@ -11760,7 +11782,8 @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
}
if (ns) {
- /* The NS bit will (as required by the architecture) have no effect if
+ /*
+ * The NS bit will (as required by the architecture) have no effect if
* the CPU doesn't support TZ or this is a non-secure translation
* regime, because the attribute will already be non-secure.
*/
@@ -11814,7 +11837,8 @@ static inline void get_phys_addr_pmsav7_default(CPUARMState *env,
break;
}
} else {
- /* Default system address map for M profile cores.
+ /*
+ * Default system address map for M profile cores.
* The architecture specifies which regions are execute-never;
* at the MPU level no other checks are defined.
*/
@@ -11840,7 +11864,8 @@ static inline void get_phys_addr_pmsav7_default(CPUARMState *env,
static bool pmsav7_use_background_region(ARMCPU *cpu,
ARMMMUIdx mmu_idx, bool is_user)
{
- /* Return true if we should use the default memory map as a
+ /*
+ * Return true if we should use the default memory map as a
* "background" region if there are no hits against any MPU regions.
*/
CPUARMState *env = &cpu->env;
@@ -11866,7 +11891,8 @@ static inline bool m_is_ppb_region(CPUARMState *env, uint32_t address)
static inline bool m_is_system_region(CPUARMState *env, uint32_t address)
{
- /* True if address is in the M profile system region
+ /*
+ * True if address is in the M profile system region
* 0xe0000000 - 0xffffffff
*/
return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7;
@@ -11888,7 +11914,8 @@ static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
if (regime_translation_disabled(env, mmu_idx) ||
m_is_ppb_region(env, address)) {
- /* MPU disabled or M profile PPB access: use default memory map.
+ /*
+ * MPU disabled or M profile PPB access: use default memory map.
* The other case which uses the default memory map in the
* v7M ARM ARM pseudocode is exception vector reads from the vector
* table. In QEMU those accesses are done in arm_v7m_load_vector(),
@@ -11954,7 +11981,8 @@ static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
srdis_mask = srdis ? 0x3 : 0x0;
for (i = 2; i <= 8 && rsize < TARGET_PAGE_BITS; i *= 2) {
- /* This will check in groups of 2, 4 and then 8, whether
+ /*
+ * This will check in groups of 2, 4 and then 8, whether
* the subregion bits are consistent. rsize is incremented
* back up to give the region size, considering consistent
* adjacent subregions as one region. Stop testing if rsize
@@ -12062,7 +12090,8 @@ static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
static bool v8m_is_sau_exempt(CPUARMState *env,
uint32_t address, MMUAccessType access_type)
{
- /* The architecture specifies that certain address ranges are
+ /*
+ * The architecture specifies that certain address ranges are
* exempt from v8M SAU/IDAU checks.
*/
return
@@ -12078,7 +12107,8 @@ void v8m_security_lookup(CPUARMState *env, uint32_t address,
MMUAccessType access_type, ARMMMUIdx mmu_idx,
V8M_SAttributes *sattrs)
{
- /* Look up the security attributes for this address. Compare the
+ /*
+ * Look up the security attributes for this address. Compare the
* pseudocode SecurityCheck() function.
* We assume the caller has zero-initialized *sattrs.
*/
@@ -12129,7 +12159,8 @@ void v8m_security_lookup(CPUARMState *env, uint32_t address,
sattrs->subpage = true;
}
if (sattrs->srvalid) {
- /* If we hit in more than one region then we must report
+ /*
+ * If we hit in more than one region then we must report
* as Secure, not NS-Callable, with no valid region
* number info.
*/
@@ -12187,7 +12218,8 @@ bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
int *prot, bool *is_subpage,
ARMMMUFaultInfo *fi, uint32_t *mregion)
{
- /* Perform a PMSAv8 MPU lookup (without also doing the SAU check
+ /*
+ * Perform a PMSAv8 MPU lookup (without also doing the SAU check
* that a full phys-to-virt translation does).
* mregion is (if not NULL) set to the region number which matched,
* or -1 if no region number is returned (MPU off, address did not
@@ -12211,7 +12243,8 @@ bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
*mregion = -1;
}
- /* Unlike the ARM ARM pseudocode, we don't need to check whether this
+ /*
+ * Unlike the ARM ARM pseudocode, we don't need to check whether this
* was an exception vector read from the vector table (which is always
* done using the default system address map), because those accesses
* are done in arm_v7m_load_vector(), which always does a direct
@@ -12228,7 +12261,8 @@ bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
/* region search */
- /* Note that the base address is bits [31:5] from the register
+ /*
+ * Note that the base address is bits [31:5] from the register
* with bits [4:0] all zeroes, but the limit address is bits
* [31:5] from the register with bits [4:0] all ones.
*/
@@ -12264,7 +12298,8 @@ bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
}
if (matchregion != -1) {
- /* Multiple regions match -- always a failure (unlike
+ /*
+ * Multiple regions match -- always a failure (unlike
* PMSAv7 where highest-numbered-region wins)
*/
fi->type = ARMFault_Permission;
@@ -12304,7 +12339,8 @@ bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
if (*prot && !xn && !(pxn && !is_user)) {
*prot |= PAGE_EXEC;
}
- /* We don't need to look the attribute up in the MAIR0/MAIR1
+ /*
+ * We don't need to look the attribute up in the MAIR0/MAIR1
* registers because that only tells us about cacheability.
*/
if (mregion) {
@@ -12332,7 +12368,8 @@ static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
v8m_security_lookup(env, address, access_type, mmu_idx, &sattrs);
if (access_type == MMU_INST_FETCH) {
- /* Instruction fetches always use the MMU bank and the
+ /*
+ * Instruction fetches always use the MMU bank and the
* transaction attribute determined by the fetch address,
* regardless of CPU state. This is painful for QEMU
* to handle, because it would mean we need to encode
@@ -12361,14 +12398,16 @@ static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
return true;
}
} else {
- /* For data accesses we always use the MMU bank indicated
+ /*
+ * For data accesses we always use the MMU bank indicated
* by the current CPU state, but the security attributes
* might downgrade a secure access to nonsecure.
*/
if (sattrs.ns) {
txattrs->secure = false;
} else if (!secure) {
- /* NS access to S memory must fault.
+ /*
+ * NS access to S memory must fault.
* Architecturally we should first check whether the
* MPU information for this address indicates that we
* are doing an unaligned access to Device memory, which
@@ -12416,8 +12455,10 @@ static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
continue;
}
mask = 1 << ((base >> 1) & 0x1f);
- /* Keep this shift separate from the above to avoid an
- (undefined) << 32. */
+ /*
+ * Keep this shift separate from the above to avoid an
+ * (undefined) << 32
+ */
mask = (mask << 1) - 1;
if (((base ^ address) & ~mask) == 0) {
break;
@@ -12477,7 +12518,8 @@ static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
return false;
}
-/* Combine either inner or outer cacheability attributes for normal
+/*
+ * Combine either inner or outer cacheability attributes for normal
* memory, according to table D4-42 and pseudocode procedure
* CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM).
*
@@ -12493,7 +12535,8 @@ static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2)
/* stage 1 write-through takes precedence */
return s1;
} else if (extract32(s2, 2, 2) == 2) {
- /* stage 2 write-through takes precedence, but the allocation hint
+ /*
+ * stage 2 write-through takes precedence, but the allocation hint
* is still taken from stage 1
*/
return (2 << 2) | extract32(s1, 0, 2);
@@ -12502,7 +12545,8 @@ static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2)
}
}
-/* Combine S1 and S2 cacheability/shareability attributes, per D4.5.4
+/*
+ * Combine S1 and S2 cacheability/shareability attributes, per D4.5.4
* and CombineS1S2Desc()
*
* @s1: Attributes from stage 1 walk
@@ -12552,7 +12596,8 @@ static ARMCacheAttrs combine_cacheattrs(ARMCacheAttrs s1, ARMCacheAttrs s2)
ret.attrs = 0xc; /* GRE */
}
- /* Any location for which the resultant memory type is any
+ /*
+ * Any location for which the resultant memory type is any
* type of Device memory is always treated as Outer Shareable.
*/
ret.shareability = 2;
@@ -12562,7 +12607,8 @@ static ARMCacheAttrs combine_cacheattrs(ARMCacheAttrs s1, ARMCacheAttrs s2)
| combine_cacheattr_nibble(s1lo, s2lo);
if (ret.attrs == 0x44) {
- /* Any location for which the resultant memory type is Normal
+ /*
+ * Any location for which the resultant memory type is Normal
* Inner Non-cacheable, Outer Non-cacheable is always treated
* as Outer Shareable.
*/
@@ -12579,7 +12625,8 @@ static ARMCacheAttrs combine_cacheattrs(ARMCacheAttrs s1, ARMCacheAttrs s2)
}
-/* get_phys_addr - get the physical address for this virtual address
+/*
+ * get_phys_addr - get the physical address for this virtual address
*
* Find the physical address corresponding to the given virtual address,
* by doing a translation table walk on MMU based systems or using the
@@ -12614,7 +12661,8 @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
ARMMMUIdx s1_mmu_idx = stage_1_mmu_idx(mmu_idx);
if (mmu_idx != s1_mmu_idx) {
- /* Call ourselves recursively to do the stage 1 and then stage 2
+ /*
+ * Call ourselves recursively to do the stage 1 and then stage 2
* translations if mmu_idx is a two-stage regime.
*/
if (arm_feature(env, ARM_FEATURE_EL2)) {
@@ -12686,14 +12734,16 @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
}
}
- /* The page table entries may downgrade secure to non-secure, but
+ /*
+ * The page table entries may downgrade secure to non-secure, but
* cannot upgrade an non-secure translation regime's attributes
* to secure.
*/
attrs->secure = regime_is_secure(env, mmu_idx);
attrs->user = regime_is_user(env, mmu_idx);
- /* Fast Context Switch Extension. This doesn't exist at all in v8.
+ /*
+ * Fast Context Switch Extension. This doesn't exist at all in v8.
* In v7 and earlier it affects all stage 1 translations.
*/
if (address < 0x02000000 && mmu_idx != ARMMMUIdx_Stage2
--
2.20.1
Thread overview: 204+ messages
2021-06-04 15:51 [PATCH v16 00/99] arm tcg/kvm refactor and split with kvm only support Alex Bennée
2021-06-04 15:51 ` [PATCH v16 01/99] MAINTAINERS: Add qtest/arm-cpu-features.c to ARM TCG CPUs section Alex Bennée
2021-06-04 18:26 ` Richard Henderson
2021-06-04 15:51 ` [PATCH v16 02/99] accel: Introduce 'query-accels' QMP command Alex Bennée
2021-06-07 13:07 ` Thomas Huth
2021-06-08 9:07 ` Philippe Mathieu-Daudé
2021-06-08 15:41 ` Markus Armbruster
2021-06-08 15:43 ` Philippe Mathieu-Daudé
2021-06-04 15:51 ` [PATCH v16 03/99] qtest: Add qtest_has_accel() method Alex Bennée
2021-06-07 13:16 ` Thomas Huth
2021-06-08 9:48 ` Philippe Mathieu-Daudé
2021-06-04 15:51 ` [PATCH v16 04/99] qtest/arm-cpu-features: Use generic qtest_has_accel() to check for KVM Alex Bennée
2021-06-04 21:01 ` Richard Henderson
2021-06-07 13:22 ` Thomas Huth
2021-06-08 8:22 ` Philippe Mathieu-Daudé
2021-06-08 10:49 ` Philippe Mathieu-Daudé
2021-06-08 12:39 ` Philippe Mathieu-Daudé
2021-06-08 14:28 ` Eric Blake
2021-06-08 17:20 ` Claudio Fontana
2021-06-04 15:51 ` [PATCH v16 05/99] qtest/arm-cpu-features: Restrict sve_tests_sve_off_kvm test to KVM Alex Bennée
2021-06-04 21:02 ` Richard Henderson
2021-06-04 15:51 ` [PATCH v16 06/99] qtest/arm-cpu-features: Remove TCG fallback to KVM specific tests Alex Bennée
2021-06-04 21:03 ` Richard Henderson
2021-06-07 13:28 ` Thomas Huth
2021-06-08 8:23 ` Philippe Mathieu-Daudé
2021-06-04 15:51 ` [PATCH v16 07/99] qtest/arm-cpu-features: Use generic qtest_has_accel() to check for TCG Alex Bennée
2021-06-04 21:04 ` Richard Henderson
2021-06-07 13:32 ` Thomas Huth
2021-06-04 15:51 ` [PATCH v16 08/99] qtest/migration-test: Skip tests if KVM not builtin on s390x/ppc64 Alex Bennée
2021-06-04 21:11 ` Richard Henderson
2021-06-08 8:26 ` Philippe Mathieu-Daudé
2021-06-07 13:33 ` Thomas Huth
2021-06-04 15:51 ` [PATCH v16 09/99] qtest/bios-tables-test: Rename tests not TCG specific Alex Bennée
2021-06-07 13:39 ` Thomas Huth
2021-06-08 8:33 ` Philippe Mathieu-Daudé
2021-06-04 15:51 ` [PATCH v16 10/99] qtest/bios-tables-test: Rename TCG specific tests Alex Bennée
2021-06-07 13:44 ` Thomas Huth
2021-06-04 15:51 ` [PATCH v16 11/99] qtest/bios-tables-test: Make test build-independent from accelerator Alex Bennée
2021-06-04 21:14 ` Richard Henderson
2021-06-07 13:47 ` Thomas Huth
2021-06-04 15:51 ` [PATCH v16 12/99] qtest: Do not restrict bios-tables-test to Aarch64 hosts anymore Alex Bennée
2021-06-04 21:15 ` Richard Henderson
2021-06-07 13:48 ` Thomas Huth
2021-06-04 15:51 ` [PATCH v16 13/99] meson: add target_user_arch Alex Bennée
2021-06-04 18:01 ` Philippe Mathieu-Daudé
2021-06-04 15:51 ` [PATCH v16 14/99] accel: add cpu_reset Alex Bennée
2021-06-04 21:23 ` Richard Henderson
2021-06-04 15:51 ` [PATCH v16 15/99] target/arm: move translate modules to tcg/ Alex Bennée
2021-06-04 15:51 ` [PATCH v16 16/99] target/arm: move helpers " Alex Bennée
2021-06-04 15:51 ` [PATCH v16 17/99] arm: tcg: only build under CONFIG_TCG Alex Bennée
2021-06-04 15:51 ` [PATCH v16 18/99] target/arm: tcg: add sysemu and user subdirs Alex Bennée
2021-06-04 15:51 ` [PATCH v16 19/99] target/arm: tcg: split mte_helper user-only and sysemu code Alex Bennée
2021-06-04 15:51 ` [PATCH v16 20/99] target/arm: tcg: move sysemu-only parts of debug_helper Alex Bennée
2021-06-04 15:51 ` [PATCH v16 21/99] target/arm: tcg: split tlb_helper user-only and sysemu-only parts Alex Bennée
2021-06-04 15:51 ` [PATCH v16 22/99] target/arm: tcg: split m_helper " Alex Bennée
2021-06-04 15:51 ` [PATCH v16 23/99] target/arm: only build psci for TCG Alex Bennée
2021-06-04 15:51 ` [PATCH v16 24/99] target/arm: split off cpu-sysemu.c Alex Bennée
2021-06-04 15:51 ` Alex Bennée [this message]
2021-06-04 15:51 ` [PATCH v16 26/99] target/arm: move physical address translation to cpu-mmu Alex Bennée
2021-06-04 15:52 ` [PATCH v16 27/99] target/arm: fix style in preparation of new cpregs module Alex Bennée
2021-06-04 15:52 ` [PATCH v16 28/99] target/arm: split cpregs from tcg/helper.c Alex Bennée
2021-06-04 15:52 ` [PATCH v16 29/99] target/arm: move cpu definitions to common cpu module Alex Bennée
2021-06-04 15:52 ` [PATCH v16 30/99] target/arm: only perform TCG cpu and machine inits if TCG enabled Alex Bennée
2021-06-04 15:52 ` [PATCH v16 31/99] target/arm: tcg: add stubs for some helpers for non-tcg builds Alex Bennée
2021-06-04 15:52 ` [PATCH v16 32/99] target/arm: move cpsr_read, cpsr_write to cpu_common Alex Bennée
2021-06-04 15:52 ` [PATCH v16 33/99] target/arm: add temporary stub for arm_rebuild_hflags Alex Bennée
2021-06-04 15:52 ` [PATCH v16 34/99] target/arm: move arm_hcr_el2_eff from tcg/ to common_cpu Alex Bennée
2021-06-04 15:52 ` [PATCH v16 35/99] target/arm: split vfp state setting from tcg helpers Alex Bennée
2021-06-04 15:52 ` [PATCH v16 36/99] target/arm: move arm_mmu_idx* to cpu-mmu Alex Bennée
2021-06-04 15:52 ` [PATCH v16 37/99] target/arm: move sve_zcr_len_for_el to common_cpu Alex Bennée
2021-06-04 22:22 ` Richard Henderson
2021-06-04 15:52 ` [PATCH v16 38/99] target/arm: move arm_sctlr away from tcg helpers Alex Bennée
2021-06-04 22:27 ` Richard Henderson
2021-06-04 15:52 ` [PATCH v16 39/99] target/arm: move arm_cpu_list to common_cpu Alex Bennée
2021-06-04 15:52 ` [PATCH v16 40/99] target/arm: move aarch64_sync_32_to_64 (and vv) to cpu code Alex Bennée
2021-06-04 15:52 ` [PATCH v16 41/99] target/arm: new cpu32 ARM 32 bit CPU Class Alex Bennée
2021-06-04 22:53 ` Richard Henderson
2021-06-04 15:52 ` [PATCH v16 42/99] target/arm: split 32bit and 64bit arm dump state Alex Bennée
2021-06-04 22:59 ` Richard Henderson
2021-06-04 15:52 ` [PATCH v16 43/99] target/arm: move a15 cpu model away from the TCG-only models Alex Bennée
2021-06-05 0:27 ` Richard Henderson
2021-06-17 18:01 ` Alex Bennée
2021-06-17 20:35 ` Claudio Fontana
2021-06-04 15:52 ` [PATCH v16 44/99] target/arm: fixup sve_exception_el code style before move Alex Bennée
2021-06-04 15:52 ` [PATCH v16 45/99] target/arm: move sve_exception_el out of TCG helpers Alex Bennée
2021-06-04 15:52 ` [PATCH v16 46/99] target/arm: fix comments style of fp_exception_el before moving it Alex Bennée
2021-06-05 0:32 ` Richard Henderson
2021-06-04 15:52 ` [PATCH v16 47/99] target/arm: move fp_exception_el out of TCG helpers Alex Bennée
2021-06-05 0:35 ` Richard Henderson
2021-06-04 15:52 ` [PATCH v16 48/99] target/arm: remove now useless ifndef from fp_exception_el Alex Bennée
2021-06-05 0:35 ` Richard Henderson
2021-06-04 15:52 ` [PATCH v16 49/99] target/arm: make further preparation for the exception code to move Alex Bennée
2021-06-05 1:34 ` Richard Henderson
2021-06-04 15:52 ` [PATCH v16 50/99] target/arm: fix style of arm_cpu_do_interrupt functions before move Alex Bennée
2021-06-04 15:52 ` [PATCH v16 51/99] target/arm: move exception code out of tcg/helper.c Alex Bennée
2021-06-05 2:19 ` Richard Henderson
2021-06-05 5:56 ` Richard Henderson
2021-06-04 15:52 ` [PATCH v16 52/99] target/arm: rename handle_semihosting to tcg_handle_semihosting Alex Bennée
2021-06-05 2:22 ` Richard Henderson
2021-06-04 15:52 ` [PATCH v16 53/99] target/arm: replace CONFIG_TCG with tcg_enabled Alex Bennée
2021-06-05 2:24 ` Richard Henderson
2021-06-04 15:52 ` [PATCH v16 54/99] target/arm: move TCGCPUOps to tcg/tcg-cpu.c Alex Bennée
2021-06-05 3:39 ` Richard Henderson
2021-06-04 15:52 ` [PATCH v16 55/99] target/arm: move cpu_tcg to tcg/tcg-cpu-models.c Alex Bennée
2021-06-04 15:52 ` [PATCH v16 56/99] target/arm: wrap call to aarch64_sve_change_el in tcg_enabled() Alex Bennée
2021-06-04 15:52 ` [PATCH v16 57/99] target/arm: remove kvm include file for PSCI and arm-powerctl Alex Bennée
2021-06-04 15:52 ` [PATCH v16 58/99] target/arm: move kvm-const.h, kvm.c, kvm64.c, kvm_arm.h to kvm/ Alex Bennée
2021-06-04 15:52 ` [PATCH v16 59/99] MAINTAINERS: update arm kvm maintained files to all in target/arm/kvm/ Alex Bennée
2021-06-05 3:52 ` Richard Henderson
2021-06-04 15:52 ` [PATCH v16 60/99] target/arm: cleanup cpu includes Alex Bennée
2021-06-04 15:52 ` [PATCH v16 61/99] target/arm: remove broad "else" statements when checking accels Alex Bennée
2021-06-05 16:13 ` Richard Henderson
2021-06-04 15:52 ` [PATCH v16 62/99] target/arm: remove kvm-stub.c Alex Bennée
2021-06-04 15:52 ` [PATCH v16 63/99] tests/qtest: skip bios-tables-test test_acpi_oem_fields_virt for KVM Alex Bennée
2021-06-05 16:24 ` Richard Henderson
2021-06-18 15:25 ` Alex Bennée
2021-06-04 15:52 ` [PATCH v16 64/99] tests: do not run test-hmp on all machines for ARM KVM-only Alex Bennée
2021-06-05 16:28 ` Richard Henderson
2021-06-04 15:52 ` [PATCH v16 65/99] tests: device-introspect-test: cope with ARM TCG-only devices Alex Bennée
2021-06-05 16:31 ` Richard Henderson
2021-06-04 15:52 ` [PATCH v16 66/99] tests: do not run qom-test on all machines for ARM KVM-only Alex Bennée
2021-06-05 16:32 ` Richard Henderson
2021-06-04 15:52 ` [PATCH v16 67/99] target/arm: create kvm cpu accel class Alex Bennée
2021-06-04 15:52 ` [PATCH v16 68/99] target/arm: move kvm post init initialization to kvm cpu accel Alex Bennée
2021-06-04 15:52 ` [PATCH v16 69/99] target/arm: add tcg cpu accel class Alex Bennée
2021-06-05 16:54 ` Richard Henderson
2021-06-24 10:52 ` Alex Bennée
2021-06-04 15:52 ` [PATCH v16 70/99] target/arm: move TCG gt timer creation code in tcg/ Alex Bennée
2021-06-04 15:52 ` [PATCH v16 71/99] target/arm: cpu-sve: new module Alex Bennée
2021-06-05 18:13 ` Richard Henderson
2021-06-04 15:52 ` [PATCH v16 72/99] target/arm: cpu-sve: rename functions according to module prefix Alex Bennée
2021-06-05 18:39 ` Richard Henderson
2021-06-04 15:52 ` [PATCH v16 73/99] target/arm: cpu-sve: split TCG and KVM functionality Alex Bennée
2021-06-05 19:31 ` Richard Henderson
2021-06-05 19:52 ` Richard Henderson
2021-06-04 15:52 ` [PATCH v16 74/99] target/arm: cpu-sve: make cpu_sve_finalize_features return bool Alex Bennée
2021-06-05 18:57 ` Richard Henderson
2021-06-04 15:52 ` [PATCH v16 75/99] target/arm: make is_aa64 and arm_el_is_aa64 a macro for !TARGET_AARCH64 Alex Bennée
2021-06-05 20:00 ` Richard Henderson
2021-06-04 15:52 ` [PATCH v16 76/99] target/arm: restrict rebuild_hflags_a64 to TARGET_AARCH64 Alex Bennée
2021-06-05 20:06 ` Richard Henderson
2021-06-04 15:52 ` [PATCH v16 77/99] target/arm: arch_dump: restrict ELFCLASS64 to AArch64 Alex Bennée
2021-06-05 20:30 ` Richard Henderson
2021-06-04 15:52 ` [PATCH v16 78/99] target/arm: cpu-exceptions, cpu-exceptions-aa64: new modules Alex Bennée
2021-06-05 20:50 ` Richard Henderson
2021-06-10 8:45 ` Claudio Fontana
2021-06-10 9:01 ` Peter Maydell
2021-06-10 9:24 ` Claudio Fontana
2021-06-05 20:57 ` Richard Henderson
2021-06-04 15:52 ` [PATCH v16 79/99] target/arm: tcg: restrict ZCR cpregs to TARGET_AARCH64 Alex Bennée
2021-06-05 20:59 ` Richard Henderson
2021-06-04 15:52 ` [PATCH v16 80/99] target/arm: tcg-sve: import narrow_vq and change_el functions Alex Bennée
2021-06-05 21:10 ` Richard Henderson
2021-06-04 15:52 ` [PATCH v16 81/99] target/arm: tcg-sve: rename the " Alex Bennée
2021-06-04 15:52 ` [PATCH v16 82/99] target/arm: move sve_zcr_len_for_el to TARGET_AARCH64-only cpu-sve Alex Bennée
2021-06-05 21:37 ` Richard Henderson
2021-06-04 15:52 ` [PATCH v16 83/99] cpu-sve: rename sve_zcr_len_for_el to cpu_sve_get_zcr_len_for_el Alex Bennée
2021-06-04 15:52 ` [PATCH v16 84/99] target/arm: cpu-common: wrap a64-only check with is_a64 Alex Bennée
2021-06-05 21:55 ` Richard Henderson
2021-06-04 15:52 ` [PATCH v16 85/99] target/arm: cpu-pauth: new module for ARMv8.3 Pointer Authentication Alex Bennée
2021-06-05 22:06 ` Richard Henderson
2021-06-04 15:52 ` [PATCH v16 86/99] target/arm: cpu-pauth: change arm_cpu_pauth_finalize name and sig Alex Bennée
2021-06-05 22:09 ` Richard Henderson
2021-06-04 15:53 ` [PATCH v16 87/99] target/arm: move arm_cpu_finalize_features into cpu64 Alex Bennée
2021-06-05 22:14 ` Richard Henderson
2021-06-04 15:53 ` [PATCH v16 88/99] target/arm: cpu64: rename arm_cpu_finalize_features Alex Bennée
2021-06-05 22:14 ` Richard Henderson
2021-06-04 15:53 ` [PATCH v16 89/99] target/arm: cpu64: some final cleanup on aarch64_cpu_finalize_features Alex Bennée
2021-06-05 22:20 ` Richard Henderson
2021-06-04 15:53 ` [PATCH v16 90/99] XXX target/arm: experiment refactoring cpu "max" Alex Bennée
2021-06-04 15:53 ` [PATCH v16 91/99] target/arm: tcg: remove superfluous CONFIG_TCG check Alex Bennée
2021-06-04 15:53 ` [PATCH v16 92/99] target/arm: remove v7m stub function for !CONFIG_TCG Alex Bennée
2021-06-05 22:26 ` Richard Henderson
2021-06-04 15:53 ` [PATCH v16 93/99] meson: Introduce target-specific Kconfig Alex Bennée
2021-06-05 22:33 ` Richard Henderson
2021-06-18 16:31 ` Alex Bennée
2021-06-04 15:53 ` [PATCH v16 94/99] target/arm: move CONFIG_V7M out of default-devices Alex Bennée
2021-06-05 22:41 ` Richard Henderson
2021-06-04 15:53 ` [PATCH v16 95/99] hw/arm: add dependency on OR_IRQ for XLNX_VERSAL Alex Bennée
2021-06-05 22:43 ` Richard Henderson
2021-06-04 15:53 ` [PATCH v16 96/99] tests/qtest: split the cdrom-test into arm/aarch64 Alex Bennée
2021-06-05 22:47 ` Richard Henderson
2021-06-08 14:27 ` Alex Bennée
2021-06-08 14:42 ` Thomas Huth
2021-06-08 14:57 ` Richard Henderson
2021-06-08 15:01 ` Thomas Huth
2021-06-08 15:35 ` Alex Bennée
2021-06-08 17:23 ` Thomas Huth
2021-06-08 15:36 ` John Snow
2021-06-08 13:42 ` John Snow
2021-06-08 14:36 ` Thomas Huth
2021-06-08 14:41 ` Alex Bennée
2021-06-08 14:45 ` Thomas Huth
2021-06-04 15:53 ` [PATCH v16 97/99] tests/qtest: make xlnx-can-test conditional on being configured Alex Bennée
2021-06-05 22:48 ` Richard Henderson
2021-06-04 15:53 ` [PATCH v16 98/99] configure: allow the overriding of default-config in the build Alex Bennée
2021-06-05 23:01 ` Richard Henderson
2021-06-08 13:03 ` Philippe Mathieu-Daudé
2021-06-04 15:53 ` [PATCH v16 99/99] gitlab: defend the new stripped down arm64 configs Alex Bennée
2021-06-05 23:03 ` Richard Henderson
2021-06-08 8:44 ` [PATCH v16 00/99] arm tcg/kvm refactor and split with kvm only support Philippe Mathieu-Daudé
2021-06-08 8:50 ` Philippe Mathieu-Daudé
2021-09-20 16:24 ` Liang Yan
2021-09-20 16:27 ` Alex Bennée