From: William Kosasih <kosasihwilliam4@gmail.com>
To: qemu-devel@nongnu.org
Cc: Peter Maydell <peter.maydell@linaro.org>,
qemu-arm@nongnu.org, William Kosasih <kosasihwilliam4@gmail.com>
Subject: [PATCH v3 12/12] target/arm: Fix helper macros indentation in mve_helper.c
Date: Wed, 2 Jul 2025 20:49:54 +0930
Message-ID: <20250702111954.128563-13-kosasihwilliam4@gmail.com>
In-Reply-To: <20250702111954.128563-1-kosasihwilliam4@gmail.com>
The recent load/store alignment fixes to the helper functions caused
the continuation backslashes in those macro definitions to shift out
of alignment. This patch restores uniform indentation for the
trailing backslashes so that they line up in a single column again.
Signed-off-by: William Kosasih <kosasihwilliam4@gmail.com>
---
target/arm/tcg/mve_helper.c | 268 ++++++++++++++++++------------------
1 file changed, 134 insertions(+), 134 deletions(-)
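Note for reviewers: this change is whitespace-only, so "git show -w"
(ignore all whitespace) on the applied commit should produce an empty
diff. As a minimal illustration of the convention being restored
(a hypothetical macro for illustration only, not one from
mve_helper.c), the trailing continuation backslashes are padded so
they all sit in a single column:

    #define CLAMP_EACH(arr, n, lo, hi)                  \
        do {                                            \
            for (int i = 0; i < (n); i++) {             \
                if ((arr)[i] < (lo)) {                  \
                    (arr)[i] = (lo);                    \
                } else if ((arr)[i] > (hi)) {           \
                    (arr)[i] = (hi);                    \
                }                                       \
            }                                           \
        } while (0)

Without the padding, each backslash falls immediately after the code
on its line, and any edit that changes a line's length (such as the
cpu_*_mmu conversion earlier in this series) knocks the column out of
alignment, which is what happened here.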
diff --git a/target/arm/tcg/mve_helper.c b/target/arm/tcg/mve_helper.c
index 5dd2585684..2a7d3e7548 100644
--- a/target/arm/tcg/mve_helper.c
+++ b/target/arm/tcg/mve_helper.c
@@ -148,45 +148,45 @@ static void mve_advance_vpt(CPUARMState *env)
}
/* For loads, predicated lanes are zeroed instead of keeping their old values */
-#define DO_VLDR(OP, MFLAG, MSIZE, MTYPE, LDTYPE, ESIZE, TYPE) \
- void HELPER(mve_##OP)(CPUARMState *env, void *vd, uint32_t addr) \
- { \
- TYPE *d = vd; \
- uint16_t mask = mve_element_mask(env); \
- uint16_t eci_mask = mve_eci_mask(env); \
- unsigned b, e; \
- int mmu_idx = arm_to_core_mmu_idx(arm_mmu_idx(env)); \
- MemOpIdx oi = make_memop_idx(MFLAG | MO_ALIGN, mmu_idx); \
- /* \
- * R_SXTM allows the dest reg to become UNKNOWN for abandoned \
- * beats so we don't care if we update part of the dest and \
- * then take an exception. \
- */ \
- for (b = 0, e = 0; b < 16; b += ESIZE, e++) { \
- if (eci_mask & (1 << b)) { \
- d[H##ESIZE(e)] = (mask & (1 << b)) ? \
- (MTYPE)cpu_##LDTYPE##_mmu(env, addr, oi, GETPC()) : 0;\
- } \
- addr += MSIZE; \
- } \
- mve_advance_vpt(env); \
+#define DO_VLDR(OP, MFLAG, MSIZE, MTYPE, LDTYPE, ESIZE, TYPE) \
+ void HELPER(mve_##OP)(CPUARMState *env, void *vd, uint32_t addr) \
+ { \
+ TYPE *d = vd; \
+ uint16_t mask = mve_element_mask(env); \
+ uint16_t eci_mask = mve_eci_mask(env); \
+ unsigned b, e; \
+ int mmu_idx = arm_to_core_mmu_idx(arm_mmu_idx(env)); \
+ MemOpIdx oi = make_memop_idx(MFLAG | MO_ALIGN, mmu_idx); \
+ /* \
+ * R_SXTM allows the dest reg to become UNKNOWN for abandoned \
+ * beats so we don't care if we update part of the dest and \
+ * then take an exception. \
+ */ \
+ for (b = 0, e = 0; b < 16; b += ESIZE, e++) { \
+ if (eci_mask & (1 << b)) { \
+ d[H##ESIZE(e)] = (mask & (1 << b)) ? \
+ (MTYPE)cpu_##LDTYPE##_mmu(env, addr, oi, GETPC()) : 0; \
+ } \
+ addr += MSIZE; \
+ } \
+ mve_advance_vpt(env); \
}
-#define DO_VSTR(OP, MFLAG, MSIZE, STTYPE, ESIZE, TYPE) \
- void HELPER(mve_##OP)(CPUARMState *env, void *vd, uint32_t addr) \
- { \
- TYPE *d = vd; \
- uint16_t mask = mve_element_mask(env); \
- unsigned b, e; \
- int mmu_idx = arm_to_core_mmu_idx(arm_mmu_idx(env)); \
- MemOpIdx oi = make_memop_idx(MFLAG | MO_ALIGN, mmu_idx); \
- for (b = 0, e = 0; b < 16; b += ESIZE, e++) { \
- if (mask & (1 << b)) { \
+#define DO_VSTR(OP, MFLAG, MSIZE, STTYPE, ESIZE, TYPE) \
+ void HELPER(mve_##OP)(CPUARMState *env, void *vd, uint32_t addr) \
+ { \
+ TYPE *d = vd; \
+ uint16_t mask = mve_element_mask(env); \
+ unsigned b, e; \
+ int mmu_idx = arm_to_core_mmu_idx(arm_mmu_idx(env)); \
+ MemOpIdx oi = make_memop_idx(MFLAG | MO_ALIGN, mmu_idx); \
+ for (b = 0, e = 0; b < 16; b += ESIZE, e++) { \
+ if (mask & (1 << b)) { \
cpu_##STTYPE##_mmu(env, addr, d[H##ESIZE(e)], oi, GETPC()); \
- } \
- addr += MSIZE; \
- } \
- mve_advance_vpt(env); \
+ } \
+ addr += MSIZE; \
+ } \
+ mve_advance_vpt(env); \
}
DO_VLDR(vldrb, MO_UB, 1, uint8_t, ldb, 1, uint8_t)
@@ -219,57 +219,57 @@ DO_VSTR(vstrh_w, MO_TEUW, 2, stw, 4, int32_t)
* their previous values.
*/
#define DO_VLDR_SG(OP, MFLAG, MTYPE, LDTYPE, ESIZE, TYPE, OFFTYPE, ADDRFN, WB)\
- void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm, \
- uint32_t base) \
- { \
- TYPE *d = vd; \
- OFFTYPE *m = vm; \
- uint16_t mask = mve_element_mask(env); \
- uint16_t eci_mask = mve_eci_mask(env); \
- unsigned e; \
- uint32_t addr; \
- int mmu_idx = arm_to_core_mmu_idx(arm_mmu_idx(env)); \
- MemOpIdx oi = make_memop_idx(MFLAG | MO_ALIGN, mmu_idx); \
- for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE, eci_mask >>= ESIZE) { \
- if (!(eci_mask & 1)) { \
- continue; \
- } \
- addr = ADDRFN(base, m[H##ESIZE(e)]); \
- d[H##ESIZE(e)] = (mask & 1) ? \
- (MTYPE)cpu_##LDTYPE##_mmu(env, addr, oi, GETPC()) : 0; \
- if (WB) { \
- m[H##ESIZE(e)] = addr; \
- } \
- } \
- mve_advance_vpt(env); \
+ void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm, \
+ uint32_t base) \
+ { \
+ TYPE *d = vd; \
+ OFFTYPE *m = vm; \
+ uint16_t mask = mve_element_mask(env); \
+ uint16_t eci_mask = mve_eci_mask(env); \
+ unsigned e; \
+ uint32_t addr; \
+ int mmu_idx = arm_to_core_mmu_idx(arm_mmu_idx(env)); \
+ MemOpIdx oi = make_memop_idx(MFLAG | MO_ALIGN, mmu_idx); \
+ for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE, eci_mask >>= ESIZE) {\
+ if (!(eci_mask & 1)) { \
+ continue; \
+ } \
+ addr = ADDRFN(base, m[H##ESIZE(e)]); \
+ d[H##ESIZE(e)] = (mask & 1) ? \
+ (MTYPE)cpu_##LDTYPE##_mmu(env, addr, oi, GETPC()) : 0; \
+ if (WB) { \
+ m[H##ESIZE(e)] = addr; \
+ } \
+ } \
+ mve_advance_vpt(env); \
}
/* We know here TYPE is unsigned so always the same as the offset type */
-#define DO_VSTR_SG(OP, MFLAG, STTYPE, ESIZE, TYPE, ADDRFN, WB) \
- void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm, \
- uint32_t base) \
- { \
- TYPE *d = vd; \
- TYPE *m = vm; \
- uint16_t mask = mve_element_mask(env); \
- uint16_t eci_mask = mve_eci_mask(env); \
- unsigned e; \
- uint32_t addr; \
- int mmu_idx = arm_to_core_mmu_idx(arm_mmu_idx(env)); \
- MemOpIdx oi = make_memop_idx(MFLAG | MO_ALIGN, mmu_idx); \
- for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE, eci_mask >>= ESIZE) { \
- if (!(eci_mask & 1)) { \
- continue; \
- } \
- addr = ADDRFN(base, m[H##ESIZE(e)]); \
- if (mask & 1) { \
- cpu_##STTYPE##_mmu(env, addr, d[H##ESIZE(e)], oi, GETPC()); \
- } \
- if (WB) { \
- m[H##ESIZE(e)] = addr; \
- } \
- } \
- mve_advance_vpt(env); \
+#define DO_VSTR_SG(OP, MFLAG, STTYPE, ESIZE, TYPE, ADDRFN, WB) \
+ void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm, \
+ uint32_t base) \
+ { \
+ TYPE *d = vd; \
+ TYPE *m = vm; \
+ uint16_t mask = mve_element_mask(env); \
+ uint16_t eci_mask = mve_eci_mask(env); \
+ unsigned e; \
+ uint32_t addr; \
+ int mmu_idx = arm_to_core_mmu_idx(arm_mmu_idx(env)); \
+ MemOpIdx oi = make_memop_idx(MFLAG | MO_ALIGN, mmu_idx); \
+ for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE, eci_mask >>= ESIZE) {\
+ if (!(eci_mask & 1)) { \
+ continue; \
+ } \
+ addr = ADDRFN(base, m[H##ESIZE(e)]); \
+ if (mask & 1) { \
+ cpu_##STTYPE##_mmu(env, addr, d[H##ESIZE(e)], oi, GETPC()); \
+ } \
+ if (WB) { \
+ m[H##ESIZE(e)] = addr; \
+ } \
+ } \
+ mve_advance_vpt(env); \
}
/*
@@ -280,58 +280,58 @@ DO_VSTR(vstrh_w, MO_TEUW, 2, stw, 4, int32_t)
* Address writeback happens on the odd beats and updates the address
* stored in the even-beat element.
*/
-#define DO_VLDR64_SG(OP, ADDRFN, WB) \
- void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm, \
- uint32_t base) \
- { \
- uint32_t *d = vd; \
- uint32_t *m = vm; \
- uint16_t mask = mve_element_mask(env); \
- uint16_t eci_mask = mve_eci_mask(env); \
- unsigned e; \
- uint32_t addr; \
- int mmu_idx = arm_to_core_mmu_idx(arm_mmu_idx(env)); \
- MemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN, mmu_idx); \
- for (e = 0; e < 16 / 4; e++, mask >>= 4, eci_mask >>= 4) { \
- if (!(eci_mask & 1)) { \
- continue; \
- } \
- addr = ADDRFN(base, m[H4(e & ~1)]); \
- addr += 4 * (e & 1); \
- d[H4(e)] = (mask & 1) ? cpu_ldl_mmu(env, addr, oi, GETPC()) : 0; \
- if (WB && (e & 1)) { \
- m[H4(e & ~1)] = addr - 4; \
- } \
- } \
- mve_advance_vpt(env); \
+#define DO_VLDR64_SG(OP, ADDRFN, WB) \
+ void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm, \
+ uint32_t base) \
+ { \
+ uint32_t *d = vd; \
+ uint32_t *m = vm; \
+ uint16_t mask = mve_element_mask(env); \
+ uint16_t eci_mask = mve_eci_mask(env); \
+ unsigned e; \
+ uint32_t addr; \
+ int mmu_idx = arm_to_core_mmu_idx(arm_mmu_idx(env)); \
+ MemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN, mmu_idx); \
+ for (e = 0; e < 16 / 4; e++, mask >>= 4, eci_mask >>= 4) { \
+ if (!(eci_mask & 1)) { \
+ continue; \
+ } \
+ addr = ADDRFN(base, m[H4(e & ~1)]); \
+ addr += 4 * (e & 1); \
+ d[H4(e)] = (mask & 1) ? cpu_ldl_mmu(env, addr, oi, GETPC()) : 0; \
+ if (WB && (e & 1)) { \
+ m[H4(e & ~1)] = addr - 4; \
+ } \
+ } \
+ mve_advance_vpt(env); \
}
-#define DO_VSTR64_SG(OP, ADDRFN, WB) \
- void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm, \
- uint32_t base) \
- { \
- uint32_t *d = vd; \
- uint32_t *m = vm; \
- uint16_t mask = mve_element_mask(env); \
- uint16_t eci_mask = mve_eci_mask(env); \
- unsigned e; \
- uint32_t addr; \
- int mmu_idx = arm_to_core_mmu_idx(arm_mmu_idx(env)); \
- MemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN, mmu_idx); \
- for (e = 0; e < 16 / 4; e++, mask >>= 4, eci_mask >>= 4) { \
- if (!(eci_mask & 1)) { \
- continue; \
- } \
- addr = ADDRFN(base, m[H4(e & ~1)]); \
- addr += 4 * (e & 1); \
- if (mask & 1) { \
- cpu_stl_mmu(env, addr, d[H4(e)], oi, GETPC()); \
- } \
- if (WB && (e & 1)) { \
- m[H4(e & ~1)] = addr - 4; \
- } \
- } \
- mve_advance_vpt(env); \
+#define DO_VSTR64_SG(OP, ADDRFN, WB) \
+ void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm, \
+ uint32_t base) \
+ { \
+ uint32_t *d = vd; \
+ uint32_t *m = vm; \
+ uint16_t mask = mve_element_mask(env); \
+ uint16_t eci_mask = mve_eci_mask(env); \
+ unsigned e; \
+ uint32_t addr; \
+ int mmu_idx = arm_to_core_mmu_idx(arm_mmu_idx(env)); \
+ MemOpIdx oi = make_memop_idx(MO_TEUL | MO_ALIGN, mmu_idx); \
+ for (e = 0; e < 16 / 4; e++, mask >>= 4, eci_mask >>= 4) { \
+ if (!(eci_mask & 1)) { \
+ continue; \
+ } \
+ addr = ADDRFN(base, m[H4(e & ~1)]); \
+ addr += 4 * (e & 1); \
+ if (mask & 1) { \
+ cpu_stl_mmu(env, addr, d[H4(e)], oi, GETPC()); \
+ } \
+ if (WB && (e & 1)) { \
+ m[H4(e & ~1)] = addr - 4; \
+ } \
+ } \
+ mve_advance_vpt(env); \
}
#define ADDR_ADD(BASE, OFFSET) ((BASE) + (OFFSET))
--
2.48.1