* [PATCH 0/4] target/riscv: Fix the element agnostic function problem
@ 2024-03-06 9:20 Huang Tao
2024-03-06 9:20 ` [PATCH 1/4] target/riscv: Rename vext_set_elems_1s function Huang Tao
` (3 more replies)
0 siblings, 4 replies; 9+ messages in thread
From: Huang Tao @ 2024-03-06 9:20 UTC (permalink / raw)
To: qemu-devel
Cc: qemu-riscv, zhiwei_liu, dbarboza, liwei1518, bin.meng,
alistair.francis, palmer, Huang Tao
In RVV and vcrypto instructions, the element agnostic helper vext_set_elems_1s
cannot handle big-endian host environments.
This patchset fixes the problem by implementing a host-endianness-aware function
to set agnostic elements to 1s.
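For reference, a minimal standalone sketch (not part of the series) of how the
little-endian-only byte-range memset goes wrong on a big-endian host. It assumes
the usual QEMU H-macro swizzling of element indices within each 8-byte unit
(H2(x) = (x) ^ 3 for 16-bit elements); the names below are illustrative only:

#include <stdint.h>
#include <string.h>

#define H2_BE(x) ((x) ^ 3)  /* 16-bit element index inside an 8-byte unit */

static void demo_be_host(void)
{
    uint16_t vd[4] = { 0 };       /* one 8-byte unit, SEW = 16 */
    uint32_t vl = 1, tot = 8;     /* 1 live element, 8 bytes total */

    vd[H2_BE(0)] = 0x1234;        /* live element 0 lands at bytes 6..7 */

    /* what the little-endian-only helper does: memset the byte range
     * [vl * esz, tot), which assumes element i occupies bytes
     * [i * esz, (i + 1) * esz) */
    memset((uint8_t *)vd + vl * 2, -1, tot - vl * 2);

    /* bytes 2..7 are now 0xff: the live element at bytes 6..7 has been
     * clobbered, while the real tail bytes 0..1 (element 3) stay untouched */
}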
Huang Tao (4):
target/riscv: Rename vext_set_elems_1s function
target/riscv: Add right functions to set agnostic elements
target/riscv: Replace element agnostic for vector instructions
target/riscv: Delete the former element agnostic function
target/riscv/vcrypto_helper.c | 32 ++++++------
target/riscv/vector_helper.c | 92 ++++++++++++++++-----------------
target/riscv/vector_internals.c | 58 +++++++++++++++++----
target/riscv/vector_internals.h | 8 +--
4 files changed, 115 insertions(+), 75 deletions(-)
--
2.41.0
^ permalink raw reply [flat|nested] 9+ messages in thread
* [PATCH 1/4] target/riscv: Rename vext_set_elems_1s function
2024-03-06 9:20 [PATCH 0/4] target/riscv: Fix the element agnostic function problem Huang Tao
@ 2024-03-06 9:20 ` Huang Tao
2024-03-19 21:37 ` Daniel Henrique Barboza
2024-03-06 9:20 ` [PATCH 2/4] target/riscv: Add right functions to set agnostic elements Huang Tao
` (2 subsequent siblings)
3 siblings, 1 reply; 9+ messages in thread
From: Huang Tao @ 2024-03-06 9:20 UTC (permalink / raw)
To: qemu-devel
Cc: qemu-riscv, zhiwei_liu, dbarboza, liwei1518, bin.meng,
alistair.francis, palmer, Huang Tao
In RVV and vcrypto instructions, the masked-off and tail elements are set to 1s
with the vext_set_elems_1s function when the vma/vta bit is set. This is the
element agnostic policy.
However, this function cannot handle big-endian hosts. Rename it, appending
'_le' to its name, to indicate that it is only correct for little-endian
hosts.
Signed-off-by: Huang Tao <eric.huang@linux.alibaba.com>
---
target/riscv/vcrypto_helper.c | 32 ++++++------
target/riscv/vector_helper.c | 92 ++++++++++++++++-----------------
target/riscv/vector_internals.c | 10 ++--
target/riscv/vector_internals.h | 6 +--
4 files changed, 70 insertions(+), 70 deletions(-)
diff --git a/target/riscv/vcrypto_helper.c b/target/riscv/vcrypto_helper.c
index e2d719b13b..f818749a63 100644
--- a/target/riscv/vcrypto_helper.c
+++ b/target/riscv/vcrypto_helper.c
@@ -235,7 +235,7 @@ static inline void xor_round_key(AESState *round_state, AESState *round_key)
} \
env->vstart = 0; \
/* set tail elements to 1s */ \
- vext_set_elems_1s(vd, vta, vl * 4, total_elems * 4); \
+ vext_set_elems_1s_le(vd, vta, vl * 4, total_elems * 4); \
}
#define GEN_ZVKNED_HELPER_VS(NAME, ...) \
@@ -259,7 +259,7 @@ static inline void xor_round_key(AESState *round_state, AESState *round_key)
} \
env->vstart = 0; \
/* set tail elements to 1s */ \
- vext_set_elems_1s(vd, vta, vl * 4, total_elems * 4); \
+ vext_set_elems_1s_le(vd, vta, vl * 4, total_elems * 4); \
}
GEN_ZVKNED_HELPER_VV(vaesef_vv, aesenc_SB_SR_AK(&round_state,
@@ -339,7 +339,7 @@ void HELPER(vaeskf1_vi)(void *vd_vptr, void *vs2_vptr, uint32_t uimm,
}
env->vstart = 0;
/* set tail elements to 1s */
- vext_set_elems_1s(vd, vta, vl * 4, total_elems * 4);
+ vext_set_elems_1s_le(vd, vta, vl * 4, total_elems * 4);
}
void HELPER(vaeskf2_vi)(void *vd_vptr, void *vs2_vptr, uint32_t uimm,
@@ -396,7 +396,7 @@ void HELPER(vaeskf2_vi)(void *vd_vptr, void *vs2_vptr, uint32_t uimm,
}
env->vstart = 0;
/* set tail elements to 1s */
- vext_set_elems_1s(vd, vta, vl * 4, total_elems * 4);
+ vext_set_elems_1s_le(vd, vta, vl * 4, total_elems * 4);
}
static inline uint32_t sig0_sha256(uint32_t x)
@@ -469,7 +469,7 @@ void HELPER(vsha2ms_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env,
}
/* set tail elements to 1s */
total_elems = vext_get_total_elems(env, desc, esz);
- vext_set_elems_1s(vd, vta, env->vl * esz, total_elems * esz);
+ vext_set_elems_1s_le(vd, vta, env->vl * esz, total_elems * esz);
env->vstart = 0;
}
@@ -579,7 +579,7 @@ void HELPER(vsha2ch32_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env,
/* set tail elements to 1s */
total_elems = vext_get_total_elems(env, desc, esz);
- vext_set_elems_1s(vd, vta, env->vl * esz, total_elems * esz);
+ vext_set_elems_1s_le(vd, vta, env->vl * esz, total_elems * esz);
env->vstart = 0;
}
@@ -597,7 +597,7 @@ void HELPER(vsha2ch64_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env,
/* set tail elements to 1s */
total_elems = vext_get_total_elems(env, desc, esz);
- vext_set_elems_1s(vd, vta, env->vl * esz, total_elems * esz);
+ vext_set_elems_1s_le(vd, vta, env->vl * esz, total_elems * esz);
env->vstart = 0;
}
@@ -615,7 +615,7 @@ void HELPER(vsha2cl32_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env,
/* set tail elements to 1s */
total_elems = vext_get_total_elems(env, desc, esz);
- vext_set_elems_1s(vd, vta, env->vl * esz, total_elems * esz);
+ vext_set_elems_1s_le(vd, vta, env->vl * esz, total_elems * esz);
env->vstart = 0;
}
@@ -633,7 +633,7 @@ void HELPER(vsha2cl64_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env,
/* set tail elements to 1s */
total_elems = vext_get_total_elems(env, desc, esz);
- vext_set_elems_1s(vd, vta, env->vl * esz, total_elems * esz);
+ vext_set_elems_1s_le(vd, vta, env->vl * esz, total_elems * esz);
env->vstart = 0;
}
@@ -672,7 +672,7 @@ void HELPER(vsm3me_vv)(void *vd_vptr, void *vs1_vptr, void *vs2_vptr,
vd[(i * 8) + j] = bswap32(w[H4(j + 16)]);
}
}
- vext_set_elems_1s(vd_vptr, vta, env->vl * esz, total_elems * esz);
+ vext_set_elems_1s_le(vd_vptr, vta, env->vl * esz, total_elems * esz);
env->vstart = 0;
}
@@ -767,7 +767,7 @@ void HELPER(vsm3c_vi)(void *vd_vptr, void *vs2_vptr, uint32_t uimm,
vd[i * 8 + k] = bswap32(v1[H4(k)]);
}
}
- vext_set_elems_1s(vd_vptr, vta, env->vl * esz, total_elems * esz);
+ vext_set_elems_1s_le(vd_vptr, vta, env->vl * esz, total_elems * esz);
env->vstart = 0;
}
@@ -805,7 +805,7 @@ void HELPER(vghsh_vv)(void *vd_vptr, void *vs1_vptr, void *vs2_vptr,
vd[i * 2 + 1] = brev8(Z[1]);
}
/* set tail elements to 1s */
- vext_set_elems_1s(vd, vta, env->vl * 4, total_elems * 4);
+ vext_set_elems_1s_le(vd, vta, env->vl * 4, total_elems * 4);
env->vstart = 0;
}
@@ -839,7 +839,7 @@ void HELPER(vgmul_vv)(void *vd_vptr, void *vs2_vptr, CPURISCVState *env,
vd[i * 2 + 1] = brev8(Z[1]);
}
/* set tail elements to 1s */
- vext_set_elems_1s(vd, vta, env->vl * 4, total_elems * 4);
+ vext_set_elems_1s_le(vd, vta, env->vl * 4, total_elems * 4);
env->vstart = 0;
}
@@ -883,7 +883,7 @@ void HELPER(vsm4k_vi)(void *vd, void *vs2, uint32_t uimm5, CPURISCVState *env,
env->vstart = 0;
/* set tail elements to 1s */
- vext_set_elems_1s(vd, vext_vta(desc), env->vl * esz, total_elems * esz);
+ vext_set_elems_1s_le(vd, vext_vta(desc), env->vl * esz, total_elems * esz);
}
static void do_sm4_round(uint32_t *rk, uint32_t *buf)
@@ -932,7 +932,7 @@ void HELPER(vsm4r_vv)(void *vd, void *vs2, CPURISCVState *env, uint32_t desc)
env->vstart = 0;
/* set tail elements to 1s */
- vext_set_elems_1s(vd, vext_vta(desc), env->vl * esz, total_elems * esz);
+ vext_set_elems_1s_le(vd, vext_vta(desc), env->vl * esz, total_elems * esz);
}
void HELPER(vsm4r_vs)(void *vd, void *vs2, CPURISCVState *env, uint32_t desc)
@@ -966,5 +966,5 @@ void HELPER(vsm4r_vs)(void *vd, void *vs2, CPURISCVState *env, uint32_t desc)
env->vstart = 0;
/* set tail elements to 1s */
- vext_set_elems_1s(vd, vext_vta(desc), env->vl * esz, total_elems * esz);
+ vext_set_elems_1s_le(vd, vext_vta(desc), env->vl * esz, total_elems * esz);
}
diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
index 84cec73eb2..6ed73ed70a 100644
--- a/target/riscv/vector_helper.c
+++ b/target/riscv/vector_helper.c
@@ -187,7 +187,7 @@ static void vext_set_tail_elems_1s(target_ulong vl, void *vd,
}
for (k = 0; k < nf; ++k) {
- vext_set_elems_1s(vd, vta, (k * max_elems + vl) * esz,
+ vext_set_elems_1s_le(vd, vta, (k * max_elems + vl) * esz,
(k * max_elems + max_elems) * esz);
}
}
@@ -213,7 +213,7 @@ vext_ldst_stride(void *vd, void *v0, target_ulong base,
while (k < nf) {
if (!vm && !vext_elem_mask(v0, i)) {
/* set masked-off elements to 1s */
- vext_set_elems_1s(vd, vma, (i + k * max_elems) * esz,
+ vext_set_elems_1s_le(vd, vma, (i + k * max_elems) * esz,
(i + k * max_elems + 1) * esz);
k++;
continue;
@@ -393,7 +393,7 @@ vext_ldst_index(void *vd, void *v0, target_ulong base,
while (k < nf) {
if (!vm && !vext_elem_mask(v0, i)) {
/* set masked-off elements to 1s */
- vext_set_elems_1s(vd, vma, (i + k * max_elems) * esz,
+ vext_set_elems_1s_le(vd, vma, (i + k * max_elems) * esz,
(i + k * max_elems + 1) * esz);
k++;
continue;
@@ -523,7 +523,7 @@ ProbeSuccess:
while (k < nf) {
if (!vm && !vext_elem_mask(v0, i)) {
/* set masked-off elements to 1s */
- vext_set_elems_1s(vd, vma, (i + k * max_elems) * esz,
+ vext_set_elems_1s_le(vd, vma, (i + k * max_elems) * esz,
(i + k * max_elems + 1) * esz);
k++;
continue;
@@ -887,7 +887,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
} \
env->vstart = 0; \
/* set tail elements to 1s */ \
- vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \
+ vext_set_elems_1s_le(vd, vta, vl * esz, total_elems * esz); \
}
GEN_VEXT_VADC_VVM(vadc_vvm_b, uint8_t, H1, DO_VADC)
@@ -918,7 +918,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
} \
env->vstart = 0; \
/* set tail elements to 1s */ \
- vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \
+ vext_set_elems_1s_le(vd, vta, vl * esz, total_elems * esz); \
}
GEN_VEXT_VADC_VXM(vadc_vxm_b, uint8_t, H1, DO_VADC)
@@ -1082,7 +1082,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
for (i = env->vstart; i < vl; i++) { \
if (!vm && !vext_elem_mask(v0, i)) { \
/* set masked-off elements to 1s */ \
- vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz); \
+ vext_set_elems_1s_le(vd, vma, i * esz, (i + 1) * esz); \
continue; \
} \
TS1 s1 = *((TS1 *)vs1 + HS1(i)); \
@@ -1091,7 +1091,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
} \
env->vstart = 0; \
/* set tail elements to 1s */ \
- vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \
+ vext_set_elems_1s_le(vd, vta, vl * esz, total_elems * esz); \
}
GEN_VEXT_SHIFT_VV(vsll_vv_b, uint8_t, uint8_t, H1, H1, DO_SLL, 0x7)
@@ -1129,7 +1129,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \
for (i = env->vstart; i < vl; i++) { \
if (!vm && !vext_elem_mask(v0, i)) { \
/* set masked-off elements to 1s */ \
- vext_set_elems_1s(vd, vma, i * esz, \
+ vext_set_elems_1s_le(vd, vma, i * esz, \
(i + 1) * esz); \
continue; \
} \
@@ -1138,7 +1138,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \
} \
env->vstart = 0; \
/* set tail elements to 1s */ \
- vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz);\
+ vext_set_elems_1s_le(vd, vta, vl * esz, total_elems * esz);\
}
GEN_VEXT_SHIFT_VX(vsll_vx_b, uint8_t, int8_t, H1, H1, DO_SLL, 0x7)
@@ -1806,7 +1806,7 @@ void HELPER(NAME)(void *vd, void *vs1, CPURISCVState *env, \
} \
env->vstart = 0; \
/* set tail elements to 1s */ \
- vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \
+ vext_set_elems_1s_le(vd, vta, vl * esz, total_elems * esz); \
}
GEN_VEXT_VMV_VV(vmv_v_v_b, int8_t, H1)
@@ -1829,7 +1829,7 @@ void HELPER(NAME)(void *vd, uint64_t s1, CPURISCVState *env, \
} \
env->vstart = 0; \
/* set tail elements to 1s */ \
- vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \
+ vext_set_elems_1s_le(vd, vta, vl * esz, total_elems * esz); \
}
GEN_VEXT_VMV_VX(vmv_v_x_b, int8_t, H1)
@@ -1853,7 +1853,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
} \
env->vstart = 0; \
/* set tail elements to 1s */ \
- vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \
+ vext_set_elems_1s_le(vd, vta, vl * esz, total_elems * esz); \
}
GEN_VEXT_VMERGE_VV(vmerge_vvm_b, int8_t, H1)
@@ -1879,7 +1879,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \
} \
env->vstart = 0; \
/* set tail elements to 1s */ \
- vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \
+ vext_set_elems_1s_le(vd, vta, vl * esz, total_elems * esz); \
}
GEN_VEXT_VMERGE_VX(vmerge_vxm_b, int8_t, H1)
@@ -1919,7 +1919,7 @@ vext_vv_rm_1(void *vd, void *v0, void *vs1, void *vs2,
for (uint32_t i = env->vstart; i < vl; i++) {
if (!vm && !vext_elem_mask(v0, i)) {
/* set masked-off elements to 1s */
- vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz);
+ vext_set_elems_1s_le(vd, vma, i * esz, (i + 1) * esz);
continue;
}
fn(vd, vs1, vs2, i, env, vxrm);
@@ -1958,7 +1958,7 @@ vext_vv_rm_2(void *vd, void *v0, void *vs1, void *vs2,
break;
}
/* set tail elements to 1s */
- vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz);
+ vext_set_elems_1s_le(vd, vta, vl * esz, total_elems * esz);
}
/* generate helpers for fixed point instructions with OPIVV format */
@@ -2044,7 +2044,7 @@ vext_vx_rm_1(void *vd, void *v0, target_long s1, void *vs2,
for (uint32_t i = env->vstart; i < vl; i++) {
if (!vm && !vext_elem_mask(v0, i)) {
/* set masked-off elements to 1s */
- vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz);
+ vext_set_elems_1s_le(vd, vma, i * esz, (i + 1) * esz);
continue;
}
fn(vd, s1, vs2, i, env, vxrm);
@@ -2083,7 +2083,7 @@ vext_vx_rm_2(void *vd, void *v0, target_long s1, void *vs2,
break;
}
/* set tail elements to 1s */
- vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz);
+ vext_set_elems_1s_le(vd, vta, vl * esz, total_elems * esz);
}
/* generate helpers for fixed point instructions with OPIVX format */
@@ -2841,7 +2841,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
for (i = env->vstart; i < vl; i++) { \
if (!vm && !vext_elem_mask(v0, i)) { \
/* set masked-off elements to 1s */ \
- vext_set_elems_1s(vd, vma, i * ESZ, \
+ vext_set_elems_1s_le(vd, vma, i * ESZ, \
(i + 1) * ESZ); \
continue; \
} \
@@ -2849,7 +2849,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
} \
env->vstart = 0; \
/* set tail elements to 1s */ \
- vext_set_elems_1s(vd, vta, vl * ESZ, \
+ vext_set_elems_1s_le(vd, vta, vl * ESZ, \
total_elems * ESZ); \
}
@@ -2884,7 +2884,7 @@ void HELPER(NAME)(void *vd, void *v0, uint64_t s1, \
for (i = env->vstart; i < vl; i++) { \
if (!vm && !vext_elem_mask(v0, i)) { \
/* set masked-off elements to 1s */ \
- vext_set_elems_1s(vd, vma, i * ESZ, \
+ vext_set_elems_1s_le(vd, vma, i * ESZ, \
(i + 1) * ESZ); \
continue; \
} \
@@ -2892,7 +2892,7 @@ void HELPER(NAME)(void *vd, void *v0, uint64_t s1, \
} \
env->vstart = 0; \
/* set tail elements to 1s */ \
- vext_set_elems_1s(vd, vta, vl * ESZ, \
+ vext_set_elems_1s_le(vd, vta, vl * ESZ, \
total_elems * ESZ); \
}
@@ -3473,14 +3473,14 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2, \
for (i = env->vstart; i < vl; i++) { \
if (!vm && !vext_elem_mask(v0, i)) { \
/* set masked-off elements to 1s */ \
- vext_set_elems_1s(vd, vma, i * ESZ, \
+ vext_set_elems_1s_le(vd, vma, i * ESZ, \
(i + 1) * ESZ); \
continue; \
} \
do_##NAME(vd, vs2, i, env); \
} \
env->vstart = 0; \
- vext_set_elems_1s(vd, vta, vl * ESZ, \
+ vext_set_elems_1s_le(vd, vta, vl * ESZ, \
total_elems * ESZ); \
}
@@ -4228,7 +4228,7 @@ void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2, \
} \
env->vstart = 0; \
/* set tail elements to 1s */ \
- vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \
+ vext_set_elems_1s_le(vd, vta, vl * esz, total_elems * esz); \
}
GEN_VFMERGE_VF(vfmerge_vfm_h, int16_t, H2)
@@ -4397,7 +4397,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
*((TD *)vd + HD(0)) = s1; \
env->vstart = 0; \
/* set tail elements to 1s */ \
- vext_set_elems_1s(vd, vta, esz, vlenb); \
+ vext_set_elems_1s_le(vd, vta, esz, vlenb); \
}
/* vd[0] = sum(vs1[0], vs2[*]) */
@@ -4483,7 +4483,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
*((TD *)vd + HD(0)) = s1; \
env->vstart = 0; \
/* set tail elements to 1s */ \
- vext_set_elems_1s(vd, vta, esz, vlenb); \
+ vext_set_elems_1s_le(vd, vta, esz, vlenb); \
}
/* Unordered sum */
@@ -4708,7 +4708,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2, CPURISCVState *env, \
for (i = env->vstart; i < vl; i++) { \
if (!vm && !vext_elem_mask(v0, i)) { \
/* set masked-off elements to 1s */ \
- vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz); \
+ vext_set_elems_1s_le(vd, vma, i * esz, (i + 1) * esz); \
continue; \
} \
*((ETYPE *)vd + H(i)) = sum; \
@@ -4718,7 +4718,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2, CPURISCVState *env, \
} \
env->vstart = 0; \
/* set tail elements to 1s */ \
- vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \
+ vext_set_elems_1s_le(vd, vta, vl * esz, total_elems * esz); \
}
GEN_VEXT_VIOTA_M(viota_m_b, uint8_t, H1)
@@ -4741,14 +4741,14 @@ void HELPER(NAME)(void *vd, void *v0, CPURISCVState *env, uint32_t desc) \
for (i = env->vstart; i < vl; i++) { \
if (!vm && !vext_elem_mask(v0, i)) { \
/* set masked-off elements to 1s */ \
- vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz); \
+ vext_set_elems_1s_le(vd, vma, i * esz, (i + 1) * esz); \
continue; \
} \
*((ETYPE *)vd + H(i)) = i; \
} \
env->vstart = 0; \
/* set tail elements to 1s */ \
- vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \
+ vext_set_elems_1s_le(vd, vta, vl * esz, total_elems * esz); \
}
GEN_VEXT_VID_V(vid_v_b, uint8_t, H1)
@@ -4777,13 +4777,13 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
for (i = i_min; i < vl; i++) { \
if (!vm && !vext_elem_mask(v0, i)) { \
/* set masked-off elements to 1s */ \
- vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz); \
+ vext_set_elems_1s_le(vd, vma, i * esz, (i + 1) * esz); \
continue; \
} \
*((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i - offset)); \
} \
/* set tail elements to 1s */ \
- vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \
+ vext_set_elems_1s_le(vd, vta, vl * esz, total_elems * esz); \
}
/* vslideup.vx vd, vs2, rs1, vm # vd[i+rs1] = vs2[i] */
@@ -4810,7 +4810,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
for (i = env->vstart; i < i_max; ++i) { \
if (!vm && !vext_elem_mask(v0, i)) { \
/* set masked-off elements to 1s */ \
- vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz); \
+ vext_set_elems_1s_le(vd, vma, i * esz, (i + 1) * esz); \
continue; \
} \
*((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i + s1)); \
@@ -4824,7 +4824,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
\
env->vstart = 0; \
/* set tail elements to 1s */ \
- vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \
+ vext_set_elems_1s_le(vd, vta, vl * esz, total_elems * esz); \
}
/* vslidedown.vx vd, vs2, rs1, vm # vd[i] = vs2[i+rs1] */
@@ -4850,7 +4850,7 @@ static void vslide1up_##BITWIDTH(void *vd, void *v0, uint64_t s1, \
for (i = env->vstart; i < vl; i++) { \
if (!vm && !vext_elem_mask(v0, i)) { \
/* set masked-off elements to 1s */ \
- vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz); \
+ vext_set_elems_1s_le(vd, vma, i * esz, (i + 1) * esz); \
continue; \
} \
if (i == 0) { \
@@ -4861,7 +4861,7 @@ static void vslide1up_##BITWIDTH(void *vd, void *v0, uint64_t s1, \
} \
env->vstart = 0; \
/* set tail elements to 1s */ \
- vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \
+ vext_set_elems_1s_le(vd, vta, vl * esz, total_elems * esz); \
}
GEN_VEXT_VSLIE1UP(8, H1)
@@ -4899,7 +4899,7 @@ static void vslide1down_##BITWIDTH(void *vd, void *v0, uint64_t s1, \
for (i = env->vstart; i < vl; i++) { \
if (!vm && !vext_elem_mask(v0, i)) { \
/* set masked-off elements to 1s */ \
- vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz); \
+ vext_set_elems_1s_le(vd, vma, i * esz, (i + 1) * esz); \
continue; \
} \
if (i == vl - 1) { \
@@ -4910,7 +4910,7 @@ static void vslide1down_##BITWIDTH(void *vd, void *v0, uint64_t s1, \
} \
env->vstart = 0; \
/* set tail elements to 1s */ \
- vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \
+ vext_set_elems_1s_le(vd, vta, vl * esz, total_elems * esz); \
}
GEN_VEXT_VSLIDE1DOWN(8, H1)
@@ -4974,7 +4974,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
for (i = env->vstart; i < vl; i++) { \
if (!vm && !vext_elem_mask(v0, i)) { \
/* set masked-off elements to 1s */ \
- vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz); \
+ vext_set_elems_1s_le(vd, vma, i * esz, (i + 1) * esz); \
continue; \
} \
index = *((TS1 *)vs1 + HS1(i)); \
@@ -4986,7 +4986,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
} \
env->vstart = 0; \
/* set tail elements to 1s */ \
- vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \
+ vext_set_elems_1s_le(vd, vta, vl * esz, total_elems * esz); \
}
/* vd[i] = (vs1[i] >= VLMAX) ? 0 : vs2[vs1[i]]; */
@@ -5017,7 +5017,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
for (i = env->vstart; i < vl; i++) { \
if (!vm && !vext_elem_mask(v0, i)) { \
/* set masked-off elements to 1s */ \
- vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz); \
+ vext_set_elems_1s_le(vd, vma, i * esz, (i + 1) * esz); \
continue; \
} \
if (index >= vlmax) { \
@@ -5028,7 +5028,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
} \
env->vstart = 0; \
/* set tail elements to 1s */ \
- vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \
+ vext_set_elems_1s_le(vd, vta, vl * esz, total_elems * esz); \
}
/* vd[i] = (x[rs1] >= VLMAX) ? 0 : vs2[rs1] */
@@ -5057,7 +5057,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
} \
env->vstart = 0; \
/* set tail elements to 1s */ \
- vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \
+ vext_set_elems_1s_le(vd, vta, vl * esz, total_elems * esz); \
}
/* Compress into vd elements of vs2 where vs1 is enabled */
@@ -5098,14 +5098,14 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2, \
for (i = env->vstart; i < vl; i++) { \
if (!vm && !vext_elem_mask(v0, i)) { \
/* set masked-off elements to 1s */ \
- vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz); \
+ vext_set_elems_1s_le(vd, vma, i * esz, (i + 1) * esz); \
continue; \
} \
*((ETYPE *)vd + HD(i)) = *((DTYPE *)vs2 + HS1(i)); \
} \
env->vstart = 0; \
/* set tail elements to 1s */ \
- vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \
+ vext_set_elems_1s_le(vd, vta, vl * esz, total_elems * esz); \
}
GEN_VEXT_INT_EXT(vzext_vf2_h, uint16_t, uint8_t, H2, H1)
diff --git a/target/riscv/vector_internals.c b/target/riscv/vector_internals.c
index 12f5964fbb..349b24f4ae 100644
--- a/target/riscv/vector_internals.c
+++ b/target/riscv/vector_internals.c
@@ -20,7 +20,7 @@
#include "vector_internals.h"
/* set agnostic elements to 1s */
-void vext_set_elems_1s(void *base, uint32_t is_agnostic, uint32_t cnt,
+void vext_set_elems_1s_le(void *base, uint32_t is_agnostic, uint32_t cnt,
uint32_t tot)
{
if (is_agnostic == 0) {
@@ -47,14 +47,14 @@ void do_vext_vv(void *vd, void *v0, void *vs1, void *vs2,
for (i = env->vstart; i < vl; i++) {
if (!vm && !vext_elem_mask(v0, i)) {
/* set masked-off elements to 1s */
- vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz);
+ vext_set_elems_1s_le(vd, vma, i * esz, (i + 1) * esz);
continue;
}
fn(vd, vs1, vs2, i);
}
env->vstart = 0;
/* set tail elements to 1s */
- vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz);
+ vext_set_elems_1s_le(vd, vta, vl * esz, total_elems * esz);
}
void do_vext_vx(void *vd, void *v0, target_long s1, void *vs2,
@@ -71,12 +71,12 @@ void do_vext_vx(void *vd, void *v0, target_long s1, void *vs2,
for (i = env->vstart; i < vl; i++) {
if (!vm && !vext_elem_mask(v0, i)) {
/* set masked-off elements to 1s */
- vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz);
+ vext_set_elems_1s_le(vd, vma, i * esz, (i + 1) * esz);
continue;
}
fn(vd, s1, vs2, i);
}
env->vstart = 0;
/* set tail elements to 1s */
- vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz);
+ vext_set_elems_1s_le(vd, vta, vl * esz, total_elems * esz);
}
diff --git a/target/riscv/vector_internals.h b/target/riscv/vector_internals.h
index 842765f6c1..fa599f60ca 100644
--- a/target/riscv/vector_internals.h
+++ b/target/riscv/vector_internals.h
@@ -114,7 +114,7 @@ static inline uint32_t vext_get_total_elems(CPURISCVState *env, uint32_t desc,
}
/* set agnostic elements to 1s */
-void vext_set_elems_1s(void *base, uint32_t is_agnostic, uint32_t cnt,
+void vext_set_elems_1s_le(void *base, uint32_t is_agnostic, uint32_t cnt,
uint32_t tot);
/* expand macro args before macro */
@@ -154,7 +154,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2, \
for (i = env->vstart; i < vl; i++) { \
if (!vm && !vext_elem_mask(v0, i)) { \
/* set masked-off elements to 1s */ \
- vext_set_elems_1s(vd, vma, i * ESZ, \
+ vext_set_elems_1s_le(vd, vma, i * ESZ, \
(i + 1) * ESZ); \
continue; \
} \
@@ -162,7 +162,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2, \
} \
env->vstart = 0; \
/* set tail elements to 1s */ \
- vext_set_elems_1s(vd, vta, vl * ESZ, \
+ vext_set_elems_1s_le(vd, vta, vl * ESZ, \
total_elems * ESZ); \
}
--
2.41.0
^ permalink raw reply related [flat|nested] 9+ messages in thread
* [PATCH 2/4] target/riscv: Add right functions to set agnostic elements
2024-03-06 9:20 [PATCH 0/4] target/riscv: Fix the element agnostic function problem Huang Tao
2024-03-06 9:20 ` [PATCH 1/4] target/riscv: Rename vext_set_elems_1s function Huang Tao
@ 2024-03-06 9:20 ` Huang Tao
2024-03-19 21:57 ` Daniel Henrique Barboza
2024-03-06 9:20 ` [PATCH 3/4] target/riscv: Replace element agnostic for vector instructions Huang Tao
2024-03-06 9:20 ` [PATCH 4/4] target/riscv: Delete the former element agnostic function Huang Tao
3 siblings, 1 reply; 9+ messages in thread
From: Huang Tao @ 2024-03-06 9:20 UTC (permalink / raw)
To: qemu-devel
Cc: qemu-riscv, zhiwei_liu, dbarboza, liwei1518, bin.meng,
alistair.francis, palmer, Huang Tao
Add vext_set_elems_1s to set agnostic elements to 1s on both big- and
little-endian hosts.
The function takes the element size 'esz' and the index 'idx' of the first
element to set; the former byte offset 'cnt' is simply idx * esz.
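For reference, the new prototype (as added to vector_internals.h below), with
the old byte offset related to the new arguments by cnt = idx * esz:

void vext_set_elems_1s(void *vd, uint32_t is_agnostic, uint32_t esz,
                       uint32_t idx, uint32_t tot);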
Signed-off-by: Huang Tao <eric.huang@linux.alibaba.com>
---
target/riscv/vector_internals.c | 53 +++++++++++++++++++++++++++++++++
target/riscv/vector_internals.h | 2 ++
2 files changed, 55 insertions(+)
diff --git a/target/riscv/vector_internals.c b/target/riscv/vector_internals.c
index 349b24f4ae..455be96996 100644
--- a/target/riscv/vector_internals.c
+++ b/target/riscv/vector_internals.c
@@ -20,6 +20,59 @@
#include "vector_internals.h"
/* set agnostic elements to 1s */
+#if HOST_BIG_ENDIAN
+void vext_set_elems_1s(void *vd, uint32_t is_agnostic, uint32_t esz,
+ uint32_t idx, uint32_t tot)
+{
+ if (is_agnostic == 0) {
+ /* policy undisturbed */
+ return;
+ }
+ void *base = NULL;
+ switch (esz) {
+ case 1:
+ base = ((int8_t *)vd + H1(idx));
+ break;
+ case 2:
+ base = ((int16_t *)vd + H2(idx));
+ break;
+ case 4:
+ base = ((int32_t *)vd + H4(idx));
+ break;
+ case 8:
+ base = ((int64_t *)vd + H8(idx));
+ break;
+ default:
+ g_assert_not_reached();
+ break;
+ }
+ /*
+ * split the elements into 2 parts
+ * part_begin: the bytes that need to be set within the first uint64_t unit
+ * part_allign: the bytes that need to be set starting from the next uint64_t
+ * unit, aligned to 8
+ */
+ uint32_t cnt = idx * esz;
+ int part_begin, part_allign;
+ part_begin = MIN(tot - cnt, 8 - (cnt % 8));
+ part_allign = ((tot - cnt - part_begin) / 8) * 8;
+
+ memset(base - part_begin + 1, -1, part_begin);
+ memset(QEMU_ALIGN_PTR_UP(base, 8), -1, part_allign);
+}
+#else
+void vext_set_elems_1s(void *vd, uint32_t is_agnostic, uint32_t esz,
+ uint32_t idx, uint32_t tot)
+{
+ if (is_agnostic == 0) {
+ /* policy undisturbed */
+ return;
+ }
+ uint32_t cnt = idx * esz;
+ memset(vd + cnt, -1, tot - cnt);
+}
+#endif
+
void vext_set_elems_1s_le(void *base, uint32_t is_agnostic, uint32_t cnt,
uint32_t tot)
{
diff --git a/target/riscv/vector_internals.h b/target/riscv/vector_internals.h
index fa599f60ca..c96e52f926 100644
--- a/target/riscv/vector_internals.h
+++ b/target/riscv/vector_internals.h
@@ -114,6 +114,8 @@ static inline uint32_t vext_get_total_elems(CPURISCVState *env, uint32_t desc,
}
/* set agnostic elements to 1s */
+void vext_set_elems_1s(void *vd, uint32_t is_agnostic, uint32_t esz,
+ uint32_t idx, uint32_t tot);
void vext_set_elems_1s_le(void *base, uint32_t is_agnostic, uint32_t cnt,
uint32_t tot);
--
2.41.0
^ permalink raw reply related [flat|nested] 9+ messages in thread
* [PATCH 3/4] target/riscv: Replace element agnostic for vector instructions
2024-03-06 9:20 [PATCH 0/4] target/riscv: Fix the element agnostic function problem Huang Tao
2024-03-06 9:20 ` [PATCH 1/4] target/riscv: Rename vext_set_elems_1s function Huang Tao
2024-03-06 9:20 ` [PATCH 2/4] target/riscv: Add right functions to set agnostic elements Huang Tao
@ 2024-03-06 9:20 ` Huang Tao
2024-03-06 9:20 ` [PATCH 4/4] target/riscv: Delete the former element agnostic function Huang Tao
3 siblings, 0 replies; 9+ messages in thread
From: Huang Tao @ 2024-03-06 9:20 UTC (permalink / raw)
To: qemu-devel
Cc: qemu-riscv, zhiwei_liu, dbarboza, liwei1518, bin.meng,
alistair.francis, palmer, Huang Tao
Replace vext_set_elems_1s_le with vext_set_elems_1s in the RVV and vcrypto
helpers.
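The conversion follows a single pattern throughout, e.g. for the tail and
per-element masked-off cases:

/* tail elements */
-    vext_set_elems_1s_le(vd, vta, vl * esz, total_elems * esz);
+    vext_set_elems_1s(vd, vta, esz, vl, total_elems * esz);

/* masked-off elements */
-    vext_set_elems_1s_le(vd, vma, i * esz, (i + 1) * esz);
+    vext_set_elems_1s(vd, vma, esz, i, (i + 1) * esz);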
Signed-off-by: Huang Tao <eric.huang@linux.alibaba.com>
---
target/riscv/vcrypto_helper.c | 32 ++++++------
target/riscv/vector_helper.c | 92 ++++++++++++++++-----------------
target/riscv/vector_internals.c | 8 +--
target/riscv/vector_internals.h | 4 +-
4 files changed, 68 insertions(+), 68 deletions(-)
diff --git a/target/riscv/vcrypto_helper.c b/target/riscv/vcrypto_helper.c
index f818749a63..d6c52c1fd0 100644
--- a/target/riscv/vcrypto_helper.c
+++ b/target/riscv/vcrypto_helper.c
@@ -235,7 +235,7 @@ static inline void xor_round_key(AESState *round_state, AESState *round_key)
} \
env->vstart = 0; \
/* set tail elements to 1s */ \
- vext_set_elems_1s_le(vd, vta, vl * 4, total_elems * 4); \
+ vext_set_elems_1s(vd, vta, 4, vl, total_elems * 4); \
}
#define GEN_ZVKNED_HELPER_VS(NAME, ...) \
@@ -259,7 +259,7 @@ static inline void xor_round_key(AESState *round_state, AESState *round_key)
} \
env->vstart = 0; \
/* set tail elements to 1s */ \
- vext_set_elems_1s_le(vd, vta, vl * 4, total_elems * 4); \
+ vext_set_elems_1s(vd, vta, 4, vl, total_elems * 4); \
}
GEN_ZVKNED_HELPER_VV(vaesef_vv, aesenc_SB_SR_AK(&round_state,
@@ -339,7 +339,7 @@ void HELPER(vaeskf1_vi)(void *vd_vptr, void *vs2_vptr, uint32_t uimm,
}
env->vstart = 0;
/* set tail elements to 1s */
- vext_set_elems_1s_le(vd, vta, vl * 4, total_elems * 4);
+ vext_set_elems_1s(vd, vta, 4, vl, total_elems * 4);
}
void HELPER(vaeskf2_vi)(void *vd_vptr, void *vs2_vptr, uint32_t uimm,
@@ -396,7 +396,7 @@ void HELPER(vaeskf2_vi)(void *vd_vptr, void *vs2_vptr, uint32_t uimm,
}
env->vstart = 0;
/* set tail elements to 1s */
- vext_set_elems_1s_le(vd, vta, vl * 4, total_elems * 4);
+ vext_set_elems_1s(vd, vta, 4, vl, total_elems * 4);
}
static inline uint32_t sig0_sha256(uint32_t x)
@@ -469,7 +469,7 @@ void HELPER(vsha2ms_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env,
}
/* set tail elements to 1s */
total_elems = vext_get_total_elems(env, desc, esz);
- vext_set_elems_1s_le(vd, vta, env->vl * esz, total_elems * esz);
+ vext_set_elems_1s(vd, vta, esz, env->vl, total_elems * esz);
env->vstart = 0;
}
@@ -579,7 +579,7 @@ void HELPER(vsha2ch32_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env,
/* set tail elements to 1s */
total_elems = vext_get_total_elems(env, desc, esz);
- vext_set_elems_1s_le(vd, vta, env->vl * esz, total_elems * esz);
+ vext_set_elems_1s(vd, vta, esz, env->vl, total_elems * esz);
env->vstart = 0;
}
@@ -597,7 +597,7 @@ void HELPER(vsha2ch64_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env,
/* set tail elements to 1s */
total_elems = vext_get_total_elems(env, desc, esz);
- vext_set_elems_1s_le(vd, vta, env->vl * esz, total_elems * esz);
+ vext_set_elems_1s(vd, vta, esz, env->vl, total_elems * esz);
env->vstart = 0;
}
@@ -615,7 +615,7 @@ void HELPER(vsha2cl32_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env,
/* set tail elements to 1s */
total_elems = vext_get_total_elems(env, desc, esz);
- vext_set_elems_1s_le(vd, vta, env->vl * esz, total_elems * esz);
+ vext_set_elems_1s(vd, vta, esz, env->vl, total_elems * esz);
env->vstart = 0;
}
@@ -633,7 +633,7 @@ void HELPER(vsha2cl64_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env,
/* set tail elements to 1s */
total_elems = vext_get_total_elems(env, desc, esz);
- vext_set_elems_1s_le(vd, vta, env->vl * esz, total_elems * esz);
+ vext_set_elems_1s(vd, vta, esz, env->vl, total_elems * esz);
env->vstart = 0;
}
@@ -672,7 +672,7 @@ void HELPER(vsm3me_vv)(void *vd_vptr, void *vs1_vptr, void *vs2_vptr,
vd[(i * 8) + j] = bswap32(w[H4(j + 16)]);
}
}
- vext_set_elems_1s_le(vd_vptr, vta, env->vl * esz, total_elems * esz);
+ vext_set_elems_1s(vd_vptr, vta, esz, env->vl, total_elems * esz);
env->vstart = 0;
}
@@ -767,7 +767,7 @@ void HELPER(vsm3c_vi)(void *vd_vptr, void *vs2_vptr, uint32_t uimm,
vd[i * 8 + k] = bswap32(v1[H4(k)]);
}
}
- vext_set_elems_1s_le(vd_vptr, vta, env->vl * esz, total_elems * esz);
+ vext_set_elems_1s(vd_vptr, vta, esz, env->vl, total_elems * esz);
env->vstart = 0;
}
@@ -805,7 +805,7 @@ void HELPER(vghsh_vv)(void *vd_vptr, void *vs1_vptr, void *vs2_vptr,
vd[i * 2 + 1] = brev8(Z[1]);
}
/* set tail elements to 1s */
- vext_set_elems_1s_le(vd, vta, env->vl * 4, total_elems * 4);
+ vext_set_elems_1s(vd, vta, 4, env->vl, total_elems * 4);
env->vstart = 0;
}
@@ -839,7 +839,7 @@ void HELPER(vgmul_vv)(void *vd_vptr, void *vs2_vptr, CPURISCVState *env,
vd[i * 2 + 1] = brev8(Z[1]);
}
/* set tail elements to 1s */
- vext_set_elems_1s_le(vd, vta, env->vl * 4, total_elems * 4);
+ vext_set_elems_1s(vd, vta, 4, env->vl, total_elems * 4);
env->vstart = 0;
}
@@ -883,7 +883,7 @@ void HELPER(vsm4k_vi)(void *vd, void *vs2, uint32_t uimm5, CPURISCVState *env,
env->vstart = 0;
/* set tail elements to 1s */
- vext_set_elems_1s_le(vd, vext_vta(desc), env->vl * esz, total_elems * esz);
+ vext_set_elems_1s(vd, vext_vta(desc), esz, env->vl, total_elems * esz);
}
static void do_sm4_round(uint32_t *rk, uint32_t *buf)
@@ -932,7 +932,7 @@ void HELPER(vsm4r_vv)(void *vd, void *vs2, CPURISCVState *env, uint32_t desc)
env->vstart = 0;
/* set tail elements to 1s */
- vext_set_elems_1s_le(vd, vext_vta(desc), env->vl * esz, total_elems * esz);
+ vext_set_elems_1s(vd, vext_vta(desc), esz, env->vl, total_elems * esz);
}
void HELPER(vsm4r_vs)(void *vd, void *vs2, CPURISCVState *env, uint32_t desc)
@@ -966,5 +966,5 @@ void HELPER(vsm4r_vs)(void *vd, void *vs2, CPURISCVState *env, uint32_t desc)
env->vstart = 0;
/* set tail elements to 1s */
- vext_set_elems_1s_le(vd, vext_vta(desc), env->vl * esz, total_elems * esz);
+ vext_set_elems_1s(vd, vext_vta(desc), esz, env->vl, total_elems * esz);
}
diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
index 6ed73ed70a..4816439815 100644
--- a/target/riscv/vector_helper.c
+++ b/target/riscv/vector_helper.c
@@ -187,7 +187,7 @@ static void vext_set_tail_elems_1s(target_ulong vl, void *vd,
}
for (k = 0; k < nf; ++k) {
- vext_set_elems_1s_le(vd, vta, (k * max_elems + vl) * esz,
+ vext_set_elems_1s(vd, vta, esz, k * max_elems + vl,
(k * max_elems + max_elems) * esz);
}
}
@@ -213,7 +213,7 @@ vext_ldst_stride(void *vd, void *v0, target_ulong base,
while (k < nf) {
if (!vm && !vext_elem_mask(v0, i)) {
/* set masked-off elements to 1s */
- vext_set_elems_1s_le(vd, vma, (i + k * max_elems) * esz,
+ vext_set_elems_1s(vd, vma, esz, i + k * max_elems,
(i + k * max_elems + 1) * esz);
k++;
continue;
@@ -393,7 +393,7 @@ vext_ldst_index(void *vd, void *v0, target_ulong base,
while (k < nf) {
if (!vm && !vext_elem_mask(v0, i)) {
/* set masked-off elements to 1s */
- vext_set_elems_1s_le(vd, vma, (i + k * max_elems) * esz,
+ vext_set_elems_1s(vd, vma, esz, i + k * max_elems,
(i + k * max_elems + 1) * esz);
k++;
continue;
@@ -523,7 +523,7 @@ ProbeSuccess:
while (k < nf) {
if (!vm && !vext_elem_mask(v0, i)) {
/* set masked-off elements to 1s */
- vext_set_elems_1s_le(vd, vma, (i + k * max_elems) * esz,
+ vext_set_elems_1s(vd, vma, esz, i + k * max_elems,
(i + k * max_elems + 1) * esz);
k++;
continue;
@@ -887,7 +887,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
} \
env->vstart = 0; \
/* set tail elements to 1s */ \
- vext_set_elems_1s_le(vd, vta, vl * esz, total_elems * esz); \
+ vext_set_elems_1s(vd, vta, esz, vl, total_elems * esz); \
}
GEN_VEXT_VADC_VVM(vadc_vvm_b, uint8_t, H1, DO_VADC)
@@ -918,7 +918,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
} \
env->vstart = 0; \
/* set tail elements to 1s */ \
- vext_set_elems_1s_le(vd, vta, vl * esz, total_elems * esz); \
+ vext_set_elems_1s(vd, vta, esz, vl, total_elems * esz); \
}
GEN_VEXT_VADC_VXM(vadc_vxm_b, uint8_t, H1, DO_VADC)
@@ -1082,7 +1082,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
for (i = env->vstart; i < vl; i++) { \
if (!vm && !vext_elem_mask(v0, i)) { \
/* set masked-off elements to 1s */ \
- vext_set_elems_1s_le(vd, vma, i * esz, (i + 1) * esz); \
+ vext_set_elems_1s(vd, vma, esz, i, (i + 1) * esz); \
continue; \
} \
TS1 s1 = *((TS1 *)vs1 + HS1(i)); \
@@ -1091,7 +1091,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
} \
env->vstart = 0; \
/* set tail elements to 1s */ \
- vext_set_elems_1s_le(vd, vta, vl * esz, total_elems * esz); \
+ vext_set_elems_1s(vd, vta, esz, vl, total_elems * esz); \
}
GEN_VEXT_SHIFT_VV(vsll_vv_b, uint8_t, uint8_t, H1, H1, DO_SLL, 0x7)
@@ -1129,7 +1129,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \
for (i = env->vstart; i < vl; i++) { \
if (!vm && !vext_elem_mask(v0, i)) { \
/* set masked-off elements to 1s */ \
- vext_set_elems_1s_le(vd, vma, i * esz, \
+ vext_set_elems_1s(vd, vma, esz, i, \
(i + 1) * esz); \
continue; \
} \
@@ -1138,7 +1138,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \
} \
env->vstart = 0; \
/* set tail elements to 1s */ \
- vext_set_elems_1s_le(vd, vta, vl * esz, total_elems * esz);\
+ vext_set_elems_1s(vd, vta, esz, vl, total_elems * esz); \
}
GEN_VEXT_SHIFT_VX(vsll_vx_b, uint8_t, int8_t, H1, H1, DO_SLL, 0x7)
@@ -1806,7 +1806,7 @@ void HELPER(NAME)(void *vd, void *vs1, CPURISCVState *env, \
} \
env->vstart = 0; \
/* set tail elements to 1s */ \
- vext_set_elems_1s_le(vd, vta, vl * esz, total_elems * esz); \
+ vext_set_elems_1s(vd, vta, esz, vl, total_elems * esz); \
}
GEN_VEXT_VMV_VV(vmv_v_v_b, int8_t, H1)
@@ -1829,7 +1829,7 @@ void HELPER(NAME)(void *vd, uint64_t s1, CPURISCVState *env, \
} \
env->vstart = 0; \
/* set tail elements to 1s */ \
- vext_set_elems_1s_le(vd, vta, vl * esz, total_elems * esz); \
+ vext_set_elems_1s(vd, vta, esz, vl, total_elems * esz); \
}
GEN_VEXT_VMV_VX(vmv_v_x_b, int8_t, H1)
@@ -1853,7 +1853,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
} \
env->vstart = 0; \
/* set tail elements to 1s */ \
- vext_set_elems_1s_le(vd, vta, vl * esz, total_elems * esz); \
+ vext_set_elems_1s(vd, vta, esz, vl, total_elems * esz); \
}
GEN_VEXT_VMERGE_VV(vmerge_vvm_b, int8_t, H1)
@@ -1879,7 +1879,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \
} \
env->vstart = 0; \
/* set tail elements to 1s */ \
- vext_set_elems_1s_le(vd, vta, vl * esz, total_elems * esz); \
+ vext_set_elems_1s(vd, vta, esz, vl, total_elems * esz); \
}
GEN_VEXT_VMERGE_VX(vmerge_vxm_b, int8_t, H1)
@@ -1919,7 +1919,7 @@ vext_vv_rm_1(void *vd, void *v0, void *vs1, void *vs2,
for (uint32_t i = env->vstart; i < vl; i++) {
if (!vm && !vext_elem_mask(v0, i)) {
/* set masked-off elements to 1s */
- vext_set_elems_1s_le(vd, vma, i * esz, (i + 1) * esz);
+ vext_set_elems_1s(vd, vma, esz, i, (i + 1) * esz);
continue;
}
fn(vd, vs1, vs2, i, env, vxrm);
@@ -1958,7 +1958,7 @@ vext_vv_rm_2(void *vd, void *v0, void *vs1, void *vs2,
break;
}
/* set tail elements to 1s */
- vext_set_elems_1s_le(vd, vta, vl * esz, total_elems * esz);
+ vext_set_elems_1s(vd, vta, esz, vl, total_elems * esz);
}
/* generate helpers for fixed point instructions with OPIVV format */
@@ -2044,7 +2044,7 @@ vext_vx_rm_1(void *vd, void *v0, target_long s1, void *vs2,
for (uint32_t i = env->vstart; i < vl; i++) {
if (!vm && !vext_elem_mask(v0, i)) {
/* set masked-off elements to 1s */
- vext_set_elems_1s_le(vd, vma, i * esz, (i + 1) * esz);
+ vext_set_elems_1s(vd, vma, esz, i, (i + 1) * esz);
continue;
}
fn(vd, s1, vs2, i, env, vxrm);
@@ -2083,7 +2083,7 @@ vext_vx_rm_2(void *vd, void *v0, target_long s1, void *vs2,
break;
}
/* set tail elements to 1s */
- vext_set_elems_1s_le(vd, vta, vl * esz, total_elems * esz);
+ vext_set_elems_1s(vd, vta, esz, vl, total_elems * esz);
}
/* generate helpers for fixed point instructions with OPIVX format */
@@ -2841,7 +2841,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
for (i = env->vstart; i < vl; i++) { \
if (!vm && !vext_elem_mask(v0, i)) { \
/* set masked-off elements to 1s */ \
- vext_set_elems_1s_le(vd, vma, i * ESZ, \
+ vext_set_elems_1s(vd, vma, ESZ, i, \
(i + 1) * ESZ); \
continue; \
} \
@@ -2849,7 +2849,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
} \
env->vstart = 0; \
/* set tail elements to 1s */ \
- vext_set_elems_1s_le(vd, vta, vl * ESZ, \
+ vext_set_elems_1s(vd, vta, ESZ, vl, \
total_elems * ESZ); \
}
@@ -2884,7 +2884,7 @@ void HELPER(NAME)(void *vd, void *v0, uint64_t s1, \
for (i = env->vstart; i < vl; i++) { \
if (!vm && !vext_elem_mask(v0, i)) { \
/* set masked-off elements to 1s */ \
- vext_set_elems_1s_le(vd, vma, i * ESZ, \
+ vext_set_elems_1s(vd, vma, ESZ, i, \
(i + 1) * ESZ); \
continue; \
} \
@@ -2892,7 +2892,7 @@ void HELPER(NAME)(void *vd, void *v0, uint64_t s1, \
} \
env->vstart = 0; \
/* set tail elements to 1s */ \
- vext_set_elems_1s_le(vd, vta, vl * ESZ, \
+ vext_set_elems_1s(vd, vta, ESZ, vl, \
total_elems * ESZ); \
}
@@ -3473,14 +3473,14 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2, \
for (i = env->vstart; i < vl; i++) { \
if (!vm && !vext_elem_mask(v0, i)) { \
/* set masked-off elements to 1s */ \
- vext_set_elems_1s_le(vd, vma, i * ESZ, \
+ vext_set_elems_1s(vd, vma, ESZ, i, \
(i + 1) * ESZ); \
continue; \
} \
do_##NAME(vd, vs2, i, env); \
} \
env->vstart = 0; \
- vext_set_elems_1s_le(vd, vta, vl * ESZ, \
+ vext_set_elems_1s(vd, vta, ESZ, vl, \
total_elems * ESZ); \
}
@@ -4228,7 +4228,7 @@ void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2, \
} \
env->vstart = 0; \
/* set tail elements to 1s */ \
- vext_set_elems_1s_le(vd, vta, vl * esz, total_elems * esz); \
+ vext_set_elems_1s(vd, vta, esz, vl, total_elems * esz); \
}
GEN_VFMERGE_VF(vfmerge_vfm_h, int16_t, H2)
@@ -4397,7 +4397,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
*((TD *)vd + HD(0)) = s1; \
env->vstart = 0; \
/* set tail elements to 1s */ \
- vext_set_elems_1s_le(vd, vta, esz, vlenb); \
+ vext_set_elems_1s(vd, vta, esz, 1, vlenb); \
}
/* vd[0] = sum(vs1[0], vs2[*]) */
@@ -4483,7 +4483,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
*((TD *)vd + HD(0)) = s1; \
env->vstart = 0; \
/* set tail elements to 1s */ \
- vext_set_elems_1s_le(vd, vta, esz, vlenb); \
+ vext_set_elems_1s(vd, vta, esz, 1, vlenb); \
}
/* Unordered sum */
@@ -4708,7 +4708,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2, CPURISCVState *env, \
for (i = env->vstart; i < vl; i++) { \
if (!vm && !vext_elem_mask(v0, i)) { \
/* set masked-off elements to 1s */ \
- vext_set_elems_1s_le(vd, vma, i * esz, (i + 1) * esz); \
+ vext_set_elems_1s(vd, vma, esz, i, (i + 1) * esz); \
continue; \
} \
*((ETYPE *)vd + H(i)) = sum; \
@@ -4718,7 +4718,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2, CPURISCVState *env, \
} \
env->vstart = 0; \
/* set tail elements to 1s */ \
- vext_set_elems_1s_le(vd, vta, vl * esz, total_elems * esz); \
+ vext_set_elems_1s(vd, vta, esz, vl, total_elems * esz); \
}
GEN_VEXT_VIOTA_M(viota_m_b, uint8_t, H1)
@@ -4741,14 +4741,14 @@ void HELPER(NAME)(void *vd, void *v0, CPURISCVState *env, uint32_t desc) \
for (i = env->vstart; i < vl; i++) { \
if (!vm && !vext_elem_mask(v0, i)) { \
/* set masked-off elements to 1s */ \
- vext_set_elems_1s_le(vd, vma, i * esz, (i + 1) * esz); \
+ vext_set_elems_1s(vd, vma, esz, i, (i + 1) * esz); \
continue; \
} \
*((ETYPE *)vd + H(i)) = i; \
} \
env->vstart = 0; \
/* set tail elements to 1s */ \
- vext_set_elems_1s_le(vd, vta, vl * esz, total_elems * esz); \
+ vext_set_elems_1s(vd, vta, esz, vl, total_elems * esz); \
}
GEN_VEXT_VID_V(vid_v_b, uint8_t, H1)
@@ -4777,13 +4777,13 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
for (i = i_min; i < vl; i++) { \
if (!vm && !vext_elem_mask(v0, i)) { \
/* set masked-off elements to 1s */ \
- vext_set_elems_1s_le(vd, vma, i * esz, (i + 1) * esz); \
+ vext_set_elems_1s(vd, vma, esz, i, (i + 1) * esz); \
continue; \
} \
*((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i - offset)); \
} \
/* set tail elements to 1s */ \
- vext_set_elems_1s_le(vd, vta, vl * esz, total_elems * esz); \
+ vext_set_elems_1s(vd, vta, esz, vl, total_elems * esz); \
}
/* vslideup.vx vd, vs2, rs1, vm # vd[i+rs1] = vs2[i] */
@@ -4810,7 +4810,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
for (i = env->vstart; i < i_max; ++i) { \
if (!vm && !vext_elem_mask(v0, i)) { \
/* set masked-off elements to 1s */ \
- vext_set_elems_1s_le(vd, vma, i * esz, (i + 1) * esz); \
+ vext_set_elems_1s(vd, vma, esz, i, (i + 1) * esz); \
continue; \
} \
*((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i + s1)); \
@@ -4824,7 +4824,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
\
env->vstart = 0; \
/* set tail elements to 1s */ \
- vext_set_elems_1s_le(vd, vta, vl * esz, total_elems * esz); \
+ vext_set_elems_1s(vd, vta, esz, vl, total_elems * esz); \
}
/* vslidedown.vx vd, vs2, rs1, vm # vd[i] = vs2[i+rs1] */
@@ -4850,7 +4850,7 @@ static void vslide1up_##BITWIDTH(void *vd, void *v0, uint64_t s1, \
for (i = env->vstart; i < vl; i++) { \
if (!vm && !vext_elem_mask(v0, i)) { \
/* set masked-off elements to 1s */ \
- vext_set_elems_1s_le(vd, vma, i * esz, (i + 1) * esz); \
+ vext_set_elems_1s(vd, vma, esz, i, (i + 1) * esz); \
continue; \
} \
if (i == 0) { \
@@ -4861,7 +4861,7 @@ static void vslide1up_##BITWIDTH(void *vd, void *v0, uint64_t s1, \
} \
env->vstart = 0; \
/* set tail elements to 1s */ \
- vext_set_elems_1s_le(vd, vta, vl * esz, total_elems * esz); \
+ vext_set_elems_1s(vd, vta, esz, vl, total_elems * esz); \
}
GEN_VEXT_VSLIE1UP(8, H1)
@@ -4899,7 +4899,7 @@ static void vslide1down_##BITWIDTH(void *vd, void *v0, uint64_t s1, \
for (i = env->vstart; i < vl; i++) { \
if (!vm && !vext_elem_mask(v0, i)) { \
/* set masked-off elements to 1s */ \
- vext_set_elems_1s_le(vd, vma, i * esz, (i + 1) * esz); \
+ vext_set_elems_1s(vd, vma, esz, i, (i + 1) * esz); \
continue; \
} \
if (i == vl - 1) { \
@@ -4910,7 +4910,7 @@ static void vslide1down_##BITWIDTH(void *vd, void *v0, uint64_t s1, \
} \
env->vstart = 0; \
/* set tail elements to 1s */ \
- vext_set_elems_1s_le(vd, vta, vl * esz, total_elems * esz); \
+ vext_set_elems_1s(vd, vta, esz, vl, total_elems * esz); \
}
GEN_VEXT_VSLIDE1DOWN(8, H1)
@@ -4974,7 +4974,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
for (i = env->vstart; i < vl; i++) { \
if (!vm && !vext_elem_mask(v0, i)) { \
/* set masked-off elements to 1s */ \
- vext_set_elems_1s_le(vd, vma, i * esz, (i + 1) * esz); \
+ vext_set_elems_1s(vd, vma, esz, i, (i + 1) * esz); \
continue; \
} \
index = *((TS1 *)vs1 + HS1(i)); \
@@ -4986,7 +4986,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
} \
env->vstart = 0; \
/* set tail elements to 1s */ \
- vext_set_elems_1s_le(vd, vta, vl * esz, total_elems * esz); \
+ vext_set_elems_1s(vd, vta, esz, vl, total_elems * esz); \
}
/* vd[i] = (vs1[i] >= VLMAX) ? 0 : vs2[vs1[i]]; */
@@ -5017,7 +5017,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
for (i = env->vstart; i < vl; i++) { \
if (!vm && !vext_elem_mask(v0, i)) { \
/* set masked-off elements to 1s */ \
- vext_set_elems_1s_le(vd, vma, i * esz, (i + 1) * esz); \
+ vext_set_elems_1s(vd, vma, esz, i, (i + 1) * esz); \
continue; \
} \
if (index >= vlmax) { \
@@ -5028,7 +5028,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
} \
env->vstart = 0; \
/* set tail elements to 1s */ \
- vext_set_elems_1s_le(vd, vta, vl * esz, total_elems * esz); \
+ vext_set_elems_1s(vd, vta, esz, vl, total_elems * esz); \
}
/* vd[i] = (x[rs1] >= VLMAX) ? 0 : vs2[rs1] */
@@ -5057,7 +5057,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
} \
env->vstart = 0; \
/* set tail elements to 1s */ \
- vext_set_elems_1s_le(vd, vta, vl * esz, total_elems * esz); \
+ vext_set_elems_1s(vd, vta, esz, vl, total_elems * esz); \
}
/* Compress into vd elements of vs2 where vs1 is enabled */
@@ -5098,14 +5098,14 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2, \
for (i = env->vstart; i < vl; i++) { \
if (!vm && !vext_elem_mask(v0, i)) { \
/* set masked-off elements to 1s */ \
- vext_set_elems_1s_le(vd, vma, i * esz, (i + 1) * esz); \
+ vext_set_elems_1s(vd, vma, esz, i, (i + 1) * esz); \
continue; \
} \
*((ETYPE *)vd + HD(i)) = *((DTYPE *)vs2 + HS1(i)); \
} \
env->vstart = 0; \
/* set tail elements to 1s */ \
- vext_set_elems_1s_le(vd, vta, vl * esz, total_elems * esz); \
+ vext_set_elems_1s(vd, vta, esz, vl, total_elems * esz); \
}
GEN_VEXT_INT_EXT(vzext_vf2_h, uint16_t, uint8_t, H2, H1)
diff --git a/target/riscv/vector_internals.c b/target/riscv/vector_internals.c
index 455be96996..0166e81e02 100644
--- a/target/riscv/vector_internals.c
+++ b/target/riscv/vector_internals.c
@@ -100,14 +100,14 @@ void do_vext_vv(void *vd, void *v0, void *vs1, void *vs2,
for (i = env->vstart; i < vl; i++) {
if (!vm && !vext_elem_mask(v0, i)) {
/* set masked-off elements to 1s */
- vext_set_elems_1s_le(vd, vma, i * esz, (i + 1) * esz);
+ vext_set_elems_1s(vd, vma, esz, i, (i + 1) * esz);
continue;
}
fn(vd, vs1, vs2, i);
}
env->vstart = 0;
/* set tail elements to 1s */
- vext_set_elems_1s_le(vd, vta, vl * esz, total_elems * esz);
+ vext_set_elems_1s(vd, vta, esz, vl, total_elems * esz);
}
void do_vext_vx(void *vd, void *v0, target_long s1, void *vs2,
@@ -124,12 +124,12 @@ void do_vext_vx(void *vd, void *v0, target_long s1, void *vs2,
for (i = env->vstart; i < vl; i++) {
if (!vm && !vext_elem_mask(v0, i)) {
/* set masked-off elements to 1s */
- vext_set_elems_1s_le(vd, vma, i * esz, (i + 1) * esz);
+ vext_set_elems_1s(vd, vma, esz, i, (i + 1) * esz);
continue;
}
fn(vd, s1, vs2, i);
}
env->vstart = 0;
/* set tail elements to 1s */
- vext_set_elems_1s_le(vd, vta, vl * esz, total_elems * esz);
+ vext_set_elems_1s(vd, vta, esz, vl, total_elems * esz);
}
diff --git a/target/riscv/vector_internals.h b/target/riscv/vector_internals.h
index c96e52f926..de7d2681e6 100644
--- a/target/riscv/vector_internals.h
+++ b/target/riscv/vector_internals.h
@@ -156,7 +156,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2, \
for (i = env->vstart; i < vl; i++) { \
if (!vm && !vext_elem_mask(v0, i)) { \
/* set masked-off elements to 1s */ \
- vext_set_elems_1s_le(vd, vma, i * ESZ, \
+ vext_set_elems_1s(vd, vma, ESZ, i, \
(i + 1) * ESZ); \
continue; \
} \
@@ -164,7 +164,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2, \
} \
env->vstart = 0; \
/* set tail elements to 1s */ \
- vext_set_elems_1s_le(vd, vta, vl * ESZ, \
+ vext_set_elems_1s(vd, vta, ESZ, vl, \
total_elems * ESZ); \
}
--
2.41.0
^ permalink raw reply related [flat|nested] 9+ messages in thread
* [PATCH 4/4] target/riscv: Delete the former element agnostic function
2024-03-06 9:20 [PATCH 0/4] target/riscv: Fix the element agnostic function problem Huang Tao
` (2 preceding siblings ...)
2024-03-06 9:20 ` [PATCH 3/4] target/riscv: Replace element agnostic for vector instructions Huang Tao
@ 2024-03-06 9:20 ` Huang Tao
3 siblings, 0 replies; 9+ messages in thread
From: Huang Tao @ 2024-03-06 9:20 UTC (permalink / raw)
To: qemu-devel
Cc: qemu-riscv, zhiwei_liu, dbarboza, liwei1518, bin.meng,
alistair.francis, palmer, Huang Tao
Delete vext_set_elems_1s_le, which is now unused: all call sites have been
converted to vext_set_elems_1s.
Signed-off-by: Huang Tao <eric.huang@linux.alibaba.com>
---
target/riscv/vector_internals.c | 13 -------------
target/riscv/vector_internals.h | 2 --
2 files changed, 15 deletions(-)
diff --git a/target/riscv/vector_internals.c b/target/riscv/vector_internals.c
index 0166e81e02..4f24bd8516 100644
--- a/target/riscv/vector_internals.c
+++ b/target/riscv/vector_internals.c
@@ -73,19 +73,6 @@ void vext_set_elems_1s(void *vd, uint32_t is_agnostic, uint32_t esz,
}
#endif
-void vext_set_elems_1s_le(void *base, uint32_t is_agnostic, uint32_t cnt,
- uint32_t tot)
-{
- if (is_agnostic == 0) {
- /* policy undisturbed */
- return;
- }
- if (tot - cnt == 0) {
- return ;
- }
- memset(base + cnt, -1, tot - cnt);
-}
-
void do_vext_vv(void *vd, void *v0, void *vs1, void *vs2,
CPURISCVState *env, uint32_t desc,
opivv2_fn *fn, uint32_t esz)
diff --git a/target/riscv/vector_internals.h b/target/riscv/vector_internals.h
index de7d2681e6..45168d4cfc 100644
--- a/target/riscv/vector_internals.h
+++ b/target/riscv/vector_internals.h
@@ -116,8 +116,6 @@ static inline uint32_t vext_get_total_elems(CPURISCVState *env, uint32_t desc,
/* set agnostic elements to 1s */
void vext_set_elems_1s(void *vd, uint32_t is_agnostic, uint32_t esz,
uint32_t idx, uint32_t tot);
-void vext_set_elems_1s_le(void *base, uint32_t is_agnostic, uint32_t cnt,
- uint32_t tot);
/* expand macro args before macro */
#define RVVCALL(macro, ...) macro(__VA_ARGS__)
--
2.41.0
^ permalink raw reply related [flat|nested] 9+ messages in thread
* Re: [PATCH 1/4] target/riscv: Rename vext_set_elems_1s function
2024-03-06 9:20 ` [PATCH 1/4] target/riscv: Rename vext_set_elems_1s function Huang Tao
@ 2024-03-19 21:37 ` Daniel Henrique Barboza
0 siblings, 0 replies; 9+ messages in thread
From: Daniel Henrique Barboza @ 2024-03-19 21:37 UTC (permalink / raw)
To: Huang Tao, qemu-devel
Cc: qemu-riscv, zhiwei_liu, liwei1518, bin.meng, alistair.francis,
palmer
On 3/6/24 06:20, Huang Tao wrote:
> In RVV and vcrypto instructions, the masked-off and tail elements are set to 1s
> with the vext_set_elems_1s function when the vma/vta bit is set. This is the
> element agnostic policy.
>
> However, this function cannot handle big-endian hosts. Rename it, appending
> '_le' to its name, to indicate that it is only correct for little-endian
> hosts.
>
> Signed-off-by: Huang Tao <eric.huang@linux.alibaba.com>
> ---
Given that this is a temporary change (vext_set_elems_1s_le is removed later) it's
ok not to bother with the macro indentations.
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
> target/riscv/vcrypto_helper.c | 32 ++++++------
> target/riscv/vector_helper.c | 92 ++++++++++++++++-----------------
> target/riscv/vector_internals.c | 10 ++--
> target/riscv/vector_internals.h | 6 +--
> 4 files changed, 70 insertions(+), 70 deletions(-)
>
> diff --git a/target/riscv/vcrypto_helper.c b/target/riscv/vcrypto_helper.c
> index e2d719b13b..f818749a63 100644
> --- a/target/riscv/vcrypto_helper.c
> +++ b/target/riscv/vcrypto_helper.c
> @@ -235,7 +235,7 @@ static inline void xor_round_key(AESState *round_state, AESState *round_key)
> } \
> env->vstart = 0; \
> /* set tail elements to 1s */ \
> - vext_set_elems_1s(vd, vta, vl * 4, total_elems * 4); \
> + vext_set_elems_1s_le(vd, vta, vl * 4, total_elems * 4); \
> }
>
> #define GEN_ZVKNED_HELPER_VS(NAME, ...) \
> @@ -259,7 +259,7 @@ static inline void xor_round_key(AESState *round_state, AESState *round_key)
> } \
> env->vstart = 0; \
> /* set tail elements to 1s */ \
> - vext_set_elems_1s(vd, vta, vl * 4, total_elems * 4); \
> + vext_set_elems_1s_le(vd, vta, vl * 4, total_elems * 4); \
> }
>
> GEN_ZVKNED_HELPER_VV(vaesef_vv, aesenc_SB_SR_AK(&round_state,
> @@ -339,7 +339,7 @@ void HELPER(vaeskf1_vi)(void *vd_vptr, void *vs2_vptr, uint32_t uimm,
> }
> env->vstart = 0;
> /* set tail elements to 1s */
> - vext_set_elems_1s(vd, vta, vl * 4, total_elems * 4);
> + vext_set_elems_1s_le(vd, vta, vl * 4, total_elems * 4);
> }
>
> void HELPER(vaeskf2_vi)(void *vd_vptr, void *vs2_vptr, uint32_t uimm,
> @@ -396,7 +396,7 @@ void HELPER(vaeskf2_vi)(void *vd_vptr, void *vs2_vptr, uint32_t uimm,
> }
> env->vstart = 0;
> /* set tail elements to 1s */
> - vext_set_elems_1s(vd, vta, vl * 4, total_elems * 4);
> + vext_set_elems_1s_le(vd, vta, vl * 4, total_elems * 4);
> }
>
> static inline uint32_t sig0_sha256(uint32_t x)
> @@ -469,7 +469,7 @@ void HELPER(vsha2ms_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env,
> }
> /* set tail elements to 1s */
> total_elems = vext_get_total_elems(env, desc, esz);
> - vext_set_elems_1s(vd, vta, env->vl * esz, total_elems * esz);
> + vext_set_elems_1s_le(vd, vta, env->vl * esz, total_elems * esz);
> env->vstart = 0;
> }
>
> @@ -579,7 +579,7 @@ void HELPER(vsha2ch32_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env,
>
> /* set tail elements to 1s */
> total_elems = vext_get_total_elems(env, desc, esz);
> - vext_set_elems_1s(vd, vta, env->vl * esz, total_elems * esz);
> + vext_set_elems_1s_le(vd, vta, env->vl * esz, total_elems * esz);
> env->vstart = 0;
> }
>
> @@ -597,7 +597,7 @@ void HELPER(vsha2ch64_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env,
>
> /* set tail elements to 1s */
> total_elems = vext_get_total_elems(env, desc, esz);
> - vext_set_elems_1s(vd, vta, env->vl * esz, total_elems * esz);
> + vext_set_elems_1s_le(vd, vta, env->vl * esz, total_elems * esz);
> env->vstart = 0;
> }
>
> @@ -615,7 +615,7 @@ void HELPER(vsha2cl32_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env,
>
> /* set tail elements to 1s */
> total_elems = vext_get_total_elems(env, desc, esz);
> - vext_set_elems_1s(vd, vta, env->vl * esz, total_elems * esz);
> + vext_set_elems_1s_le(vd, vta, env->vl * esz, total_elems * esz);
> env->vstart = 0;
> }
>
> @@ -633,7 +633,7 @@ void HELPER(vsha2cl64_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env,
>
> /* set tail elements to 1s */
> total_elems = vext_get_total_elems(env, desc, esz);
> - vext_set_elems_1s(vd, vta, env->vl * esz, total_elems * esz);
> + vext_set_elems_1s_le(vd, vta, env->vl * esz, total_elems * esz);
> env->vstart = 0;
> }
>
> @@ -672,7 +672,7 @@ void HELPER(vsm3me_vv)(void *vd_vptr, void *vs1_vptr, void *vs2_vptr,
> vd[(i * 8) + j] = bswap32(w[H4(j + 16)]);
> }
> }
> - vext_set_elems_1s(vd_vptr, vta, env->vl * esz, total_elems * esz);
> + vext_set_elems_1s_le(vd_vptr, vta, env->vl * esz, total_elems * esz);
> env->vstart = 0;
> }
>
> @@ -767,7 +767,7 @@ void HELPER(vsm3c_vi)(void *vd_vptr, void *vs2_vptr, uint32_t uimm,
> vd[i * 8 + k] = bswap32(v1[H4(k)]);
> }
> }
> - vext_set_elems_1s(vd_vptr, vta, env->vl * esz, total_elems * esz);
> + vext_set_elems_1s_le(vd_vptr, vta, env->vl * esz, total_elems * esz);
> env->vstart = 0;
> }
>
> @@ -805,7 +805,7 @@ void HELPER(vghsh_vv)(void *vd_vptr, void *vs1_vptr, void *vs2_vptr,
> vd[i * 2 + 1] = brev8(Z[1]);
> }
> /* set tail elements to 1s */
> - vext_set_elems_1s(vd, vta, env->vl * 4, total_elems * 4);
> + vext_set_elems_1s_le(vd, vta, env->vl * 4, total_elems * 4);
> env->vstart = 0;
> }
>
> @@ -839,7 +839,7 @@ void HELPER(vgmul_vv)(void *vd_vptr, void *vs2_vptr, CPURISCVState *env,
> vd[i * 2 + 1] = brev8(Z[1]);
> }
> /* set tail elements to 1s */
> - vext_set_elems_1s(vd, vta, env->vl * 4, total_elems * 4);
> + vext_set_elems_1s_le(vd, vta, env->vl * 4, total_elems * 4);
> env->vstart = 0;
> }
>
> @@ -883,7 +883,7 @@ void HELPER(vsm4k_vi)(void *vd, void *vs2, uint32_t uimm5, CPURISCVState *env,
>
> env->vstart = 0;
> /* set tail elements to 1s */
> - vext_set_elems_1s(vd, vext_vta(desc), env->vl * esz, total_elems * esz);
> + vext_set_elems_1s_le(vd, vext_vta(desc), env->vl * esz, total_elems * esz);
> }
>
> static void do_sm4_round(uint32_t *rk, uint32_t *buf)
> @@ -932,7 +932,7 @@ void HELPER(vsm4r_vv)(void *vd, void *vs2, CPURISCVState *env, uint32_t desc)
>
> env->vstart = 0;
> /* set tail elements to 1s */
> - vext_set_elems_1s(vd, vext_vta(desc), env->vl * esz, total_elems * esz);
> + vext_set_elems_1s_le(vd, vext_vta(desc), env->vl * esz, total_elems * esz);
> }
>
> void HELPER(vsm4r_vs)(void *vd, void *vs2, CPURISCVState *env, uint32_t desc)
> @@ -966,5 +966,5 @@ void HELPER(vsm4r_vs)(void *vd, void *vs2, CPURISCVState *env, uint32_t desc)
>
> env->vstart = 0;
> /* set tail elements to 1s */
> - vext_set_elems_1s(vd, vext_vta(desc), env->vl * esz, total_elems * esz);
> + vext_set_elems_1s_le(vd, vext_vta(desc), env->vl * esz, total_elems * esz);
> }
> diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
> index 84cec73eb2..6ed73ed70a 100644
> --- a/target/riscv/vector_helper.c
> +++ b/target/riscv/vector_helper.c
> @@ -187,7 +187,7 @@ static void vext_set_tail_elems_1s(target_ulong vl, void *vd,
> }
>
> for (k = 0; k < nf; ++k) {
> - vext_set_elems_1s(vd, vta, (k * max_elems + vl) * esz,
> + vext_set_elems_1s_le(vd, vta, (k * max_elems + vl) * esz,
> (k * max_elems + max_elems) * esz);
> }
> }
> @@ -213,7 +213,7 @@ vext_ldst_stride(void *vd, void *v0, target_ulong base,
> while (k < nf) {
> if (!vm && !vext_elem_mask(v0, i)) {
> /* set masked-off elements to 1s */
> - vext_set_elems_1s(vd, vma, (i + k * max_elems) * esz,
> + vext_set_elems_1s_le(vd, vma, (i + k * max_elems) * esz,
> (i + k * max_elems + 1) * esz);
> k++;
> continue;
> @@ -393,7 +393,7 @@ vext_ldst_index(void *vd, void *v0, target_ulong base,
> while (k < nf) {
> if (!vm && !vext_elem_mask(v0, i)) {
> /* set masked-off elements to 1s */
> - vext_set_elems_1s(vd, vma, (i + k * max_elems) * esz,
> + vext_set_elems_1s_le(vd, vma, (i + k * max_elems) * esz,
> (i + k * max_elems + 1) * esz);
> k++;
> continue;
> @@ -523,7 +523,7 @@ ProbeSuccess:
> while (k < nf) {
> if (!vm && !vext_elem_mask(v0, i)) {
> /* set masked-off elements to 1s */
> - vext_set_elems_1s(vd, vma, (i + k * max_elems) * esz,
> + vext_set_elems_1s_le(vd, vma, (i + k * max_elems) * esz,
> (i + k * max_elems + 1) * esz);
> k++;
> continue;
> @@ -887,7 +887,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
> } \
> env->vstart = 0; \
> /* set tail elements to 1s */ \
> - vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \
> + vext_set_elems_1s_le(vd, vta, vl * esz, total_elems * esz); \
> }
>
> GEN_VEXT_VADC_VVM(vadc_vvm_b, uint8_t, H1, DO_VADC)
> @@ -918,7 +918,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
> } \
> env->vstart = 0; \
> /* set tail elements to 1s */ \
> - vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \
> + vext_set_elems_1s_le(vd, vta, vl * esz, total_elems * esz); \
> }
>
> GEN_VEXT_VADC_VXM(vadc_vxm_b, uint8_t, H1, DO_VADC)
> @@ -1082,7 +1082,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
> for (i = env->vstart; i < vl; i++) { \
> if (!vm && !vext_elem_mask(v0, i)) { \
> /* set masked-off elements to 1s */ \
> - vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz); \
> + vext_set_elems_1s_le(vd, vma, i * esz, (i + 1) * esz); \
> continue; \
> } \
> TS1 s1 = *((TS1 *)vs1 + HS1(i)); \
> @@ -1091,7 +1091,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
> } \
> env->vstart = 0; \
> /* set tail elements to 1s */ \
> - vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \
> + vext_set_elems_1s_le(vd, vta, vl * esz, total_elems * esz); \
> }
>
> GEN_VEXT_SHIFT_VV(vsll_vv_b, uint8_t, uint8_t, H1, H1, DO_SLL, 0x7)
> @@ -1129,7 +1129,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \
> for (i = env->vstart; i < vl; i++) { \
> if (!vm && !vext_elem_mask(v0, i)) { \
> /* set masked-off elements to 1s */ \
> - vext_set_elems_1s(vd, vma, i * esz, \
> + vext_set_elems_1s_le(vd, vma, i * esz, \
> (i + 1) * esz); \
> continue; \
> } \
> @@ -1138,7 +1138,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \
> } \
> env->vstart = 0; \
> /* set tail elements to 1s */ \
> - vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz);\
> + vext_set_elems_1s_le(vd, vta, vl * esz, total_elems * esz);\
> }
>
> GEN_VEXT_SHIFT_VX(vsll_vx_b, uint8_t, int8_t, H1, H1, DO_SLL, 0x7)
> @@ -1806,7 +1806,7 @@ void HELPER(NAME)(void *vd, void *vs1, CPURISCVState *env, \
> } \
> env->vstart = 0; \
> /* set tail elements to 1s */ \
> - vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \
> + vext_set_elems_1s_le(vd, vta, vl * esz, total_elems * esz); \
> }
>
> GEN_VEXT_VMV_VV(vmv_v_v_b, int8_t, H1)
> @@ -1829,7 +1829,7 @@ void HELPER(NAME)(void *vd, uint64_t s1, CPURISCVState *env, \
> } \
> env->vstart = 0; \
> /* set tail elements to 1s */ \
> - vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \
> + vext_set_elems_1s_le(vd, vta, vl * esz, total_elems * esz); \
> }
>
> GEN_VEXT_VMV_VX(vmv_v_x_b, int8_t, H1)
> @@ -1853,7 +1853,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
> } \
> env->vstart = 0; \
> /* set tail elements to 1s */ \
> - vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \
> + vext_set_elems_1s_le(vd, vta, vl * esz, total_elems * esz); \
> }
>
> GEN_VEXT_VMERGE_VV(vmerge_vvm_b, int8_t, H1)
> @@ -1879,7 +1879,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \
> } \
> env->vstart = 0; \
> /* set tail elements to 1s */ \
> - vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \
> + vext_set_elems_1s_le(vd, vta, vl * esz, total_elems * esz); \
> }
>
> GEN_VEXT_VMERGE_VX(vmerge_vxm_b, int8_t, H1)
> @@ -1919,7 +1919,7 @@ vext_vv_rm_1(void *vd, void *v0, void *vs1, void *vs2,
> for (uint32_t i = env->vstart; i < vl; i++) {
> if (!vm && !vext_elem_mask(v0, i)) {
> /* set masked-off elements to 1s */
> - vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz);
> + vext_set_elems_1s_le(vd, vma, i * esz, (i + 1) * esz);
> continue;
> }
> fn(vd, vs1, vs2, i, env, vxrm);
> @@ -1958,7 +1958,7 @@ vext_vv_rm_2(void *vd, void *v0, void *vs1, void *vs2,
> break;
> }
> /* set tail elements to 1s */
> - vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz);
> + vext_set_elems_1s_le(vd, vta, vl * esz, total_elems * esz);
> }
>
> /* generate helpers for fixed point instructions with OPIVV format */
> @@ -2044,7 +2044,7 @@ vext_vx_rm_1(void *vd, void *v0, target_long s1, void *vs2,
> for (uint32_t i = env->vstart; i < vl; i++) {
> if (!vm && !vext_elem_mask(v0, i)) {
> /* set masked-off elements to 1s */
> - vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz);
> + vext_set_elems_1s_le(vd, vma, i * esz, (i + 1) * esz);
> continue;
> }
> fn(vd, s1, vs2, i, env, vxrm);
> @@ -2083,7 +2083,7 @@ vext_vx_rm_2(void *vd, void *v0, target_long s1, void *vs2,
> break;
> }
> /* set tail elements to 1s */
> - vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz);
> + vext_set_elems_1s_le(vd, vta, vl * esz, total_elems * esz);
> }
>
> /* generate helpers for fixed point instructions with OPIVX format */
> @@ -2841,7 +2841,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
> for (i = env->vstart; i < vl; i++) { \
> if (!vm && !vext_elem_mask(v0, i)) { \
> /* set masked-off elements to 1s */ \
> - vext_set_elems_1s(vd, vma, i * ESZ, \
> + vext_set_elems_1s_le(vd, vma, i * ESZ, \
> (i + 1) * ESZ); \
> continue; \
> } \
> @@ -2849,7 +2849,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
> } \
> env->vstart = 0; \
> /* set tail elements to 1s */ \
> - vext_set_elems_1s(vd, vta, vl * ESZ, \
> + vext_set_elems_1s_le(vd, vta, vl * ESZ, \
> total_elems * ESZ); \
> }
>
> @@ -2884,7 +2884,7 @@ void HELPER(NAME)(void *vd, void *v0, uint64_t s1, \
> for (i = env->vstart; i < vl; i++) { \
> if (!vm && !vext_elem_mask(v0, i)) { \
> /* set masked-off elements to 1s */ \
> - vext_set_elems_1s(vd, vma, i * ESZ, \
> + vext_set_elems_1s_le(vd, vma, i * ESZ, \
> (i + 1) * ESZ); \
> continue; \
> } \
> @@ -2892,7 +2892,7 @@ void HELPER(NAME)(void *vd, void *v0, uint64_t s1, \
> } \
> env->vstart = 0; \
> /* set tail elements to 1s */ \
> - vext_set_elems_1s(vd, vta, vl * ESZ, \
> + vext_set_elems_1s_le(vd, vta, vl * ESZ, \
> total_elems * ESZ); \
> }
>
> @@ -3473,14 +3473,14 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2, \
> for (i = env->vstart; i < vl; i++) { \
> if (!vm && !vext_elem_mask(v0, i)) { \
> /* set masked-off elements to 1s */ \
> - vext_set_elems_1s(vd, vma, i * ESZ, \
> + vext_set_elems_1s_le(vd, vma, i * ESZ, \
> (i + 1) * ESZ); \
> continue; \
> } \
> do_##NAME(vd, vs2, i, env); \
> } \
> env->vstart = 0; \
> - vext_set_elems_1s(vd, vta, vl * ESZ, \
> + vext_set_elems_1s_le(vd, vta, vl * ESZ, \
> total_elems * ESZ); \
> }
>
> @@ -4228,7 +4228,7 @@ void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2, \
> } \
> env->vstart = 0; \
> /* set tail elements to 1s */ \
> - vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \
> + vext_set_elems_1s_le(vd, vta, vl * esz, total_elems * esz); \
> }
>
> GEN_VFMERGE_VF(vfmerge_vfm_h, int16_t, H2)
> @@ -4397,7 +4397,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
> *((TD *)vd + HD(0)) = s1; \
> env->vstart = 0; \
> /* set tail elements to 1s */ \
> - vext_set_elems_1s(vd, vta, esz, vlenb); \
> + vext_set_elems_1s_le(vd, vta, esz, vlenb); \
> }
>
> /* vd[0] = sum(vs1[0], vs2[*]) */
> @@ -4483,7 +4483,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
> *((TD *)vd + HD(0)) = s1; \
> env->vstart = 0; \
> /* set tail elements to 1s */ \
> - vext_set_elems_1s(vd, vta, esz, vlenb); \
> + vext_set_elems_1s_le(vd, vta, esz, vlenb); \
> }
>
> /* Unordered sum */
> @@ -4708,7 +4708,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2, CPURISCVState *env, \
> for (i = env->vstart; i < vl; i++) { \
> if (!vm && !vext_elem_mask(v0, i)) { \
> /* set masked-off elements to 1s */ \
> - vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz); \
> + vext_set_elems_1s_le(vd, vma, i * esz, (i + 1) * esz); \
> continue; \
> } \
> *((ETYPE *)vd + H(i)) = sum; \
> @@ -4718,7 +4718,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2, CPURISCVState *env, \
> } \
> env->vstart = 0; \
> /* set tail elements to 1s */ \
> - vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \
> + vext_set_elems_1s_le(vd, vta, vl * esz, total_elems * esz); \
> }
>
> GEN_VEXT_VIOTA_M(viota_m_b, uint8_t, H1)
> @@ -4741,14 +4741,14 @@ void HELPER(NAME)(void *vd, void *v0, CPURISCVState *env, uint32_t desc) \
> for (i = env->vstart; i < vl; i++) { \
> if (!vm && !vext_elem_mask(v0, i)) { \
> /* set masked-off elements to 1s */ \
> - vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz); \
> + vext_set_elems_1s_le(vd, vma, i * esz, (i + 1) * esz); \
> continue; \
> } \
> *((ETYPE *)vd + H(i)) = i; \
> } \
> env->vstart = 0; \
> /* set tail elements to 1s */ \
> - vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \
> + vext_set_elems_1s_le(vd, vta, vl * esz, total_elems * esz); \
> }
>
> GEN_VEXT_VID_V(vid_v_b, uint8_t, H1)
> @@ -4777,13 +4777,13 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
> for (i = i_min; i < vl; i++) { \
> if (!vm && !vext_elem_mask(v0, i)) { \
> /* set masked-off elements to 1s */ \
> - vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz); \
> + vext_set_elems_1s_le(vd, vma, i * esz, (i + 1) * esz); \
> continue; \
> } \
> *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i - offset)); \
> } \
> /* set tail elements to 1s */ \
> - vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \
> + vext_set_elems_1s_le(vd, vta, vl * esz, total_elems * esz); \
> }
>
> /* vslideup.vx vd, vs2, rs1, vm # vd[i+rs1] = vs2[i] */
> @@ -4810,7 +4810,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
> for (i = env->vstart; i < i_max; ++i) { \
> if (!vm && !vext_elem_mask(v0, i)) { \
> /* set masked-off elements to 1s */ \
> - vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz); \
> + vext_set_elems_1s_le(vd, vma, i * esz, (i + 1) * esz); \
> continue; \
> } \
> *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i + s1)); \
> @@ -4824,7 +4824,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
> \
> env->vstart = 0; \
> /* set tail elements to 1s */ \
> - vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \
> + vext_set_elems_1s_le(vd, vta, vl * esz, total_elems * esz); \
> }
>
> /* vslidedown.vx vd, vs2, rs1, vm # vd[i] = vs2[i+rs1] */
> @@ -4850,7 +4850,7 @@ static void vslide1up_##BITWIDTH(void *vd, void *v0, uint64_t s1, \
> for (i = env->vstart; i < vl; i++) { \
> if (!vm && !vext_elem_mask(v0, i)) { \
> /* set masked-off elements to 1s */ \
> - vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz); \
> + vext_set_elems_1s_le(vd, vma, i * esz, (i + 1) * esz); \
> continue; \
> } \
> if (i == 0) { \
> @@ -4861,7 +4861,7 @@ static void vslide1up_##BITWIDTH(void *vd, void *v0, uint64_t s1, \
> } \
> env->vstart = 0; \
> /* set tail elements to 1s */ \
> - vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \
> + vext_set_elems_1s_le(vd, vta, vl * esz, total_elems * esz); \
> }
>
> GEN_VEXT_VSLIE1UP(8, H1)
> @@ -4899,7 +4899,7 @@ static void vslide1down_##BITWIDTH(void *vd, void *v0, uint64_t s1, \
> for (i = env->vstart; i < vl; i++) { \
> if (!vm && !vext_elem_mask(v0, i)) { \
> /* set masked-off elements to 1s */ \
> - vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz); \
> + vext_set_elems_1s_le(vd, vma, i * esz, (i + 1) * esz); \
> continue; \
> } \
> if (i == vl - 1) { \
> @@ -4910,7 +4910,7 @@ static void vslide1down_##BITWIDTH(void *vd, void *v0, uint64_t s1, \
> } \
> env->vstart = 0; \
> /* set tail elements to 1s */ \
> - vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \
> + vext_set_elems_1s_le(vd, vta, vl * esz, total_elems * esz); \
> }
>
> GEN_VEXT_VSLIDE1DOWN(8, H1)
> @@ -4974,7 +4974,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
> for (i = env->vstart; i < vl; i++) { \
> if (!vm && !vext_elem_mask(v0, i)) { \
> /* set masked-off elements to 1s */ \
> - vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz); \
> + vext_set_elems_1s_le(vd, vma, i * esz, (i + 1) * esz); \
> continue; \
> } \
> index = *((TS1 *)vs1 + HS1(i)); \
> @@ -4986,7 +4986,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
> } \
> env->vstart = 0; \
> /* set tail elements to 1s */ \
> - vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \
> + vext_set_elems_1s_le(vd, vta, vl * esz, total_elems * esz); \
> }
>
> /* vd[i] = (vs1[i] >= VLMAX) ? 0 : vs2[vs1[i]]; */
> @@ -5017,7 +5017,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
> for (i = env->vstart; i < vl; i++) { \
> if (!vm && !vext_elem_mask(v0, i)) { \
> /* set masked-off elements to 1s */ \
> - vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz); \
> + vext_set_elems_1s_le(vd, vma, i * esz, (i + 1) * esz); \
> continue; \
> } \
> if (index >= vlmax) { \
> @@ -5028,7 +5028,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
> } \
> env->vstart = 0; \
> /* set tail elements to 1s */ \
> - vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \
> + vext_set_elems_1s_le(vd, vta, vl * esz, total_elems * esz); \
> }
>
> /* vd[i] = (x[rs1] >= VLMAX) ? 0 : vs2[rs1] */
> @@ -5057,7 +5057,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
> } \
> env->vstart = 0; \
> /* set tail elements to 1s */ \
> - vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \
> + vext_set_elems_1s_le(vd, vta, vl * esz, total_elems * esz); \
> }
>
> /* Compress into vd elements of vs2 where vs1 is enabled */
> @@ -5098,14 +5098,14 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2, \
> for (i = env->vstart; i < vl; i++) { \
> if (!vm && !vext_elem_mask(v0, i)) { \
> /* set masked-off elements to 1s */ \
> - vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz); \
> + vext_set_elems_1s_le(vd, vma, i * esz, (i + 1) * esz); \
> continue; \
> } \
> *((ETYPE *)vd + HD(i)) = *((DTYPE *)vs2 + HS1(i)); \
> } \
> env->vstart = 0; \
> /* set tail elements to 1s */ \
> - vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz); \
> + vext_set_elems_1s_le(vd, vta, vl * esz, total_elems * esz); \
> }
>
> GEN_VEXT_INT_EXT(vzext_vf2_h, uint16_t, uint8_t, H2, H1)
> diff --git a/target/riscv/vector_internals.c b/target/riscv/vector_internals.c
> index 12f5964fbb..349b24f4ae 100644
> --- a/target/riscv/vector_internals.c
> +++ b/target/riscv/vector_internals.c
> @@ -20,7 +20,7 @@
> #include "vector_internals.h"
>
> /* set agnostic elements to 1s */
> -void vext_set_elems_1s(void *base, uint32_t is_agnostic, uint32_t cnt,
> +void vext_set_elems_1s_le(void *base, uint32_t is_agnostic, uint32_t cnt,
> uint32_t tot)
> {
> if (is_agnostic == 0) {
> @@ -47,14 +47,14 @@ void do_vext_vv(void *vd, void *v0, void *vs1, void *vs2,
> for (i = env->vstart; i < vl; i++) {
> if (!vm && !vext_elem_mask(v0, i)) {
> /* set masked-off elements to 1s */
> - vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz);
> + vext_set_elems_1s_le(vd, vma, i * esz, (i + 1) * esz);
> continue;
> }
> fn(vd, vs1, vs2, i);
> }
> env->vstart = 0;
> /* set tail elements to 1s */
> - vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz);
> + vext_set_elems_1s_le(vd, vta, vl * esz, total_elems * esz);
> }
>
> void do_vext_vx(void *vd, void *v0, target_long s1, void *vs2,
> @@ -71,12 +71,12 @@ void do_vext_vx(void *vd, void *v0, target_long s1, void *vs2,
> for (i = env->vstart; i < vl; i++) {
> if (!vm && !vext_elem_mask(v0, i)) {
> /* set masked-off elements to 1s */
> - vext_set_elems_1s(vd, vma, i * esz, (i + 1) * esz);
> + vext_set_elems_1s_le(vd, vma, i * esz, (i + 1) * esz);
> continue;
> }
> fn(vd, s1, vs2, i);
> }
> env->vstart = 0;
> /* set tail elements to 1s */
> - vext_set_elems_1s(vd, vta, vl * esz, total_elems * esz);
> + vext_set_elems_1s_le(vd, vta, vl * esz, total_elems * esz);
> }
> diff --git a/target/riscv/vector_internals.h b/target/riscv/vector_internals.h
> index 842765f6c1..fa599f60ca 100644
> --- a/target/riscv/vector_internals.h
> +++ b/target/riscv/vector_internals.h
> @@ -114,7 +114,7 @@ static inline uint32_t vext_get_total_elems(CPURISCVState *env, uint32_t desc,
> }
>
> /* set agnostic elements to 1s */
> -void vext_set_elems_1s(void *base, uint32_t is_agnostic, uint32_t cnt,
> +void vext_set_elems_1s_le(void *base, uint32_t is_agnostic, uint32_t cnt,
> uint32_t tot);
>
> /* expand macro args before macro */
> @@ -154,7 +154,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2, \
> for (i = env->vstart; i < vl; i++) { \
> if (!vm && !vext_elem_mask(v0, i)) { \
> /* set masked-off elements to 1s */ \
> - vext_set_elems_1s(vd, vma, i * ESZ, \
> + vext_set_elems_1s_le(vd, vma, i * ESZ, \
> (i + 1) * ESZ); \
> continue; \
> } \
> @@ -162,7 +162,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs2, \
> } \
> env->vstart = 0; \
> /* set tail elements to 1s */ \
> - vext_set_elems_1s(vd, vta, vl * ESZ, \
> + vext_set_elems_1s_le(vd, vta, vl * ESZ, \
> total_elems * ESZ); \
> }
>
^ permalink raw reply [flat|nested] 9+ messages in thread
* Re: [PATCH 2/4] target/riscv: Add right functions to set agnostic elements
2024-03-06 9:20 ` [PATCH 2/4] target/riscv: Add right functions to set agnostic elements Huang Tao
@ 2024-03-19 21:57 ` Daniel Henrique Barboza
2024-03-19 23:32 ` Richard Henderson
0 siblings, 1 reply; 9+ messages in thread
From: Daniel Henrique Barboza @ 2024-03-19 21:57 UTC (permalink / raw)
To: Huang Tao, qemu-devel
Cc: qemu-riscv, zhiwei_liu, liwei1518, bin.meng, alistair.francis,
palmer, Richard Henderson
(--- CCing Richard ---)
On 3/6/24 06:20, Huang Tao wrote:
> We add vext_set_elems_1s to set agnostic elements to 1s in both the big-
> and little-endian situations.
> In the new vext_set_elems_1s, we use the esz argument to locate the first
> element to set; the old byte offset 'cnt' is just idx * esz.
>
> Signed-off-by: Huang Tao <eric.huang@linux.alibaba.com>
> ---
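
Read literally, a caller would now pass the element size and the starting element
index instead of a precomputed byte offset. A toy little-endian-only sketch of that
relationship follows; the function body and the call are hypothetical, not hunks
from this series:

/*
 * Toy sketch (not from the patch): on a little-endian host the new-style
 * arguments (esz, idx) reduce to the old byte offset as cnt = idx * esz.
 */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

static void set_elems_1s(uint8_t *vd, uint32_t is_agnostic, uint32_t esz,
                         uint32_t idx, uint32_t tot)
{
    if (is_agnostic == 0) {
        return;                    /* undisturbed policy: leave bytes alone */
    }
    uint32_t cnt = idx * esz;      /* the old byte-offset argument */
    memset(vd + cnt, -1, tot - cnt);
}

int main(void)
{
    uint8_t vd[16] = { 0 };
    /* hypothetical call shape: esz = 4, vl = 1, 4 elements in total */
    set_elems_1s(vd, 1, 4, 1, sizeof(vd));
    for (unsigned i = 0; i < sizeof(vd); i++) {
        printf("%02x%c", (unsigned)vd[i], i == 15 ? '\n' : ' ');
    }
    return 0;
}

The big-endian branch added by the patch is what actually differs; the sketch only
shows why 'cnt' and (esz, idx) carry the same information on little-endian hosts.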
> target/riscv/vector_internals.c | 53 +++++++++++++++++++++++++++++++++
> target/riscv/vector_internals.h | 2 ++
> 2 files changed, 55 insertions(+)
>
> diff --git a/target/riscv/vector_internals.c b/target/riscv/vector_internals.c
> index 349b24f4ae..455be96996 100644
> --- a/target/riscv/vector_internals.c
> +++ b/target/riscv/vector_internals.c
> @@ -20,6 +20,59 @@
> #include "vector_internals.h"
>
> /* set agnostic elements to 1s */
> +#if HOST_BIG_ENDIAN
> +void vext_set_elems_1s(void *vd, uint32_t is_agnostic, uint32_t esz,
> + uint32_t idx, uint32_t tot)
> +{
> + if (is_agnostic == 0) {
> + /* policy undisturbed */
> + return;
> + }
> + void *base = NULL;
> + switch (esz) {
> + case 1:
> + base = ((int8_t *)vd + H1(idx));
> + break;
> + case 2:
> + base = ((int16_t *)vd + H2(idx));
> + break;
> + case 4:
> + base = ((int32_t *)vd + H4(idx));
> + break;
> + case 8:
> + base = ((int64_t *)vd + H8(idx));
> + break;
> + default:
> + g_assert_not_reached();
> + break;
> + }
> + /*
> + * split the elements into 2 parts
> + * part_begin: the memory that needs to be set in the first uint64_t unit
> + * part_allign: the memory that needs to be set starting from the next
> + * uint64_t unit, aligned to 8
> + */
> + uint32_t cnt = idx * esz;
> + int part_begin, part_allign;
> + part_begin = MIN(tot - cnt, 8 - (cnt % 8));
> + part_allign = ((tot - cnt - part_begin) / 8) * 8;
> +
> + memset(base - part_begin + 1, -1, part_begin);
> + memset(QEMU_ALIGN_PTR_UP(base, 8), -1, part_allign);
This seems correct but a bit overcomplicated at first glance. I wonder if we
already have something simpler done somewhere.
Richard, does ARM (or any other arch) do anything of the sort? Aside from more trivial
byte swaps using bswap64() I didn't find anything similar.
We recently posted a big endian related fix here:
[PATCH for 9.0 v15 03/10] target/riscv/vector_helper.c: fix 'vmvr_v' memcpy endianess
But not sure how to apply it here.
Thanks,
Daniel
> +}
> +#else
> +void vext_set_elems_1s(void *vd, uint32_t is_agnostic, uint32_t esz,
> + uint32_t idx, uint32_t tot)
> +{
> + if (is_agnostic == 0) {
> + /* policy undisturbed */
> + return;
> + }
> + uint32_t cnt = idx * esz;
> + memset(vd + cnt, -1, tot - cnt);
> +}
> +#endif
> +
> void vext_set_elems_1s_le(void *base, uint32_t is_agnostic, uint32_t cnt,
> uint32_t tot)
> {
> diff --git a/target/riscv/vector_internals.h b/target/riscv/vector_internals.h
> index fa599f60ca..c96e52f926 100644
> --- a/target/riscv/vector_internals.h
> +++ b/target/riscv/vector_internals.h
> @@ -114,6 +114,8 @@ static inline uint32_t vext_get_total_elems(CPURISCVState *env, uint32_t desc,
> }
>
> /* set agnostic elements to 1s */
> +void vext_set_elems_1s(void *vd, uint32_t is_agnostic, uint32_t esz,
> + uint32_t idx, uint32_t tot);
> void vext_set_elems_1s_le(void *base, uint32_t is_agnostic, uint32_t cnt,
> uint32_t tot);
>
^ permalink raw reply [flat|nested] 9+ messages in thread
* Re: [PATCH 2/4] target/riscv: Add right functions to set agnostic elements
2024-03-19 21:57 ` Daniel Henrique Barboza
@ 2024-03-19 23:32 ` Richard Henderson
2024-03-20 2:41 ` Huang Tao
0 siblings, 1 reply; 9+ messages in thread
From: Richard Henderson @ 2024-03-19 23:32 UTC (permalink / raw)
To: Daniel Henrique Barboza, Huang Tao, qemu-devel
Cc: qemu-riscv, zhiwei_liu, liwei1518, bin.meng, alistair.francis,
palmer
On 3/19/24 11:57, Daniel Henrique Barboza wrote:
> This seems correct but a bit over complicated at first glance. I wonder if we have
> something simpler already done somewhere.
>
> Richard, does ARM (or any other arch) do anything of the sort? Aside from more trivial
> byte swaps using bswap64() I didn't find anything similar.
No, nothing quite like.
> We recently posted a big endian related fix here:
>
> [PATCH for 9.0 v15 03/10] target/riscv/vector_helper.c: fix 'vmvr_v' memcpy endianess
>
> But not sure how to apply it here.
It's almost exactly the same, only with memset instead of memcpy.
if (HOST_BIG_ENDIAN && idx % 8 != 0) {
    uint32_t j = ROUND_UP(idx, 8);
    memset(vd + H(j - 1), -1, j - idx);
    idx = j;
}
memset(vd + idx, -1, tot - idx);
I'll note that you don't need to change the api of vext_set_elems_1s -- so most of these
patches are not required.
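
Folded into a complete helper, that might look roughly as follows. This is a sketch
only: HOST_BIG_ENDIAN, ROUND_UP() and H1() are local stand-ins modelled on QEMU's
macros, and the undisturbed-policy check is carried over from the existing function.

/* sketch, not the final patch; the macro definitions below are assumptions */
#include <stdint.h>
#include <string.h>

#ifndef HOST_BIG_ENDIAN
#define HOST_BIG_ENDIAN 0
#endif
#define ROUND_UP(n, d) (((n) + (d) - 1) / (d) * (d))
#if HOST_BIG_ENDIAN
#define H1(x) ((x) ^ 7)   /* byte-index swizzle within an 8-byte unit */
#else
#define H1(x) (x)
#endif

void vext_set_elems_1s(uint8_t *vd, uint32_t is_agnostic,
                       uint32_t cnt, uint32_t tot)
{
    if (is_agnostic == 0) {
        return;                          /* policy undisturbed */
    }
    if (HOST_BIG_ENDIAN && cnt % 8 != 0) {
        /* finish the partially used 8-byte unit first */
        uint32_t j = ROUND_UP(cnt, 8);
        memset(vd + H1(j - 1), -1, j - cnt);
        cnt = j;
    }
    /* whole 8-byte units are contiguous on either host byte order */
    memset(vd + cnt, -1, tot - cnt);
}

int main(void)
{
    uint8_t vd[16] = { 0 };
    vext_set_elems_1s(vd, 1, 6, sizeof(vd));   /* fill guest bytes 6..15 */
    return 0;
}

On a little-endian host the first branch is never taken, so this reduces to the
current memset-based helper while keeping the existing byte-offset arguments.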
r~
^ permalink raw reply [flat|nested] 9+ messages in thread
* Re: [PATCH 2/4] target/riscv: Add right functions to set agnostic elements
2024-03-19 23:32 ` Richard Henderson
@ 2024-03-20 2:41 ` Huang Tao
0 siblings, 0 replies; 9+ messages in thread
From: Huang Tao @ 2024-03-20 2:41 UTC (permalink / raw)
To: Richard Henderson, Daniel Henrique Barboza, qemu-devel
Cc: qemu-riscv, zhiwei_liu, liwei1518, bin.meng, alistair.francis,
palmer
I will rewrite the patch and send a new version soon.
Thanks,
Huang Tao
On 2024/3/20 07:32, Richard Henderson wrote:
> On 3/19/24 11:57, Daniel Henrique Barboza wrote:
>> This seems correct but a bit over complicated at first glance. I
>> wonder if we have
>> something simpler already done somewhere.
>>
>> Richard, does ARM (or any other arch) do anything of the sort? Aside
>> from more trivial
>> byte swaps using bswap64() I didn't find anything similar.
>
> No, nothing quite like.
>
>> We recently posted a big endian related fix here:
>>
>> [PATCH for 9.0 v15 03/10] target/riscv/vector_helper.c: fix 'vmvr_v'
>> memcpy endianess
>>
>> But not sure how to apply it here.
>
> It's almost exactly the same, only with memset instead of memcpy.
>
> if (HOST_BIG_ENDIAN && idx % 8 != 0) {
> uint32_t j = ROUND_UP(idx, 8);
> memset(vd + H(j - 1), -1, j - idx);
> idx = j;
> }
> memset(vd + idx, -1, tot - idx);
>
>
> I'll note that you don't need to change the api of vext_set_elems_1s
> -- so most of these patches are not required.
>
>
> r~
^ permalink raw reply [flat|nested] 9+ messages in thread
Thread overview: 9+ messages
2024-03-06 9:20 [PATCH 0/4] target/riscv: Fix the element agnostic function problem Huang Tao
2024-03-06 9:20 ` [PATCH 1/4] target/riscv: Rename vext_set_elems_1s function Huang Tao
2024-03-19 21:37 ` Daniel Henrique Barboza
2024-03-06 9:20 ` [PATCH 2/4] target/riscv: Add right functions to set agnostic elements Huang Tao
2024-03-19 21:57 ` Daniel Henrique Barboza
2024-03-19 23:32 ` Richard Henderson
2024-03-20 2:41 ` Huang Tao
2024-03-06 9:20 ` [PATCH 3/4] target/riscv: Replace element agnostic for vector instructions Huang Tao
2024-03-06 9:20 ` [PATCH 4/4] target/riscv: Delete the former element agnostic function Huang Tao