* [PATCH v5 10/15] target/riscv: Add Zvknh ISA extension support
From: Max Chou @ 2023-06-27 17:29 UTC (permalink / raw)
To: qemu-devel, qemu-riscv
Cc: dbarboza, Kiran Ostrolenk, Nazar Kazakov, Lawrence Hunter,
Max Chou, Palmer Dabbelt, Alistair Francis, Bin Meng, Weiwei Li,
Liu Zhiwei
From: Kiran Ostrolenk <kiran.ostrolenk@codethink.co.uk>
This commit adds support for the Zvknh vector-crypto extension, which
consists of the following instructions:
* vsha2ms.vv
* vsha2c[hl].vv
Translation functions are defined in
`target/riscv/insn_trans/trans_rvvk.c.inc` and helpers are defined in
`target/riscv/vcrypto_helper.c`.
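For reference, vsha2ms.vv produces four new message-schedule words per
element group; the scalar SHA-256 recurrence it vectorizes is shown in the
minimal stand-alone C sketch below (illustration only, not part of this
patch; for Zvknhb at SEW=64 the same recurrence applies with the SHA-512
sigma rotations).

  #include <inttypes.h>
  #include <stdint.h>
  #include <stdio.h>

  static uint32_t ror32(uint32_t x, unsigned n) { return (x >> n) | (x << (32 - n)); }
  static uint32_t sig0(uint32_t x) { return ror32(x, 7) ^ ror32(x, 18) ^ (x >> 3); }
  static uint32_t sig1(uint32_t x) { return ror32(x, 17) ^ ror32(x, 19) ^ (x >> 10); }

  int main(void)
  {
      uint32_t W[64] = { 0 }; /* W[0..15] would come from the 512-bit block */

      for (int t = 16; t < 64; t++) {
          W[t] = sig1(W[t - 2]) + W[t - 7] + sig0(W[t - 15]) + W[t - 16];
      }
      printf("W[16] = %08" PRIx32 "\n", W[16]);
      return 0;
  }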
Co-authored-by: Nazar Kazakov <nazar.kazakov@codethink.co.uk>
Co-authored-by: Lawrence Hunter <lawrence.hunter@codethink.co.uk>
[max.chou@sifive.com: Replaced vstart checking by TCG op]
Signed-off-by: Nazar Kazakov <nazar.kazakov@codethink.co.uk>
Signed-off-by: Lawrence Hunter <lawrence.hunter@codethink.co.uk>
Signed-off-by: Kiran Ostrolenk <kiran.ostrolenk@codethink.co.uk>
Signed-off-by: Max Chou <max.chou@sifive.com>
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
[max.chou@sifive.com: Exposed x-zvknha & x-zvknhb properties]
---
target/riscv/cpu.c | 13 +-
target/riscv/cpu_cfg.h | 2 +
target/riscv/helper.h | 4 +
target/riscv/insn32.decode | 5 +
target/riscv/insn_trans/trans_rvvk.c.inc | 78 +++++++++
target/riscv/vcrypto_helper.c | 214 +++++++++++++++++++++++
6 files changed, 313 insertions(+), 3 deletions(-)
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
index 9b754122ac..3ca5ac209a 100644
--- a/target/riscv/cpu.c
+++ b/target/riscv/cpu.c
@@ -119,6 +119,8 @@ static const struct isa_ext_data isa_edata_arr[] = {
ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh),
ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin),
ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned),
+ ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha),
+ ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb),
ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
@@ -1191,14 +1193,17 @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
* In principle Zve*x would also suffice here, were they supported
* in qemu
*/
- if ((cpu->cfg.ext_zvbb || cpu->cfg.ext_zvkned) && !cpu->cfg.ext_zve32f) {
+ if ((cpu->cfg.ext_zvbb || cpu->cfg.ext_zvkned || cpu->cfg.ext_zvknha) &&
+ !cpu->cfg.ext_zve32f) {
error_setg(errp,
"Vector crypto extensions require V or Zve* extensions");
return;
}
- if (cpu->cfg.ext_zvbc && !cpu->cfg.ext_zve64f) {
- error_setg(errp, "Zvbc extension requires V or Zve64{f,d} extensions");
+ if ((cpu->cfg.ext_zvbc || cpu->cfg.ext_zvknhb) && !cpu->cfg.ext_zve64f) {
+ error_setg(
+ errp,
+ "Zvbc and Zvknhb extensions require V or Zve64{f,d} extensions");
return;
}
@@ -1705,6 +1710,8 @@ static Property riscv_cpu_extensions[] = {
DEFINE_PROP_BOOL("x-zvbb", RISCVCPU, cfg.ext_zvbb, false),
DEFINE_PROP_BOOL("x-zvbc", RISCVCPU, cfg.ext_zvbc, false),
DEFINE_PROP_BOOL("x-zvkned", RISCVCPU, cfg.ext_zvkned, false),
+ DEFINE_PROP_BOOL("x-zvknha", RISCVCPU, cfg.ext_zvknha, false),
+ DEFINE_PROP_BOOL("x-zvknhb", RISCVCPU, cfg.ext_zvknhb, false),
DEFINE_PROP_END_OF_LIST(),
};
diff --git a/target/riscv/cpu_cfg.h b/target/riscv/cpu_cfg.h
index 13dbc11e90..7144bfd228 100644
--- a/target/riscv/cpu_cfg.h
+++ b/target/riscv/cpu_cfg.h
@@ -86,6 +86,8 @@ struct RISCVCPUConfig {
bool ext_zvbb;
bool ext_zvbc;
bool ext_zvkned;
+ bool ext_zvknha;
+ bool ext_zvknhb;
bool ext_zmmul;
bool ext_zvfh;
bool ext_zvfhmin;
diff --git a/target/riscv/helper.h b/target/riscv/helper.h
index 738f20d3ca..19f5a8a28d 100644
--- a/target/riscv/helper.h
+++ b/target/riscv/helper.h
@@ -1234,3 +1234,7 @@ DEF_HELPER_4(vaesdm_vs, void, ptr, ptr, env, i32)
DEF_HELPER_4(vaesz_vs, void, ptr, ptr, env, i32)
DEF_HELPER_5(vaeskf1_vi, void, ptr, ptr, i32, env, i32)
DEF_HELPER_5(vaeskf2_vi, void, ptr, ptr, i32, env, i32)
+
+DEF_HELPER_5(vsha2ms_vv, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_5(vsha2ch_vv, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_5(vsha2cl_vv, void, ptr, ptr, ptr, env, i32)
diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
index 7e0295d493..d2cfb2729c 100644
--- a/target/riscv/insn32.decode
+++ b/target/riscv/insn32.decode
@@ -948,3 +948,8 @@ vaesdm_vs 101001 1 ..... 00000 010 ..... 1110111 @r2_vm_1
vaesz_vs 101001 1 ..... 00111 010 ..... 1110111 @r2_vm_1
vaeskf1_vi 100010 1 ..... ..... 010 ..... 1110111 @r_vm_1
vaeskf2_vi 101010 1 ..... ..... 010 ..... 1110111 @r_vm_1
+
+# *** Zvknh vector crypto extension ***
+vsha2ms_vv 101101 1 ..... ..... 010 ..... 1110111 @r_vm_1
+vsha2ch_vv 101110 1 ..... ..... 010 ..... 1110111 @r_vm_1
+vsha2cl_vv 101111 1 ..... ..... 010 ..... 1110111 @r_vm_1
diff --git a/target/riscv/insn_trans/trans_rvvk.c.inc b/target/riscv/insn_trans/trans_rvvk.c.inc
index c618f76e7e..528a0d3b32 100644
--- a/target/riscv/insn_trans/trans_rvvk.c.inc
+++ b/target/riscv/insn_trans/trans_rvvk.c.inc
@@ -401,3 +401,81 @@ static bool vaeskf2_check(DisasContext *s, arg_vaeskf2_vi *a)
GEN_VI_UNMASKED_TRANS(vaeskf1_vi, vaeskf1_check, ZVKNED_EGS)
GEN_VI_UNMASKED_TRANS(vaeskf2_vi, vaeskf2_check, ZVKNED_EGS)
+
+/*
+ * Zvknh
+ */
+
+#define ZVKNH_EGS 4
+
+#define GEN_VV_UNMASKED_TRANS(NAME, CHECK, EGS) \
+ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
+ { \
+ if (CHECK(s, a)) { \
+ uint32_t data = 0; \
+ TCGLabel *over = gen_new_label(); \
+ TCGLabel *vl_ok = gen_new_label(); \
+ TCGLabel *vstart_ok = gen_new_label(); \
+ TCGv_i32 tmp = tcg_temp_new_i32(); \
+ \
+ /* save opcode for unwinding in case we throw an exception */ \
+ decode_save_opc(s); \
+ \
+ /* check (vl % EGS == 0) assuming it's power of 2 */ \
+ tcg_gen_trunc_tl_i32(tmp, cpu_vl); \
+ tcg_gen_andi_i32(tmp, tmp, EGS - 1); \
+ tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, vl_ok); \
+ gen_helper_restore_cpu_and_raise_exception( \
+ cpu_env, tcg_constant_i32(RISCV_EXCP_ILLEGAL_INST)); \
+ gen_set_label(vl_ok); \
+ \
+ /* check (vstart % EGS == 0) assuming it's power of 2 */ \
+ tcg_gen_trunc_tl_i32(tmp, cpu_vstart); \
+ tcg_gen_andi_i32(tmp, tmp, EGS - 1); \
+ tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, vstart_ok); \
+ gen_helper_restore_cpu_and_raise_exception( \
+ cpu_env, tcg_constant_i32(RISCV_EXCP_ILLEGAL_INST)); \
+ gen_set_label(vstart_ok); \
+ \
+ tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over); \
+ \
+ data = FIELD_DP32(data, VDATA, VM, a->vm); \
+ data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
+ data = FIELD_DP32(data, VDATA, VTA, s->vta); \
+ data = FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s); \
+ data = FIELD_DP32(data, VDATA, VMA, s->vma); \
+ \
+ tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, a->rs1), \
+ vreg_ofs(s, a->rs2), cpu_env, \
+ s->cfg_ptr->vlen / 8, s->cfg_ptr->vlen / 8, \
+ data, gen_helper_##NAME); \
+ \
+ mark_vs_dirty(s); \
+ gen_set_label(over); \
+ return true; \
+ } \
+ return false; \
+ }
+
+static bool vsha_check_sew(DisasContext *s)
+{
+ return (s->cfg_ptr->ext_zvknha == true && s->sew == MO_32) ||
+ (s->cfg_ptr->ext_zvknhb == true &&
+ (s->sew == MO_32 || s->sew == MO_64));
+}
+
+static bool vsha_check(DisasContext *s, arg_rmrr *a)
+{
+ int egw_bytes = ZVKNH_EGS << s->sew;
+ int mult = 1 << MAX(s->lmul, 0);
+ return opivv_check(s, a) &&
+ vsha_check_sew(s) &&
+ MAXSZ(s) >= egw_bytes &&
+ !is_overlapped(a->rd, mult, a->rs1, mult) &&
+ !is_overlapped(a->rd, mult, a->rs2, mult) &&
+ s->lmul >= 0;
+}
+
+GEN_VV_UNMASKED_TRANS(vsha2ms_vv, vsha_check, ZVKNH_EGS)
+GEN_VV_UNMASKED_TRANS(vsha2cl_vv, vsha_check, ZVKNH_EGS)
+GEN_VV_UNMASKED_TRANS(vsha2ch_vv, vsha_check, ZVKNH_EGS)
diff --git a/target/riscv/vcrypto_helper.c b/target/riscv/vcrypto_helper.c
index 374ca603e5..24e44fcca8 100644
--- a/target/riscv/vcrypto_helper.c
+++ b/target/riscv/vcrypto_helper.c
@@ -404,3 +404,217 @@ void HELPER(vaeskf2_vi)(void *vd_vptr, void *vs2_vptr, uint32_t uimm,
/* set tail elements to 1s */
vext_set_elems_1s(vd, vta, vl * 4, total_elems * 4);
}
+
+static inline uint32_t sig0_sha256(uint32_t x)
+{
+ return ror32(x, 7) ^ ror32(x, 18) ^ (x >> 3);
+}
+
+static inline uint32_t sig1_sha256(uint32_t x)
+{
+ return ror32(x, 17) ^ ror32(x, 19) ^ (x >> 10);
+}
+
+static inline uint64_t sig0_sha512(uint64_t x)
+{
+ return ror64(x, 1) ^ ror64(x, 8) ^ (x >> 7);
+}
+
+static inline uint64_t sig1_sha512(uint64_t x)
+{
+ return ror64(x, 19) ^ ror64(x, 61) ^ (x >> 6);
+}
+
+static inline void vsha2ms_e32(uint32_t *vd, uint32_t *vs1, uint32_t *vs2)
+{
+ uint32_t res[4];
+ res[0] = sig1_sha256(vs1[H4(2)]) + vs2[H4(1)] + sig0_sha256(vd[H4(1)]) +
+ vd[H4(0)];
+ res[1] = sig1_sha256(vs1[H4(3)]) + vs2[H4(2)] + sig0_sha256(vd[H4(2)]) +
+ vd[H4(1)];
+ res[2] =
+ sig1_sha256(res[0]) + vs2[H4(3)] + sig0_sha256(vd[H4(3)]) + vd[H4(2)];
+ res[3] =
+ sig1_sha256(res[1]) + vs1[H4(0)] + sig0_sha256(vs2[H4(0)]) + vd[H4(3)];
+ vd[H4(3)] = res[3];
+ vd[H4(2)] = res[2];
+ vd[H4(1)] = res[1];
+ vd[H4(0)] = res[0];
+}
+
+static inline void vsha2ms_e64(uint64_t *vd, uint64_t *vs1, uint64_t *vs2)
+{
+ uint64_t res[4];
+ res[0] = sig1_sha512(vs1[2]) + vs2[1] + sig0_sha512(vd[1]) + vd[0];
+ res[1] = sig1_sha512(vs1[3]) + vs2[2] + sig0_sha512(vd[2]) + vd[1];
+ res[2] = sig1_sha512(res[0]) + vs2[3] + sig0_sha512(vd[3]) + vd[2];
+ res[3] = sig1_sha512(res[1]) + vs1[0] + sig0_sha512(vs2[0]) + vd[3];
+ vd[3] = res[3];
+ vd[2] = res[2];
+ vd[1] = res[1];
+ vd[0] = res[0];
+}
+
+void HELPER(vsha2ms_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env,
+ uint32_t desc)
+{
+ uint32_t sew = FIELD_EX64(env->vtype, VTYPE, VSEW);
+ uint32_t esz = sew == MO_32 ? 4 : 8;
+ uint32_t total_elems;
+ uint32_t vta = vext_vta(desc);
+
+ for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
+ if (sew == MO_32) {
+ vsha2ms_e32(((uint32_t *)vd) + i * 4, ((uint32_t *)vs1) + i * 4,
+ ((uint32_t *)vs2) + i * 4);
+ } else {
+ /* If not 32 then SEW should be 64 */
+ vsha2ms_e64(((uint64_t *)vd) + i * 4, ((uint64_t *)vs1) + i * 4,
+ ((uint64_t *)vs2) + i * 4);
+ }
+ }
+ /* set tail elements to 1s */
+ total_elems = vext_get_total_elems(env, desc, esz);
+ vext_set_elems_1s(vd, vta, env->vl * esz, total_elems * esz);
+ env->vstart = 0;
+}
+
+static inline uint64_t sum0_64(uint64_t x)
+{
+ return ror64(x, 28) ^ ror64(x, 34) ^ ror64(x, 39);
+}
+
+static inline uint32_t sum0_32(uint32_t x)
+{
+ return ror32(x, 2) ^ ror32(x, 13) ^ ror32(x, 22);
+}
+
+static inline uint64_t sum1_64(uint64_t x)
+{
+ return ror64(x, 14) ^ ror64(x, 18) ^ ror64(x, 41);
+}
+
+static inline uint32_t sum1_32(uint32_t x)
+{
+ return ror32(x, 6) ^ ror32(x, 11) ^ ror32(x, 25);
+}
+
+#define ch(x, y, z) ((x & y) ^ ((~x) & z))
+
+#define maj(x, y, z) ((x & y) ^ (x & z) ^ (y & z))
+
+static void vsha2c_64(uint64_t *vs2, uint64_t *vd, uint64_t *vs1)
+{
+ uint64_t a = vs2[3], b = vs2[2], e = vs2[1], f = vs2[0];
+ uint64_t c = vd[3], d = vd[2], g = vd[1], h = vd[0];
+ uint64_t W0 = vs1[0], W1 = vs1[1];
+ uint64_t T1 = h + sum1_64(e) + ch(e, f, g) + W0;
+ uint64_t T2 = sum0_64(a) + maj(a, b, c);
+
+ h = g;
+ g = f;
+ f = e;
+ e = d + T1;
+ d = c;
+ c = b;
+ b = a;
+ a = T1 + T2;
+
+ T1 = h + sum1_64(e) + ch(e, f, g) + W1;
+ T2 = sum0_64(a) + maj(a, b, c);
+ h = g;
+ g = f;
+ f = e;
+ e = d + T1;
+ d = c;
+ c = b;
+ b = a;
+ a = T1 + T2;
+
+ vd[0] = f;
+ vd[1] = e;
+ vd[2] = b;
+ vd[3] = a;
+}
+
+static void vsha2c_32(uint32_t *vs2, uint32_t *vd, uint32_t *vs1)
+{
+ uint32_t a = vs2[H4(3)], b = vs2[H4(2)], e = vs2[H4(1)], f = vs2[H4(0)];
+ uint32_t c = vd[H4(3)], d = vd[H4(2)], g = vd[H4(1)], h = vd[H4(0)];
+ uint32_t W0 = vs1[H4(0)], W1 = vs1[H4(1)];
+ uint32_t T1 = h + sum1_32(e) + ch(e, f, g) + W0;
+ uint32_t T2 = sum0_32(a) + maj(a, b, c);
+
+ h = g;
+ g = f;
+ f = e;
+ e = d + T1;
+ d = c;
+ c = b;
+ b = a;
+ a = T1 + T2;
+
+ T1 = h + sum1_32(e) + ch(e, f, g) + W1;
+ T2 = sum0_32(a) + maj(a, b, c);
+ h = g;
+ g = f;
+ f = e;
+ e = d + T1;
+ d = c;
+ c = b;
+ b = a;
+ a = T1 + T2;
+
+ vd[H4(0)] = f;
+ vd[H4(1)] = e;
+ vd[H4(2)] = b;
+ vd[H4(3)] = a;
+}
+
+void HELPER(vsha2ch_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env,
+ uint32_t desc)
+{
+ uint32_t sew = FIELD_EX64(env->vtype, VTYPE, VSEW);
+ uint32_t esz = sew == MO_64 ? 8 : 4;
+ uint32_t total_elems;
+ uint32_t vta = vext_vta(desc);
+
+ for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
+ if (sew == MO_64) {
+ vsha2c_64(((uint64_t *)vs2) + 4 * i, ((uint64_t *)vd) + 4 * i,
+ ((uint64_t *)vs1) + 4 * i + 2);
+ } else {
+ vsha2c_32(((uint32_t *)vs2) + 4 * i, ((uint32_t *)vd) + 4 * i,
+ ((uint32_t *)vs1) + 4 * i + 2);
+ }
+ }
+
+ /* set tail elements to 1s */
+ total_elems = vext_get_total_elems(env, desc, esz);
+ vext_set_elems_1s(vd, vta, env->vl * esz, total_elems * esz);
+ env->vstart = 0;
+}
+
+void HELPER(vsha2cl_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env,
+ uint32_t desc)
+{
+ uint32_t sew = FIELD_EX64(env->vtype, VTYPE, VSEW);
+ uint32_t esz = sew == MO_64 ? 8 : 4;
+ uint32_t total_elems;
+ uint32_t vta = vext_vta(desc);
+
+ for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
+ if (sew == MO_64) {
+ vsha2c_64(((uint64_t *)vs2) + 4 * i, ((uint64_t *)vd) + 4 * i,
+ (((uint64_t *)vs1) + 4 * i));
+ } else {
+ vsha2c_32(((uint32_t *)vs2) + 4 * i, ((uint32_t *)vd) + 4 * i,
+ (((uint32_t *)vs1) + 4 * i));
+ }
+ }
+
+ /* set tail elements to 1s */
+ total_elems = vext_get_total_elems(env, desc, esz);
+ vext_set_elems_1s(vd, vta, env->vl * esz, total_elems * esz);
+ env->vstart = 0;
+}
--
2.31.1
* [PATCH v5 11/15] target/riscv: Add Zvksh ISA extension support
From: Max Chou @ 2023-06-27 17:29 UTC (permalink / raw)
To: qemu-devel, qemu-riscv
Cc: dbarboza, Lawrence Hunter, Kiran Ostrolenk, Max Chou,
Palmer Dabbelt, Alistair Francis, Bin Meng, Weiwei Li, Liu Zhiwei,
Nazar Kazakov
From: Lawrence Hunter <lawrence.hunter@codethink.co.uk>
This commit adds support for the Zvksh vector-crypto extension, which
consists of the following instructions:
* vsm3me.vv
* vsm3c.vi
Translation functions are defined in
`target/riscv/insn_trans/trans_rvvk.c.inc` and helpers are defined in
`target/riscv/vcrypto_helper.c`.
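For reference, vsm3me.vv produces eight new SM3 message-schedule words per
element group; the scalar recurrence it vectorizes is shown in the minimal
stand-alone C sketch below (illustration only, not part of this patch).

  #include <inttypes.h>
  #include <stdint.h>
  #include <stdio.h>

  static uint32_t rol32(uint32_t x, unsigned n) { return (x << n) | (x >> (32 - n)); }
  static uint32_t p1(uint32_t x) { return x ^ rol32(x, 15) ^ rol32(x, 23); }

  int main(void)
  {
      uint32_t W[68] = { 0 }; /* W[0..15] would come from the 512-bit block */

      for (int j = 16; j < 68; j++) {
          W[j] = p1(W[j - 16] ^ W[j - 9] ^ rol32(W[j - 3], 15)) ^
                 rol32(W[j - 13], 7) ^ W[j - 6];
      }
      printf("W[16] = %08" PRIx32 "\n", W[16]);
      return 0;
  }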
Co-authored-by: Kiran Ostrolenk <kiran.ostrolenk@codethink.co.uk>
[max.chou@sifive.com: Replaced vstart checking by TCG op]
Signed-off-by: Kiran Ostrolenk <kiran.ostrolenk@codethink.co.uk>
Signed-off-by: Lawrence Hunter <lawrence.hunter@codethink.co.uk>
Signed-off-by: Max Chou <max.chou@sifive.com>
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
[max.chou@sifive.com: Exposed x-zvksh property]
---
target/riscv/cpu.c | 6 +-
target/riscv/cpu_cfg.h | 1 +
target/riscv/helper.h | 3 +
target/riscv/insn32.decode | 4 +
target/riscv/insn_trans/trans_rvvk.c.inc | 31 ++++++
target/riscv/vcrypto_helper.c | 134 +++++++++++++++++++++++
6 files changed, 177 insertions(+), 2 deletions(-)
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
index 3ca5ac209a..08b8355f52 100644
--- a/target/riscv/cpu.c
+++ b/target/riscv/cpu.c
@@ -121,6 +121,7 @@ static const struct isa_ext_data isa_edata_arr[] = {
ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned),
ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha),
ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb),
+ ISA_EXT_DATA_ENTRY(zvksh, PRIV_VERSION_1_12_0, ext_zvksh),
ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia),
@@ -1193,8 +1194,8 @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
* In principle Zve*x would also suffice here, were they supported
* in qemu
*/
- if ((cpu->cfg.ext_zvbb || cpu->cfg.ext_zvkned || cpu->cfg.ext_zvknha) &&
- !cpu->cfg.ext_zve32f) {
+ if ((cpu->cfg.ext_zvbb || cpu->cfg.ext_zvkned || cpu->cfg.ext_zvknha ||
+ cpu->cfg.ext_zvksh) && !cpu->cfg.ext_zve32f) {
error_setg(errp,
"Vector crypto extensions require V or Zve* extensions");
return;
@@ -1712,6 +1713,7 @@ static Property riscv_cpu_extensions[] = {
DEFINE_PROP_BOOL("x-zvkned", RISCVCPU, cfg.ext_zvkned, false),
DEFINE_PROP_BOOL("x-zvknha", RISCVCPU, cfg.ext_zvknha, false),
DEFINE_PROP_BOOL("x-zvknhb", RISCVCPU, cfg.ext_zvknhb, false),
+ DEFINE_PROP_BOOL("x-zvksh", RISCVCPU, cfg.ext_zvksh, false),
DEFINE_PROP_END_OF_LIST(),
};
diff --git a/target/riscv/cpu_cfg.h b/target/riscv/cpu_cfg.h
index 7144bfd228..27062b12a8 100644
--- a/target/riscv/cpu_cfg.h
+++ b/target/riscv/cpu_cfg.h
@@ -88,6 +88,7 @@ struct RISCVCPUConfig {
bool ext_zvkned;
bool ext_zvknha;
bool ext_zvknhb;
+ bool ext_zvksh;
bool ext_zmmul;
bool ext_zvfh;
bool ext_zvfhmin;
diff --git a/target/riscv/helper.h b/target/riscv/helper.h
index 19f5a8a28d..9220af18e6 100644
--- a/target/riscv/helper.h
+++ b/target/riscv/helper.h
@@ -1238,3 +1238,6 @@ DEF_HELPER_5(vaeskf2_vi, void, ptr, ptr, i32, env, i32)
DEF_HELPER_5(vsha2ms_vv, void, ptr, ptr, ptr, env, i32)
DEF_HELPER_5(vsha2ch_vv, void, ptr, ptr, ptr, env, i32)
DEF_HELPER_5(vsha2cl_vv, void, ptr, ptr, ptr, env, i32)
+
+DEF_HELPER_5(vsm3me_vv, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_5(vsm3c_vi, void, ptr, ptr, i32, env, i32)
diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
index d2cfb2729c..5ca83e8462 100644
--- a/target/riscv/insn32.decode
+++ b/target/riscv/insn32.decode
@@ -953,3 +953,7 @@ vaeskf2_vi 101010 1 ..... ..... 010 ..... 1110111 @r_vm_1
vsha2ms_vv 101101 1 ..... ..... 010 ..... 1110111 @r_vm_1
vsha2ch_vv 101110 1 ..... ..... 010 ..... 1110111 @r_vm_1
vsha2cl_vv 101111 1 ..... ..... 010 ..... 1110111 @r_vm_1
+
+# *** Zvksh vector crypto extension ***
+vsm3me_vv 100000 1 ..... ..... 010 ..... 1110111 @r_vm_1
+vsm3c_vi 101011 1 ..... ..... 010 ..... 1110111 @r_vm_1
diff --git a/target/riscv/insn_trans/trans_rvvk.c.inc b/target/riscv/insn_trans/trans_rvvk.c.inc
index 528a0d3b32..af1fb74c38 100644
--- a/target/riscv/insn_trans/trans_rvvk.c.inc
+++ b/target/riscv/insn_trans/trans_rvvk.c.inc
@@ -479,3 +479,34 @@ static bool vsha_check(DisasContext *s, arg_rmrr *a)
GEN_VV_UNMASKED_TRANS(vsha2ms_vv, vsha_check, ZVKNH_EGS)
GEN_VV_UNMASKED_TRANS(vsha2cl_vv, vsha_check, ZVKNH_EGS)
GEN_VV_UNMASKED_TRANS(vsha2ch_vv, vsha_check, ZVKNH_EGS)
+
+/*
+ * Zvksh
+ */
+
+#define ZVKSH_EGS 8
+
+static inline bool vsm3_check(DisasContext *s, arg_rmrr *a)
+{
+ int egw_bytes = ZVKSH_EGS << s->sew;
+ int mult = 1 << MAX(s->lmul, 0);
+ return s->cfg_ptr->ext_zvksh == true &&
+ require_rvv(s) &&
+ vext_check_isa_ill(s) &&
+ !is_overlapped(a->rd, mult, a->rs2, mult) &&
+ MAXSZ(s) >= egw_bytes &&
+ s->sew == MO_32;
+}
+
+static inline bool vsm3me_check(DisasContext *s, arg_rmrr *a)
+{
+ return vsm3_check(s, a) && vext_check_sss(s, a->rd, a->rs1, a->rs2, a->vm);
+}
+
+static inline bool vsm3c_check(DisasContext *s, arg_rmrr *a)
+{
+ return vsm3_check(s, a) && vext_check_ss(s, a->rd, a->rs2, a->vm);
+}
+
+GEN_VV_UNMASKED_TRANS(vsm3me_vv, vsm3me_check, ZVKSH_EGS)
+GEN_VI_UNMASKED_TRANS(vsm3c_vi, vsm3c_check, ZVKSH_EGS)
diff --git a/target/riscv/vcrypto_helper.c b/target/riscv/vcrypto_helper.c
index 24e44fcca8..fc00ea32f3 100644
--- a/target/riscv/vcrypto_helper.c
+++ b/target/riscv/vcrypto_helper.c
@@ -618,3 +618,137 @@ void HELPER(vsha2cl_vv)(void *vd, void *vs1, void *vs2, CPURISCVState *env,
vext_set_elems_1s(vd, vta, env->vl * esz, total_elems * esz);
env->vstart = 0;
}
+
+static inline uint32_t p1(uint32_t x)
+{
+ return x ^ rol32(x, 15) ^ rol32(x, 23);
+}
+
+static inline uint32_t zvksh_w(uint32_t m16, uint32_t m9, uint32_t m3,
+ uint32_t m13, uint32_t m6)
+{
+ return p1(m16 ^ m9 ^ rol32(m3, 15)) ^ rol32(m13, 7) ^ m6;
+}
+
+void HELPER(vsm3me_vv)(void *vd_vptr, void *vs1_vptr, void *vs2_vptr,
+ CPURISCVState *env, uint32_t desc)
+{
+ uint32_t esz = memop_size(FIELD_EX64(env->vtype, VTYPE, VSEW));
+ uint32_t total_elems = vext_get_total_elems(env, desc, esz);
+ uint32_t vta = vext_vta(desc);
+ uint32_t *vd = vd_vptr;
+ uint32_t *vs1 = vs1_vptr;
+ uint32_t *vs2 = vs2_vptr;
+
+ for (int i = env->vstart / 8; i < env->vl / 8; i++) {
+ uint32_t w[24];
+ for (int j = 0; j < 8; j++) {
+ w[j] = bswap32(vs1[H4((i * 8) + j)]);
+ w[j + 8] = bswap32(vs2[H4((i * 8) + j)]);
+ }
+ for (int j = 0; j < 8; j++) {
+ w[j + 16] =
+ zvksh_w(w[j], w[j + 7], w[j + 13], w[j + 3], w[j + 10]);
+ }
+ for (int j = 0; j < 8; j++) {
+ vd[(i * 8) + j] = bswap32(w[H4(j + 16)]);
+ }
+ }
+ vext_set_elems_1s(vd_vptr, vta, env->vl * esz, total_elems * esz);
+ env->vstart = 0;
+}
+
+static inline uint32_t ff1(uint32_t x, uint32_t y, uint32_t z)
+{
+ return x ^ y ^ z;
+}
+
+static inline uint32_t ff2(uint32_t x, uint32_t y, uint32_t z)
+{
+ return (x & y) | (x & z) | (y & z);
+}
+
+static inline uint32_t ff_j(uint32_t x, uint32_t y, uint32_t z, uint32_t j)
+{
+ return (j <= 15) ? ff1(x, y, z) : ff2(x, y, z);
+}
+
+static inline uint32_t gg1(uint32_t x, uint32_t y, uint32_t z)
+{
+ return x ^ y ^ z;
+}
+
+static inline uint32_t gg2(uint32_t x, uint32_t y, uint32_t z)
+{
+ return (x & y) | (~x & z);
+}
+
+static inline uint32_t gg_j(uint32_t x, uint32_t y, uint32_t z, uint32_t j)
+{
+ return (j <= 15) ? gg1(x, y, z) : gg2(x, y, z);
+}
+
+static inline uint32_t t_j(uint32_t j)
+{
+ return (j <= 15) ? 0x79cc4519 : 0x7a879d8a;
+}
+
+static inline uint32_t p_0(uint32_t x)
+{
+ return x ^ rol32(x, 9) ^ rol32(x, 17);
+}
+
+static void sm3c(uint32_t *vd, uint32_t *vs1, uint32_t *vs2, uint32_t uimm)
+{
+ uint32_t x0, x1;
+ uint32_t j;
+ uint32_t ss1, ss2, tt1, tt2;
+ x0 = vs2[0] ^ vs2[4];
+ x1 = vs2[1] ^ vs2[5];
+ j = 2 * uimm;
+ ss1 = rol32(rol32(vs1[0], 12) + vs1[4] + rol32(t_j(j), j % 32), 7);
+ ss2 = ss1 ^ rol32(vs1[0], 12);
+ tt1 = ff_j(vs1[0], vs1[1], vs1[2], j) + vs1[3] + ss2 + x0;
+ tt2 = gg_j(vs1[4], vs1[5], vs1[6], j) + vs1[7] + ss1 + vs2[0];
+ vs1[3] = vs1[2];
+ vd[3] = rol32(vs1[1], 9);
+ vs1[1] = vs1[0];
+ vd[1] = tt1;
+ vs1[7] = vs1[6];
+ vd[7] = rol32(vs1[5], 19);
+ vs1[5] = vs1[4];
+ vd[5] = p_0(tt2);
+ j = 2 * uimm + 1;
+ ss1 = rol32(rol32(vd[1], 12) + vd[5] + rol32(t_j(j), j % 32), 7);
+ ss2 = ss1 ^ rol32(vd[1], 12);
+ tt1 = ff_j(vd[1], vs1[1], vd[3], j) + vs1[3] + ss2 + x1;
+ tt2 = gg_j(vd[5], vs1[5], vd[7], j) + vs1[7] + ss1 + vs2[1];
+ vd[2] = rol32(vs1[1], 9);
+ vd[0] = tt1;
+ vd[6] = rol32(vs1[5], 19);
+ vd[4] = p_0(tt2);
+}
+
+void HELPER(vsm3c_vi)(void *vd_vptr, void *vs2_vptr, uint32_t uimm,
+ CPURISCVState *env, uint32_t desc)
+{
+ uint32_t esz = memop_size(FIELD_EX64(env->vtype, VTYPE, VSEW));
+ uint32_t total_elems = vext_get_total_elems(env, desc, esz);
+ uint32_t vta = vext_vta(desc);
+ uint32_t *vd = vd_vptr;
+ uint32_t *vs2 = vs2_vptr;
+ uint32_t v1[8], v2[8], v3[8];
+
+ for (int i = env->vstart / 8; i < env->vl / 8; i++) {
+ for (int k = 0; k < 8; k++) {
+ v2[k] = bswap32(vd[H4(i * 8 + k)]);
+ v3[k] = bswap32(vs2[H4(i * 8 + k)]);
+ }
+ sm3c(v1, v2, v3, uimm);
+ for (int k = 0; k < 8; k++) {
+ vd[i * 8 + k] = bswap32(v1[H4(k)]);
+ }
+ }
+ vext_set_elems_1s(vd_vptr, vta, env->vl * esz, total_elems * esz);
+ env->vstart = 0;
+}
--
2.31.1
* [PATCH v5 12/15] target/riscv: Add Zvkg ISA extension support
From: Max Chou @ 2023-06-27 17:29 UTC (permalink / raw)
To: qemu-devel, qemu-riscv
Cc: dbarboza, Nazar Kazakov, Lawrence Hunter, Max Chou,
Palmer Dabbelt, Alistair Francis, Bin Meng, Weiwei Li, Liu Zhiwei,
Kiran Ostrolenk
From: Nazar Kazakov <nazar.kazakov@codethink.co.uk>
This commit adds support for the Zvkg vector-crypto extension, which
consists of the following instructions:
* vgmul.vv
* vghsh.vv
Translation functions are defined in
`target/riscv/insn_trans/trans_rvvk.c.inc` and helpers are defined in
`target/riscv/vcrypto_helper.c`.
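For reference, vghsh.vv and vgmul.vv multiply 128-bit element groups in
GF(2^128) with the GHASH reduction polynomial x^128 + x^7 + x^2 + x + 1;
the helpers additionally apply brev8 because the specification uses a
bit-reflected representation. A minimal stand-alone C sketch of the
underlying multiply in plain integer bit order follows (illustration only,
not part of this patch; gf128_mul is an illustrative name).

  #include <inttypes.h>
  #include <stdbool.h>
  #include <stdint.h>
  #include <stdio.h>

  /* r = a * b in GF(2^128); operands are {low, high} uint64_t pairs with
   * bit k holding the coefficient of x^k. */
  static void gf128_mul(uint64_t r[2], const uint64_t a[2], const uint64_t b[2])
  {
      uint64_t p[2] = { a[0], a[1] };        /* p = a * x^i as i advances */

      r[0] = r[1] = 0;
      for (int i = 0; i < 128; i++) {
          if ((b[i / 64] >> (i % 64)) & 1) { /* accumulate a * x^i */
              r[0] ^= p[0];
              r[1] ^= p[1];
          }
          bool carry = (p[1] >> 63) & 1;
          p[1] = (p[1] << 1) | (p[0] >> 63); /* p *= x */
          p[0] <<= 1;
          if (carry) {
              p[0] ^= 0x87;                  /* reduce: x^128 = x^7 + x^2 + x + 1 */
          }
      }
  }

  int main(void)
  {
      uint64_t a[2] = { 2, 0 }, b[2] = { 2, 0 }, r[2]; /* x * x = x^2 */

      gf128_mul(r, a, b);
      printf("%016" PRIx64 " %016" PRIx64 "\n", r[1], r[0]);
      return 0;
  }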
Co-authored-by: Lawrence Hunter <lawrence.hunter@codethink.co.uk>
[max.chou@sifive.com: Replaced vstart checking by TCG op]
Signed-off-by: Lawrence Hunter <lawrence.hunter@codethink.co.uk>
Signed-off-by: Nazar Kazakov <nazar.kazakov@codethink.co.uk>
Signed-off-by: Max Chou <max.chou@sifive.com>
Reviewed-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
[max.chou@sifive.com: Exposed x-zvkg property]
---
target/riscv/cpu.c | 6 +-
target/riscv/cpu_cfg.h | 1 +
target/riscv/helper.h | 3 +
target/riscv/insn32.decode | 4 ++
target/riscv/insn_trans/trans_rvvk.c.inc | 30 ++++++++++
target/riscv/vcrypto_helper.c | 72 ++++++++++++++++++++++++
6 files changed, 114 insertions(+), 2 deletions(-)
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
index 08b8355f52..699ab5e9fa 100644
--- a/target/riscv/cpu.c
+++ b/target/riscv/cpu.c
@@ -118,6 +118,7 @@ static const struct isa_ext_data isa_edata_arr[] = {
ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d),
ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh),
ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin),
+ ISA_EXT_DATA_ENTRY(zvkg, PRIV_VERSION_1_12_0, ext_zvkg),
ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned),
ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha),
ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb),
@@ -1194,8 +1195,8 @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
* In principle Zve*x would also suffice here, were they supported
* in qemu
*/
- if ((cpu->cfg.ext_zvbb || cpu->cfg.ext_zvkned || cpu->cfg.ext_zvknha ||
- cpu->cfg.ext_zvksh) && !cpu->cfg.ext_zve32f) {
+ if ((cpu->cfg.ext_zvbb || cpu->cfg.ext_zvkg || cpu->cfg.ext_zvkned ||
+ cpu->cfg.ext_zvknha || cpu->cfg.ext_zvksh) && !cpu->cfg.ext_zve32f) {
error_setg(errp,
"Vector crypto extensions require V or Zve* extensions");
return;
@@ -1710,6 +1711,7 @@ static Property riscv_cpu_extensions[] = {
/* Vector cryptography extensions */
DEFINE_PROP_BOOL("x-zvbb", RISCVCPU, cfg.ext_zvbb, false),
DEFINE_PROP_BOOL("x-zvbc", RISCVCPU, cfg.ext_zvbc, false),
+ DEFINE_PROP_BOOL("x-zvkg", RISCVCPU, cfg.ext_zvkg, false),
DEFINE_PROP_BOOL("x-zvkned", RISCVCPU, cfg.ext_zvkned, false),
DEFINE_PROP_BOOL("x-zvknha", RISCVCPU, cfg.ext_zvknha, false),
DEFINE_PROP_BOOL("x-zvknhb", RISCVCPU, cfg.ext_zvknhb, false),
diff --git a/target/riscv/cpu_cfg.h b/target/riscv/cpu_cfg.h
index 27062b12a8..960761c479 100644
--- a/target/riscv/cpu_cfg.h
+++ b/target/riscv/cpu_cfg.h
@@ -85,6 +85,7 @@ struct RISCVCPUConfig {
bool ext_zve64d;
bool ext_zvbb;
bool ext_zvbc;
+ bool ext_zvkg;
bool ext_zvkned;
bool ext_zvknha;
bool ext_zvknhb;
diff --git a/target/riscv/helper.h b/target/riscv/helper.h
index 9220af18e6..a4fe1ff5ca 100644
--- a/target/riscv/helper.h
+++ b/target/riscv/helper.h
@@ -1241,3 +1241,6 @@ DEF_HELPER_5(vsha2cl_vv, void, ptr, ptr, ptr, env, i32)
DEF_HELPER_5(vsm3me_vv, void, ptr, ptr, ptr, env, i32)
DEF_HELPER_5(vsm3c_vi, void, ptr, ptr, i32, env, i32)
+
+DEF_HELPER_5(vghsh_vv, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_4(vgmul_vv, void, ptr, ptr, env, i32)
diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
index 5ca83e8462..b10497afd3 100644
--- a/target/riscv/insn32.decode
+++ b/target/riscv/insn32.decode
@@ -957,3 +957,7 @@ vsha2cl_vv 101111 1 ..... ..... 010 ..... 1110111 @r_vm_1
# *** Zvksh vector crypto extension ***
vsm3me_vv 100000 1 ..... ..... 010 ..... 1110111 @r_vm_1
vsm3c_vi 101011 1 ..... ..... 010 ..... 1110111 @r_vm_1
+
+# *** Zvkg vector crypto extension ***
+vghsh_vv 101100 1 ..... ..... 010 ..... 1110111 @r_vm_1
+vgmul_vv 101000 1 ..... 10001 010 ..... 1110111 @r2_vm_1
diff --git a/target/riscv/insn_trans/trans_rvvk.c.inc b/target/riscv/insn_trans/trans_rvvk.c.inc
index af1fb74c38..e5ccb26c45 100644
--- a/target/riscv/insn_trans/trans_rvvk.c.inc
+++ b/target/riscv/insn_trans/trans_rvvk.c.inc
@@ -510,3 +510,33 @@ static inline bool vsm3c_check(DisasContext *s, arg_rmrr *a)
GEN_VV_UNMASKED_TRANS(vsm3me_vv, vsm3me_check, ZVKSH_EGS)
GEN_VI_UNMASKED_TRANS(vsm3c_vi, vsm3c_check, ZVKSH_EGS)
+
+/*
+ * Zvkg
+ */
+
+#define ZVKG_EGS 4
+
+static bool vgmul_check(DisasContext *s, arg_rmr *a)
+{
+ int egw_bytes = ZVKG_EGS << s->sew;
+ return s->cfg_ptr->ext_zvkg == true &&
+ vext_check_isa_ill(s) &&
+ require_rvv(s) &&
+ MAXSZ(s) >= egw_bytes &&
+ vext_check_ss(s, a->rd, a->rs2, a->vm) &&
+ s->sew == MO_32;
+}
+
+GEN_V_UNMASKED_TRANS(vgmul_vv, vgmul_check, ZVKG_EGS)
+
+static bool vghsh_check(DisasContext *s, arg_rmrr *a)
+{
+ int egw_bytes = ZVKG_EGS << s->sew;
+ return s->cfg_ptr->ext_zvkg == true &&
+ opivv_check(s, a) &&
+ MAXSZ(s) >= egw_bytes &&
+ s->sew == MO_32;
+}
+
+GEN_VV_UNMASKED_TRANS(vghsh_vv, vghsh_check, ZVKG_EGS)
diff --git a/target/riscv/vcrypto_helper.c b/target/riscv/vcrypto_helper.c
index fc00ea32f3..5962ea2885 100644
--- a/target/riscv/vcrypto_helper.c
+++ b/target/riscv/vcrypto_helper.c
@@ -752,3 +752,75 @@ void HELPER(vsm3c_vi)(void *vd_vptr, void *vs2_vptr, uint32_t uimm,
vext_set_elems_1s(vd_vptr, vta, env->vl * esz, total_elems * esz);
env->vstart = 0;
}
+
+void HELPER(vghsh_vv)(void *vd_vptr, void *vs1_vptr, void *vs2_vptr,
+ CPURISCVState *env, uint32_t desc)
+{
+ uint64_t *vd = vd_vptr;
+ uint64_t *vs1 = vs1_vptr;
+ uint64_t *vs2 = vs2_vptr;
+ uint32_t vta = vext_vta(desc);
+ uint32_t total_elems = vext_get_total_elems(env, desc, 4);
+
+ for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
+ uint64_t Y[2] = {vd[i * 2 + 0], vd[i * 2 + 1]};
+ uint64_t H[2] = {brev8(vs2[i * 2 + 0]), brev8(vs2[i * 2 + 1])};
+ uint64_t X[2] = {vs1[i * 2 + 0], vs1[i * 2 + 1]};
+ uint64_t Z[2] = {0, 0};
+
+ uint64_t S[2] = {brev8(Y[0] ^ X[0]), brev8(Y[1] ^ X[1])};
+
+ for (uint j = 0; j < 128; j++) {
+ if ((S[j / 64] >> (j % 64)) & 1) {
+ Z[0] ^= H[0];
+ Z[1] ^= H[1];
+ }
+ bool reduce = ((H[1] >> 63) & 1);
+ H[1] = H[1] << 1 | H[0] >> 63;
+ H[0] = H[0] << 1;
+ if (reduce) {
+ H[0] ^= 0x87;
+ }
+ }
+
+ vd[i * 2 + 0] = brev8(Z[0]);
+ vd[i * 2 + 1] = brev8(Z[1]);
+ }
+ /* set tail elements to 1s */
+ vext_set_elems_1s(vd, vta, env->vl * 4, total_elems * 4);
+ env->vstart = 0;
+}
+
+void HELPER(vgmul_vv)(void *vd_vptr, void *vs2_vptr, CPURISCVState *env,
+ uint32_t desc)
+{
+ uint64_t *vd = vd_vptr;
+ uint64_t *vs2 = vs2_vptr;
+ uint32_t vta = vext_vta(desc);
+ uint32_t total_elems = vext_get_total_elems(env, desc, 4);
+
+ for (uint32_t i = env->vstart / 4; i < env->vl / 4; i++) {
+ uint64_t Y[2] = {brev8(vd[i * 2 + 0]), brev8(vd[i * 2 + 1])};
+ uint64_t H[2] = {brev8(vs2[i * 2 + 0]), brev8(vs2[i * 2 + 1])};
+ uint64_t Z[2] = {0, 0};
+
+ for (uint j = 0; j < 128; j++) {
+ if ((Y[j / 64] >> (j % 64)) & 1) {
+ Z[0] ^= H[0];
+ Z[1] ^= H[1];
+ }
+ bool reduce = ((H[1] >> 63) & 1);
+ H[1] = H[1] << 1 | H[0] >> 63;
+ H[0] = H[0] << 1;
+ if (reduce) {
+ H[0] ^= 0x87;
+ }
+ }
+
+ vd[i * 2 + 0] = brev8(Z[0]);
+ vd[i * 2 + 1] = brev8(Z[1]);
+ }
+ /* set tail elements to 1s */
+ vext_set_elems_1s(vd, vta, env->vl * 4, total_elems * 4);
+ env->vstart = 0;
+}
--
2.31.1
* [PATCH v5 13/15] crypto: Create sm4_subword
From: Max Chou @ 2023-06-27 17:29 UTC (permalink / raw)
To: qemu-devel, qemu-riscv
Cc: dbarboza, Max Chou, Frank Chang, Richard Henderson,
Daniel P. Berrangé, Peter Maydell, qemu-arm
Allows sharing of sm4_subword between different targets.
Signed-off-by: Max Chou <max.chou@sifive.com>
Reviewed-by: Frank Chang <frank.chang@sifive.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Max Chou <max.chou@sifive.com>
---
include/crypto/sm4.h | 8 ++++++++
target/arm/tcg/crypto_helper.c | 10 ++--------
2 files changed, 10 insertions(+), 8 deletions(-)
diff --git a/include/crypto/sm4.h b/include/crypto/sm4.h
index 9bd3ebc62e..de8245d8a7 100644
--- a/include/crypto/sm4.h
+++ b/include/crypto/sm4.h
@@ -3,4 +3,12 @@
extern const uint8_t sm4_sbox[256];
+static inline uint32_t sm4_subword(uint32_t word)
+{
+ return sm4_sbox[word & 0xff] |
+ sm4_sbox[(word >> 8) & 0xff] << 8 |
+ sm4_sbox[(word >> 16) & 0xff] << 16 |
+ sm4_sbox[(word >> 24) & 0xff] << 24;
+}
+
#endif
diff --git a/target/arm/tcg/crypto_helper.c b/target/arm/tcg/crypto_helper.c
index fdd70abbfd..7cadd61e12 100644
--- a/target/arm/tcg/crypto_helper.c
+++ b/target/arm/tcg/crypto_helper.c
@@ -614,10 +614,7 @@ static void do_crypto_sm4e(uint64_t *rd, uint64_t *rn, uint64_t *rm)
CR_ST_WORD(d, (i + 3) % 4) ^
CR_ST_WORD(n, i);
- t = sm4_sbox[t & 0xff] |
- sm4_sbox[(t >> 8) & 0xff] << 8 |
- sm4_sbox[(t >> 16) & 0xff] << 16 |
- sm4_sbox[(t >> 24) & 0xff] << 24;
+ t = sm4_subword(t);
CR_ST_WORD(d, i) ^= t ^ rol32(t, 2) ^ rol32(t, 10) ^ rol32(t, 18) ^
rol32(t, 24);
@@ -651,10 +648,7 @@ static void do_crypto_sm4ekey(uint64_t *rd, uint64_t *rn, uint64_t *rm)
CR_ST_WORD(d, (i + 3) % 4) ^
CR_ST_WORD(m, i);
- t = sm4_sbox[t & 0xff] |
- sm4_sbox[(t >> 8) & 0xff] << 8 |
- sm4_sbox[(t >> 16) & 0xff] << 16 |
- sm4_sbox[(t >> 24) & 0xff] << 24;
+ t = sm4_subword(t);
CR_ST_WORD(d, i) ^= t ^ rol32(t, 13) ^ rol32(t, 23);
}
--
2.31.1
* [PATCH v5 14/15] crypto: Add SM4 constant parameter CK
From: Max Chou @ 2023-06-27 17:29 UTC (permalink / raw)
To: qemu-devel, qemu-riscv
Cc: dbarboza, Max Chou, Frank Chang, Daniel P. Berrangé
Adds the sm4_ck constant array for use in SM4 cryptography across different targets.
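The 32 values are the CK round constants of the SM4 key schedule. As a
hedged sketch of where they plug in (illustration only; it mirrors what the
vsm4k.vi helper added in the next patch computes four round keys at a time,
assumes sm4_subword() and sm4_ck[] from this series, sm4_key_schedule and
rotl are illustrative names, and the FK values are the standard SM4 system
parameters):

  #include <stdint.h>
  #include "crypto/sm4.h" /* sm4_sbox[], sm4_subword(), sm4_ck[] */

  static uint32_t rotl(uint32_t x, unsigned n) { return (x << n) | (x >> (32 - n)); }

  static void sm4_key_schedule(const uint32_t mk[4], uint32_t rk[32])
  {
      /* FK system parameters from the SM4 standard */
      static const uint32_t fk[4] = { 0xa3b1bac6, 0x56aa3350,
                                      0x677d9197, 0xb27022dc };
      uint32_t k[36];

      for (int i = 0; i < 4; i++) {
          k[i] = mk[i] ^ fk[i];
      }
      for (int i = 0; i < 32; i++) {
          uint32_t s = sm4_subword(k[i + 1] ^ k[i + 2] ^ k[i + 3] ^ sm4_ck[i]);

          /* key-schedule linear transform: b ^ (b <<< 13) ^ (b <<< 23) */
          k[i + 4] = k[i] ^ (s ^ rotl(s, 13) ^ rotl(s, 23));
          rk[i] = k[i + 4];
      }
  }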
Signed-off-by: Max Chou <max.chou@sifive.com>
Reviewed-by: Frank Chang <frank.chang@sifive.com>
Signed-off-by: Max Chou <max.chou@sifive.com>
---
crypto/sm4.c | 10 ++++++++++
include/crypto/sm4.h | 1 +
2 files changed, 11 insertions(+)
diff --git a/crypto/sm4.c b/crypto/sm4.c
index 9f0cd452c7..2987306cf7 100644
--- a/crypto/sm4.c
+++ b/crypto/sm4.c
@@ -47,3 +47,13 @@ uint8_t const sm4_sbox[] = {
0x79, 0xee, 0x5f, 0x3e, 0xd7, 0xcb, 0x39, 0x48,
};
+uint32_t const sm4_ck[] = {
+ 0x00070e15, 0x1c232a31, 0x383f464d, 0x545b6269,
+ 0x70777e85, 0x8c939aa1, 0xa8afb6bd, 0xc4cbd2d9,
+ 0xe0e7eef5, 0xfc030a11, 0x181f262d, 0x343b4249,
+ 0x50575e65, 0x6c737a81, 0x888f969d, 0xa4abb2b9,
+ 0xc0c7ced5, 0xdce3eaf1, 0xf8ff060d, 0x141b2229,
+ 0x30373e45, 0x4c535a61, 0x686f767d, 0x848b9299,
+ 0xa0a7aeb5, 0xbcc3cad1, 0xd8dfe6ed, 0xf4fb0209,
+ 0x10171e25, 0x2c333a41, 0x484f565d, 0x646b7279
+};
diff --git a/include/crypto/sm4.h b/include/crypto/sm4.h
index de8245d8a7..382b26d922 100644
--- a/include/crypto/sm4.h
+++ b/include/crypto/sm4.h
@@ -2,6 +2,7 @@
#define QEMU_SM4_H
extern const uint8_t sm4_sbox[256];
+extern const uint32_t sm4_ck[32];
static inline uint32_t sm4_subword(uint32_t word)
{
--
2.31.1
* [PATCH v5 15/15] target/riscv: Add Zvksed ISA extension support
From: Max Chou @ 2023-06-27 17:29 UTC (permalink / raw)
To: qemu-devel, qemu-riscv
Cc: dbarboza, Max Chou, Frank Chang, Palmer Dabbelt, Alistair Francis,
Bin Meng, Weiwei Li, Liu Zhiwei, Nazar Kazakov, Lawrence Hunter,
Kiran Ostrolenk
This commit adds support for the Zvksed vector-crypto extension, which
consists of the following instructions:
* vsm4k.vi
* vsm4r.[vv,vs]
Translation functions are defined in
`target/riscv/insn_trans/trans_rvvk.c.inc` and helpers are defined in
`target/riscv/vcrypto_helper.c`.
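For reference, vsm4r.[vv,vs] applies four SM4 rounds to each four-word
element group, and the .vs form reuses the round-key group read from element
group 0 of vs2 for every group of vd. A minimal scalar sketch of those four
rounds (illustration only, not part of this patch; it assumes sm4_subword()
from this series, and sm4_four_rounds/rotl are illustrative names):

  #include <stdint.h>
  #include "crypto/sm4.h" /* sm4_sbox[], sm4_subword() */

  static uint32_t rotl(uint32_t x, unsigned n) { return (x << n) | (x >> (32 - n)); }

  /* Four SM4 rounds in place: x = {X0,X1,X2,X3} becomes {X4,X5,X6,X7}. */
  static void sm4_four_rounds(uint32_t x[4], const uint32_t rk[4])
  {
      for (int i = 0; i < 4; i++) {
          uint32_t s = sm4_subword(x[(i + 1) & 3] ^ x[(i + 2) & 3] ^
                                   x[(i + 3) & 3] ^ rk[i]);

          /* data-path linear transform: b ^ b<<<2 ^ b<<<10 ^ b<<<18 ^ b<<<24 */
          x[i] ^= s ^ rotl(s, 2) ^ rotl(s, 10) ^ rotl(s, 18) ^ rotl(s, 24);
      }
  }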
Signed-off-by: Max Chou <max.chou@sifive.com>
Reviewed-by: Frank Chang <frank.chang@sifive.com>
[lawrence.hunter@codethink.co.uk: Moved SM4 functions from
crypto_helper.c to vcrypto_helper.c]
[nazar.kazakov@codethink.co.uk: Added alignment checks, refactored code to
use macros, and minor style changes]
Signed-off-by: Max Chou <max.chou@sifive.com>
---
target/riscv/cpu.c | 5 +-
target/riscv/cpu_cfg.h | 1 +
target/riscv/helper.h | 4 +
target/riscv/insn32.decode | 5 +
target/riscv/insn_trans/trans_rvvk.c.inc | 43 ++++++++
target/riscv/vcrypto_helper.c | 127 +++++++++++++++++++++++
6 files changed, 184 insertions(+), 1 deletion(-)
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
index 699ab5e9fa..d4c2f77d44 100644
--- a/target/riscv/cpu.c
+++ b/target/riscv/cpu.c
@@ -122,6 +122,7 @@ static const struct isa_ext_data isa_edata_arr[] = {
ISA_EXT_DATA_ENTRY(zvkned, PRIV_VERSION_1_12_0, ext_zvkned),
ISA_EXT_DATA_ENTRY(zvknha, PRIV_VERSION_1_12_0, ext_zvknha),
ISA_EXT_DATA_ENTRY(zvknhb, PRIV_VERSION_1_12_0, ext_zvknhb),
+ ISA_EXT_DATA_ENTRY(zvksed, PRIV_VERSION_1_12_0, ext_zvksed),
ISA_EXT_DATA_ENTRY(zvksh, PRIV_VERSION_1_12_0, ext_zvksh),
ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin),
@@ -1196,7 +1197,8 @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
* in qemu
*/
if ((cpu->cfg.ext_zvbb || cpu->cfg.ext_zvkg || cpu->cfg.ext_zvkned ||
- cpu->cfg.ext_zvknha || cpu->cfg.ext_zvksh) && !cpu->cfg.ext_zve32f) {
+ cpu->cfg.ext_zvknha || cpu->cfg.ext_zvksed || cpu->cfg.ext_zvksh) &&
+ !cpu->cfg.ext_zve32f) {
error_setg(errp,
"Vector crypto extensions require V or Zve* extensions");
return;
@@ -1715,6 +1717,7 @@ static Property riscv_cpu_extensions[] = {
DEFINE_PROP_BOOL("x-zvkned", RISCVCPU, cfg.ext_zvkned, false),
DEFINE_PROP_BOOL("x-zvknha", RISCVCPU, cfg.ext_zvknha, false),
DEFINE_PROP_BOOL("x-zvknhb", RISCVCPU, cfg.ext_zvknhb, false),
+ DEFINE_PROP_BOOL("x-zvksed", RISCVCPU, cfg.ext_zvksed, false),
DEFINE_PROP_BOOL("x-zvksh", RISCVCPU, cfg.ext_zvksh, false),
DEFINE_PROP_END_OF_LIST(),
diff --git a/target/riscv/cpu_cfg.h b/target/riscv/cpu_cfg.h
index 960761c479..a4b60f8a51 100644
--- a/target/riscv/cpu_cfg.h
+++ b/target/riscv/cpu_cfg.h
@@ -89,6 +89,7 @@ struct RISCVCPUConfig {
bool ext_zvkned;
bool ext_zvknha;
bool ext_zvknhb;
+ bool ext_zvksed;
bool ext_zvksh;
bool ext_zmmul;
bool ext_zvfh;
diff --git a/target/riscv/helper.h b/target/riscv/helper.h
index a4fe1ff5ca..16e6726666 100644
--- a/target/riscv/helper.h
+++ b/target/riscv/helper.h
@@ -1244,3 +1244,7 @@ DEF_HELPER_5(vsm3c_vi, void, ptr, ptr, i32, env, i32)
DEF_HELPER_5(vghsh_vv, void, ptr, ptr, ptr, env, i32)
DEF_HELPER_4(vgmul_vv, void, ptr, ptr, env, i32)
+
+DEF_HELPER_5(vsm4k_vi, void, ptr, ptr, i32, env, i32)
+DEF_HELPER_4(vsm4r_vv, void, ptr, ptr, env, i32)
+DEF_HELPER_4(vsm4r_vs, void, ptr, ptr, env, i32)
diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
index b10497afd3..dab38e23e3 100644
--- a/target/riscv/insn32.decode
+++ b/target/riscv/insn32.decode
@@ -961,3 +961,8 @@ vsm3c_vi 101011 1 ..... ..... 010 ..... 1110111 @r_vm_1
# *** Zvkg vector crypto extension ***
vghsh_vv 101100 1 ..... ..... 010 ..... 1110111 @r_vm_1
vgmul_vv 101000 1 ..... 10001 010 ..... 1110111 @r2_vm_1
+
+# *** Zvksed vector crypto extension ***
+vsm4k_vi 100001 1 ..... ..... 010 ..... 1110111 @r_vm_1
+vsm4r_vv 101000 1 ..... 10000 010 ..... 1110111 @r2_vm_1
+vsm4r_vs 101001 1 ..... 10000 010 ..... 1110111 @r2_vm_1
diff --git a/target/riscv/insn_trans/trans_rvvk.c.inc b/target/riscv/insn_trans/trans_rvvk.c.inc
index e5ccb26c45..1de9dcb6eb 100644
--- a/target/riscv/insn_trans/trans_rvvk.c.inc
+++ b/target/riscv/insn_trans/trans_rvvk.c.inc
@@ -540,3 +540,46 @@ static bool vghsh_check(DisasContext *s, arg_rmrr *a)
}
GEN_VV_UNMASKED_TRANS(vghsh_vv, vghsh_check, ZVKG_EGS)
+
+/*
+ * Zvksed
+ */
+
+#define ZVKSED_EGS 4
+
+static bool zvksed_check(DisasContext *s)
+{
+ int egw_bytes = ZVKSED_EGS << s->sew;
+ return s->cfg_ptr->ext_zvksed == true &&
+ require_rvv(s) &&
+ vext_check_isa_ill(s) &&
+ MAXSZ(s) >= egw_bytes &&
+ s->sew == MO_32;
+}
+
+static bool vsm4k_vi_check(DisasContext *s, arg_rmrr *a)
+{
+ return zvksed_check(s) &&
+ require_align(a->rd, s->lmul) &&
+ require_align(a->rs2, s->lmul);
+}
+
+GEN_VI_UNMASKED_TRANS(vsm4k_vi, vsm4k_vi_check, ZVKSED_EGS)
+
+static bool vsm4r_vv_check(DisasContext *s, arg_rmr *a)
+{
+ return zvksed_check(s) &&
+ require_align(a->rd, s->lmul) &&
+ require_align(a->rs2, s->lmul);
+}
+
+GEN_V_UNMASKED_TRANS(vsm4r_vv, vsm4r_vv_check, ZVKSED_EGS)
+
+static bool vsm4r_vs_check(DisasContext *s, arg_rmr *a)
+{
+ return zvksed_check(s) &&
+ !is_overlapped(a->rd, 1 << MAX(s->lmul, 0), a->rs2, 1) &&
+ require_align(a->rd, s->lmul);
+}
+
+GEN_V_UNMASKED_TRANS(vsm4r_vs, vsm4r_vs_check, ZVKSED_EGS)
diff --git a/target/riscv/vcrypto_helper.c b/target/riscv/vcrypto_helper.c
index 5962ea2885..defc72f6f8 100644
--- a/target/riscv/vcrypto_helper.c
+++ b/target/riscv/vcrypto_helper.c
@@ -24,6 +24,7 @@
#include "cpu.h"
#include "crypto/aes.h"
#include "crypto/aes-round.h"
+#include "crypto/sm4.h"
#include "exec/memop.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
@@ -824,3 +825,129 @@ void HELPER(vgmul_vv)(void *vd_vptr, void *vs2_vptr, CPURISCVState *env,
vext_set_elems_1s(vd, vta, env->vl * 4, total_elems * 4);
env->vstart = 0;
}
+
+void HELPER(vsm4k_vi)(void *vd, void *vs2, uint32_t uimm5, CPURISCVState *env,
+ uint32_t desc)
+{
+ const uint32_t egs = 4;
+ uint32_t rnd = uimm5 & 0x7;
+ uint32_t group_start = env->vstart / egs;
+ uint32_t group_end = env->vl / egs;
+ uint32_t esz = sizeof(uint32_t);
+ uint32_t total_elems = vext_get_total_elems(env, desc, esz);
+
+ for (uint32_t i = group_start; i < group_end; ++i) {
+ uint32_t vstart = i * egs;
+ uint32_t vend = (i + 1) * egs;
+ uint32_t rk[4] = {0};
+ uint32_t tmp[8] = {0};
+
+ for (uint32_t j = vstart; j < vend; ++j) {
+ rk[j - vstart] = *((uint32_t *)vs2 + H4(j));
+ }
+
+ for (uint32_t j = 0; j < egs; ++j) {
+ tmp[j] = rk[j];
+ }
+
+ for (uint32_t j = 0; j < egs; ++j) {
+ uint32_t b, s;
+ b = tmp[j + 1] ^ tmp[j + 2] ^ tmp[j + 3] ^ sm4_ck[rnd * 4 + j];
+
+ s = sm4_subword(b);
+
+ tmp[j + 4] = tmp[j] ^ (s ^ rol32(s, 13) ^ rol32(s, 23));
+ }
+
+ for (uint32_t j = vstart; j < vend; ++j) {
+ *((uint32_t *)vd + H4(j)) = tmp[egs + (j - vstart)];
+ }
+ }
+
+ env->vstart = 0;
+ /* set tail elements to 1s */
+ vext_set_elems_1s(vd, vext_vta(desc), env->vl * esz, total_elems * esz);
+}
+
+static void do_sm4_round(uint32_t *rk, uint32_t *buf)
+{
+ const uint32_t egs = 4;
+ uint32_t s, b;
+
+ for (uint32_t j = egs; j < egs * 2; ++j) {
+ b = buf[j - 3] ^ buf[j - 2] ^ buf[j - 1] ^ rk[j - 4];
+
+ s = sm4_subword(b);
+
+ buf[j] = buf[j - 4] ^ (s ^ rol32(s, 2) ^ rol32(s, 10) ^ rol32(s, 18) ^
+ rol32(s, 24));
+ }
+}
+
+void HELPER(vsm4r_vv)(void *vd, void *vs2, CPURISCVState *env, uint32_t desc)
+{
+ const uint32_t egs = 4;
+ uint32_t group_start = env->vstart / egs;
+ uint32_t group_end = env->vl / egs;
+ uint32_t esz = sizeof(uint32_t);
+ uint32_t total_elems = vext_get_total_elems(env, desc, esz);
+
+ for (uint32_t i = group_start; i < group_end; ++i) {
+ uint32_t vstart = i * egs;
+ uint32_t vend = (i + 1) * egs;
+ uint32_t rk[4] = {0};
+ uint32_t tmp[8] = {0};
+
+ for (uint32_t j = vstart; j < vend; ++j) {
+ rk[j - vstart] = *((uint32_t *)vs2 + H4(j));
+ }
+
+ for (uint32_t j = vstart; j < vend; ++j) {
+ tmp[j - vstart] = *((uint32_t *)vd + H4(j));
+ }
+
+ do_sm4_round(rk, tmp);
+
+ for (uint32_t j = vstart; j < vend; ++j) {
+ *((uint32_t *)vd + H4(j)) = tmp[egs + (j - vstart)];
+ }
+ }
+
+ env->vstart = 0;
+ /* set tail elements to 1s */
+ vext_set_elems_1s(vd, vext_vta(desc), env->vl * esz, total_elems * esz);
+}
+
+void HELPER(vsm4r_vs)(void *vd, void *vs2, CPURISCVState *env, uint32_t desc)
+{
+ const uint32_t egs = 4;
+ uint32_t group_start = env->vstart / egs;
+ uint32_t group_end = env->vl / egs;
+ uint32_t esz = sizeof(uint32_t);
+ uint32_t total_elems = vext_get_total_elems(env, desc, esz);
+
+ for (uint32_t i = group_start; i < group_end; ++i) {
+ uint32_t vstart = i * egs;
+ uint32_t vend = (i + 1) * egs;
+ uint32_t rk[4] = {0};
+ uint32_t tmp[8] = {0};
+
+ for (uint32_t j = 0; j < egs; ++j) {
+ rk[j] = *((uint32_t *)vs2 + H4(j));
+ }
+
+ for (uint32_t j = vstart; j < vend; ++j) {
+ tmp[j - vstart] = *((uint32_t *)vd + H4(j));
+ }
+
+ do_sm4_round(rk, tmp);
+
+ for (uint32_t j = vstart; j < vend; ++j) {
+ *((uint32_t *)vd + H4(j)) = tmp[egs + (j - vstart)];
+ }
+ }
+
+ env->vstart = 0;
+ /* set tail elements to 1s */
+ vext_set_elems_1s(vd, vext_vta(desc), env->vl * esz, total_elems * esz);
+}
--
2.31.1