qemu-devel.nongnu.org archive mirror
* [PATCH-for-10.2 v2 0/2] tcg/loongarch64: Support every TCGCond for cmp_vec ops
@ 2025-12-08  9:53 Philippe Mathieu-Daudé
  2025-12-08  9:53 ` [PATCH-for-10.2 v2 1/2] tcg/loongarch64: Factor tcg_out_cmp_vec() out of tcg_out_vec_op() Philippe Mathieu-Daudé
  2025-12-08  9:53 ` [PATCH-for-10.2 v2 2/2] tcg/loongarch64: Support every TCGCond for cmp_vec ops Philippe Mathieu-Daudé
  0 siblings, 2 replies; 7+ messages in thread
From: Philippe Mathieu-Daudé @ 2025-12-08  9:53 UTC (permalink / raw)
  To: qemu-devel
  Cc: Bingwu Zhang, Richard Henderson, WANG Xuerui,
	Philippe Mathieu-Daudé

v2: Split Xuerui's patch in 2 to ease review.

Supersedes: <20251207055626.3685415-1-i.qemu@xen0n.name>

WANG Xuerui (2):
  tcg/loongarch64: Factor tcg_out_cmp_vec() out of tcg_out_vec_op()
  tcg/loongarch64: Support every TCGCond for cmp_vec ops

 tcg/loongarch64/tcg-target.c.inc | 119 +++++++++++++++++++------------
 1 file changed, 75 insertions(+), 44 deletions(-)

-- 
2.51.0



^ permalink raw reply	[flat|nested] 7+ messages in thread

* [PATCH-for-10.2 v2 1/2] tcg/loongarch64: Factor tcg_out_cmp_vec() out of tcg_out_vec_op()
  2025-12-08  9:53 [PATCH-for-10.2 v2 0/2] tcg/loongarch64: Support every TCGCond for cmp_vec ops Philippe Mathieu-Daudé
@ 2025-12-08  9:53 ` Philippe Mathieu-Daudé
  2025-12-08  9:54   ` Philippe Mathieu-Daudé
  2025-12-08  9:53 ` [PATCH-for-10.2 v2 2/2] tcg/loongarch64: Support every TCGCond for cmp_vec ops Philippe Mathieu-Daudé
  1 sibling, 1 reply; 7+ messages in thread
From: Philippe Mathieu-Daudé @ 2025-12-08  9:53 UTC (permalink / raw)
  To: qemu-devel
  Cc: Bingwu Zhang, Richard Henderson, WANG Xuerui,
	Philippe Mathieu-Daudé

From: WANG Xuerui <git@xen0n.name>

Lift the cmp_vec handling into its own function to make it easier
for readers to follow.

Signed-off-by: WANG Xuerui <git@xen0n.name>
Message-ID: <20251207055626.3685415-1-i.qemu@xen0n.name>
[PMD: Split of bigger patch, part 1/2]
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
---
 tcg/loongarch64/tcg-target.c.inc | 94 +++++++++++++++++---------------
 1 file changed, 50 insertions(+), 44 deletions(-)

diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index 10c69211ac5..dbb36a2a816 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -2179,14 +2179,10 @@ static void tcg_out_addsub_vec(TCGContext *s, bool lasx, unsigned vece,
     tcg_out32(s, encode_vdvjvk_insn(insn, a0, a1, a2));
 }
 
-static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
-                           unsigned vecl, unsigned vece,
-                           const TCGArg args[TCG_MAX_OP_ARGS],
-                           const int const_args[TCG_MAX_OP_ARGS])
+static void tcg_out_cmp_vec(TCGContext *s, bool lasx, unsigned vece,
+                            TCGArg a0, TCGArg a1, TCGArg a2,
+                            bool a2_is_const, TCGCond cond)
 {
-    TCGType type = vecl + TCG_TYPE_V64;
-    bool lasx = type == TCG_TYPE_V256;
-    TCGArg a0, a1, a2, a3;
     LoongArchInsn insn;
 
     static const LoongArchInsn cmp_vec_insn[16][2][4] = {
@@ -2233,6 +2229,51 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
             { OPC_XVSLTI_BU, OPC_XVSLTI_HU, OPC_XVSLTI_WU, OPC_XVSLTI_DU },
         }
     };
+
+    if (a2_is_const) {
+        /*
+         * cmp_vec dest, src, value
+         * Try vseqi/vslei/vslti
+         */
+        int64_t value = sextract64(a2, 0, 8 << vece);
+        switch (cond) {
+        case TCG_COND_EQ:
+        case TCG_COND_LE:
+        case TCG_COND_LT:
+            insn = cmp_vec_imm_insn[cond][lasx][vece];
+            tcg_out32(s, encode_vdvjsk5_insn(insn, a0, a1, value));
+            break;
+        case TCG_COND_LEU:
+        case TCG_COND_LTU:
+            insn = cmp_vec_imm_insn[cond][lasx][vece];
+            tcg_out32(s, encode_vdvjuk5_insn(insn, a0, a1, value));
+            break;
+        default:
+            g_assert_not_reached();
+        }
+    }
+
+    insn = cmp_vec_insn[cond][lasx][vece];
+    if (insn == 0) {
+        TCGArg t;
+        t = a1, a1 = a2, a2 = t;
+        cond = tcg_swap_cond(cond);
+        insn = cmp_vec_insn[cond][lasx][vece];
+        tcg_debug_assert(insn != 0);
+    }
+    tcg_out32(s, encode_vdvjvk_insn(insn, a0, a1, a2));
+}
+
+static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
+                           unsigned vecl, unsigned vece,
+                           const TCGArg args[TCG_MAX_OP_ARGS],
+                           const int const_args[TCG_MAX_OP_ARGS])
+{
+    TCGType type = vecl + TCG_TYPE_V64;
+    bool lasx = type == TCG_TYPE_V256;
+    TCGArg a0, a1, a2, a3;
+    LoongArchInsn insn;
+
     static const LoongArchInsn neg_vec_insn[2][4] = {
         { OPC_VNEG_B, OPC_VNEG_H, OPC_VNEG_W, OPC_VNEG_D },
         { OPC_XVNEG_B, OPC_XVNEG_H, OPC_XVNEG_W, OPC_XVNEG_D },
@@ -2347,43 +2388,8 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
         insn = lasx ? OPC_XVNOR_V : OPC_VNOR_V;
         goto vdvjvk;
     case INDEX_op_cmp_vec:
-        {
-            TCGCond cond = args[3];
-
-            if (const_args[2]) {
-                /*
-                 * cmp_vec dest, src, value
-                 * Try vseqi/vslei/vslti
-                 */
-                int64_t value = sextract64(a2, 0, 8 << vece);
-                switch (cond) {
-                case TCG_COND_EQ:
-                case TCG_COND_LE:
-                case TCG_COND_LT:
-                    insn = cmp_vec_imm_insn[cond][lasx][vece];
-                    tcg_out32(s, encode_vdvjsk5_insn(insn, a0, a1, value));
-                    break;
-                case TCG_COND_LEU:
-                case TCG_COND_LTU:
-                    insn = cmp_vec_imm_insn[cond][lasx][vece];
-                    tcg_out32(s, encode_vdvjuk5_insn(insn, a0, a1, value));
-                    break;
-                default:
-                    g_assert_not_reached();
-                }
-                break;
-            }
-
-            insn = cmp_vec_insn[cond][lasx][vece];
-            if (insn == 0) {
-                TCGArg t;
-                t = a1, a1 = a2, a2 = t;
-                cond = tcg_swap_cond(cond);
-                insn = cmp_vec_insn[cond][lasx][vece];
-                tcg_debug_assert(insn != 0);
-            }
-        }
-        goto vdvjvk;
+        tcg_out_cmp_vec(s, lasx, vece, a0, a1, a2, const_args[2], a3);
+        break;
     case INDEX_op_add_vec:
         tcg_out_addsub_vec(s, lasx, vece, a0, a1, a2, const_args[2], true);
         break;
-- 
2.51.0



^ permalink raw reply related	[flat|nested] 7+ messages in thread

* [PATCH-for-10.2 v2 2/2] tcg/loongarch64: Support every TCGCond for cmp_vec ops
  2025-12-08  9:53 [PATCH-for-10.2 v2 0/2] tcg/loongarch64: Support every TCGCond for cmp_vec ops Philippe Mathieu-Daudé
  2025-12-08  9:53 ` [PATCH-for-10.2 v2 1/2] tcg/loongarch64: Factor tcg_out_cmp_vec() out of tcg_out_vec_op() Philippe Mathieu-Daudé
@ 2025-12-08  9:53 ` Philippe Mathieu-Daudé
  2025-12-08  9:55   ` Philippe Mathieu-Daudé
  2025-12-08 15:43   ` Richard Henderson
  1 sibling, 2 replies; 7+ messages in thread
From: Philippe Mathieu-Daudé @ 2025-12-08  9:53 UTC (permalink / raw)
  To: qemu-devel
  Cc: Bingwu Zhang, Richard Henderson, WANG Xuerui,
	Philippe Mathieu-Daudé

From: WANG Xuerui <git@xen0n.name>

Support for TCGConds in loongarch64 cmp_vec codegen is not uniform: NE
is not supported at all and will trip over assertions, and legalization
(currently just operand-swapping) is not done for reg-imm comparisons.
Since the TCG middle-end will not legalize the comparison conditions for
us, we have to do it ourselves, as other targets do.

Because EQ/LT/LTU/LE/LEU are natively supported, we only have to keep
the current operand swapping treatment for GT/GTU/GE/GEU but ensure it
is done for both reg-reg and reg-imm cases, and use a bitwise NOT to
help legalize NE.

Fixes: d8b6fa593d2d ("tcg/loongarch64: Lower cmp_vec to vseq/vsle/vslt")
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/3237
Cc: Richard Henderson <richard.henderson@linaro.org>
Cc: Philippe Mathieu-Daudé <philmd@linaro.org>
Reported-by: Bingwu Zhang <xtexchooser@duck.com>
Signed-off-by: WANG Xuerui <git@xen0n.name>
Message-ID: <20251207055626.3685415-1-i.qemu@xen0n.name>
[PMD: Split of bigger patch, part 2/2]
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
---
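Not part of the patch: a quick standalone check of the two identities the
lowering relies on, i.e. NE as a bitwise NOT of EQ, and GT/GE as LT/LE with
the operands swapped (the unsigned variants behave the same).  The helper
names and sample values are illustrative only; the -1/0 per-element mask
convention matches what the vseq/vslt/vsle family produces.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Per-element compare helpers: all-ones (-1) if true, all-zeroes if not. */
static int64_t eq(int64_t a, int64_t b) { return a == b ? -1 : 0; }
static int64_t lt(int64_t a, int64_t b) { return a <  b ? -1 : 0; }
static int64_t le(int64_t a, int64_t b) { return a <= b ? -1 : 0; }

int main(void)
{
    static const int64_t samples[] = { -3, -1, 0, 1, 2, 42 };

    for (int i = 0; i < 6; i++) {
        for (int j = 0; j < 6; j++) {
            int64_t a = samples[i], b = samples[j];

            assert((a != b ? -1 : 0) == ~eq(a, b)); /* NE = NOT(EQ)    */
            assert((a >  b ? -1 : 0) == lt(b, a));  /* GT = swapped LT */
            assert((a >= b ? -1 : 0) == le(b, a));  /* GE = swapped LE */
        }
    }
    printf("legalization identities hold\n");
    return 0;
}

The all-ones mask convention is also what lets a single vnor of the result
with itself stand in for the NOT when legalizing NE.
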
 tcg/loongarch64/tcg-target.c.inc | 45 +++++++++++++++++++++++++-------
 1 file changed, 35 insertions(+), 10 deletions(-)

diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
index dbb36a2a816..1a243a57beb 100644
--- a/tcg/loongarch64/tcg-target.c.inc
+++ b/tcg/loongarch64/tcg-target.c.inc
@@ -2184,6 +2184,33 @@ static void tcg_out_cmp_vec(TCGContext *s, bool lasx, unsigned vece,
                             bool a2_is_const, TCGCond cond)
 {
     LoongArchInsn insn;
+    bool need_invert = false;
+
+    switch (cond) {
+    case TCG_COND_EQ:
+    case TCG_COND_LE:
+    case TCG_COND_LEU:
+    case TCG_COND_LT:
+    case TCG_COND_LTU:
+        /* These are directly expressible. */
+        break;
+    case TCG_COND_NE:
+        need_invert = true;
+        cond = TCG_COND_EQ;
+        break;
+    case TCG_COND_GE:
+    case TCG_COND_GEU:
+    case TCG_COND_GT:
+    case TCG_COND_GTU:
+        {
+            TCGArg t;
+            t = a1, a1 = a2, a2 = t;
+            cond = tcg_swap_cond(cond);
+            break;
+        }
+    default:
+        g_assert_not_reached();
+    }
 
     static const LoongArchInsn cmp_vec_insn[16][2][4] = {
         [TCG_COND_EQ] = {
@@ -2236,32 +2263,30 @@ static void tcg_out_cmp_vec(TCGContext *s, bool lasx, unsigned vece,
          * Try vseqi/vslei/vslti
          */
         int64_t value = sextract64(a2, 0, 8 << vece);
+
+        insn = cmp_vec_imm_insn[cond][lasx][vece];
         switch (cond) {
         case TCG_COND_EQ:
         case TCG_COND_LE:
         case TCG_COND_LT:
-            insn = cmp_vec_imm_insn[cond][lasx][vece];
             tcg_out32(s, encode_vdvjsk5_insn(insn, a0, a1, value));
             break;
         case TCG_COND_LEU:
         case TCG_COND_LTU:
-            insn = cmp_vec_imm_insn[cond][lasx][vece];
             tcg_out32(s, encode_vdvjuk5_insn(insn, a0, a1, value));
             break;
         default:
             g_assert_not_reached();
         }
+    } else {
+        insn = cmp_vec_insn[cond][lasx][vece];
+        tcg_out32(s, encode_vdvjvk_insn(insn, a0, a1, a2));
     }
 
-    insn = cmp_vec_insn[cond][lasx][vece];
-    if (insn == 0) {
-        TCGArg t;
-        t = a1, a1 = a2, a2 = t;
-        cond = tcg_swap_cond(cond);
-        insn = cmp_vec_insn[cond][lasx][vece];
-        tcg_debug_assert(insn != 0);
+    if (need_invert) {
+        insn = lasx ? OPC_XVNOR_V : OPC_VNOR_V;
+        tcg_out32(s, encode_vdvjvk_insn(insn, a0, a0, a0));
     }
-    tcg_out32(s, encode_vdvjvk_insn(insn, a0, a1, a2));
 }
 
 static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
-- 
2.51.0



^ permalink raw reply related	[flat|nested] 7+ messages in thread

* Re: [PATCH-for-10.2 v2 1/2] tcg/loongarch64: Factor tcg_out_cmp_vec() out of tcg_out_vec_op()
  2025-12-08  9:53 ` [PATCH-for-10.2 v2 1/2] tcg/loongarch64: Factor tcg_out_cmp_vec() out of tcg_out_vec_op() Philippe Mathieu-Daudé
@ 2025-12-08  9:54   ` Philippe Mathieu-Daudé
  0 siblings, 0 replies; 7+ messages in thread
From: Philippe Mathieu-Daudé @ 2025-12-08  9:54 UTC (permalink / raw)
  To: qemu-devel; +Cc: Bingwu Zhang, Richard Henderson, WANG Xuerui

On 8/12/25 10:53, Philippe Mathieu-Daudé wrote:
> From: WANG Xuerui <git@xen0n.name>
> 
> Lift the cmp_vec handling into its own function to make it easier
> for readers to follow.
> 
> Signed-off-by: WANG Xuerui <git@xen0n.name>
> Message-ID: <20251207055626.3685415-1-i.qemu@xen0n.name>
> [PMD: Split of bigger patch, part 1/2]
> Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
> ---
>   tcg/loongarch64/tcg-target.c.inc | 94 +++++++++++++++++---------------
>   1 file changed, 50 insertions(+), 44 deletions(-)

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Tested-by: Philippe Mathieu-Daudé <philmd@linaro.org>



^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH-for-10.2 v2 2/2] tcg/loongarch64: Support every TCGCond for cmp_vec ops
  2025-12-08  9:53 ` [PATCH-for-10.2 v2 2/2] tcg/loongarch64: Support every TCGCond for cmp_vec ops Philippe Mathieu-Daudé
@ 2025-12-08  9:55   ` Philippe Mathieu-Daudé
  2025-12-08  9:56     ` Philippe Mathieu-Daudé
  2025-12-08 15:43   ` Richard Henderson
  1 sibling, 1 reply; 7+ messages in thread
From: Philippe Mathieu-Daudé @ 2025-12-08  9:55 UTC (permalink / raw)
  To: qemu-devel; +Cc: Bingwu Zhang, Richard Henderson, WANG Xuerui

On 8/12/25 10:53, Philippe Mathieu-Daudé wrote:
> From: WANG Xuerui <git@xen0n.name>
> 
> Support for TCGConds in loongarch64 cmp_vec codegen is not uniform: NE
> is not supported at all and will trip over assertions, and legalization
> (currently just operand-swapping) is not done for reg-imm comparisons.
> Since the TCG middle-end will not legalize the comparison conditions for
> us, we have to do it ourselves, as other targets do.
> 
> Because EQ/LT/LTU/LE/LEU are natively supported, we only have to keep
> the current operand swapping treatment for GT/GTU/GE/GEU but ensure it
> is done for both reg-reg and reg-imm cases, and use a bitwise NOT to
> help legalize NE.
> 
> Fixes: d8b6fa593d2d ("tcg/loongarch64: Lower cmp_vec to vseq/vsle/vslt")
> Resolves: https://gitlab.com/qemu-project/qemu/-/issues/3237
> Cc: Richard Henderson <richard.henderson@linaro.org>
> Cc: Philippe Mathieu-Daudé <philmd@linaro.org>
> Reported-by: Bingwu Zhang <xtexchooser@duck.com>
> Signed-off-by: WANG Xuerui <git@xen0n.name>
> Message-ID: <20251207055626.3685415-1-i.qemu@xen0n.name>
> [PMD: Split of bigger patch, part 2/2]
> Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
> ---
>   tcg/loongarch64/tcg-target.c.inc | 45 +++++++++++++++++++++++++-------
>   1 file changed, 35 insertions(+), 10 deletions(-)

Tested-by: Philippe Mathieu-Daudé <philmd@linaro.org>


^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH-for-10.2 v2 2/2] tcg/loongarch64: Support every TCGCond for cmp_vec ops
  2025-12-08  9:55   ` Philippe Mathieu-Daudé
@ 2025-12-08  9:56     ` Philippe Mathieu-Daudé
  0 siblings, 0 replies; 7+ messages in thread
From: Philippe Mathieu-Daudé @ 2025-12-08  9:56 UTC (permalink / raw)
  To: qemu-devel; +Cc: Bingwu Zhang, Richard Henderson, WANG Xuerui

On 8/12/25 10:55, Philippe Mathieu-Daudé wrote:
> On 8/12/25 10:53, Philippe Mathieu-Daudé wrote:
>> From: WANG Xuerui <git@xen0n.name>
>>
>> Support for TCGConds in loongarch64 cmp_vec codegen is not uniform: NE
>> is not supported at all and will trip over assertions, and legalization
>> (currently just operand-swapping) is not done for reg-imm comparisons.
>> Since the TCG middle-end will not legalize the comparison conditions for
>> us, we have to do it ourselves, as other targets do.
>>
>> Because EQ/LT/LTU/LE/LEU are natively supported, we only have to keep
>> the current operand swapping treatment for GT/GTU/GE/GEU but ensure it
>> is done for both reg-reg and reg-imm cases, and use a bitwise NOT to
>> help legalize NE.
>>
>> Fixes: d8b6fa593d2d ("tcg/loongarch64: Lower cmp_vec to vseq/vsle/vslt")
>> Resolves: https://gitlab.com/qemu-project/qemu/-/issues/3237
>> Cc: Richard Henderson <richard.henderson@linaro.org>
>> Cc: Philippe Mathieu-Daudé <philmd@linaro.org>
>> Reported-by: Bingwu Zhang <xtexchooser@duck.com>
>> Signed-off-by: WANG Xuerui <git@xen0n.name>
>> Message-ID: <20251207055626.3685415-1-i.qemu@xen0n.name>
>> [PMD: Split of bigger patch, part 2/2]
>> Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
>> ---
>>   tcg/loongarch64/tcg-target.c.inc | 45 +++++++++++++++++++++++++-------
>>   1 file changed, 35 insertions(+), 10 deletions(-)
> 
> Tested-by: Philippe Mathieu-Daudé <philmd@linaro.org>

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>



^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH-for-10.2 v2 2/2] tcg/loongarch64: Support every TCGCond for cmp_vec ops
  2025-12-08  9:53 ` [PATCH-for-10.2 v2 2/2] tcg/loongarch64: Support every TCGCond for cmp_vec ops Philippe Mathieu-Daudé
  2025-12-08  9:55   ` Philippe Mathieu-Daudé
@ 2025-12-08 15:43   ` Richard Henderson
  1 sibling, 0 replies; 7+ messages in thread
From: Richard Henderson @ 2025-12-08 15:43 UTC (permalink / raw)
  To: Philippe Mathieu-Daudé, qemu-devel; +Cc: Bingwu Zhang, WANG Xuerui

On 12/8/25 03:53, Philippe Mathieu-Daudé wrote:
> From: WANG Xuerui <git@xen0n.name>
> 
> Support for TCGConds in loongarch64 cmp_vec codegen is not uniform: NE
> is not supported at all and will trip over assertions, and legalization
> (currently just operand-swapping) is not done for reg-imm comparisons.
> Since the TCG middle-end will not legalize the comparison conditions for
> us, we have to do it ourselves, as other targets do.
> 
> Because EQ/LT/LTU/LE/LEU are natively supported, we only have to keep
> the current operand swapping treatment for GT/GTU/GE/GEU but ensure it
> is done for both reg-reg and reg-imm cases, and use a bitwise NOT to
> help legalize NE.
> 
> Fixes: d8b6fa593d2d ("tcg/loongarch64: Lower cmp_vec to vseq/vsle/vslt")
> Resolves: https://gitlab.com/qemu-project/qemu/-/issues/3237
> Cc: Richard Henderson <richard.henderson@linaro.org>
> Cc: Philippe Mathieu-Daudé <philmd@linaro.org>
> Reported-by: Bingwu Zhang <xtexchooser@duck.com>
> Signed-off-by: WANG Xuerui <git@xen0n.name>
> Message-ID: <20251207055626.3685415-1-i.qemu@xen0n.name>
> [PMD: Split of bigger patch, part 2/2]
> Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
> ---
>   tcg/loongarch64/tcg-target.c.inc | 45 +++++++++++++++++++++++++-------
>   1 file changed, 35 insertions(+), 10 deletions(-)
> 
> diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc
> index dbb36a2a816..1a243a57beb 100644
> --- a/tcg/loongarch64/tcg-target.c.inc
> +++ b/tcg/loongarch64/tcg-target.c.inc
> @@ -2184,6 +2184,33 @@ static void tcg_out_cmp_vec(TCGContext *s, bool lasx, unsigned vece,
>                               bool a2_is_const, TCGCond cond)
>   {
>       LoongArchInsn insn;
> +    bool need_invert = false;
> +
> +    switch (cond) {
> +    case TCG_COND_EQ:
> +    case TCG_COND_LE:
> +    case TCG_COND_LEU:
> +    case TCG_COND_LT:
> +    case TCG_COND_LTU:
> +        /* These are directly expressible. */
> +        break;
> +    case TCG_COND_NE:
> +        need_invert = true;
> +        cond = TCG_COND_EQ;
> +        break;
> +    case TCG_COND_GE:
> +    case TCG_COND_GEU:
> +    case TCG_COND_GT:
> +    case TCG_COND_GTU:
> +        {
> +            TCGArg t;
> +            t = a1, a1 = a2, a2 = t;
> +            cond = tcg_swap_cond(cond);
> +            break;
> +        }

To repeat my review of v1, you can't swap here if a2_is_const.
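
To spell that out with the names from the hunk (purely an illustration,
not QEMU code, with sextract64() replaced by a local stand-in of the same
intent): for e.g. GT with const_args[2] set, the swap moves the constant
into a1, yet the reg-imm path below still reads a2 as the immediate and
encodes a1 as the source register:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Local stand-in for sextract64(): sign-extend the low "len" bits. */
static int64_t sextract64(uint64_t value, int start, int len)
{
    return (int64_t)(value << (64 - len - start)) >> (64 - len);
}

int main(void)
{
    unsigned vece = 2;        /* 32-bit elements */
    uint64_t a1 = 3, a2 = 5;  /* hypothetical "cmp_vec GT v3, #5" */

    /* The GT/GE/GTU/GEU case swaps operands along with the condition: */
    uint64_t t = a1; a1 = a2; a2 = t;    /* a1 now holds the constant 5 */

    /* The a2_is_const path then sign-extends a2 -- now the register
     * number 3, not the constant -- while a1 == 5 would go into the
     * source register field: */
    int64_t value = sextract64(a2, 0, 8 << vece);
    printf("would emit vslti.w v?, v%" PRIu64 ", %" PRId64
           "  (expected v3 and 5)\n", a1, value);
    return 0;
}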


r~

> +    default:
> +        g_assert_not_reached();
> +    }
>   
>       static const LoongArchInsn cmp_vec_insn[16][2][4] = {
>           [TCG_COND_EQ] = {
> @@ -2236,32 +2263,30 @@ static void tcg_out_cmp_vec(TCGContext *s, bool lasx, unsigned vece,
>            * Try vseqi/vslei/vslti
>            */
>           int64_t value = sextract64(a2, 0, 8 << vece);
> +
> +        insn = cmp_vec_imm_insn[cond][lasx][vece];
>           switch (cond) {
>           case TCG_COND_EQ:
>           case TCG_COND_LE:
>           case TCG_COND_LT:
> -            insn = cmp_vec_imm_insn[cond][lasx][vece];
>               tcg_out32(s, encode_vdvjsk5_insn(insn, a0, a1, value));
>               break;
>           case TCG_COND_LEU:
>           case TCG_COND_LTU:
> -            insn = cmp_vec_imm_insn[cond][lasx][vece];
>               tcg_out32(s, encode_vdvjuk5_insn(insn, a0, a1, value));
>               break;
>           default:
>               g_assert_not_reached();
>           }
> +    } else {
> +        insn = cmp_vec_insn[cond][lasx][vece];
> +        tcg_out32(s, encode_vdvjvk_insn(insn, a0, a1, a2));
>       }
>   
> -    insn = cmp_vec_insn[cond][lasx][vece];
> -    if (insn == 0) {
> -        TCGArg t;
> -        t = a1, a1 = a2, a2 = t;
> -        cond = tcg_swap_cond(cond);
> -        insn = cmp_vec_insn[cond][lasx][vece];
> -        tcg_debug_assert(insn != 0);
> +    if (need_invert) {
> +        insn = lasx ? OPC_XVNOR_V : OPC_VNOR_V;
> +        tcg_out32(s, encode_vdvjvk_insn(insn, a0, a0, a0));
>       }
> -    tcg_out32(s, encode_vdvjvk_insn(insn, a0, a1, a2));
>   }
>   
>   static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,



^ permalink raw reply	[flat|nested] 7+ messages in thread

end of thread, other threads:[~2025-12-08 15:44 UTC | newest]

Thread overview: 7+ messages
2025-12-08  9:53 [PATCH-for-10.2 v2 0/2] tcg/loongarch64: Support every TCGCond for cmp_vec ops Philippe Mathieu-Daudé
2025-12-08  9:53 ` [PATCH-for-10.2 v2 1/2] tcg/loongarch64: Factor tcg_out_cmp_vec() out of tcg_out_vec_op() Philippe Mathieu-Daudé
2025-12-08  9:54   ` Philippe Mathieu-Daudé
2025-12-08  9:53 ` [PATCH-for-10.2 v2 2/2] tcg/loongarch64: Support every TCGCond for cmp_vec ops Philippe Mathieu-Daudé
2025-12-08  9:55   ` Philippe Mathieu-Daudé
2025-12-08  9:56     ` Philippe Mathieu-Daudé
2025-12-08 15:43   ` Richard Henderson
