* [PATCH 1/2] tcg: avoid integer overflow
2020-03-16 11:40 [PATCH 0/2] avoid integer overflow Yifei Jiang
@ 2020-03-16 11:40 ` Yifei Jiang
2020-03-16 14:04 ` Peter Maydell
2020-03-16 11:40 ` [PATCH 2/2] accel/tcg: " Yifei Jiang
2020-03-16 12:56 ` [PATCH 0/2] " Peter Maydell
2 siblings, 1 reply; 6+ messages in thread
From: Yifei Jiang @ 2020-03-16 11:40 UTC (permalink / raw)
To: qemu-devel
Cc: zhang.zhanghailiang, limingwang, victor.zhangxiaofeng,
Yifei Jiang, Euler Robot, pbonzini, rth, dengkai1
This fixes Coverity issue CID 75234842 and similar reports:
2221 tcg_gen_andi_i64(t, t, dup_const(vece, 1));
CID 75234842: (OVERFLOW_BEFORE_WIDEN)
2222. overflow_before_widen: Potentially overflowing expression "1 << nbit" with type "int" (32 bits, signed) is evaluated using 32-bit arithmetic, and then used in a context that expects an expression of type "int64_t" (64 bits, signed).
2222 tcg_gen_muli_i64(t, t, (1 << nbit) - 1);
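For context, a minimal standalone sketch of the overflow-before-widen
pattern and its fix (illustrative only, not part of this patch; nbit = 32
is a hypothetical value corresponding to vece == MO_32):

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned nbit = 32;   /* hypothetical: 8 << MO_32 */

        /* Wrong: "1 << nbit" is evaluated in 32-bit int, so it
         * overflows (undefined behaviour for nbit >= 31) before the
         * result is widened to int64_t. */
        /* int64_t bad = (1 << nbit) - 1; */

        /* Right: widen first, then shift, so the whole expression is
         * computed in 64-bit arithmetic. */
        int64_t good = ((int64_t)1 << nbit) - 1;

        printf("%" PRId64 "\n", good);   /* prints 4294967295 */
        return 0;
    }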
Signed-off-by: Yifei Jiang <jiangyifei@huawei.com>
Signed-off-by: Mingwang Li <limingwang@huawei.com>
Reported-by: Euler Robot <euler.robot@huawei.com>
---
tcg/tcg-op-gvec.c | 18 +++++++++---------
tcg/tcg-op-vec.c | 4 ++--
2 files changed, 11 insertions(+), 11 deletions(-)
diff --git a/tcg/tcg-op-gvec.c b/tcg/tcg-op-gvec.c
index 327d9588e0..3aeb049a46 100644
--- a/tcg/tcg-op-gvec.c
+++ b/tcg/tcg-op-gvec.c
@@ -2219,7 +2219,7 @@ static void gen_absv_mask(TCGv_i64 d, TCGv_i64 b, unsigned vece)
/* Create -1 for each negative element. */
tcg_gen_shri_i64(t, b, nbit - 1);
tcg_gen_andi_i64(t, t, dup_const(vece, 1));
- tcg_gen_muli_i64(t, t, (1 << nbit) - 1);
+ tcg_gen_muli_i64(t, t, ((int64_t)1 << nbit) - 1);
/*
* Invert (via xor -1) and add one (via sub -1).
@@ -2528,7 +2528,7 @@ void tcg_gen_gvec_shli(unsigned vece, uint32_t dofs, uint32_t aofs,
};
tcg_debug_assert(vece <= MO_64);
- tcg_debug_assert(shift >= 0 && shift < (8 << vece));
+ tcg_debug_assert(shift >= 0 && shift < ((int64_t)8 << vece));
if (shift == 0) {
tcg_gen_gvec_mov(vece, dofs, aofs, oprsz, maxsz);
} else {
@@ -2579,7 +2579,7 @@ void tcg_gen_gvec_shri(unsigned vece, uint32_t dofs, uint32_t aofs,
};
tcg_debug_assert(vece <= MO_64);
- tcg_debug_assert(shift >= 0 && shift < (8 << vece));
+ tcg_debug_assert(shift >= 0 && shift < ((int64_t)8 << vece));
if (shift == 0) {
tcg_gen_gvec_mov(vece, dofs, aofs, oprsz, maxsz);
} else {
@@ -2595,7 +2595,7 @@ void tcg_gen_vec_sar8i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c)
tcg_gen_shri_i64(d, a, c);
tcg_gen_andi_i64(s, d, s_mask); /* isolate (shifted) sign bit */
- tcg_gen_muli_i64(s, s, (2 << c) - 2); /* replicate isolated signs */
+ tcg_gen_muli_i64(s, s, ((int64_t)2 << c) - 2); /* replicate isolated signs */
tcg_gen_andi_i64(d, d, c_mask); /* clear out bits above sign */
tcg_gen_or_i64(d, d, s); /* include sign extension */
tcg_temp_free_i64(s);
@@ -2610,7 +2610,7 @@ void tcg_gen_vec_sar16i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c)
tcg_gen_shri_i64(d, a, c);
tcg_gen_andi_i64(s, d, s_mask); /* isolate (shifted) sign bit */
tcg_gen_andi_i64(d, d, c_mask); /* clear out bits above sign */
- tcg_gen_muli_i64(s, s, (2 << c) - 2); /* replicate isolated signs */
+ tcg_gen_muli_i64(s, s, ((int64_t)2 << c) - 2); /* replicate isolated signs */
tcg_gen_or_i64(d, d, s); /* include sign extension */
tcg_temp_free_i64(s);
}
@@ -2644,7 +2644,7 @@ void tcg_gen_gvec_sari(unsigned vece, uint32_t dofs, uint32_t aofs,
};
tcg_debug_assert(vece <= MO_64);
- tcg_debug_assert(shift >= 0 && shift < (8 << vece));
+ tcg_debug_assert(shift >= 0 && shift < ((int64_t)8 << vece));
if (shift == 0) {
tcg_gen_gvec_mov(vece, dofs, aofs, oprsz, maxsz);
} else {
@@ -2881,7 +2881,7 @@ static void tcg_gen_shlv_mod_vec(unsigned vece, TCGv_vec d,
{
TCGv_vec t = tcg_temp_new_vec_matching(d);
- tcg_gen_dupi_vec(vece, t, (8 << vece) - 1);
+ tcg_gen_dupi_vec(vece, t, ((uint64_t)8 << vece) - 1);
tcg_gen_and_vec(vece, t, t, b);
tcg_gen_shlv_vec(vece, d, a, t);
tcg_temp_free_vec(t);
@@ -2944,7 +2944,7 @@ static void tcg_gen_shrv_mod_vec(unsigned vece, TCGv_vec d,
{
TCGv_vec t = tcg_temp_new_vec_matching(d);
- tcg_gen_dupi_vec(vece, t, (8 << vece) - 1);
+ tcg_gen_dupi_vec(vece, t, ((uint64_t)8 << vece) - 1);
tcg_gen_and_vec(vece, t, t, b);
tcg_gen_shrv_vec(vece, d, a, t);
tcg_temp_free_vec(t);
@@ -3007,7 +3007,7 @@ static void tcg_gen_sarv_mod_vec(unsigned vece, TCGv_vec d,
{
TCGv_vec t = tcg_temp_new_vec_matching(d);
- tcg_gen_dupi_vec(vece, t, (8 << vece) - 1);
+ tcg_gen_dupi_vec(vece, t, ((uint64_t)8 << vece) - 1);
tcg_gen_and_vec(vece, t, t, b);
tcg_gen_sarv_vec(vece, d, a, t);
tcg_temp_free_vec(t);
diff --git a/tcg/tcg-op-vec.c b/tcg/tcg-op-vec.c
index b6937e8d64..4cf1dc4e8e 100644
--- a/tcg/tcg-op-vec.c
+++ b/tcg/tcg-op-vec.c
@@ -483,7 +483,7 @@ void tcg_gen_abs_vec(unsigned vece, TCGv_vec r, TCGv_vec a)
tcg_gen_smax_vec(vece, r, a, t);
} else {
if (tcg_can_emit_vec_op(INDEX_op_sari_vec, type, vece) > 0) {
- tcg_gen_sari_vec(vece, t, a, (8 << vece) - 1);
+ tcg_gen_sari_vec(vece, t, a, ((int64_t)8 << vece) - 1);
} else {
do_dupi_vec(t, MO_REG, 0);
tcg_gen_cmp_vec(TCG_COND_LT, vece, t, a, t);
@@ -508,7 +508,7 @@ static void do_shifti(TCGOpcode opc, unsigned vece,
int can;
tcg_debug_assert(at->base_type == type);
- tcg_debug_assert(i >= 0 && i < (8 << vece));
+ tcg_debug_assert(i >= 0 && i < ((int64_t)8 << vece));
tcg_assert_listed_vecop(opc);
if (i == 0) {
--
2.19.1
* [PATCH 2/2] accel/tcg: avoid integer overflow
2020-03-16 11:40 [PATCH 0/2] avoid integer overflow Yifei Jiang
2020-03-16 11:40 ` [PATCH 1/2] tcg: " Yifei Jiang
@ 2020-03-16 11:40 ` Yifei Jiang
2020-03-16 12:55 ` Peter Maydell
2020-03-16 12:56 ` [PATCH 0/2] " Peter Maydell
2 siblings, 1 reply; 6+ messages in thread
From: Yifei Jiang @ 2020-03-16 11:40 UTC (permalink / raw)
To: qemu-devel
Cc: zhang.zhanghailiang, limingwang, victor.zhangxiaofeng,
Yifei Jiang, Euler Robot, pbonzini, rth, dengkai1
This fixes Coverity issue CID 75235919 and similar reports:
1524 /* Handle CPU specific unaligned behaviour */
CID 75235919: (OVERFLOW_BEFORE_WIDEN)
1525. overflow_before_widen: Potentially overflowing expression "1 << a_bits" with type "int" (32 bits, signed) is evaluated using 32-bit arithmetic, and then used in a context that expects an expression of type "target_ulong" (64 bits, unsigned).
1525 if (addr & ((1 << a_bits) - 1)) {
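Again for context only, a standalone sketch of the same pattern with a
64-bit unsigned address type; the typedef, addr and a_bits values below
are stand-ins chosen for illustration, not the values QEMU actually uses:

    #include <inttypes.h>
    #include <stdio.h>

    typedef uint64_t target_ulong;   /* stand-in for a 64-bit target_ulong */

    int main(void)
    {
        target_ulong addr = 0x1ffffffffULL;  /* hypothetical misaligned address */
        int a_bits = 32;                     /* hypothetical, larger than any real alignment */

        /* Wrong: the mask "(1 << a_bits) - 1" is built in 32-bit int,
         * so it overflows before being widened to target_ulong. */
        /* if (addr & ((1 << a_bits) - 1)) ... */

        /* Right: form the mask in the 64-bit type from the start. */
        if (addr & (((target_ulong)1 << a_bits) - 1)) {
            printf("unaligned: 0x%" PRIx64 "\n", (uint64_t)addr);
        }
        return 0;
    }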
Signed-off-by: Yifei Jiang <jiangyifei@huawei.com>
Signed-off-by: Mingwang Li <limingwang@huawei.com>
Reported-by: Euler Robot <euler.robot@huawei.com>
---
accel/tcg/cputlb.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index e3b5750c3b..73b5e680be 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -1412,7 +1412,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
retaddr -= GETPC_ADJ;
/* Enforce guest required alignment. */
- if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
+ if (unlikely(a_bits > 0 && (addr & (((target_ulong)1 << a_bits) - 1)))) {
/* ??? Maybe indicate atomic op to cpu_unaligned_access */
cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
mmu_idx, retaddr);
@@ -1522,7 +1522,7 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
size_t size = memop_size(op);
/* Handle CPU specific unaligned behaviour */
- if (addr & ((1 << a_bits) - 1)) {
+ if (addr & (((target_ulong)1 << a_bits) - 1)) {
cpu_unaligned_access(env_cpu(env), addr, access_type,
mmu_idx, retaddr);
}
@@ -1911,7 +1911,7 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
size_t size = memop_size(op);
/* Handle CPU specific unaligned behaviour */
- if (addr & ((1 << a_bits) - 1)) {
+ if (addr & (((target_ulong)1 << a_bits) - 1)) {
cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
mmu_idx, retaddr);
}
--
2.19.1