* [PATCH 0/2] avoid integer overflow
From: Yifei Jiang @ 2020-03-16 11:40 UTC
To: qemu-devel
Cc: zhang.zhanghailiang, limingwang, victor.zhangxiaofeng,
Yifei Jiang, pbonzini, rth, dengkai1
The default type of an integer constant is "int"; when such a constant is
shifted to the left, the result may exceed 32 bits, causing integer
overflow. So the constant's type needs to be changed to "long".
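For illustration, here is a minimal standalone sketch of the problem (the
shift count of 40 is hypothetical; the patches themselves use explicit
(int64_t) and (target_ulong) casts):

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    int shift = 40;      /* hypothetical shift count wider than 31 bits */

    /* "1" has type int, so the shift is evaluated in 32-bit arithmetic
     * (undefined behaviour for shift >= 32); the result is widened to
     * 64 bits only afterwards -- Coverity's OVERFLOW_BEFORE_WIDEN. */
    int64_t bad = (1 << shift) - 1;

    /* Casting the constant first keeps the whole expression 64-bit. */
    int64_t good = ((int64_t)1 << shift) - 1;

    printf("bad=%" PRId64 " good=%" PRId64 "\n", bad, good);
    return 0;
}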
Yifei Jiang (2):
tcg: avoid integer overflow
accel/tcg: avoid integer overflow
accel/tcg/cputlb.c | 6 +++---
tcg/tcg-op-gvec.c | 18 +++++++++---------
tcg/tcg-op-vec.c | 4 ++--
3 files changed, 14 insertions(+), 14 deletions(-)
--
2.19.1
* [PATCH 1/2] tcg: avoid integer overflow
From: Yifei Jiang @ 2020-03-16 11:40 UTC
To: qemu-devel
Cc: zhang.zhanghailiang, limingwang, victor.zhangxiaofeng,
Yifei Jiang, Euler Robot, pbonzini, rth, dengkai1
This fixes Coverity issue 75234842, among others:
2221 tcg_gen_andi_i64(t, t, dup_const(vece, 1));
CID 75234842: (OVERFLOW_BEFORE_WIDEN)
2222. overflow_before_widen: Potentially overflowing expression "1 << nbit" with type "int" (32 bits, signed) is evaluated using 32-bit arithmetic, and then used in a context that expects an expression of type "int64_t" (64 bits, signed).
2222 tcg_gen_muli_i64(t, t, (1 << nbit) - 1);
Signed-off-by: Yifei Jiang <jiangyifei@huawei.com>
Signed-off-by: Mingwang Li <limingwang@huawei.com>
Reported-by: Euler Robot <euler.robot@huawei.com>
---
tcg/tcg-op-gvec.c | 18 +++++++++---------
tcg/tcg-op-vec.c | 4 ++--
2 files changed, 11 insertions(+), 11 deletions(-)
diff --git a/tcg/tcg-op-gvec.c b/tcg/tcg-op-gvec.c
index 327d9588e0..3aeb049a46 100644
--- a/tcg/tcg-op-gvec.c
+++ b/tcg/tcg-op-gvec.c
@@ -2219,7 +2219,7 @@ static void gen_absv_mask(TCGv_i64 d, TCGv_i64 b, unsigned vece)
/* Create -1 for each negative element. */
tcg_gen_shri_i64(t, b, nbit - 1);
tcg_gen_andi_i64(t, t, dup_const(vece, 1));
- tcg_gen_muli_i64(t, t, (1 << nbit) - 1);
+ tcg_gen_muli_i64(t, t, ((int64_t)1 << nbit) - 1);
/*
* Invert (via xor -1) and add one (via sub -1).
@@ -2528,7 +2528,7 @@ void tcg_gen_gvec_shli(unsigned vece, uint32_t dofs, uint32_t aofs,
};
tcg_debug_assert(vece <= MO_64);
- tcg_debug_assert(shift >= 0 && shift < (8 << vece));
+ tcg_debug_assert(shift >= 0 && shift < ((int64_t)8 << vece));
if (shift == 0) {
tcg_gen_gvec_mov(vece, dofs, aofs, oprsz, maxsz);
} else {
@@ -2579,7 +2579,7 @@ void tcg_gen_gvec_shri(unsigned vece, uint32_t dofs, uint32_t aofs,
};
tcg_debug_assert(vece <= MO_64);
- tcg_debug_assert(shift >= 0 && shift < (8 << vece));
+ tcg_debug_assert(shift >= 0 && shift < ((int64_t)8 << vece));
if (shift == 0) {
tcg_gen_gvec_mov(vece, dofs, aofs, oprsz, maxsz);
} else {
@@ -2595,7 +2595,7 @@ void tcg_gen_vec_sar8i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c)
tcg_gen_shri_i64(d, a, c);
tcg_gen_andi_i64(s, d, s_mask); /* isolate (shifted) sign bit */
- tcg_gen_muli_i64(s, s, (2 << c) - 2); /* replicate isolated signs */
+ tcg_gen_muli_i64(s, s, ((int64_t)2 << c) - 2); /* replicate isolated signs */
tcg_gen_andi_i64(d, d, c_mask); /* clear out bits above sign */
tcg_gen_or_i64(d, d, s); /* include sign extension */
tcg_temp_free_i64(s);
@@ -2610,7 +2610,7 @@ void tcg_gen_vec_sar16i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c)
tcg_gen_shri_i64(d, a, c);
tcg_gen_andi_i64(s, d, s_mask); /* isolate (shifted) sign bit */
tcg_gen_andi_i64(d, d, c_mask); /* clear out bits above sign */
- tcg_gen_muli_i64(s, s, (2 << c) - 2); /* replicate isolated signs */
+ tcg_gen_muli_i64(s, s, ((int64_t)2 << c) - 2); /* replicate isolated signs */
tcg_gen_or_i64(d, d, s); /* include sign extension */
tcg_temp_free_i64(s);
}
@@ -2644,7 +2644,7 @@ void tcg_gen_gvec_sari(unsigned vece, uint32_t dofs, uint32_t aofs,
};
tcg_debug_assert(vece <= MO_64);
- tcg_debug_assert(shift >= 0 && shift < (8 << vece));
+ tcg_debug_assert(shift >= 0 && shift < ((int64_t)8 << vece));
if (shift == 0) {
tcg_gen_gvec_mov(vece, dofs, aofs, oprsz, maxsz);
} else {
@@ -2881,7 +2881,7 @@ static void tcg_gen_shlv_mod_vec(unsigned vece, TCGv_vec d,
{
TCGv_vec t = tcg_temp_new_vec_matching(d);
- tcg_gen_dupi_vec(vece, t, (8 << vece) - 1);
+ tcg_gen_dupi_vec(vece, t, ((uint64_t)8 << vece) - 1);
tcg_gen_and_vec(vece, t, t, b);
tcg_gen_shlv_vec(vece, d, a, t);
tcg_temp_free_vec(t);
@@ -2944,7 +2944,7 @@ static void tcg_gen_shrv_mod_vec(unsigned vece, TCGv_vec d,
{
TCGv_vec t = tcg_temp_new_vec_matching(d);
- tcg_gen_dupi_vec(vece, t, (8 << vece) - 1);
+ tcg_gen_dupi_vec(vece, t, ((uint64_t)8 << vece) - 1);
tcg_gen_and_vec(vece, t, t, b);
tcg_gen_shrv_vec(vece, d, a, t);
tcg_temp_free_vec(t);
@@ -3007,7 +3007,7 @@ static void tcg_gen_sarv_mod_vec(unsigned vece, TCGv_vec d,
{
TCGv_vec t = tcg_temp_new_vec_matching(d);
- tcg_gen_dupi_vec(vece, t, (8 << vece) - 1);
+ tcg_gen_dupi_vec(vece, t, ((uint64_t)8 << vece) - 1);
tcg_gen_and_vec(vece, t, t, b);
tcg_gen_sarv_vec(vece, d, a, t);
tcg_temp_free_vec(t);
diff --git a/tcg/tcg-op-vec.c b/tcg/tcg-op-vec.c
index b6937e8d64..4cf1dc4e8e 100644
--- a/tcg/tcg-op-vec.c
+++ b/tcg/tcg-op-vec.c
@@ -483,7 +483,7 @@ void tcg_gen_abs_vec(unsigned vece, TCGv_vec r, TCGv_vec a)
tcg_gen_smax_vec(vece, r, a, t);
} else {
if (tcg_can_emit_vec_op(INDEX_op_sari_vec, type, vece) > 0) {
- tcg_gen_sari_vec(vece, t, a, (8 << vece) - 1);
+ tcg_gen_sari_vec(vece, t, a, ((int64_t)8 << vece) - 1);
} else {
do_dupi_vec(t, MO_REG, 0);
tcg_gen_cmp_vec(TCG_COND_LT, vece, t, a, t);
@@ -508,7 +508,7 @@ static void do_shifti(TCGOpcode opc, unsigned vece,
int can;
tcg_debug_assert(at->base_type == type);
- tcg_debug_assert(i >= 0 && i < (8 << vece));
+ tcg_debug_assert(i >= 0 && i < ((int64_t)8 << vece));
tcg_assert_listed_vecop(opc);
if (i == 0) {
--
2.19.1
* [PATCH 2/2] accel/tcg: avoid integer overflow
From: Yifei Jiang @ 2020-03-16 11:40 UTC
To: qemu-devel
Cc: zhang.zhanghailiang, limingwang, victor.zhangxiaofeng,
Yifei Jiang, Euler Robot, pbonzini, rth, dengkai1
This fixes Coverity issue 75235919, among others:
1524 /* Handle CPU specific unaligned behaviour */
CID 75235919: (OVERFLOW_BEFORE_WIDEN)
1525. overflow_before_widen: Potentially overflowing expression "1 << a_bits" with type "int" (32 bits, signed) is evaluated using 32-bit arithmetic, and then used in a context that expects an expression of type "target_ulong" (64 bits, unsigned).
1525 if (addr & ((1 << a_bits) - 1)) {
Signed-off-by: Yifei Jiang <jiangyifei@huawei.com>
Signed-off-by: Mingwang Li <limingwang@huawei.com>
Reported-by: Euler Robot <euler.robot@huawei.com>
---
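Not part of the patch: a self-contained sketch of the alignment-mask idiom
the diff below adjusts ("is_misaligned" is a hypothetical helper name, and
uint64_t stands in for target_ulong on a 64-bit guest):

#include <stdbool.h>
#include <stdint.h>

/* Stand-in for QEMU's target_ulong on a 64-bit guest. */
typedef uint64_t target_ulong;

/* True if addr violates a (1 << a_bits)-byte alignment requirement.
 * Casting the 1 before shifting keeps the mask computation in 64-bit
 * arithmetic, which is exactly the change the diff below makes. */
static bool is_misaligned(target_ulong addr, int a_bits)
{
    return (addr & (((target_ulong)1 << a_bits) - 1)) != 0;
}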
accel/tcg/cputlb.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index e3b5750c3b..73b5e680be 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -1412,7 +1412,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
retaddr -= GETPC_ADJ;
/* Enforce guest required alignment. */
- if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
+ if (unlikely(a_bits > 0 && (addr & (((target_ulong)1 << a_bits) - 1)))) {
/* ??? Maybe indicate atomic op to cpu_unaligned_access */
cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
mmu_idx, retaddr);
@@ -1522,7 +1522,7 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
size_t size = memop_size(op);
/* Handle CPU specific unaligned behaviour */
- if (addr & ((1 << a_bits) - 1)) {
+ if (addr & (((target_ulong)1 << a_bits) - 1)) {
cpu_unaligned_access(env_cpu(env), addr, access_type,
mmu_idx, retaddr);
}
@@ -1911,7 +1911,7 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
size_t size = memop_size(op);
/* Handle CPU specific unaligned behaviour */
- if (addr & ((1 << a_bits) - 1)) {
+ if (addr & (((target_ulong)1 << a_bits) - 1)) {
cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
mmu_idx, retaddr);
}
--
2.19.1
* Re: [PATCH 2/2] accel/tcg: avoid integer overflow
From: Peter Maydell @ 2020-03-16 12:55 UTC
To: Yifei Jiang
Cc: zhanghailiang, limingwang, victor.zhangxiaofeng, QEMU Developers,
Euler Robot, Paolo Bonzini, dengkai1, Richard Henderson
On Mon, 16 Mar 2020 at 12:14, Yifei Jiang <jiangyifei@huawei.com> wrote:
>
> This fixes Coverity issue 75235919, among others:
> 1524 /* Handle CPU specific unaligned behaviour */
> CID 75235919: (OVERFLOW_BEFORE_WIDEN)
> 1525. overflow_before_widen: Potentially overflowing expression "1 << a_bits" with type "int" (32 bits, signed) is evaluated using 32-bit arithmetic, and then used in a context that expects an expression of type "target_ulong" (64 bits, unsigned).
> 1525 if (addr & ((1 << a_bits) - 1)) {
This is a Coverity false positive. The value of a_bits in
these cases can never be big enough for 1 << a_bits
to overflow, because it indicates an alignment requirement
and will be at most 6 (indicating 64-byte alignment).
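As a quick sketch of that bound (MAX_A_BITS is a hypothetical name for the
limit described above):

#include <assert.h>

/* a_bits encodes a (1 << a_bits)-byte alignment requirement, so with a
 * maximum of 64-byte alignment the 32-bit evaluation stays tiny: */
#define MAX_A_BITS 6
static_assert((1 << MAX_A_BITS) - 1 == 63, "mask fits easily in an int");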
thanks
-- PMM
* Re: [PATCH 0/2] avoid integer overflow
From: Peter Maydell @ 2020-03-16 12:56 UTC
To: Yifei Jiang
Cc: zhanghailiang, limingwang, victor.zhangxiaofeng, QEMU Developers,
Paolo Bonzini, dengkai1, Richard Henderson
On Mon, 16 Mar 2020 at 12:25, Yifei Jiang <jiangyifei@huawei.com> wrote:
>
> The default type of an integer constant is "int"; when such a constant is
> shifted to the left, the result may exceed 32 bits, causing integer
> overflow. So the constant's type needs to be changed to "long".
"long" would not in general be the right type to use in cases where
we do need to make this change, because "long", like "int", can be
only 32 bits wide on some hosts.
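A short sketch of the portability point (the LLP64 behaviour assumes a
host such as 64-bit Windows, where long is 32 bits):

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    /* On LLP64 hosts (e.g. 64-bit Windows) long is only 32 bits wide,
     * so a (long) cast would overflow there just like plain int: */
    printf("sizeof(long) = %zu\n", sizeof(long));   /* 4 on LLP64 */

    /* int64_t is guaranteed 64 bits on every host. */
    int64_t mask = ((int64_t)1 << 40) - 1;
    printf("mask = %" PRId64 "\n", mask);
    return 0;
}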
thanks
-- PMM
* Re: [PATCH 1/2] tcg: avoid integer overflow
From: Peter Maydell @ 2020-03-16 14:04 UTC
To: Yifei Jiang
Cc: zhanghailiang, limingwang, victor.zhangxiaofeng, QEMU Developers,
Euler Robot, Paolo Bonzini, dengkai1, Richard Henderson
On Mon, 16 Mar 2020 at 13:15, Yifei Jiang <jiangyifei@huawei.com> wrote:
>
> This fixes Coverity issue 75234842, among others:
Where does this issue number come from, by the way? It's not from
the online Coverity Scan instance we use, whose IDs are what we
usually cite for Coverity issues.
> 2221 tcg_gen_andi_i64(t, t, dup_const(vece, 1));
> CID 75234842: (OVERFLOW_BEFORE_WIDEN)
> 2222. overflow_before_widen: Potentially overflowing expression "1 << nbit" with type "int" (32 bits, signed) is evaluated using 32-bit arithmetic, and then used in a context that expects an expression of type "int64_t" (64 bits, signed).
> 2222 tcg_gen_muli_i64(t, t, (1 << nbit) - 1);
Again, you need to apply a more critical eye to the Coverity
suggestions. For instance:
> diff --git a/tcg/tcg-op-gvec.c b/tcg/tcg-op-gvec.c
> index 327d9588e0..3aeb049a46 100644
> --- a/tcg/tcg-op-gvec.c
> +++ b/tcg/tcg-op-gvec.c
> @@ -2219,7 +2219,7 @@ static void gen_absv_mask(TCGv_i64 d, TCGv_i64 b, unsigned vece)
> /* Create -1 for each negative element. */
> tcg_gen_shri_i64(t, b, nbit - 1);
> tcg_gen_andi_i64(t, t, dup_const(vece, 1));
> - tcg_gen_muli_i64(t, t, (1 << nbit) - 1);
> + tcg_gen_muli_i64(t, t, ((int64_t)1 << nbit) - 1);
In this function nbit can only be 8 or 16, so this shift
can never overflow.
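A sketch of that bound, using the MO_8 == 0 and MO_16 == 1 encoding
of vece:

#include <assert.h>

/* gen_absv_mask() is only reached with vece of MO_8 (0) or MO_16 (1),
 * so nbit = 8 << vece is 8 or 16 and the multiplier stays small: */
static_assert((1 << (8 << 1)) - 1 == 0xffff, "no overflow for nbit <= 16");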
I haven't checked whether any of the others are valid.
thanks
-- PMM