* [Qemu-devel] [PATCH] tcg: Use macros instead of hard-coded 0xffffffff for tcg_target_ulong masks
From: Chen Gang @ 2015-01-19 15:34 UTC
To: rth@twiddle.net, Alexander Graf; +Cc: QEMU Trivial, qemu-devel
For tcg_target_ulong (and TCGArg) values, the code often applies the
lower 32-bit mask or the higher 32-bit mask, so define macros for these
two masks. This keeps the code simpler and removes the several coding
styles currently used to spell them:
- For the lower mask, some sites append 'u', some append 'U', and some use no suffix at all.
- For the higher mask, some sites append 'ull' and some use a type cast.
- For a lower mask that may be shifted into the higher bits, 'ull' is appended.
Signed-off-by: Chen Gang <gang.chen.5i5j@gmail.com>
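A minimal sketch of what the two new macros select, assuming a 64-bit
tcg_target_ulong as on a 64-bit host (this standalone demo is
illustrative only and is not part of the patch):

    /* Illustrative demo, not part of the patch: a 64-bit
       tcg_target_ulong is assumed here. */
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t tcg_target_ulong;

    #define TCG_TARGET_ULONG_M32L ((tcg_target_ulong)0xffffffffu)
    #define TCG_TARGET_ULONG_M32H (~(tcg_target_ulong)0xffffffffu)

    int main(void)
    {
        tcg_target_ulong v = 0x123456789abcdef0ull;

        /* The low mask keeps the low half: prints 000000009abcdef0 */
        printf("%016llx\n", (unsigned long long)(v & TCG_TARGET_ULONG_M32L));
        /* The high mask keeps the high half: prints 1234567800000000 */
        printf("%016llx\n", (unsigned long long)(v & TCG_TARGET_ULONG_M32H));
        return 0;
    }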
---
tcg/optimize.c | 16 ++++++++--------
tcg/s390/tcg-target.c | 24 ++++++++++++------------
tcg/tcg.h | 3 +++
3 files changed, 23 insertions(+), 20 deletions(-)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index 34ae3c2..dc29223 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -174,7 +174,7 @@ static void tcg_opt_gen_mov(TCGContext *s, int op_index, TCGArg *gen_args,
mask = temps[src].mask;
if (TCG_TARGET_REG_BITS > 32 && new_op == INDEX_op_mov_i32) {
/* High bits of the destination are now garbage. */
- mask |= ~0xffffffffull;
+ mask |= TCG_TARGET_ULONG_M32H;
}
temps[dst].mask = mask;
@@ -211,7 +211,7 @@ static void tcg_opt_gen_movi(TCGContext *s, int op_index, TCGArg *gen_args,
mask = val;
if (TCG_TARGET_REG_BITS > 32 && new_op == INDEX_op_mov_i32) {
/* High bits of the destination are now garbage. */
- mask |= ~0xffffffffull;
+ mask |= TCG_TARGET_ULONG_M32H;
}
temps[dst].mask = mask;
@@ -354,7 +354,7 @@ static TCGArg do_constant_folding(TCGOpcode op, TCGArg x, TCGArg y)
{
TCGArg res = do_constant_folding_2(op, x, y);
if (op_bits(op) == 32) {
- res &= 0xffffffff;
+ res &= TCG_TARGET_ULONG_M32L;
}
return res;
}
@@ -804,7 +804,7 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
break;
}
case INDEX_op_ext32u_i64:
- mask = 0xffffffffU;
+ mask = TCG_TARGET_ULONG_M32L;
goto and_const;
CASE_OP_32_64(and):
@@ -895,7 +895,7 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
mask = 0xffff;
break;
case INDEX_op_ld32u_i64:
- mask = 0xffffffffu;
+ mask = TCG_TARGET_ULONG_M32L;
break;
CASE_OP_32_64(qemu_ld):
@@ -916,9 +916,9 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
need to record that the high bits contain garbage. */
partmask = mask;
if (!(def->flags & TCG_OPF_64BIT)) {
- mask |= ~(tcg_target_ulong)0xffffffffu;
- partmask &= 0xffffffffu;
- affected &= 0xffffffffu;
+ mask |= TCG_TARGET_ULONG_M32H;
+ partmask &= TCG_TARGET_ULONG_M32L;
+ affected &= TCG_TARGET_ULONG_M32L;
}
if (partmask == 0) {
diff --git a/tcg/s390/tcg-target.c b/tcg/s390/tcg-target.c
index 63e9c82..394fefc 100644
--- a/tcg/s390/tcg-target.c
+++ b/tcg/s390/tcg-target.c
@@ -677,11 +677,11 @@ static void tcg_out_movi(TCGContext *s, TCGType type,
tcg_out_insn(s, RIL, LGFI, ret, sval);
return;
}
- if (uval <= 0xffffffff) {
+ if (uval <= TCG_TARGET_ULONG_M32L) {
tcg_out_insn(s, RIL, LLILF, ret, uval);
return;
}
- if ((uval & 0xffffffff) == 0) {
+ if ((uval & TCG_TARGET_ULONG_M32L) == 0) {
tcg_out_insn(s, RIL, LLIHF, ret, uval >> 31 >> 1);
return;
}
@@ -702,7 +702,7 @@ static void tcg_out_movi(TCGContext *s, TCGType type,
/* A 32-bit unsigned value can be loaded in 2 insns. And given
that the lli_insns loop above did not succeed, we know that
both insns are required. */
- if (uval <= 0xffffffff) {
+ if (uval <= TCG_TARGET_ULONG_M32L) {
tcg_out_insn(s, RI, LLILL, ret, uval);
tcg_out_insn(s, RI, IILH, ret, uval >> 16);
return;
@@ -727,7 +727,7 @@ static void tcg_out_movi(TCGContext *s, TCGType type,
/* If we get here, both the high and low parts have non-zero bits. */
/* Recurse to load the lower 32-bits. */
- tcg_out_movi(s, TCG_TYPE_I64, ret, uval & 0xffffffff);
+ tcg_out_movi(s, TCG_TYPE_I64, ret, uval & TCG_TARGET_ULONG_M32L);
/* Insert data into the high 32-bits. */
uval = uval >> 31 >> 1;
@@ -1006,7 +1006,7 @@ static void tgen_andi(TCGContext *s, TCGType type, TCGReg dest, uint64_t val)
/* Try all 48-bit insns that can perform it in one go. */
if (facilities & FACILITY_EXT_IMM) {
for (i = 0; i < 2; i++) {
- tcg_target_ulong mask = ~(0xffffffffull << i*32);
+ tcg_target_ulong mask = ~(TCG_TARGET_ULONG_M32L << i*32);
if (((val | ~valid) & mask) == mask) {
tcg_out_insn_RIL(s, nif_insns[i], dest, val >> i*32);
return;
@@ -1055,7 +1055,7 @@ static void tgen64_ori(TCGContext *s, TCGReg dest, tcg_target_ulong val)
/* Try all 48-bit insns that can perform it in one go. */
for (i = 0; i < 2; i++) {
- tcg_target_ulong mask = (0xffffffffull << i*32);
+ tcg_target_ulong mask = (TCG_TARGET_ULONG_M32L << i*32);
if ((val & mask) != 0 && (val & ~mask) == 0) {
tcg_out_insn_RIL(s, nif_insns[i], dest, val >> i*32);
return;
@@ -1065,8 +1065,8 @@ static void tgen64_ori(TCGContext *s, TCGReg dest, tcg_target_ulong val)
/* Perform the OR via sequential modifications to the high and
low parts. Do this via recursion to handle 16-bit vs 32-bit
masks in each half. */
- tgen64_ori(s, dest, val & 0x00000000ffffffffull);
- tgen64_ori(s, dest, val & 0xffffffff00000000ull);
+ tgen64_ori(s, dest, val & TCG_TARGET_ULONG_M32L);
+ tgen64_ori(s, dest, val & TCG_TARGET_ULONG_M32H);
} else {
/* With no extended-immediate facility, we don't need to be so
clever. Just iterate over the insns and mask in the constant. */
@@ -1082,10 +1082,10 @@ static void tgen64_ori(TCGContext *s, TCGReg dest, tcg_target_ulong val)
static void tgen64_xori(TCGContext *s, TCGReg dest, tcg_target_ulong val)
{
/* Perform the xor by parts. */
- if (val & 0xffffffff) {
+ if (val & TCG_TARGET_ULONG_M32L) {
tcg_out_insn(s, RIL, XILF, dest, val);
}
- if (val > 0xffffffff) {
+ if (val > TCG_TARGET_ULONG_M32L) {
tcg_out_insn(s, RIL, XIHF, dest, val >> 31 >> 1);
}
}
@@ -1793,14 +1793,14 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
break;
case INDEX_op_or_i32:
if (const_args[2]) {
- tgen64_ori(s, args[0], args[2] & 0xffffffff);
+ tgen64_ori(s, args[0], args[2] & TCG_TARGET_ULONG_M32L);
} else {
tcg_out_insn(s, RR, OR, args[0], args[2]);
}
break;
case INDEX_op_xor_i32:
if (const_args[2]) {
- tgen64_xori(s, args[0], args[2] & 0xffffffff);
+ tgen64_xori(s, args[0], args[2] & TCG_TARGET_ULONG_M32L);
} else {
tcg_out_insn(s, RR, XR, args[0], args[2]);
}
diff --git a/tcg/tcg.h b/tcg/tcg.h
index 944b877..4f113ed 100644
--- a/tcg/tcg.h
+++ b/tcg/tcg.h
@@ -54,6 +54,9 @@ typedef uint64_t tcg_target_ulong;
#error unsupported
#endif
+#define TCG_TARGET_ULONG_M32L ((tcg_target_ulong)0xffffffffu)
+#define TCG_TARGET_ULONG_M32H (~(tcg_target_ulong)0xffffffffu)
+
#if TCG_TARGET_NB_REGS <= 32
typedef uint32_t TCGRegSet;
#elif TCG_TARGET_NB_REGS <= 64
--
1.9.3
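For reference, a small sketch of how the same definitions behave at each
target width (illustrative assertions only, not part of the patch):

    /* Illustrative check, not part of the patch: on a 32-bit
       tcg_target_ulong, M32L covers the whole type and M32H folds to 0,
       so "mask |= TCG_TARGET_ULONG_M32H" is a no-op there -- the same
       behaviour as the original ~(tcg_target_ulong)0xffffffffu cast. */
    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* 64-bit target word */
        assert(((uint64_t)0xffffffffu)  == 0x00000000ffffffffull);
        assert((~(uint64_t)0xffffffffu) == 0xffffffff00000000ull);

        /* 32-bit target word */
        assert(((uint32_t)0xffffffffu)  == 0xffffffffu);
        assert((~(uint32_t)0xffffffffu) == 0u);
        return 0;
    }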
* Re: [Qemu-devel] [PATCH] tcg: Use macros instead of hard-coded 0xffffffff for tcg_target_ulong masks
From: Peter Maydell @ 2015-01-19 15:58 UTC
To: Chen Gang; +Cc: QEMU Trivial, qemu-devel, Alexander Graf, rth@twiddle.net
On 19 January 2015 at 15:34, Chen Gang <gang.chen.5i5j@gmail.com> wrote:
> For tcg_target_ulong (and TCGArg) values, the code often applies the
> lower 32-bit mask or the higher 32-bit mask, so define macros for these
> two masks. This keeps the code simpler and removes the several coding
> styles currently used to spell them:
>
> - For the lower mask, some sites append 'u', some append 'U', and some use no suffix at all.
>
> - For the higher mask, some sites append 'ull' and some use a type cast.
>
> - For a lower mask that may be shifted into the higher bits, 'ull' is appended.
Is this patch fixing any actual bugs? I think code-style-wise
I prefer what we have at the moment...
-- PMM
* Re: [Qemu-devel] [PATCH] tcg: Use macros instead of hard-coded 0xffffffff for tcg_target_ulong masks
From: Gang Chen @ 2015-01-21 1:46 UTC
To: Peter Maydell; +Cc: QEMU Trivial, qemu-devel, Alexander Graf, rth@twiddle.net
It is not fixing any bugs; I would class it as a trivial patch (it fixes
no bugs and changes no working flow).
And excuse me, gmail is broken again in China, so I have to reply from the
web site via Safari on my MacBook (which does not send plain text).
On Mon, Jan 19, 2015 at 11:58 PM, Peter Maydell <peter.maydell@linaro.org>
wrote:
> On 19 January 2015 at 15:34, Chen Gang <gang.chen.5i5j@gmail.com> wrote:
> > For tcg_target_ulong (and TCGArg) values, the code often applies the
> > lower 32-bit mask or the higher 32-bit mask, so define macros for these
> > two masks. This keeps the code simpler and removes the several coding
> > styles currently used to spell them:
> >
> > - For the lower mask, some sites append 'u', some append 'U', and some use no suffix at all.
> >
> > - For the higher mask, some sites append 'ull' and some use a type cast.
> >
> > - For a lower mask that may be shifted into the higher bits, 'ull' is appended.
>
> Is this patch fixing any actual bugs? I think code-style-wise
> I prefer what we have at the moment...
>
> -- PMM
>
--
Chen Gang
* Re: [Qemu-devel] [PATCH] tcg: Use macros instead of hard-coded 0xffffffff for tcg_target_ulong masks
From: Chen Gang S @ 2015-01-18 16:25 UTC
To: rth, agraf; +Cc: QEMU Trivial, qemu-devel
Excuse me, gmail is broken in China these days; I can only log in to the
gmail web site via Safari on my MacBook (which cannot send plain-text
mail), so I have to use another mail address to send this patch.
Thanks.
On 1/19/15 00:22, Chen Gang S wrote:
> For tcg_target_ulong (and TCGArg) values, the code often applies the
> lower 32-bit mask or the higher 32-bit mask, so define macros for these
> two masks. This keeps the code simpler and also removes the several
> coding styles currently used to spell them:
>
> - For the lower mask, some sites append 'u', some append 'U', and some use no suffix at all.
>
> - For the higher mask, some sites append 'ull' and some use a type cast.
>
> - For a lower mask that may be shifted into the higher bits, 'ull' is appended.
>
>
> Signed-off-by: Chen Gang <gang.chen.5i5j@gmail.com>
> ---
> tcg/optimize.c | 16 ++++++++--------
> tcg/s390/tcg-target.c | 24 ++++++++++++------------
> tcg/tcg.h | 3 +++
> 3 files changed, 23 insertions(+), 20 deletions(-)
>
> diff --git a/tcg/optimize.c b/tcg/optimize.c
> index 34ae3c2..dc29223 100644
> --- a/tcg/optimize.c
> +++ b/tcg/optimize.c
> @@ -174,7 +174,7 @@ static void tcg_opt_gen_mov(TCGContext *s, int op_index, TCGArg *gen_args,
> mask = temps[src].mask;
> if (TCG_TARGET_REG_BITS > 32 && new_op == INDEX_op_mov_i32) {
> /* High bits of the destination are now garbage. */
> - mask |= ~0xffffffffull;
> + mask |= TCG_TARGET_ULONG_M32H;
> }
> temps[dst].mask = mask;
>
> @@ -211,7 +211,7 @@ static void tcg_opt_gen_movi(TCGContext *s, int op_index, TCGArg *gen_args,
> mask = val;
> if (TCG_TARGET_REG_BITS > 32 && new_op == INDEX_op_mov_i32) {
> /* High bits of the destination are now garbage. */
> - mask |= ~0xffffffffull;
> + mask |= TCG_TARGET_ULONG_M32H;
> }
> temps[dst].mask = mask;
>
> @@ -354,7 +354,7 @@ static TCGArg do_constant_folding(TCGOpcode op, TCGArg x, TCGArg y)
> {
> TCGArg res = do_constant_folding_2(op, x, y);
> if (op_bits(op) == 32) {
> - res &= 0xffffffff;
> + res &= TCG_TARGET_ULONG_M32L;
> }
> return res;
> }
> @@ -804,7 +804,7 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
> break;
> }
> case INDEX_op_ext32u_i64:
> - mask = 0xffffffffU;
> + mask = TCG_TARGET_ULONG_M32L;
> goto and_const;
>
> CASE_OP_32_64(and):
> @@ -895,7 +895,7 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
> mask = 0xffff;
> break;
> case INDEX_op_ld32u_i64:
> - mask = 0xffffffffu;
> + mask = TCG_TARGET_ULONG_M32L;
> break;
>
> CASE_OP_32_64(qemu_ld):
> @@ -916,9 +916,9 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
> need to record that the high bits contain garbage. */
> partmask = mask;
> if (!(def->flags & TCG_OPF_64BIT)) {
> - mask |= ~(tcg_target_ulong)0xffffffffu;
> - partmask &= 0xffffffffu;
> - affected &= 0xffffffffu;
> + mask |= TCG_TARGET_ULONG_M32H;
> + partmask &= TCG_TARGET_ULONG_M32L;
> + affected &= TCG_TARGET_ULONG_M32L;
> }
>
> if (partmask == 0) {
> diff --git a/tcg/s390/tcg-target.c b/tcg/s390/tcg-target.c
> index 63e9c82..394fefc 100644
> --- a/tcg/s390/tcg-target.c
> +++ b/tcg/s390/tcg-target.c
> @@ -677,11 +677,11 @@ static void tcg_out_movi(TCGContext *s, TCGType type,
> tcg_out_insn(s, RIL, LGFI, ret, sval);
> return;
> }
> - if (uval <= 0xffffffff) {
> + if (uval <= TCG_TARGET_ULONG_M32L) {
> tcg_out_insn(s, RIL, LLILF, ret, uval);
> return;
> }
> - if ((uval & 0xffffffff) == 0) {
> + if ((uval & TCG_TARGET_ULONG_M32L) == 0) {
> tcg_out_insn(s, RIL, LLIHF, ret, uval >> 31 >> 1);
> return;
> }
> @@ -702,7 +702,7 @@ static void tcg_out_movi(TCGContext *s, TCGType type,
> /* A 32-bit unsigned value can be loaded in 2 insns. And given
> that the lli_insns loop above did not succeed, we know that
> both insns are required. */
> - if (uval <= 0xffffffff) {
> + if (uval <= TCG_TARGET_ULONG_M32L) {
> tcg_out_insn(s, RI, LLILL, ret, uval);
> tcg_out_insn(s, RI, IILH, ret, uval >> 16);
> return;
> @@ -727,7 +727,7 @@ static void tcg_out_movi(TCGContext *s, TCGType type,
> /* If we get here, both the high and low parts have non-zero bits. */
>
> /* Recurse to load the lower 32-bits. */
> - tcg_out_movi(s, TCG_TYPE_I64, ret, uval & 0xffffffff);
> + tcg_out_movi(s, TCG_TYPE_I64, ret, uval & TCG_TARGET_ULONG_M32L);
>
> /* Insert data into the high 32-bits. */
> uval = uval >> 31 >> 1;
> @@ -1006,7 +1006,7 @@ static void tgen_andi(TCGContext *s, TCGType type, TCGReg dest, uint64_t val)
> /* Try all 48-bit insns that can perform it in one go. */
> if (facilities & FACILITY_EXT_IMM) {
> for (i = 0; i < 2; i++) {
> - tcg_target_ulong mask = ~(0xffffffffull << i*32);
> + tcg_target_ulong mask = ~(TCG_TARGET_ULONG_M32L << i*32);
> if (((val | ~valid) & mask) == mask) {
> tcg_out_insn_RIL(s, nif_insns[i], dest, val >> i*32);
> return;
> @@ -1055,7 +1055,7 @@ static void tgen64_ori(TCGContext *s, TCGReg dest, tcg_target_ulong val)
>
> /* Try all 48-bit insns that can perform it in one go. */
> for (i = 0; i < 2; i++) {
> - tcg_target_ulong mask = (0xffffffffull << i*32);
> + tcg_target_ulong mask = (TCG_TARGET_ULONG_M32L << i*32);
> if ((val & mask) != 0 && (val & ~mask) == 0) {
> tcg_out_insn_RIL(s, nif_insns[i], dest, val >> i*32);
> return;
> @@ -1065,8 +1065,8 @@ static void tgen64_ori(TCGContext *s, TCGReg dest, tcg_target_ulong val)
> /* Perform the OR via sequential modifications to the high and
> low parts. Do this via recursion to handle 16-bit vs 32-bit
> masks in each half. */
> - tgen64_ori(s, dest, val & 0x00000000ffffffffull);
> - tgen64_ori(s, dest, val & 0xffffffff00000000ull);
> + tgen64_ori(s, dest, val & TCG_TARGET_ULONG_M32L);
> + tgen64_ori(s, dest, val & TCG_TARGET_ULONG_M32H);
> } else {
> /* With no extended-immediate facility, we don't need to be so
> clever. Just iterate over the insns and mask in the constant. */
> @@ -1082,10 +1082,10 @@ static void tgen64_ori(TCGContext *s, TCGReg dest, tcg_target_ulong val)
> static void tgen64_xori(TCGContext *s, TCGReg dest, tcg_target_ulong val)
> {
> /* Perform the xor by parts. */
> - if (val & 0xffffffff) {
> + if (val & TCG_TARGET_ULONG_M32L) {
> tcg_out_insn(s, RIL, XILF, dest, val);
> }
> - if (val > 0xffffffff) {
> + if (val > TCG_TARGET_ULONG_M32L) {
> tcg_out_insn(s, RIL, XIHF, dest, val >> 31 >> 1);
> }
> }
> @@ -1793,14 +1793,14 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
> break;
> case INDEX_op_or_i32:
> if (const_args[2]) {
> - tgen64_ori(s, args[0], args[2] & 0xffffffff);
> + tgen64_ori(s, args[0], args[2] & TCG_TARGET_ULONG_M32L);
> } else {
> tcg_out_insn(s, RR, OR, args[0], args[2]);
> }
> break;
> case INDEX_op_xor_i32:
> if (const_args[2]) {
> - tgen64_xori(s, args[0], args[2] & 0xffffffff);
> + tgen64_xori(s, args[0], args[2] & TCG_TARGET_ULONG_M32L);
> } else {
> tcg_out_insn(s, RR, XR, args[0], args[2]);
> }
> diff --git a/tcg/tcg.h b/tcg/tcg.h
> index 944b877..4f113ed 100644
> --- a/tcg/tcg.h
> +++ b/tcg/tcg.h
> @@ -54,6 +54,9 @@ typedef uint64_t tcg_target_ulong;
> #error unsupported
> #endif
>
> +#define TCG_TARGET_ULONG_M32L ((tcg_target_ulong)0xffffffffu)
> +#define TCG_TARGET_ULONG_M32H (~(tcg_target_ulong)0xffffffffu)
> +
> #if TCG_TARGET_NB_REGS <= 32
> typedef uint32_t TCGRegSet;
> #elif TCG_TARGET_NB_REGS <= 64
>
--
Chen Gang
Open, share, and attitude like air, water, and life which God blessed