From: "Philippe Mathieu-Daudé" <philmd@linaro.org>
To: qemu-devel@nongnu.org, Richard Henderson <richard.henderson@linaro.org>
Cc: "Paolo Bonzini" <pbonzini@redhat.com>,
"Pierrick Bouvier" <pierrick.bouvier@linaro.org>,
"Alex Bennée" <alex.bennee@linaro.org>,
"Anton Johansson" <anjo@rev.ng>,
"Philippe Mathieu-Daudé" <philmd@linaro.org>
Subject: [RFC PATCH-for-10.1 v2 5/7] tcg: Propagate CPUState argument to cpu_req_mo()
Date: Fri, 21 Mar 2025 19:15:47 +0100
Message-ID: <20250321181549.3331-6-philmd@linaro.org>
In-Reply-To: <20250321181549.3331-1-philmd@linaro.org>

In preparation for having tcg_req_mo() access CPUState in the
next commit, pass CPUState down to cpu_req_mo(), which is the
only caller of tcg_req_mo().

Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
---
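
Note: the conversion is purely mechanical.  As a standalone sketch of
the pattern (toy names, not QEMU code), threading an argument through
a macro ahead of its first use keeps every step bisectable: call
sites are all converted here, and only the macro body needs to change
when the argument is finally consumed.  Unlike an unused function
parameter, an unused macro argument triggers no compiler warning,
because it is simply never substituted into the expansion:

    #include <stdio.h>

    typedef struct CPUState { int dummy; } CPUState;       /* toy stand-in */

    static int tcg_req_mo(int type) { return type != 0; }  /* toy predicate */
    static void barrier(void) { puts("smp_mb()"); }         /* toy barrier */

    /* Step 1 (this patch): accept 'cpu', do not expand it yet. */
    #define cpu_req_mo(cpu, type)   \
        do {                        \
            if (tcg_req_mo(type)) { \
                barrier();          \
            }                       \
        } while (0)

    int main(void)
    {
        CPUState cpu;
        (void)cpu;  /* 'cpu' never reaches the expansion yet */
        cpu_req_mo(&cpu, 1);    /* emits the barrier */
        cpu_req_mo(&cpu, 0);    /* no barrier */
        return 0;
    }
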
 accel/tcg/internal-target.h |  3 ++-
 accel/tcg/cputlb.c          | 20 ++++++++++----------
 accel/tcg/user-exec.c       | 20 ++++++++++----------
 3 files changed, 22 insertions(+), 21 deletions(-)

diff --git a/accel/tcg/internal-target.h b/accel/tcg/internal-target.h
index 1cb35dba99e..992362be7e6 100644
--- a/accel/tcg/internal-target.h
+++ b/accel/tcg/internal-target.h
@@ -57,12 +57,13 @@ G_NORETURN void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
 
 /**
  * cpu_req_mo:
+ * @cpu: CPUState
  * @type: TCGBar
  *
  * If tcg_req_mo indicates a barrier for @type is required
  * for the guest memory model, issue a host memory barrier.
  */
-#define cpu_req_mo(type)          \
+#define cpu_req_mo(cpu, type)     \
     do {                          \
         if (tcg_req_mo(type)) {   \
             smp_mb();             \
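
For review convenience (illustrative, not part of the diff): a
converted call site expands to exactly the code it produced before
this patch, because the macro body does not yet reference its new
first argument.  That is,

    cpu_req_mo(cpu, TCG_MO_LD_LD | TCG_MO_ST_LD);

currently preprocesses to

    do {
        if (tcg_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD)) {
            smp_mb();
        }
    } while (0);
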
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index fb22048876e..b6713efdb81 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -2321,7 +2321,7 @@ static uint8_t do_ld1_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
     MMULookupLocals l;
     bool crosspage;
 
-    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
+    cpu_req_mo(cpu, TCG_MO_LD_LD | TCG_MO_ST_LD);
     crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l);
     tcg_debug_assert(!crosspage);
 
@@ -2336,7 +2336,7 @@ static uint16_t do_ld2_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
     uint16_t ret;
     uint8_t a, b;
 
-    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
+    cpu_req_mo(cpu, TCG_MO_LD_LD | TCG_MO_ST_LD);
     crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l);
     if (likely(!crosspage)) {
         return do_ld_2(cpu, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
@@ -2360,7 +2360,7 @@ static uint32_t do_ld4_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
     bool crosspage;
     uint32_t ret;
 
-    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
+    cpu_req_mo(cpu, TCG_MO_LD_LD | TCG_MO_ST_LD);
     crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l);
     if (likely(!crosspage)) {
         return do_ld_4(cpu, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
@@ -2381,7 +2381,7 @@ static uint64_t do_ld8_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
     bool crosspage;
     uint64_t ret;
 
-    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
+    cpu_req_mo(cpu, TCG_MO_LD_LD | TCG_MO_ST_LD);
     crosspage = mmu_lookup(cpu, addr, oi, ra, access_type, &l);
     if (likely(!crosspage)) {
         return do_ld_8(cpu, &l.page[0], l.mmu_idx, access_type, l.memop, ra);
@@ -2404,7 +2404,7 @@ static Int128 do_ld16_mmu(CPUState *cpu, vaddr addr,
     Int128 ret;
     int first;
 
-    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
+    cpu_req_mo(cpu, TCG_MO_LD_LD | TCG_MO_ST_LD);
     crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_LOAD, &l);
     if (likely(!crosspage)) {
         if (unlikely(l.page[0].flags & TLB_MMIO)) {
@@ -2732,7 +2732,7 @@ static void do_st1_mmu(CPUState *cpu, vaddr addr, uint8_t val,
     MMULookupLocals l;
     bool crosspage;
 
-    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
+    cpu_req_mo(cpu, TCG_MO_LD_ST | TCG_MO_ST_ST);
     crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
     tcg_debug_assert(!crosspage);
 
@@ -2746,7 +2746,7 @@ static void do_st2_mmu(CPUState *cpu, vaddr addr, uint16_t val,
     bool crosspage;
     uint8_t a, b;
 
-    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
+    cpu_req_mo(cpu, TCG_MO_LD_ST | TCG_MO_ST_ST);
     crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
     if (likely(!crosspage)) {
         do_st_2(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
@@ -2768,7 +2768,7 @@ static void do_st4_mmu(CPUState *cpu, vaddr addr, uint32_t val,
     MMULookupLocals l;
     bool crosspage;
 
-    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
+    cpu_req_mo(cpu, TCG_MO_LD_ST | TCG_MO_ST_ST);
     crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
     if (likely(!crosspage)) {
         do_st_4(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
@@ -2789,7 +2789,7 @@ static void do_st8_mmu(CPUState *cpu, vaddr addr, uint64_t val,
     MMULookupLocals l;
     bool crosspage;
 
-    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
+    cpu_req_mo(cpu, TCG_MO_LD_ST | TCG_MO_ST_ST);
     crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
     if (likely(!crosspage)) {
         do_st_8(cpu, &l.page[0], val, l.mmu_idx, l.memop, ra);
@@ -2812,7 +2812,7 @@ static void do_st16_mmu(CPUState *cpu, vaddr addr, Int128 val,
     uint64_t a, b;
     int first;
 
-    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
+    cpu_req_mo(cpu, TCG_MO_LD_ST | TCG_MO_ST_ST);
    crosspage = mmu_lookup(cpu, addr, oi, ra, MMU_DATA_STORE, &l);
     if (likely(!crosspage)) {
         if (unlikely(l.page[0].flags & TLB_MMIO)) {
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
index 2322181b151..5bda8fb5514 100644
--- a/accel/tcg/user-exec.c
+++ b/accel/tcg/user-exec.c
@@ -1059,7 +1059,7 @@ static uint8_t do_ld1_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
     void *haddr;
     uint8_t ret;
 
-    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
+    cpu_req_mo(cpu, TCG_MO_LD_LD | TCG_MO_ST_LD);
     haddr = cpu_mmu_lookup(cpu, addr, get_memop(oi), ra, access_type);
     ret = ldub_p(haddr);
     clear_helper_retaddr();
@@ -1073,7 +1073,7 @@ static uint16_t do_ld2_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
     uint16_t ret;
     MemOp mop = get_memop(oi);
 
-    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
+    cpu_req_mo(cpu, TCG_MO_LD_LD | TCG_MO_ST_LD);
     haddr = cpu_mmu_lookup(cpu, addr, mop, ra, access_type);
     ret = load_atom_2(cpu, ra, haddr, mop);
     clear_helper_retaddr();
@@ -1091,7 +1091,7 @@ static uint32_t do_ld4_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
     uint32_t ret;
     MemOp mop = get_memop(oi);
 
-    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
+    cpu_req_mo(cpu, TCG_MO_LD_LD | TCG_MO_ST_LD);
     haddr = cpu_mmu_lookup(cpu, addr, mop, ra, access_type);
     ret = load_atom_4(cpu, ra, haddr, mop);
     clear_helper_retaddr();
@@ -1109,7 +1109,7 @@ static uint64_t do_ld8_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
     uint64_t ret;
     MemOp mop = get_memop(oi);
 
-    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
+    cpu_req_mo(cpu, TCG_MO_LD_LD | TCG_MO_ST_LD);
     haddr = cpu_mmu_lookup(cpu, addr, mop, ra, access_type);
     ret = load_atom_8(cpu, ra, haddr, mop);
     clear_helper_retaddr();
@@ -1128,7 +1128,7 @@ static Int128 do_ld16_mmu(CPUState *cpu, abi_ptr addr,
     MemOp mop = get_memop(oi);
 
     tcg_debug_assert((mop & MO_SIZE) == MO_128);
-    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
+    cpu_req_mo(cpu, TCG_MO_LD_LD | TCG_MO_ST_LD);
     haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_LOAD);
     ret = load_atom_16(cpu, ra, haddr, mop);
     clear_helper_retaddr();
@@ -1144,7 +1144,7 @@ static void do_st1_mmu(CPUState *cpu, vaddr addr, uint8_t val,
 {
     void *haddr;
 
-    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
+    cpu_req_mo(cpu, TCG_MO_LD_ST | TCG_MO_ST_ST);
     haddr = cpu_mmu_lookup(cpu, addr, get_memop(oi), ra, MMU_DATA_STORE);
     stb_p(haddr, val);
     clear_helper_retaddr();
@@ -1156,7 +1156,7 @@ static void do_st2_mmu(CPUState *cpu, vaddr addr, uint16_t val,
     void *haddr;
     MemOp mop = get_memop(oi);
 
-    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
+    cpu_req_mo(cpu, TCG_MO_LD_ST | TCG_MO_ST_ST);
     haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_STORE);
 
     if (mop & MO_BSWAP) {
@@ -1172,7 +1172,7 @@ static void do_st4_mmu(CPUState *cpu, vaddr addr, uint32_t val,
     void *haddr;
     MemOp mop = get_memop(oi);
 
-    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
+    cpu_req_mo(cpu, TCG_MO_LD_ST | TCG_MO_ST_ST);
     haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_STORE);
 
     if (mop & MO_BSWAP) {
@@ -1188,7 +1188,7 @@ static void do_st8_mmu(CPUState *cpu, vaddr addr, uint64_t val,
     void *haddr;
     MemOp mop = get_memop(oi);
 
-    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
+    cpu_req_mo(cpu, TCG_MO_LD_ST | TCG_MO_ST_ST);
     haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_STORE);
 
     if (mop & MO_BSWAP) {
@@ -1204,7 +1204,7 @@ static void do_st16_mmu(CPUState *cpu, vaddr addr, Int128 val,
     void *haddr;
     MemOpIdx mop = get_memop(oi);
 
-    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
+    cpu_req_mo(cpu, TCG_MO_LD_ST | TCG_MO_ST_ST);
     haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_STORE);
 
     if (mop & MO_BSWAP) {
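
For context on where the series is heading: patch 6/7 makes
tcg_req_mo() consult TCGCPUOps::guest_default_memory_order via the
CPUState threaded through here.  One plausible shape of that
follow-up, sketched from the patch titles (the exact expression is an
assumption, not the committed code):

    /* Hypothetical follow-up; illustrative only. */
    #define tcg_req_mo(cpu, type) \
        ((type) & ~(cpu)->cc->tcg_ops->guest_default_memory_order \
                & ~TCG_TARGET_DEFAULT_MO)
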
--
2.47.1