From: "Philippe Mathieu-Daudé" <philmd@linaro.org>
To: Anton Johansson <anjo@rev.ng>, qemu-devel@nongnu.org
Cc: "Paolo Bonzini" <pbonzini@redhat.com>,
	"Richard Henderson" <richard.henderson@linaro.org>,
	"Pavel Dovgalyuk" <Pavel.Dovgalyuk@ispras.ru>,
	"Claudio Fontana" <cfontana@suse.de>,
	"Philippe Mathieu-Daudé" <philmd@linaro.org>
Subject: [PATCH 5/9] accel/tcg: Hoist CPUClass arg to functions with external linkage
Date: Wed, 24 Jan 2024 11:16:35 +0100
Message-ID: <20240124101639.30056-6-philmd@linaro.org>
In-Reply-To: <20240124101639.30056-1-philmd@linaro.org>

Hoist the CPU_GET_CLASS() call out of these internal helpers (each of
which now takes a CPUClass argument):

 - check_for_breakpoints_slow()
 - check_for_breakpoints()
 - cpu_tb_exec()
 - cpu_exec_enter()
 - cpu_exec_exit()
 - cpu_handle_halt()
 - cpu_handle_debug_exception()
 - cpu_handle_exception()
 - need_replay_interrupt()
 - cpu_handle_interrupt()
 - cpu_loop_exec_tb()
 - cpu_exec_loop()
 - cpu_exec_setjmp()

up to the following functions with external linkage, which look up the
class once and pass it down:

 - lookup_tb_ptr()
 - cpu_exec_step_atomic()
 - cpu_exec()

Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
---
 accel/tcg/cpu-exec.c | 82 ++++++++++++++++++++------------------------
 1 file changed, 37 insertions(+), 45 deletions(-)
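
For reviewers, an illustrative sketch of the pattern, condensed from the
cpu_exec_enter() and cpu_exec() hunks below (not a standalone program):
the class is now resolved once at the externally visible entry point and
threaded through as a parameter, instead of each helper calling
CPU_GET_CLASS() on its own.

    /* Before: the internal helper resolves the class itself. */
    static void cpu_exec_enter(CPUState *cpu)
    {
        CPUClass *cc = CPU_GET_CLASS(cpu);

        if (cc->tcg_ops->cpu_exec_enter) {
            cc->tcg_ops->cpu_exec_enter(cpu);
        }
    }

    /* After: the helper takes the class from its caller... */
    static void cpu_exec_enter(CPUClass *cc, CPUState *cpu)
    {
        if (cc->tcg_ops->cpu_exec_enter) {
            cc->tcg_ops->cpu_exec_enter(cpu);
        }
    }

    /* ...and the external entry point resolves it exactly once. */
    int cpu_exec(CPUState *cpu)
    {
        CPUClass *cc = CPU_GET_CLASS(cpu);
        /* ... */
        cpu_exec_enter(cc, cpu);
        /* ... */
    }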

diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index d61b285d5e..b10472cbc7 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -324,8 +324,8 @@ static void log_cpu_exec(vaddr pc, CPUState *cpu,
     }
 }
 
-static bool check_for_breakpoints_slow(CPUState *cpu, vaddr pc,
-                                       uint32_t *cflags)
+static bool check_for_breakpoints_slow(CPUClass *cc, CPUState *cpu,
+                                       vaddr pc, uint32_t *cflags)
 {
     CPUBreakpoint *bp;
     bool match_page = false;
@@ -357,7 +357,6 @@ static bool check_for_breakpoints_slow(CPUState *cpu, vaddr pc,
 #ifdef CONFIG_USER_ONLY
                 g_assert_not_reached();
 #else
-                CPUClass *cc = CPU_GET_CLASS(cpu);
                 assert(cc->tcg_ops->debug_check_breakpoint);
                 match_bp = cc->tcg_ops->debug_check_breakpoint(cpu);
 #endif
@@ -390,11 +389,11 @@ static bool check_for_breakpoints_slow(CPUState *cpu, vaddr pc,
     return false;
 }
 
-static inline bool check_for_breakpoints(CPUState *cpu, vaddr pc,
-                                         uint32_t *cflags)
+static inline bool check_for_breakpoints(CPUClass *cc, CPUState *cpu,
+                                         vaddr pc, uint32_t *cflags)
 {
     return unlikely(!QTAILQ_EMPTY(&cpu->breakpoints)) &&
-        check_for_breakpoints_slow(cpu, pc, cflags);
+        check_for_breakpoints_slow(cc, cpu, pc, cflags);
 }
 
 /**
@@ -408,6 +407,7 @@ static inline bool check_for_breakpoints(CPUState *cpu, vaddr pc,
 const void *HELPER(lookup_tb_ptr)(CPUArchState *env)
 {
     CPUState *cpu = env_cpu(env);
+    CPUClass *cc = CPU_GET_CLASS(cpu);
     TranslationBlock *tb;
     vaddr pc;
     uint64_t cs_base;
@@ -416,7 +416,7 @@ const void *HELPER(lookup_tb_ptr)(CPUArchState *env)
     cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
 
     cflags = curr_cflags(cpu);
-    if (check_for_breakpoints(cpu, pc, &cflags)) {
+    if (check_for_breakpoints(cc, cpu, pc, &cflags)) {
         cpu_loop_exit(cpu);
     }
 
@@ -443,7 +443,7 @@ const void *HELPER(lookup_tb_ptr)(CPUArchState *env)
  * affect the impact of CFI in environment with high security requirements
  */
 static inline TranslationBlock * QEMU_DISABLE_CFI
-cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
+cpu_tb_exec(CPUClass *cc, CPUState *cpu, TranslationBlock *itb, int *tb_exit)
 {
     CPUArchState *env = cpu_env(cpu);
     uintptr_t ret;
@@ -476,8 +476,6 @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
          * counter hit zero); we must restore the guest PC to the address
          * of the start of the TB.
          */
-        CPUClass *cc = CPU_GET_CLASS(cpu);
-
         if (cc->tcg_ops->synchronize_from_tb) {
             cc->tcg_ops->synchronize_from_tb(cpu, last_tb);
         } else {
@@ -509,19 +507,15 @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
 }
 
 
-static void cpu_exec_enter(CPUState *cpu)
+static void cpu_exec_enter(CPUClass *cc, CPUState *cpu)
 {
-    CPUClass *cc = CPU_GET_CLASS(cpu);
-
     if (cc->tcg_ops->cpu_exec_enter) {
         cc->tcg_ops->cpu_exec_enter(cpu);
     }
 }
 
-static void cpu_exec_exit(CPUState *cpu)
+static void cpu_exec_exit(CPUClass *cc, CPUState *cpu)
 {
-    CPUClass *cc = CPU_GET_CLASS(cpu);
-
     if (cc->tcg_ops->cpu_exec_exit) {
         cc->tcg_ops->cpu_exec_exit(cpu);
     }
@@ -566,6 +560,7 @@ static void cpu_exec_longjmp_cleanup(CPUState *cpu)
 
 void cpu_exec_step_atomic(CPUState *cpu)
 {
+    CPUClass *cc = CPU_GET_CLASS(cpu);
     CPUArchState *env = cpu_env(cpu);
     TranslationBlock *tb;
     vaddr pc;
@@ -600,11 +595,11 @@ void cpu_exec_step_atomic(CPUState *cpu)
             mmap_unlock();
         }
 
-        cpu_exec_enter(cpu);
+        cpu_exec_enter(cc, cpu);
         /* execute the generated code */
         trace_exec_tb(tb, pc);
-        cpu_tb_exec(cpu, tb, &tb_exit);
-        cpu_exec_exit(cpu);
+        cpu_tb_exec(cc, cpu, tb, &tb_exit);
+        cpu_exec_exit(cc, cpu);
     } else {
         cpu_exec_longjmp_cleanup(cpu);
     }
@@ -673,7 +668,7 @@ static inline void tb_add_jump(TranslationBlock *tb, int n,
     return;
 }
 
-static inline bool cpu_handle_halt(CPUState *cpu)
+static inline bool cpu_handle_halt(CPUClass *cc, CPUState *cpu)
 {
 #ifndef CONFIG_USER_ONLY
     if (cpu->halted) {
@@ -697,9 +692,8 @@ static inline bool cpu_handle_halt(CPUState *cpu)
     return false;
 }
 
-static inline void cpu_handle_debug_exception(CPUState *cpu)
+static inline void cpu_handle_debug_exception(CPUClass *cc, CPUState *cpu)
 {
-    CPUClass *cc = CPU_GET_CLASS(cpu);
     CPUWatchpoint *wp;
 
     if (!cpu->watchpoint_hit) {
@@ -713,7 +707,7 @@ static inline void cpu_handle_debug_exception(CPUState *cpu)
     }
 }
 
-static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
+static inline bool cpu_handle_exception(CPUClass *cc, CPUState *cpu, int *ret)
 {
     if (cpu->exception_index < 0) {
 #ifndef CONFIG_USER_ONLY
@@ -730,7 +724,7 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
         /* exit request from the cpu execution loop */
         *ret = cpu->exception_index;
         if (*ret == EXCP_DEBUG) {
-            cpu_handle_debug_exception(cpu);
+            cpu_handle_debug_exception(cc, cpu);
         }
         cpu->exception_index = -1;
         return true;
@@ -740,7 +734,6 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
            which will be handled outside the cpu execution
            loop */
 #if defined(TARGET_I386)
-        CPUClass *cc = CPU_GET_CLASS(cpu);
         cc->tcg_ops->fake_user_interrupt(cpu);
 #endif /* TARGET_I386 */
         *ret = cpu->exception_index;
@@ -748,7 +741,6 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
         return true;
 #else
         if (replay_exception()) {
-            CPUClass *cc = CPU_GET_CLASS(cpu);
             bql_lock();
             cc->tcg_ops->do_interrupt(cpu);
             bql_unlock();
@@ -761,7 +753,7 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
                  * next instruction.
                  */
                 *ret = EXCP_DEBUG;
-                cpu_handle_debug_exception(cpu);
+                cpu_handle_debug_exception(cc, cpu);
                 return true;
             }
         } else if (!replay_has_interrupt()) {
@@ -781,7 +773,7 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
  * "real" interrupt event later. It does not need to be recorded for
  * replay purposes.
  */
-static inline bool need_replay_interrupt(int interrupt_request)
+static inline bool need_replay_interrupt(CPUClass *cc, int interrupt_request)
 {
 #if defined(TARGET_I386)
     return !(interrupt_request & CPU_INTERRUPT_POLL);
@@ -802,7 +794,7 @@ static inline bool icount_exit_request(CPUState *cpu)
     return cpu->neg.icount_decr.u16.low + cpu->icount_extra == 0;
 }
 
-static inline bool cpu_handle_interrupt(CPUState *cpu,
+static inline bool cpu_handle_interrupt(CPUClass *cc, CPUState *cpu,
                                         TranslationBlock **last_tb)
 {
     /*
@@ -870,11 +862,9 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
            True when it is, and we should restart on a new TB,
            and via longjmp via cpu_loop_exit.  */
         else {
-            CPUClass *cc = CPU_GET_CLASS(cpu);
-
             if (cc->tcg_ops->cpu_exec_interrupt &&
                 cc->tcg_ops->cpu_exec_interrupt(cpu, interrupt_request)) {
-                if (need_replay_interrupt(interrupt_request)) {
+                if (need_replay_interrupt(cc, interrupt_request)) {
                     replay_interrupt();
                 }
                 /*
@@ -918,14 +908,15 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
     return false;
 }
 
-static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
+static inline void cpu_loop_exec_tb(CPUClass *cc, CPUState *cpu,
+                                    TranslationBlock *tb,
                                     vaddr pc, TranslationBlock **last_tb,
                                     int *tb_exit)
 {
     int32_t insns_left;
 
     trace_exec_tb(tb, pc);
-    tb = cpu_tb_exec(cpu, tb, tb_exit);
+    tb = cpu_tb_exec(cc, cpu, tb, tb_exit);
     if (*tb_exit != TB_EXIT_REQUESTED) {
         *last_tb = tb;
         return;
@@ -970,16 +961,16 @@ static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
 /* main execution loop */
 
 static int __attribute__((noinline))
-cpu_exec_loop(CPUState *cpu, SyncClocks *sc)
+cpu_exec_loop(CPUClass *cc, CPUState *cpu, SyncClocks *sc)
 {
     int ret;
 
     /* if an exception is pending, we execute it here */
-    while (!cpu_handle_exception(cpu, &ret)) {
+    while (!cpu_handle_exception(cc, cpu, &ret)) {
         TranslationBlock *last_tb = NULL;
         int tb_exit = 0;
 
-        while (!cpu_handle_interrupt(cpu, &last_tb)) {
+        while (!cpu_handle_interrupt(cc, cpu, &last_tb)) {
             TranslationBlock *tb;
             vaddr pc;
             uint64_t cs_base;
@@ -1001,7 +992,7 @@ cpu_exec_loop(CPUState *cpu, SyncClocks *sc)
                 cpu->cflags_next_tb = -1;
             }
 
-            if (check_for_breakpoints(cpu, pc, &cflags)) {
+            if (check_for_breakpoints(cc, cpu, pc, &cflags)) {
                 break;
             }
 
@@ -1046,7 +1037,7 @@ cpu_exec_loop(CPUState *cpu, SyncClocks *sc)
                 tb_add_jump(last_tb, tb_exit, tb);
             }
 
-            cpu_loop_exec_tb(cpu, tb, pc, &last_tb, &tb_exit);
+            cpu_loop_exec_tb(cc, cpu, tb, pc, &last_tb, &tb_exit);
 
             /* Try to align the host and virtual clocks
                if the guest is in advance */
@@ -1056,30 +1047,31 @@ cpu_exec_loop(CPUState *cpu, SyncClocks *sc)
     return ret;
 }
 
-static int cpu_exec_setjmp(CPUState *cpu, SyncClocks *sc)
+static int cpu_exec_setjmp(CPUClass *cc, CPUState *cpu, SyncClocks *sc)
 {
     /* Prepare setjmp context for exception handling. */
     if (unlikely(sigsetjmp(cpu->jmp_env, 0) != 0)) {
         cpu_exec_longjmp_cleanup(cpu);
     }
 
-    return cpu_exec_loop(cpu, sc);
+    return cpu_exec_loop(cc, cpu, sc);
 }
 
 int cpu_exec(CPUState *cpu)
 {
     int ret;
     SyncClocks sc = { 0 };
+    CPUClass *cc = CPU_GET_CLASS(cpu);
 
     /* replay_interrupt may need current_cpu */
     current_cpu = cpu;
 
-    if (cpu_handle_halt(cpu)) {
+    if (cpu_handle_halt(cc, cpu)) {
         return EXCP_HALTED;
     }
 
     WITH_RCU_READ_LOCK_GUARD() {
-        cpu_exec_enter(cpu);
+        cpu_exec_enter(cc, cpu);
 
         /*
          * Calculate difference between guest clock and host clock.
@@ -1089,9 +1081,9 @@ int cpu_exec(CPUState *cpu)
          */
         init_delay_params(&sc, cpu);
 
-        ret = cpu_exec_setjmp(cpu, &sc);
+        ret = cpu_exec_setjmp(cc, cpu, &sc);
 
-        cpu_exec_exit(cpu);
+        cpu_exec_exit(cc, cpu);
     };
 
     return ret;
-- 
2.41.0




Thread overview: 34+ messages
2024-01-24 10:16 [PATCH 0/9] accel/tcg: Extract some x86-specific code Philippe Mathieu-Daudé
2024-01-24 10:16 ` [PATCH 1/9] accel/tcg: Rename tcg_ss[] -> tcg_specific_ss[] in meson Philippe Mathieu-Daudé
2024-01-24 16:45   ` Anton Johansson via
2024-01-24 22:54   ` Richard Henderson
2024-01-24 10:16 ` [PATCH 2/9] accel/tcg: Rename tcg_cpus_destroy() -> tcg_cpu_destroy() Philippe Mathieu-Daudé
2024-01-24 16:47   ` Anton Johansson via
2024-01-24 22:54   ` Richard Henderson
2024-01-24 10:16 ` [PATCH 3/9] accel/tcg: Rename tcg_cpus_exec() -> tcg_cpu_exec() Philippe Mathieu-Daudé
2024-01-24 16:48   ` Anton Johansson via
2024-01-24 22:55   ` Richard Henderson
2024-01-24 10:16 ` [PATCH 4/9] accel/tcg: Un-inline icount_exit_request() for clarity Philippe Mathieu-Daudé
2024-01-24 17:00   ` Anton Johansson via
2024-01-24 22:56   ` Richard Henderson
2024-01-24 10:16 ` Philippe Mathieu-Daudé [this message]
2024-01-24 17:15   ` [PATCH 5/9] accel/tcg: Hoist CPUClass arg to functions with external linkage Anton Johansson via
2024-01-24 22:59   ` Richard Henderson
2024-01-25  4:46     ` Philippe Mathieu-Daudé
2024-01-24 10:16 ` [PATCH 6/9] accel/tcg: Introduce TCGCPUOps::need_replay_interrupt() handler Philippe Mathieu-Daudé
2024-01-24 17:16   ` Anton Johansson via
2024-01-24 23:00   ` Richard Henderson
2024-01-25  6:01   ` Pavel Dovgalyuk
2024-01-24 10:16 ` [PATCH 7/9] target/i386: Extract x86_need_replay_interrupt() from accel/tcg/ Philippe Mathieu-Daudé
2024-01-24 17:17   ` Anton Johansson via
2024-01-24 20:02     ` Philippe Mathieu-Daudé
2024-01-24 23:01   ` Richard Henderson
2024-01-25  6:01   ` Pavel Dovgalyuk
2024-01-24 10:16 ` [PATCH 8/9] accel/tcg: Introduce TCGCPUOps::cpu_exec_halt() handler Philippe Mathieu-Daudé
2024-01-24 17:19   ` Anton Johansson via
2024-01-24 23:02   ` Richard Henderson
2024-01-24 10:16 ` [PATCH 9/9] target/i386: Extract x86_cpu_exec_halt() from accel/tcg/ Philippe Mathieu-Daudé
2024-01-24 17:19   ` Anton Johansson via
2024-01-24 23:03   ` Richard Henderson
2024-01-24 10:17 ` [PATCH 0/9] accel/tcg: Extract some x86-specific code Philippe Mathieu-Daudé
2024-01-28  3:35 ` Richard Henderson
