From mboxrd@z Thu Jan 1 00:00:00 1970 Received: from eggs.gnu.org ([2001:4830:134:3::10]:37561) by lists.gnu.org with esmtp (Exim 4.71) (envelope-from ) id 1b8vtf-00006q-AS for qemu-devel@nongnu.org; Fri, 03 Jun 2016 16:40:49 -0400 Received: from Debian-exim by eggs.gnu.org with spam-scanned (Exim 4.71) (envelope-from ) id 1b8vtd-0000a2-01 for qemu-devel@nongnu.org; Fri, 03 Jun 2016 16:40:46 -0400 Received: from mail-wm0-x22f.google.com ([2a00:1450:400c:c09::22f]:37006) by eggs.gnu.org with esmtp (Exim 4.71) (envelope-from ) id 1b8vtc-0000Za-MH for qemu-devel@nongnu.org; Fri, 03 Jun 2016 16:40:44 -0400 Received: by mail-wm0-x22f.google.com with SMTP id z87so9738831wmh.0 for ; Fri, 03 Jun 2016 13:40:44 -0700 (PDT) From: =?UTF-8?q?Alex=20Benn=C3=A9e?= Date: Fri, 3 Jun 2016 21:40:23 +0100 Message-Id: <1464986428-6739-15-git-send-email-alex.bennee@linaro.org> In-Reply-To: <1464986428-6739-1-git-send-email-alex.bennee@linaro.org> References: <1464986428-6739-1-git-send-email-alex.bennee@linaro.org> MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Subject: [Qemu-devel] [RFC v3 14/19] tcg: remove global exit_request List-Id: List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , To: mttcg@listserver.greensocs.com, qemu-devel@nongnu.org, fred.konrad@greensocs.com, a.rigo@virtualopensystems.com, serge.fdrv@gmail.com, cota@braap.org, bobby.prani@gmail.com Cc: mark.burton@greensocs.com, pbonzini@redhat.com, jan.kiszka@siemens.com, rth@twiddle.net, peter.maydell@linaro.org, claudio.fontana@huawei.com, =?UTF-8?q?Alex=20Benn=C3=A9e?= , Peter Crosthwaite The only remaining use of the global exit_request flag is now to ensure we exit the run_loop when we first start to process pending work. This is just as easily done by setting the first_cpu->exit_request flag. We lightly re-factor the main vCPU thread to ensure cpu->exit_request causes us to exit the main loop and process any IO requests that might come along. 
Signed-off-by: Alex Bennée --- cpu-exec-common.c | 2 -- cpu-exec.c | 10 +++------- cpus.c | 35 +++++++++++++++++++++++------------ include/exec/exec-all.h | 2 -- 4 files changed, 26 insertions(+), 23 deletions(-) diff --git a/cpu-exec-common.c b/cpu-exec-common.c index f42d24a..1b4fb53 100644 --- a/cpu-exec-common.c +++ b/cpu-exec-common.c @@ -23,8 +23,6 @@ #include "exec/exec-all.h" #include "exec/memory-internal.h" -bool exit_request; - /* exit the current TB from a signal handler. The host registers are restored in a state compatible with the CPU emulator */ diff --git a/cpu-exec.c b/cpu-exec.c index 68e804b..1613c63 100644 --- a/cpu-exec.c +++ b/cpu-exec.c @@ -541,9 +541,8 @@ static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb, /* Something asked us to stop executing * chained TBs; just continue round the main * loop. Whatever requested the exit will also - * have set something else (eg exit_request or - * interrupt_request) which we will handle - * next time around the loop. But we need to + * have set something else (eg interrupt_request) which we + * will handle next time around the loop. But we need to * ensure the tcg_exit_req read in generated code * comes before the next read of cpu->exit_request * or cpu->interrupt_request. @@ -594,15 +593,12 @@ int cpu_exec(CPUState *cpu) current_cpu = cpu; if (cpu_handle_halt(cpu)) { + cpu->exit_request = true; return EXCP_HALTED; } rcu_read_lock(); - if (unlikely(atomic_mb_read(&exit_request))) { - cpu->exit_request = 1; - } - cc->cpu_exec_enter(cpu); /* Calculate difference between guest clock and host clock. 
diff --git a/cpus.c b/cpus.c index a139ad3..a84f02c 100644 --- a/cpus.c +++ b/cpus.c @@ -1169,6 +1169,18 @@ static int64_t tcg_get_icount_limit(void) } } +static void handle_icount_deadline(void) +{ + if (use_icount) { + int64_t deadline = + qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL); + + if (deadline == 0) { + qemu_clock_notify(QEMU_CLOCK_VIRTUAL); + } + } +} + static int tcg_cpu_exec(CPUState *cpu) { int ret; @@ -1276,11 +1288,11 @@ static void *qemu_tcg_cpu_thread_fn(void *arg) timer_mod(kick_timer, TCG_KICK_FREQ); } - /* process any pending work */ - atomic_mb_set(&exit_request, 1); - cpu = first_cpu; + /* process any pending work */ + atomic_mb_set(&cpu->exit_request, 1); + while (1) { /* Account partial waits to QEMU_CLOCK_VIRTUAL. */ qemu_account_warp_timer(); @@ -1289,7 +1301,7 @@ static void *qemu_tcg_cpu_thread_fn(void *arg) cpu = first_cpu; } - for (; cpu != NULL && !exit_request; cpu = CPU_NEXT(cpu)) { + while (cpu && !cpu->exit_request) { atomic_mb_set(&tcg_current_rr_cpu, cpu); qemu_clock_enable(QEMU_CLOCK_VIRTUAL, @@ -1303,22 +1315,21 @@ static void *qemu_tcg_cpu_thread_fn(void *arg) } } else if (cpu->stop || cpu->stopped) { break; + } else if (cpu->exit_request) { + break; } + cpu = CPU_NEXT(cpu); } /* for cpu.. */ /* Does not need atomic_mb_set because a spurious wakeup is okay. */ atomic_set(&tcg_current_rr_cpu, NULL); - /* Pairs with smp_wmb in qemu_cpu_kick. 
*/ - atomic_mb_set(&exit_request, 0); + if (cpu && cpu->exit_request) { + atomic_mb_set(&cpu->exit_request, 0); + } - if (use_icount) { - int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL); + handle_icount_deadline(); - if (deadline == 0) { - qemu_clock_notify(QEMU_CLOCK_VIRTUAL); - } - } qemu_tcg_wait_io_event(QTAILQ_FIRST(&cpus)); } diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h index 31f4c38..7919aac 100644 --- a/include/exec/exec-all.h +++ b/include/exec/exec-all.h @@ -405,6 +405,4 @@ bool memory_region_is_unassigned(MemoryRegion *mr); /* vl.c */ extern int singlestep; -extern bool exit_request; - #endif -- 2.7.4