From: "Emilio G. Cota" <cota@braap.org>
Date: Sun, 23 Aug 2015 20:24:06 -0400
Message-Id: <1440375847-17603-38-git-send-email-cota@braap.org>
In-Reply-To: <1440375847-17603-1-git-send-email-cota@braap.org>
References: <1440375847-17603-1-git-send-email-cota@braap.org>
Subject: [Qemu-devel] [RFC 37/38] cpus: remove async_run_safe_work_on_cpu
To: qemu-devel@nongnu.org, mttcg@listserver.greensocs.com
Cc: mark.burton@greensocs.com, a.rigo@virtualopensystems.com,
    guillaume.delbergue@greensocs.com, pbonzini@redhat.com,
    alex.bennee@linaro.org, Frederic Konrad

It has no callers left.

Signed-off-by: Emilio G. Cota <cota@braap.org>
---
 cpu-exec.c        | 10 ----------
 cpus.c            | 64 +------------------------------------------------------
 include/qom/cpu.h | 24 +--------------------
 3 files changed, 2 insertions(+), 96 deletions(-)

diff --git a/cpu-exec.c b/cpu-exec.c
index 378ce52..6d7bcc0 100644
--- a/cpu-exec.c
+++ b/cpu-exec.c
@@ -499,16 +499,6 @@ int cpu_exec(CPUState *cpu)
     }
     qemu_mutex_unlock(cpu->tcg_work_lock);
 
-#ifndef CONFIG_USER_ONLY
-    /* FIXME: user-mode emulation probably needs a similar mechanism as well,
-     * for example for tb_flush.
-     */
-    if (async_safe_work_pending()) {
-        cpu->exit_request = 1;
-        return 0;
-    }
-#endif
-
     if (cpu->halted) {
         if (!cpu_has_work(cpu)) {
             return EXCP_HALTED;
diff --git a/cpus.c b/cpus.c
index 0fe6576..e1033e1 100644
--- a/cpus.c
+++ b/cpus.c
@@ -68,8 +68,6 @@
 int64_t max_delay;
 int64_t max_advance;
 
-int safe_work_pending; /* Number of safe work pending for all VCPUs. */
-
 bool cpu_is_stopped(CPUState *cpu)
 {
     return cpu->stopped || !runstate_is_running();
@@ -77,7 +75,7 @@ bool cpu_is_stopped(CPUState *cpu)
 
 static bool cpu_thread_is_idle(CPUState *cpu)
 {
-    if (cpu->stop || cpu->queued_work_first || cpu->queued_safe_work_first) {
+    if (cpu->stop || cpu->queued_work_first) {
         return false;
     }
     if (cpu_is_stopped(cpu)) {
@@ -860,63 +858,6 @@ void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
     qemu_cpu_kick(cpu);
 }
 
-void async_run_safe_work_on_cpu(CPUState *cpu, void (*func)(void *data),
-                                void *data)
-{
-    struct qemu_work_item *wi;
-
-    wi = g_malloc0(sizeof(struct qemu_work_item));
-    wi->func = func;
-    wi->data = data;
-    wi->free = true;
-
-    atomic_inc(&safe_work_pending);
-    qemu_mutex_lock(&cpu->work_mutex);
-    if (cpu->queued_safe_work_first == NULL) {
-        cpu->queued_safe_work_first = wi;
-    } else {
-        cpu->queued_safe_work_last->next = wi;
-    }
-    cpu->queued_safe_work_last = wi;
-    wi->next = NULL;
-    wi->done = false;
-    qemu_mutex_unlock(&cpu->work_mutex);
-
-    CPU_FOREACH(cpu) {
-        qemu_cpu_kick(cpu);
-    }
-}
-
-static void flush_queued_safe_work(CPUState *cpu)
-{
-    struct qemu_work_item *wi;
-
-    if (cpu->queued_safe_work_first == NULL) {
-        return;
-    }
-
-    qemu_mutex_lock(&cpu->work_mutex);
-    while ((wi = cpu->queued_safe_work_first)) {
-        cpu->queued_safe_work_first = wi->next;
-        qemu_mutex_unlock(&cpu->work_mutex);
-        wi->func(wi->data);
-        qemu_mutex_lock(&cpu->work_mutex);
-        wi->done = true;
-        if (wi->free) {
-            g_free(wi);
-        }
-        atomic_dec(&safe_work_pending);
-    }
-    cpu->queued_safe_work_last = NULL;
-    qemu_mutex_unlock(&cpu->work_mutex);
-    qemu_cond_broadcast(&qemu_work_cond);
-}
-
-bool async_safe_work_pending(void)
-{
-    return safe_work_pending != 0;
-}
-
 static void flush_queued_work(CPUState *cpu)
 {
     struct qemu_work_item *wi;
@@ -953,9 +894,6 @@ static void qemu_wait_io_event_common(CPUState *cpu)
         cpu->stopped = true;
         qemu_cond_signal(&qemu_pause_cond);
     }
-    qemu_mutex_unlock_iothread();
-    flush_queued_safe_work(cpu);
-    qemu_mutex_lock_iothread();
     flush_queued_work(cpu);
 }
 
diff --git a/include/qom/cpu.h b/include/qom/cpu.h
index aba7edb..79045b4 100644
--- a/include/qom/cpu.h
+++ b/include/qom/cpu.h
@@ -245,9 +245,8 @@ struct kvm_run;
  * @mem_io_pc: Host Program Counter at which the memory was accessed.
  * @mem_io_vaddr: Target virtual address at which the memory was accessed.
  * @kvm_fd: vCPU file descriptor for KVM.
- * @work_mutex: Lock to prevent multiple access to queued_* qemu_work_item.
+ * @work_mutex: Lock to prevent multiple access to queued_work_*.
  * @queued_work_first: First asynchronous work pending.
- * @queued_safe_work_first: First item of safe work pending.
  *
  * State of one CPU core or thread.
  */
@@ -290,7 +289,6 @@ struct CPUState {
 
     QemuMutex work_mutex;
     struct qemu_work_item *queued_work_first, *queued_work_last;
-    struct qemu_work_item *queued_safe_work_first, *queued_safe_work_last;
 
     AddressSpace *as;
     struct AddressSpaceDispatch *memory_dispatch;
@@ -571,26 +569,6 @@ void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data);
 void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data);
 
 /**
- * async_run_safe_work_on_cpu:
- * @cpu: The vCPU to run on.
- * @func: The function to be executed.
- * @data: Data to pass to the function.
- *
- * Schedules the function @func for execution on the vCPU @cpu asynchronously
- * when all the VCPUs are outside their loop.
- */
-void async_run_safe_work_on_cpu(CPUState *cpu, void (*func)(void *data),
-                                void *data);
-
-/**
- * async_safe_work_pending:
- *
- * Check whether any safe work is pending on any VCPUs.
- * Returns: @true if a safe work is pending, @false otherwise.
- */
-bool async_safe_work_pending(void);
-
-/**
  * cpu_tcg_sched_work:
  * @cpu: CPU thread to schedule the work on
  * @func: function to be called when all other CPU threads are asleep
-- 
1.9.1