From: "Alex Bennée" <alex.bennee@linaro.org>
To: Sergey Fedorov <sergey.fedorov@linaro.org>
Cc: qemu-devel@nongnu.org, Riku Voipio <riku.voipio@iki.fi>,
Peter Crosthwaite <crosthwaite.peter@gmail.com>,
patches@linaro.org, Paolo Bonzini <pbonzini@redhat.com>,
Sergey Fedorov <serge.fdrv@gmail.com>,
Richard Henderson <rth@twiddle.net>
Subject: Re: [Qemu-devel] [RFC 6/8] linux-user: Support CPU work queue
Date: Mon, 27 Jun 2016 10:31:28 +0100
Message-ID: <87inwvdllb.fsf@linaro.org>
In-Reply-To: <1466375313-7562-7-git-send-email-sergey.fedorov@linaro.org>
Sergey Fedorov <sergey.fedorov@linaro.org> writes:
> From: Sergey Fedorov <serge.fdrv@gmail.com>
>
> Make CPU work core functions common between system and user-mode
> emulation. User-mode does not have BQL, so flush_queued_work() is
> protected by 'exclusive_lock'.
>
> Signed-off-by: Sergey Fedorov <serge.fdrv@gmail.com>
> Signed-off-by: Sergey Fedorov <sergey.fedorov@linaro.org>
> ---
>  cpu-exec-common.c       | 83 ++++++++++++++++++++++++++++++++++++++++++++++
>  cpus.c                  | 87 ++-----------------------------------------------
>  include/exec/exec-all.h |  4 +++
>  linux-user/main.c       | 13 ++++++++
>  4 files changed, 102 insertions(+), 85 deletions(-)
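
The consolidation into cpu-exec-common.c looks sensible. Just to check
my reading of the resulting API: run_on_cpu() runs the function inline
when called from the target vCPU's own thread and otherwise blocks
until the work item has been flushed, while async_run_on_cpu() only
queues the item for the next flush_queued_work(). In other words (the
callback below is made up for illustration):

  static void do_work(CPUState *cpu, void *data)
  {
      /* runs on @cpu's thread, or inline if we already are @cpu */
  }

  /* synchronous: returns only after do_work has run on @cpu */
  run_on_cpu(cpu, do_work, NULL);

  /* asynchronous: queued, run at @cpu's next flush_queued_work() */
  async_run_on_cpu(cpu, do_work, NULL);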
>
> diff --git a/cpu-exec-common.c b/cpu-exec-common.c
> index 0cb4ae60eff9..8184e0662cbd 100644
> --- a/cpu-exec-common.c
> +++ b/cpu-exec-common.c
> @@ -77,3 +77,86 @@ void cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc)
>      }
>      siglongjmp(cpu->jmp_env, 1);
>  }
> +
> +static void queue_work_on_cpu(CPUState *cpu, struct qemu_work_item *wi)
> +{
> +    qemu_mutex_lock(&cpu->work_mutex);
> +    if (cpu->queued_work_first == NULL) {
> +        cpu->queued_work_first = wi;
> +    } else {
> +        cpu->queued_work_last->next = wi;
> +    }
> +    cpu->queued_work_last = wi;
> +    wi->next = NULL;
> +    wi->done = false;
> +    qemu_mutex_unlock(&cpu->work_mutex);
> +
> +    qemu_cpu_kick(cpu);
> +}
> +
> +void run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data)
> +{
> +    struct qemu_work_item wi;
> +
> +    if (qemu_cpu_is_self(cpu)) {
> +        func(cpu, data);
> +        return;
> +    }
> +
> +    wi.func = func;
> +    wi.data = data;
> +    wi.free = false;
> +
> +    queue_work_on_cpu(cpu, &wi);
> +    while (!atomic_mb_read(&wi.done)) {
> +        CPUState *self_cpu = current_cpu;
> +
> +        wait_cpu_work();
> +        current_cpu = self_cpu;
> +    }
> +}
> +
> +void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data)
> +{
> +    struct qemu_work_item *wi;
> +
> +    if (qemu_cpu_is_self(cpu)) {
> +        func(cpu, data);
> +        return;
> +    }
> +
> +    wi = g_malloc0(sizeof(struct qemu_work_item));
> +    wi->func = func;
> +    wi->data = data;
> +    wi->free = true;
> +
> +    queue_work_on_cpu(cpu, wi);
> +}
> +
> +void flush_queued_work(CPUState *cpu)
> +{
> +    struct qemu_work_item *wi;
> +
> +    if (cpu->queued_work_first == NULL) {
> +        return;
> +    }
> +
> +    qemu_mutex_lock(&cpu->work_mutex);
> +    while (cpu->queued_work_first != NULL) {
> +        wi = cpu->queued_work_first;
> +        cpu->queued_work_first = wi->next;
> +        if (!cpu->queued_work_first) {
> +            cpu->queued_work_last = NULL;
> +        }
> +        qemu_mutex_unlock(&cpu->work_mutex);
> +        wi->func(cpu, wi->data);
> +        qemu_mutex_lock(&cpu->work_mutex);
> +        if (wi->free) {
> +            g_free(wi);
> +        } else {
> +            atomic_mb_set(&wi->done, true);
> +        }
> +    }
> +    qemu_mutex_unlock(&cpu->work_mutex);
> +    signal_cpu_work();
> +}
> diff --git a/cpus.c b/cpus.c
> index f123eb707cc6..98f60f6f98f5 100644
> --- a/cpus.c
> +++ b/cpus.c
> @@ -910,71 +910,16 @@ void qemu_init_cpu_loop(void)
>      qemu_thread_get_self(&io_thread);
>  }
>
> -static void wait_cpu_work(void)
> +void wait_cpu_work(void)
>  {
>      qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
>  }
>
> -static void signal_cpu_work(void)
> +void signal_cpu_work(void)
>  {
>      qemu_cond_broadcast(&qemu_work_cond);
>  }
>
> -static void queue_work_on_cpu(CPUState *cpu, struct qemu_work_item *wi)
> -{
> -    qemu_mutex_lock(&cpu->work_mutex);
> -    if (cpu->queued_work_first == NULL) {
> -        cpu->queued_work_first = wi;
> -    } else {
> -        cpu->queued_work_last->next = wi;
> -    }
> -    cpu->queued_work_last = wi;
> -    wi->next = NULL;
> -    wi->done = false;
> -    qemu_mutex_unlock(&cpu->work_mutex);
> -
> -    qemu_cpu_kick(cpu);
> -}
> -
> -void run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data)
> -{
> -    struct qemu_work_item wi;
> -
> -    if (qemu_cpu_is_self(cpu)) {
> -        func(cpu, data);
> -        return;
> -    }
> -
> -    wi.func = func;
> -    wi.data = data;
> -    wi.free = false;
> -
> -    queue_work_on_cpu(cpu, &wi);
> -    while (!atomic_mb_read(&wi.done)) {
> -        CPUState *self_cpu = current_cpu;
> -
> -        wait_cpu_work();
> -        current_cpu = self_cpu;
> -    }
> -}
> -
> -void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data)
> -{
> -    struct qemu_work_item *wi;
> -
> -    if (qemu_cpu_is_self(cpu)) {
> -        func(cpu, data);
> -        return;
> -    }
> -
> -    wi = g_malloc0(sizeof(struct qemu_work_item));
> -    wi->func = func;
> -    wi->data = data;
> -    wi->free = true;
> -
> -    queue_work_on_cpu(cpu, wi);
> -}
> -
>  static void qemu_kvm_destroy_vcpu(CPUState *cpu)
>  {
>      if (kvm_destroy_vcpu(cpu) < 0) {
> @@ -987,34 +932,6 @@ static void qemu_tcg_destroy_vcpu(CPUState *cpu)
>  {
>  }
>
> -static void flush_queued_work(CPUState *cpu)
> -{
> -    struct qemu_work_item *wi;
> -
> -    if (cpu->queued_work_first == NULL) {
> -        return;
> -    }
> -
> -    qemu_mutex_lock(&cpu->work_mutex);
> -    while (cpu->queued_work_first != NULL) {
> -        wi = cpu->queued_work_first;
> -        cpu->queued_work_first = wi->next;
> -        if (!cpu->queued_work_first) {
> -            cpu->queued_work_last = NULL;
> -        }
> -        qemu_mutex_unlock(&cpu->work_mutex);
> -        wi->func(cpu, wi->data);
> -        qemu_mutex_lock(&cpu->work_mutex);
> -        if (wi->free) {
> -            g_free(wi);
> -        } else {
> -            atomic_mb_set(&wi->done, true);
> -        }
> -    }
> -    qemu_mutex_unlock(&cpu->work_mutex);
> -    signal_cpu_work();
> -}
> -
>  static void qemu_wait_io_event_common(CPUState *cpu)
>  {
>      if (cpu->stop) {
> diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
> index c1f59fa59d2c..23b4b50e0a45 100644
> --- a/include/exec/exec-all.h
> +++ b/include/exec/exec-all.h
> @@ -407,4 +407,8 @@ extern int singlestep;
>  extern CPUState *tcg_current_cpu;
>  extern bool exit_request;
>
> +void wait_cpu_work(void);
> +void signal_cpu_work(void);
> +void flush_queued_work(CPUState *cpu);
> +
Now that these are public APIs with multiple implementations, some doc
comments would be useful here.
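Something along these lines, perhaps (wording only a sketch):

  /**
   * wait_cpu_work: Block the current thread until queued work has
   * been flushed. The lock protecting the work queue must be held
   * (the BQL for system emulation, exclusive_lock for linux-user).
   */
  void wait_cpu_work(void);

  /**
   * signal_cpu_work: Wake up all threads blocked in wait_cpu_work().
   */
  void signal_cpu_work(void);

  /**
   * flush_queued_work: Run all work items queued on @cpu, then wake
   * any waiters via signal_cpu_work().
   */
  void flush_queued_work(CPUState *cpu);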
>  #endif
> diff --git a/linux-user/main.c b/linux-user/main.c
> index 0093a8008c8e..5a68651159c2 100644
> --- a/linux-user/main.c
> +++ b/linux-user/main.c
> @@ -111,6 +111,7 @@ static pthread_mutex_t cpu_list_mutex = PTHREAD_MUTEX_INITIALIZER;
>  static pthread_mutex_t exclusive_lock = PTHREAD_MUTEX_INITIALIZER;
>  static pthread_cond_t exclusive_cond = PTHREAD_COND_INITIALIZER;
>  static pthread_cond_t exclusive_resume = PTHREAD_COND_INITIALIZER;
> +static pthread_cond_t work_cond = PTHREAD_COND_INITIALIZER;
>  static bool exclusive_pending;
>  static int tcg_pending_cpus;
>
> @@ -140,6 +141,7 @@ void fork_end(int child)
>          pthread_mutex_init(&cpu_list_mutex, NULL);
>          pthread_cond_init(&exclusive_cond, NULL);
>          pthread_cond_init(&exclusive_resume, NULL);
> +        pthread_cond_init(&work_cond, NULL);
>          qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
>          gdbserver_fork(thread_cpu);
>      } else {
> @@ -148,6 +150,16 @@ void fork_end(int child)
>      }
>  }
>
> +void wait_cpu_work(void)
> +{
> +    pthread_cond_wait(&work_cond, &exclusive_lock);
> +}
> +
> +void signal_cpu_work(void)
> +{
> +    pthread_cond_broadcast(&work_cond);
> +}
> +
>  /* Wait for pending exclusive operations to complete. The exclusive lock
>     must be held. */
>  static inline void exclusive_idle(void)
> @@ -206,6 +218,7 @@ static inline void cpu_exec_end(CPUState *cpu)
>          pthread_cond_broadcast(&exclusive_cond);
>      }
>      exclusive_idle();
> +    flush_queued_work(cpu);
>      pthread_mutex_unlock(&exclusive_lock);
>  }
--
Alex Bennée