From: "Alex Bennée" <alex.bennee@linaro.org>
To: fred.konrad@greensocs.com
Cc: mttcg@listserver.greensocs.com, mark.burton@greensocs.com,
qemu-devel@nongnu.org, a.rigo@virtualopensystems.com,
guillaume.delbergue@greensocs.com, pbonzini@redhat.com
Subject: Re: [Qemu-devel] [RFC PATCH V2 3/3] cpus: introduce async_run_safe_work_on_cpu.
Date: Mon, 13 Jul 2015 17:20:34 +0100
Message-ID: <87r3ocjagd.fsf@linaro.org>
In-Reply-To: <1436544486-31169-4-git-send-email-fred.konrad@greensocs.com>
fred.konrad@greensocs.com writes:
> From: KONRAD Frederic <fred.konrad@greensocs.com>
>
> We already have async_run_on_cpu, but we need all VCPUs to be outside their
> execution loop before running tasks such as tb_flush/invalidate:
>
> async_run_safe_work_on_cpu schedules a work item on a VCPU, but the work only
> starts once no VCPUs are executing code.
> While safe work is pending, cpu_has_work returns true, so cpu_exec returns and
> the VCPUs cannot re-enter their execution loop. cpu_thread_is_idle returns
> false, so once all VCPUs are stop || stopped the safe work queue can be
> flushed.
>
> Signed-off-by: KONRAD Frederic <fred.konrad@greensocs.com>
>
> Changes V2 -> V3:
> * Unlock the mutex while executing the callback.
> Changes V1 -> V2:
> * Move qemu_cpu_kick_thread to avoid prototype declaration.
> * Use the work_mutex lock to protect the queued_safe_work_* structures.
> ---
> cpu-exec.c | 5 ++
> cpus.c | 153 ++++++++++++++++++++++++++++++++++++++++--------------
> include/qom/cpu.h | 24 ++++++++-
> 3 files changed, 141 insertions(+), 41 deletions(-)
>
> diff --git a/cpu-exec.c b/cpu-exec.c
> index 2fdf89d..7581f76 100644
> --- a/cpu-exec.c
> +++ b/cpu-exec.c
> @@ -363,6 +363,11 @@ int cpu_exec(CPUState *cpu)
> /* This must be volatile so it is not trashed by longjmp() */
> volatile bool have_tb_lock = false;
>
> + if (async_safe_work_pending()) {
> + cpu->exit_request = 1;
> + return 0;
> + }
> +
> if (cpu->halted) {
> if (!cpu_has_work(cpu)) {
> return EXCP_HALTED;
> diff --git a/cpus.c b/cpus.c
> index eabd4b1..4321380 100644
> --- a/cpus.c
> +++ b/cpus.c
> @@ -76,7 +76,7 @@ bool cpu_is_stopped(CPUState *cpu)
>
> static bool cpu_thread_is_idle(CPUState *cpu)
> {
> - if (cpu->stop || cpu->queued_work_first) {
> + if (cpu->stop || cpu->queued_work_first || cpu->queued_safe_work_first) {
> return false;
> }
> if (cpu_is_stopped(cpu)) {
> @@ -833,6 +833,45 @@ void qemu_init_cpu_loop(void)
> qemu_thread_get_self(&io_thread);
> }
>
> +static void qemu_cpu_kick_thread(CPUState *cpu)
> +{
> +#ifndef _WIN32
> + int err;
> +
> + err = pthread_kill(cpu->thread->thread, SIG_IPI);
> + if (err) {
> + fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
> + exit(1);
> + }
> +#else /* _WIN32 */
> + if (!qemu_cpu_is_self(cpu)) {
> + CONTEXT tcgContext;
> +
> + if (SuspendThread(cpu->hThread) == (DWORD)-1) {
> + fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
> + GetLastError());
> + exit(1);
> + }
> +
> + /* On multi-core systems, we are not sure that the thread is actually
> + * suspended until we can get the context.
> + */
> + tcgContext.ContextFlags = CONTEXT_CONTROL;
> + while (GetThreadContext(cpu->hThread, &tcgContext) != 0) {
> + continue;
> + }
> +
> + cpu_signal(0);
> +
> + if (ResumeThread(cpu->hThread) == (DWORD)-1) {
> + fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
> + GetLastError());
> + exit(1);
> + }
> + }
> +#endif
I'm going to go out on a limb and guess this sort of implementation-specific
code should live in the posix/win32 utility files, that is unless GLib
abstracts enough of it for us.
> +}
> +
> void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
> {
> struct qemu_work_item wi;
> @@ -894,6 +933,76 @@ void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
> qemu_cpu_kick(cpu);
> }
>
> +void async_run_safe_work_on_cpu(CPUState *cpu, void (*func)(void *data),
> + void *data)
> +{
> + struct qemu_work_item *wi;
> +
> + wi = g_malloc0(sizeof(struct qemu_work_item));
> + wi->func = func;
> + wi->data = data;
Is there anything that prevents the user from calling the function with
the same payload for multiple CPUs?
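If not, the first callback to free the shared @data leaves every other
CPU's callback with a dangling pointer. A contrived sketch of the hazard
(do_invalidate/inval_args are made-up names):

    static void do_invalidate(void *data)
    {
        struct inval_args *args = data;
        /* ... use args ... */
        g_free(args); /* frees it for every other queued callback too */
    }

    static void invalidate_on_all_cpus(struct inval_args *args)
    {
        CPUState *cpu;

        CPU_FOREACH(cpu) {
            /* same pointer queued N times: a use-after-free waiting to
             * happen as soon as the first callback runs */
            async_run_safe_work_on_cpu(cpu, do_invalidate, args);
        }
    }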
> + wi->free = true;
> +
> + qemu_mutex_lock(&cpu->work_mutex);
> + if (cpu->queued_safe_work_first == NULL) {
> + cpu->queued_safe_work_first = wi;
> + } else {
> + cpu->queued_safe_work_last->next = wi;
> + }
> + cpu->queued_safe_work_last = wi;
I'm surprised we haven't added some helpers to the qemu_work_queue API
for all this identical boilerplate, but whatever...
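Something like the below (an untested sketch, names are mine) could be
shared between the plain and safe queues:

    static void queue_work_on_cpu_list(CPUState *cpu,
                                       struct qemu_work_item *wi,
                                       struct qemu_work_item **first,
                                       struct qemu_work_item **last)
    {
        /* append @wi to the (*first, *last) list under work_mutex */
        qemu_mutex_lock(&cpu->work_mutex);
        if (*first == NULL) {
            *first = wi;
        } else {
            (*last)->next = wi;
        }
        *last = wi;
        wi->next = NULL;
        wi->done = false;
        qemu_mutex_unlock(&cpu->work_mutex);
    }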
> + wi->next = NULL;
> + wi->done = false;
> + qemu_mutex_unlock(&cpu->work_mutex);
> +
> + CPU_FOREACH(cpu) {
> + qemu_cpu_kick_thread(cpu);
> + }
> +}
> +
> +static void flush_queued_safe_work(CPUState *cpu)
> +{
> + struct qemu_work_item *wi;
> + CPUState *other_cpu;
> +
> + if (cpu->queued_safe_work_first == NULL) {
> + return;
> + }
> +
> + CPU_FOREACH(other_cpu) {
> + if (other_cpu->tcg_executing != 0) {
> + return;
> + }
> + }
> +
> + qemu_mutex_lock(&cpu->work_mutex);
> + while ((wi = cpu->queued_safe_work_first)) {
> + cpu->queued_safe_work_first = wi->next;
> + qemu_mutex_unlock(&cpu->work_mutex);
> + wi->func(wi->data);
> + qemu_mutex_lock(&cpu->work_mutex);
> + wi->done = true;
> + if (wi->free) {
> + g_free(wi);
> + }
> + }
> + cpu->queued_safe_work_last = NULL;
> + qemu_mutex_unlock(&cpu->work_mutex);
> + qemu_cond_broadcast(&qemu_work_cond);
> +}
> +
> +/* FIXME: add a counter to avoid this CPU_FOREACH() */
> +bool async_safe_work_pending(void)
> +{
> + CPUState *cpu;
> +
> + CPU_FOREACH(cpu) {
> + if (cpu->queued_safe_work_first) {
> + return true;
> + }
> + }
> + return false;
> +}
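On the FIXME: a counter bumped when an item is queued and dropped as
each item completes would avoid the walk, along these lines (a rough,
untested sketch assuming the helpers in qemu/atomic.h):

    static int safe_work_pending_count;

    /* in async_run_safe_work_on_cpu, once the item is queued: */
    atomic_inc(&safe_work_pending_count);

    /* in flush_queued_safe_work, for each item consumed: */
    atomic_dec(&safe_work_pending_count);

    bool async_safe_work_pending(void)
    {
        return atomic_mb_read(&safe_work_pending_count) != 0;
    }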
> +
> static void flush_queued_work(CPUState *cpu)
> {
> struct qemu_work_item *wi;
> @@ -926,6 +1035,9 @@ static void qemu_wait_io_event_common(CPUState *cpu)
> cpu->stopped = true;
> qemu_cond_signal(&qemu_pause_cond);
> }
> + qemu_mutex_unlock_iothread();
> + flush_queued_safe_work(cpu);
> + qemu_mutex_lock_iothread();
> flush_queued_work(cpu);
> cpu->thread_kicked = false;
> }
> @@ -1085,45 +1197,6 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
> return NULL;
> }
>
> -static void qemu_cpu_kick_thread(CPUState *cpu)
> -{
> -#ifndef _WIN32
> - int err;
> -
> - err = pthread_kill(cpu->thread->thread, SIG_IPI);
> - if (err) {
> - fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
> - exit(1);
> - }
> -#else /* _WIN32 */
> - if (!qemu_cpu_is_self(cpu)) {
> - CONTEXT tcgContext;
> -
> - if (SuspendThread(cpu->hThread) == (DWORD)-1) {
> - fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
> - GetLastError());
> - exit(1);
> - }
> -
> - /* On multi-core systems, we are not sure that the thread is actually
> - * suspended until we can get the context.
> - */
> - tcgContext.ContextFlags = CONTEXT_CONTROL;
> - while (GetThreadContext(cpu->hThread, &tcgContext) != 0) {
> - continue;
> - }
> -
> - cpu_signal(0);
> -
> - if (ResumeThread(cpu->hThread) == (DWORD)-1) {
> - fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
> - GetLastError());
> - exit(1);
> - }
> - }
> -#endif
> -}
> -
Ahh, here it is. Could you not just have put a function declaration at
the top to avoid churning the code too much?
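i.e. just leaving a forward declaration where the definition used to be:

    static void qemu_cpu_kick_thread(CPUState *cpu);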
> void qemu_cpu_kick(CPUState *cpu)
> {
> qemu_cond_broadcast(cpu->halt_cond);
> diff --git a/include/qom/cpu.h b/include/qom/cpu.h
> index a2de536..692d0d3 100644
> --- a/include/qom/cpu.h
> +++ b/include/qom/cpu.h
> @@ -243,8 +243,9 @@ struct kvm_run;
> * @mem_io_pc: Host Program Counter at which the memory was accessed.
> * @mem_io_vaddr: Target virtual address at which the memory was accessed.
> * @kvm_fd: vCPU file descriptor for KVM.
> - * @work_mutex: Lock to prevent multiple access to queued_work_*.
> + * @work_mutex: Lock to prevent multiple access to queued_* qemu_work_item.
> * @queued_work_first: First asynchronous work pending.
> + * @queued_safe_work_first: First item of safe work pending.
> *
> * State of one CPU core or thread.
> */
> @@ -267,6 +268,7 @@ struct CPUState {
> struct QemuCond *halt_cond;
> QemuMutex work_mutex;
> struct qemu_work_item *queued_work_first, *queued_work_last;
> + struct qemu_work_item *queued_safe_work_first, *queued_safe_work_last;
> bool thread_kicked;
> bool created;
> bool stop;
> @@ -546,6 +548,26 @@ void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data);
> void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data);
>
> /**
> + * async_run_safe_work_on_cpu:
> + * @cpu: The vCPU to run on.
> + * @func: The function to be executed.
> + * @data: Data to pass to the function.
> + *
> + * Schedules the function @func for execution on the vCPU @cpu asynchronously
> + * when all the VCPUs are outside their loop.
> + */
> +void async_run_safe_work_on_cpu(CPUState *cpu, void (*func)(void *data),
> + void *data);
We should probably document that the user must allocate a separate data
structure for each CPU they send async work to.
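Something along these lines would do as an example in the doc comment
(do_flush and flush_args are made-up names):

    static void flush_on_all_cpus(target_ulong addr)
    {
        CPUState *cpu;

        CPU_FOREACH(cpu) {
            /* one payload per CPU; the callback owns and frees it */
            struct flush_args *args = g_new0(struct flush_args, 1);
            args->addr = addr;
            async_run_safe_work_on_cpu(cpu, do_flush, args);
        }
    }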
> +
> +/**
> + * async_safe_work_pending:
> + *
> + * Check whether any safe work is pending on any VCPUs.
> + * Returns: @true if a safe work is pending, @false otherwise.
> + */
> +bool async_safe_work_pending(void);
> +
> +/**
> * qemu_get_cpu:
> * @index: The CPUState@cpu_index value of the CPU to obtain.
> *
Otherwise looks pretty reasonable to me.
--
Alex Bennée