From: "Alex Bennée" <alex.bennee@linaro.org>
To: Alvise Rigo <a.rigo@virtualopensystems.com>
Cc: mttcg@listserver.greensocs.com, qemu-devel@nongnu.org,
jani.kokkonen@huawei.com, claudio.fontana@huawei.com,
tech@virtualopensystems.com, fred.konrad@greensocs.com,
pbonzini@redhat.com, rth@twiddle.net, serge.fdrv@gmail.com,
cota@braap.org, peter.maydell@linaro.org
Subject: Re: [Qemu-devel] [RFC 03/10] cpus: Introduce async_wait_run_on_cpu()
Date: Wed, 08 Jun 2016 14:54:59 +0100
Message-ID: <87d1nrhjkc.fsf@linaro.org>
In-Reply-To: <20160526163549.3276-4-a.rigo@virtualopensystems.com>
Alvise Rigo <a.rigo@virtualopensystems.com> writes:
> Introduce a new function that allows the calling VCPU to add a work item
> to another VCPU (aka the target VCPU). This new function differs from
> async_run_on_cpu() in that it makes the calling VCPU wait for the target
> VCPU to finish the work item. The mechanism uses the halt_cond to wait
> and, while waiting, to process pending work items.
Isn't this exactly what would happen if you used run_on_cpu()? That will
stall the calling vCPU's thread and wait until the work item is completed.
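For reference, this is roughly what run_on_cpu() already does after queuing
the item (simplified from the current cpus.c, so details may differ in your
tree):

    qemu_cpu_kick(cpu);
    while (!atomic_mb_read(&wi.done)) {
        CPUState *self_cpu = current_cpu;

        /* Sleep on qemu_work_cond with the BQL dropped until the target's
         * flush_queued_work() marks the item done. */
        qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
        current_cpu = self_cpu;
    }

So the calling thread already blocks until completion. If the point is that
a vCPU thread cannot sleep like that without deadlocking against its own
queued work, it would be worth spelling that out in the commit message.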
>
> Signed-off-by: Alvise Rigo <a.rigo@virtualopensystems.com>
> ---
> cpus.c | 44 ++++++++++++++++++++++++++++++++++++++++++--
> include/qom/cpu.h | 31 +++++++++++++++++++++++++++++++
> 2 files changed, 73 insertions(+), 2 deletions(-)
>
> diff --git a/cpus.c b/cpus.c
> index b9ec903..7bc96e2 100644
> --- a/cpus.c
> +++ b/cpus.c
> @@ -89,7 +89,7 @@ static bool cpu_thread_is_idle(CPUState *cpu)
> if (cpu->stop || cpu->queued_work_first) {
> return false;
> }
> - if (cpu_is_stopped(cpu)) {
> + if (cpu_is_stopped(cpu) || async_waiting_for_work(cpu)) {
> return true;
> }
> if (!cpu->halted || cpu_has_work(cpu) ||
> @@ -1012,6 +1012,7 @@ void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data)
> wi->func = func;
> wi->data = data;
> wi->free = true;
> + wi->wcpu = NULL;
>
> qemu_mutex_lock(&cpu->work_mutex);
> if (cpu->queued_work_first == NULL) {
> @@ -1027,6 +1028,40 @@ void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data)
> qemu_cpu_kick(cpu);
> }
>
> +void async_wait_run_on_cpu(CPUState *cpu, CPUState *wcpu, run_on_cpu_func func,
> + void *data)
> +{
> + struct qemu_work_item *wwi;
> +
> + assert(wcpu != cpu);
> +
> + wwi = g_malloc0(sizeof(struct qemu_work_item));
> + wwi->func = func;
> + wwi->data = data;
> + wwi->free = true;
> + wwi->wcpu = wcpu;
> +
> + /* Increase the number of pending work items */
> + atomic_inc(&wcpu->pending_work_items);
> +
> + qemu_mutex_lock(&cpu->work_mutex);
> + /* Add waiting work items at the head of the queue so that the waiting
> + * CPU is freed as soon as possible. */
> + if (cpu->queued_work_first == NULL) {
> + cpu->queued_work_last = wwi;
> + } else {
> + wwi->next = cpu->queued_work_first;
> + }
> + cpu->queued_work_first = wwi;
> + wwi->done = false;
> + qemu_mutex_unlock(&cpu->work_mutex);
> +
> + qemu_cpu_kick(cpu);
> +
> + /* In order to wait, @wcpu has to exit the CPU loop */
> + cpu_exit(wcpu);
> +}
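As I read it, the intended call pattern is something like this (hypothetical
caller, names invented for illustration; the real users only show up later
in the series):

    /* Hypothetical work item: the target CPU flushes its own TLB. */
    static void flush_work(CPUState *cpu, void *data)
    {
        tlb_flush(cpu, 1);
    }

    /* From current_cpu's thread: queue the item at the head of @other's
     * queue, then leave the exec loop via cpu_exit(); this thread then
     * sleeps on halt_cond until @other's flush_queued_work() drops
     * pending_work_items back to zero. */
    async_wait_run_on_cpu(other, current_cpu, flush_work, NULL);

Is that right?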
> +
> /*
> * Safe work interface
> *
> @@ -1120,6 +1155,10 @@ static void flush_queued_work(CPUState *cpu)
> qemu_mutex_unlock(&cpu->work_mutex);
> wi->func(cpu, wi->data);
> qemu_mutex_lock(&cpu->work_mutex);
> + if (wi->wcpu != NULL) {
> + atomic_dec(&wi->wcpu->pending_work_items);
> + qemu_cond_broadcast(wi->wcpu->halt_cond);
> + }
> if (wi->free) {
> g_free(wi);
> } else {
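The completion side above is essentially a counting latch over halt_cond. A
standalone sketch of the protocol as I understand it (plain pthreads, purely
illustrative):

    #include <pthread.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t halt_cond = PTHREAD_COND_INITIALIZER;
    static int pending_work_items;          /* ~cpu->pending_work_items */

    /* Target side, once per completed work item. */
    static void complete_one(void)
    {
        pthread_mutex_lock(&lock);
        pending_work_items--;
        pthread_cond_broadcast(&halt_cond);
        pthread_mutex_unlock(&lock);
    }

    /* Waiting side: sleep until every queued item has completed. */
    static void wait_for_all(void)
    {
        pthread_mutex_lock(&lock);
        while (pending_work_items != 0) {
            pthread_cond_wait(&halt_cond, &lock);
        }
        pthread_mutex_unlock(&lock);
    }

Note that in the sketch the decrement and broadcast happen under the same
mutex the waiter sleeps on. Here the atomic_dec/broadcast pair runs under
cpu->work_mutex while the waiter sleeps on halt_cond under the BQL, so it is
worth double-checking there is no window where the waiter samples
pending_work_items, sees it non-zero, and only starts sleeping after the
final broadcast has already fired.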
> @@ -1406,7 +1445,8 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
> while (1) {
> bool sleep = false;
>
> - if (cpu_can_run(cpu) && !async_safe_work_pending()) {
> + if (cpu_can_run(cpu) && !async_safe_work_pending()
> + && !async_waiting_for_work(cpu)) {
> int r;
>
> atomic_inc(&tcg_scheduled_cpus);
> diff --git a/include/qom/cpu.h b/include/qom/cpu.h
> index 019f06d..7be82ed 100644
> --- a/include/qom/cpu.h
> +++ b/include/qom/cpu.h
> @@ -259,6 +259,8 @@ struct qemu_work_item {
> void *data;
> int done;
> bool free;
> + /* CPU waiting for this work item to finish. If NULL, no CPU is waiting. */
> + CPUState *wcpu;
> };
>
> /**
> @@ -303,6 +305,7 @@ struct qemu_work_item {
> * @kvm_fd: vCPU file descriptor for KVM.
> * @work_mutex: Lock to prevent multiple access to queued_work_*.
> * @queued_work_first: First asynchronous work pending.
> + * @pending_work_items: Number of work items this CPU is waiting on to complete.
> *
> * State of one CPU core or thread.
> */
> @@ -337,6 +340,7 @@ struct CPUState {
>
> QemuMutex work_mutex;
> struct qemu_work_item *queued_work_first, *queued_work_last;
> + int pending_work_items;
>
> CPUAddressSpace *cpu_ases;
> int num_ases;
> @@ -398,6 +402,9 @@ struct CPUState {
> * by a stcond (see softmmu_template.h). */
> bool excl_succeeded;
>
> + /* True if some CPU requested a TLB flush for this CPU. */
> + bool pending_tlb_flush;
> +
> /* Note that this is accessed at the start of every TB via a negative
> offset from AREG0. Leave this field at the end so as to make the
> (absolute value) offset as small as possible. This reduces code
> @@ -680,6 +687,19 @@ void run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data);
> void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data);
>
> /**
> + * async_wait_run_on_cpu:
> + * @cpu: The vCPU to run on.
> + * @wcpu: The vCPU submitting the work and waiting for its completion.
> + * @func: The function to be executed.
> + * @data: Data to pass to the function.
> + *
> + * Schedules the function @func for execution on the vCPU @cpu asynchronously.
> + * The vCPU @wcpu will wait for @cpu to finish the job.
> + */
> +void async_wait_run_on_cpu(CPUState *cpu, CPUState *wcpu, run_on_cpu_func func,
> + void *data);
> +
> +/**
> * async_safe_run_on_cpu:
> * @cpu: The vCPU to run on.
> * @func: The function to be executed.
> @@ -699,6 +719,17 @@ void async_safe_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data);
> bool async_safe_work_pending(void);
>
> /**
> + * async_waiting_for_work:
> + * @cpu: The vCPU to check.
> + *
> + * Check whether @cpu is waiting for work items to complete.
> + * Returns: %true if work items are still pending, %false otherwise.
> + */
> +static inline bool async_waiting_for_work(CPUState *cpu)
> +{
> + return atomic_mb_read(&cpu->pending_work_items) != 0;
> +}
> +
> +/**
> * qemu_get_cpu:
> * @index: The CPUState@cpu_index value of the CPU to obtain.
> *
--
Alex Bennée