From: "Alex Bennée" <alex.bennee@linaro.org>
To: mttcg@listserver.greensocs.com, qemu-devel@nongnu.org,
fred.konrad@greensocs.com, a.rigo@virtualopensystems.com,
serge.fdrv@gmail.com, cota@braap.org, bobby.prani@gmail.com
Cc: mark.burton@greensocs.com, pbonzini@redhat.com,
jan.kiszka@siemens.com, rth@twiddle.net,
peter.maydell@linaro.org, claudio.fontana@huawei.com,
"Sergey Fedorov" <sergey.fedorov@linaro.org>,
"Alex Bennée" <alex.bennee@linaro.org>,
"Peter Crosthwaite" <crosthwaite.peter@gmail.com>,
"Riku Voipio" <riku.voipio@iki.fi>
Subject: [Qemu-devel] [PATCH v5 09/13] linux-user: Support CPU work queue
Date: Tue, 2 Aug 2016 18:27:40 +0100
Message-ID: <1470158864-17651-10-git-send-email-alex.bennee@linaro.org>
In-Reply-To: <1470158864-17651-1-git-send-email-alex.bennee@linaro.org>
From: Sergey Fedorov <serge.fdrv@gmail.com>

Make the core CPU work functions common between system and user-mode
emulation. User-mode emulation has no BQL, so process_queued_cpu_work()
is protected by 'exclusive_lock' instead.

Signed-off-by: Sergey Fedorov <serge.fdrv@gmail.com>
Signed-off-by: Sergey Fedorov <sergey.fedorov@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
---
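For reviewers joining at this patch: a minimal usage sketch of the API
being made common here (illustrative only; "target_cpu" and "do_work"
are made-up names, not part of this patch):

    /* A callback matching run_on_cpu_func, i.e. (CPUState *, void *). */
    static void do_work(CPUState *cpu, void *data)
    {
        /* Runs on the target CPU's thread, or inline if the caller is
         * already on it. */
    }

    /* Synchronous: queue a stack-allocated item, kick the CPU, then
     * sleep on qemu_work_cond until the item is marked done.  The wait
     * drops the mutex returned by qemu_get_cpu_work_mutex() (the BQL in
     * system mode, exclusive_lock in user mode), which the caller is
     * assumed to hold. */
    run_on_cpu(target_cpu, do_work, NULL);

    /* Asynchronous: fire and forget; the heap-allocated item is freed
     * by process_queued_cpu_work() after it runs. */
    async_run_on_cpu(target_cpu, do_work, NULL);
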
 cpu-exec-common.c       | 85 ++++++++++++++++++++++++++++++++++++++++++++++++
 cpus.c                  | 86 +------------------------------------------------
 include/exec/exec-all.h | 17 ++++++++++
 linux-user/main.c       |  8 +++++
 4 files changed, 111 insertions(+), 85 deletions(-)
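
One locking note, as commentary rather than anything new in this patch:
cpu->work_mutex protects only the per-CPU queue and is dropped around
each wi->func() call, so a work function may queue further work without
deadlocking. Completion is signalled via qemu_cond_broadcast() on
qemu_work_cond, and in both modes process_queued_cpu_work() is entered
with the mutex returned by qemu_get_cpu_work_mutex() held (the BQL, or
exclusive_lock after this patch), so a waiter in run_on_cpu() cannot
miss the wakeup.
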
diff --git a/cpu-exec-common.c b/cpu-exec-common.c
index 0cb4ae6..a233f01 100644
--- a/cpu-exec-common.c
+++ b/cpu-exec-common.c
@@ -77,3 +77,88 @@ void cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc)
     }
     siglongjmp(cpu->jmp_env, 1);
 }
+
+QemuCond qemu_work_cond;
+
+static void queue_work_on_cpu(CPUState *cpu, struct qemu_work_item *wi)
+{
+    qemu_mutex_lock(&cpu->work_mutex);
+    if (cpu->queued_work_first == NULL) {
+        cpu->queued_work_first = wi;
+    } else {
+        cpu->queued_work_last->next = wi;
+    }
+    cpu->queued_work_last = wi;
+    wi->next = NULL;
+    wi->done = false;
+    qemu_mutex_unlock(&cpu->work_mutex);
+
+    qemu_cpu_kick(cpu);
+}
+
+void run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data)
+{
+    struct qemu_work_item wi;
+
+    if (qemu_cpu_is_self(cpu)) {
+        func(cpu, data);
+        return;
+    }
+
+    wi.func = func;
+    wi.data = data;
+    wi.free = false;
+
+    queue_work_on_cpu(cpu, &wi);
+    while (!atomic_mb_read(&wi.done)) {
+        CPUState *self_cpu = current_cpu;
+
+        qemu_cond_wait(&qemu_work_cond, qemu_get_cpu_work_mutex());
+        current_cpu = self_cpu;
+    }
+}
+
+void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data)
+{
+    struct qemu_work_item *wi;
+
+    if (qemu_cpu_is_self(cpu)) {
+        func(cpu, data);
+        return;
+    }
+
+    wi = g_malloc0(sizeof(struct qemu_work_item));
+    wi->func = func;
+    wi->data = data;
+    wi->free = true;
+
+    queue_work_on_cpu(cpu, wi);
+}
+
+void process_queued_cpu_work(CPUState *cpu)
+{
+    struct qemu_work_item *wi;
+
+    if (cpu->queued_work_first == NULL) {
+        return;
+    }
+
+    qemu_mutex_lock(&cpu->work_mutex);
+    while (cpu->queued_work_first != NULL) {
+        wi = cpu->queued_work_first;
+        cpu->queued_work_first = wi->next;
+        if (!cpu->queued_work_first) {
+            cpu->queued_work_last = NULL;
+        }
+        qemu_mutex_unlock(&cpu->work_mutex);
+        wi->func(cpu, wi->data);
+        qemu_mutex_lock(&cpu->work_mutex);
+        if (wi->free) {
+            g_free(wi);
+        } else {
+            atomic_mb_set(&wi->done, true);
+        }
+    }
+    qemu_mutex_unlock(&cpu->work_mutex);
+    qemu_cond_broadcast(&qemu_work_cond);
+}
diff --git a/cpus.c b/cpus.c
index 51fd8c1..282d7e3 100644
--- a/cpus.c
+++ b/cpus.c
@@ -896,7 +896,6 @@ static QemuThread io_thread;
 static QemuCond qemu_cpu_cond;
 /* system init */
 static QemuCond qemu_pause_cond;
-static QemuCond qemu_work_cond;
 
 void qemu_init_cpu_loop(void)
 {
@@ -910,66 +909,11 @@ void qemu_init_cpu_loop(void)
     qemu_thread_get_self(&io_thread);
 }
 
-static QemuMutex *qemu_get_cpu_work_mutex(void)
+QemuMutex *qemu_get_cpu_work_mutex(void)
 {
     return &qemu_global_mutex;
 }
 
-static void queue_work_on_cpu(CPUState *cpu, struct qemu_work_item *wi)
-{
-    qemu_mutex_lock(&cpu->work_mutex);
-    if (cpu->queued_work_first == NULL) {
-        cpu->queued_work_first = wi;
-    } else {
-        cpu->queued_work_last->next = wi;
-    }
-    cpu->queued_work_last = wi;
-    wi->next = NULL;
-    wi->done = false;
-    qemu_mutex_unlock(&cpu->work_mutex);
-
-    qemu_cpu_kick(cpu);
-}
-
-void run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data)
-{
-    struct qemu_work_item wi;
-
-    if (qemu_cpu_is_self(cpu)) {
-        func(cpu, data);
-        return;
-    }
-
-    wi.func = func;
-    wi.data = data;
-    wi.free = false;
-
-    queue_work_on_cpu(cpu, &wi);
-    while (!atomic_mb_read(&wi.done)) {
-        CPUState *self_cpu = current_cpu;
-
-        qemu_cond_wait(&qemu_work_cond, qemu_get_cpu_work_mutex());
-        current_cpu = self_cpu;
-    }
-}
-
-void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data)
-{
-    struct qemu_work_item *wi;
-
-    if (qemu_cpu_is_self(cpu)) {
-        func(cpu, data);
-        return;
-    }
-
-    wi = g_malloc0(sizeof(struct qemu_work_item));
-    wi->func = func;
-    wi->data = data;
-    wi->free = true;
-
-    queue_work_on_cpu(cpu, wi);
-}
-
 static void qemu_kvm_destroy_vcpu(CPUState *cpu)
 {
     if (kvm_destroy_vcpu(cpu) < 0) {
@@ -982,34 +926,6 @@ static void qemu_tcg_destroy_vcpu(CPUState *cpu)
 {
 }
 
-static void process_queued_cpu_work(CPUState *cpu)
-{
-    struct qemu_work_item *wi;
-
-    if (cpu->queued_work_first == NULL) {
-        return;
-    }
-
-    qemu_mutex_lock(&cpu->work_mutex);
-    while (cpu->queued_work_first != NULL) {
-        wi = cpu->queued_work_first;
-        cpu->queued_work_first = wi->next;
-        if (!cpu->queued_work_first) {
-            cpu->queued_work_last = NULL;
-        }
-        qemu_mutex_unlock(&cpu->work_mutex);
-        wi->func(cpu, wi->data);
-        qemu_mutex_lock(&cpu->work_mutex);
-        if (wi->free) {
-            g_free(wi);
-        } else {
-            atomic_mb_set(&wi->done, true);
-        }
-    }
-    qemu_mutex_unlock(&cpu->work_mutex);
-    qemu_cond_broadcast(&qemu_work_cond);
-}
-
 static void qemu_wait_io_event_common(CPUState *cpu)
 {
     if (cpu->stop) {
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index bc0bcc5..e4dfd3c 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -409,4 +409,21 @@ extern int singlestep;
 extern CPUState *tcg_current_cpu;
 extern bool exit_request;
 
+/**
+ * qemu_work_cond - condition to wait on for CPU work item completion
+ */
+extern QemuCond qemu_work_cond;
+
+/**
+ * qemu_get_cpu_work_mutex() - get the mutex which protects CPU work execution
+ *
+ * Return: A pointer to the mutex.
+ */
+QemuMutex *qemu_get_cpu_work_mutex(void);
+/**
+ * process_queued_cpu_work() - process all items on CPU work queue
+ * @cpu: The CPU whose work queue to process.
+ */
+void process_queued_cpu_work(CPUState *cpu);
+
 #endif
diff --git a/linux-user/main.c b/linux-user/main.c
index f5ddf96..13ac77d 100644
--- a/linux-user/main.c
+++ b/linux-user/main.c
@@ -124,6 +124,7 @@ void qemu_init_cpu_loop(void)
     qemu_mutex_init(&exclusive_lock);
     qemu_cond_init(&exclusive_cond);
     qemu_cond_init(&exclusive_resume);
+    qemu_cond_init(&qemu_work_cond);
 }
 
 /* Make sure everything is in a consistent state for calling fork(). */
@@ -152,6 +153,7 @@ void fork_end(int child)
         qemu_mutex_init(&cpu_list_mutex);
         qemu_cond_init(&exclusive_cond);
         qemu_cond_init(&exclusive_resume);
+        qemu_cond_init(&qemu_work_cond);
         qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
         gdbserver_fork(thread_cpu);
     } else {
@@ -160,6 +162,11 @@ void fork_end(int child)
     }
 }
 
+QemuMutex *qemu_get_cpu_work_mutex(void)
+{
+    return &exclusive_lock;
+}
+
 /* Wait for pending exclusive operations to complete.  The exclusive lock
    must be held.  */
 static inline void exclusive_idle(void)
@@ -218,6 +225,7 @@ static inline void cpu_exec_end(CPUState *cpu)
         qemu_cond_signal(&exclusive_cond);
     }
     exclusive_idle();
+    process_queued_cpu_work(cpu);
     qemu_mutex_unlock(&exclusive_lock);
 }
 
--
2.7.4