qemu-devel.nongnu.org archive mirror
 help / color / mirror / Atom feed
* [Qemu-devel] [RFC PATCH 0/3] Multithread TCG async_safe_work part.
@ 2015-07-10 15:19 fred.konrad
  2015-07-10 15:19 ` [Qemu-devel] [RFC PATCH 1/3] cpus: protect queued_work_* with work_mutex fred.konrad
                   ` (2 more replies)
  0 siblings, 3 replies; 9+ messages in thread
From: fred.konrad @ 2015-07-10 15:19 UTC (permalink / raw)
  To: qemu-devel, mttcg
  Cc: mark.burton, a.rigo, guillaume.delbergue, pbonzini, alex.bennee,
	fred.konrad

From: KONRAD Frederic <fred.konrad@greensocs.com>

This is the async_safe_work introduction bit of the Multithread TCG work.
Rebased on current upstream (6169b60285fe1ff730d840a49527e721bfb30899).

It can be cloned here:
http://git.greensocs.com/fkonrad/mttcg.git branch async_work

The first patch introduces a mutex to protect the existing queued_work_*
CPUState members against multiple (concurrent) access.

The second patch introduces a tcg_executing flag which will be 1 when we are
inside cpu_exec(). This is required as safe work needs to be sure that all
vCPUs are outside cpu_exec().

The last patch introduces async_safe_work. It allows adding work which will
be done asynchronously but only when all vCPUs are outside cpu_exec(). The tcg
thread will wait until no vCPUs have any pending safe work before reentering
cpu_exec().

KONRAD Frederic (3):
  cpus: protect queued_work_* with work_mutex.
  cpus: add a tcg_executing flag.
  cpus: introduce async_run_safe_work_on_cpu.

 cpu-exec.c        |   7 +++
 cpus.c            | 160 ++++++++++++++++++++++++++++++++++++++++--------------
 include/qom/cpu.h |  28 ++++++++++
 qom/cpu.c         |   2 +
 4 files changed, 157 insertions(+), 40 deletions(-)

-- 
1.9.0

^ permalink raw reply	[flat|nested] 9+ messages in thread

* [Qemu-devel] [RFC PATCH 1/3] cpus: protect queued_work_* with work_mutex.
  2015-07-10 15:19 [Qemu-devel] [RFC PATCH 0/3] Multithread TCG async_safe_work part fred.konrad
@ 2015-07-10 15:19 ` fred.konrad
  2015-07-10 15:22   ` Paolo Bonzini
  2015-07-10 15:19 ` [Qemu-devel] [RFC PATCH 2/3] cpus: add a tcg_executing flag fred.konrad
  2015-07-10 15:19 ` [Qemu-devel] [RFC PATCH 3/3] cpus: introduce async_run_safe_work_on_cpu fred.konrad
  2 siblings, 1 reply; 9+ messages in thread
From: fred.konrad @ 2015-07-10 15:19 UTC (permalink / raw)
  To: qemu-devel, mttcg
  Cc: mark.burton, a.rigo, guillaume.delbergue, pbonzini, alex.bennee,
	fred.konrad

From: KONRAD Frederic <fred.konrad@greensocs.com>

This protects queued_work_* used by async_run_on_cpu, run_on_cpu and
flush_queued_work with a new lock (work_mutex) to prevent multiple (concurrent)
access.

Signed-off-by: KONRAD Frederic <fred.konrad@greensocs.com>
---
 cpus.c            | 9 +++++++++
 include/qom/cpu.h | 3 +++
 qom/cpu.c         | 1 +
 3 files changed, 13 insertions(+)

diff --git a/cpus.c b/cpus.c
index b00a423..3d95dbb 100644
--- a/cpus.c
+++ b/cpus.c
@@ -845,6 +845,8 @@ void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
     wi.func = func;
     wi.data = data;
     wi.free = false;
+
+    qemu_mutex_lock(&cpu->work_mutex);
     if (cpu->queued_work_first == NULL) {
         cpu->queued_work_first = &wi;
     } else {
@@ -853,6 +855,7 @@ void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
     cpu->queued_work_last = &wi;
     wi.next = NULL;
     wi.done = false;
+    qemu_mutex_unlock(&cpu->work_mutex);
 
     qemu_cpu_kick(cpu);
     while (!wi.done) {
@@ -876,6 +879,8 @@ void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
     wi->func = func;
     wi->data = data;
     wi->free = true;
+
+    qemu_mutex_lock(&cpu->work_mutex);
     if (cpu->queued_work_first == NULL) {
         cpu->queued_work_first = wi;
     } else {
@@ -884,6 +889,7 @@ void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
     cpu->queued_work_last = wi;
     wi->next = NULL;
     wi->done = false;
+    qemu_mutex_unlock(&cpu->work_mutex);
 
     qemu_cpu_kick(cpu);
 }
@@ -896,6 +902,7 @@ static void flush_queued_work(CPUState *cpu)
         return;
     }
 
+    qemu_mutex_lock(&cpu->work_mutex);
     while ((wi = cpu->queued_work_first)) {
         cpu->queued_work_first = wi->next;
         wi->func(wi->data);
@@ -905,6 +912,8 @@ static void flush_queued_work(CPUState *cpu)
         }
     }
     cpu->queued_work_last = NULL;
+    qemu_mutex_unlock(&cpu->work_mutex);
+
     qemu_cond_broadcast(&qemu_work_cond);
 }
 
diff --git a/include/qom/cpu.h b/include/qom/cpu.h
index 20aabc9..efa9624 100644
--- a/include/qom/cpu.h
+++ b/include/qom/cpu.h
@@ -242,6 +242,8 @@ struct kvm_run;
  * @mem_io_pc: Host Program Counter at which the memory was accessed.
  * @mem_io_vaddr: Target virtual address at which the memory was accessed.
  * @kvm_fd: vCPU file descriptor for KVM.
+ * @work_mutex: Lock to prevent multiple access to queued_work_*.
+ * @queued_work_first: First asynchronous work pending.
  *
  * State of one CPU core or thread.
  */
@@ -262,6 +264,7 @@ struct CPUState {
     uint32_t host_tid;
     bool running;
     struct QemuCond *halt_cond;
+    QemuMutex work_mutex;
     struct qemu_work_item *queued_work_first, *queued_work_last;
     bool thread_kicked;
     bool created;
diff --git a/qom/cpu.c b/qom/cpu.c
index eb9cfec..4e12598 100644
--- a/qom/cpu.c
+++ b/qom/cpu.c
@@ -316,6 +316,7 @@ static void cpu_common_initfn(Object *obj)
     cpu->gdb_num_regs = cpu->gdb_num_g_regs = cc->gdb_num_core_regs;
     QTAILQ_INIT(&cpu->breakpoints);
     QTAILQ_INIT(&cpu->watchpoints);
+    qemu_mutex_init(&cpu->work_mutex);
 }
 
 static void cpu_common_finalize(Object *obj)
-- 
1.9.0

^ permalink raw reply related	[flat|nested] 9+ messages in thread

* [Qemu-devel] [RFC PATCH 2/3] cpus: add a tcg_executing flag.
  2015-07-10 15:19 [Qemu-devel] [RFC PATCH 0/3] Multithread TCG async_safe_work part fred.konrad
  2015-07-10 15:19 ` [Qemu-devel] [RFC PATCH 1/3] cpus: protect queued_work_* with work_mutex fred.konrad
@ 2015-07-10 15:19 ` fred.konrad
  2015-07-10 15:19 ` [Qemu-devel] [RFC PATCH 3/3] cpus: introduce async_run_safe_work_on_cpu fred.konrad
  2 siblings, 0 replies; 9+ messages in thread
From: fred.konrad @ 2015-07-10 15:19 UTC (permalink / raw)
  To: qemu-devel, mttcg
  Cc: mark.burton, a.rigo, guillaume.delbergue, pbonzini, alex.bennee,
	fred.konrad

From: KONRAD Frederic <fred.konrad@greensocs.com>

This flag indicates if the VCPU is currently executing TCG code.

Signed-off-by: KONRAD Frederic <fred.konrad@greensocs.com>

Changes V1 -> V2:
  * do both tcg_executing = 0 or 1 in cpu_exec().
---
 cpu-exec.c        | 2 ++
 include/qom/cpu.h | 3 +++
 qom/cpu.c         | 1 +
 3 files changed, 6 insertions(+)

diff --git a/cpu-exec.c b/cpu-exec.c
index 75694f3..2fdf89d 100644
--- a/cpu-exec.c
+++ b/cpu-exec.c
@@ -371,6 +371,7 @@ int cpu_exec(CPUState *cpu)
         cpu->halted = 0;
     }
 
+    cpu->tcg_executing = 1;
     current_cpu = cpu;
 
     /* As long as current_cpu is null, up to the assignment just above,
@@ -583,5 +584,6 @@ int cpu_exec(CPUState *cpu)
 
     /* fail safe : never use current_cpu outside cpu_exec() */
     current_cpu = NULL;
+    cpu->tcg_executing = 0;
     return ret;
 }
diff --git a/include/qom/cpu.h b/include/qom/cpu.h
index efa9624..a2de536 100644
--- a/include/qom/cpu.h
+++ b/include/qom/cpu.h
@@ -226,6 +226,7 @@ struct kvm_run;
  * @stopped: Indicates the CPU has been artificially stopped.
  * @tcg_exit_req: Set to force TCG to stop executing linked TBs for this
  *           CPU and return to its top level loop.
+ * @tcg_executing: This TCG thread is in cpu_exec().
  * @singlestep_enabled: Flags for single-stepping.
  * @icount_extra: Instructions until next timer event.
  * @icount_decr: Number of cycles left, with interrupt flag in high bit.
@@ -322,6 +323,8 @@ struct CPUState {
        (absolute value) offset as small as possible.  This reduces code
        size, especially for hosts without large memory offsets.  */
     volatile sig_atomic_t tcg_exit_req;
+
+    volatile int tcg_executing;
 };
 
 QTAILQ_HEAD(CPUTailQ, CPUState);
diff --git a/qom/cpu.c b/qom/cpu.c
index 4e12598..62663e5 100644
--- a/qom/cpu.c
+++ b/qom/cpu.c
@@ -249,6 +249,7 @@ static void cpu_common_reset(CPUState *cpu)
     cpu->icount_decr.u32 = 0;
     cpu->can_do_io = 0;
     cpu->exception_index = -1;
+    cpu->tcg_executing = 0;
     memset(cpu->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof(void *));
 }
 
-- 
1.9.0

^ permalink raw reply related	[flat|nested] 9+ messages in thread

* [Qemu-devel] [RFC PATCH 3/3] cpus: introduce async_run_safe_work_on_cpu.
  2015-07-10 15:19 [Qemu-devel] [RFC PATCH 0/3] Multithread TCG async_safe_work part fred.konrad
  2015-07-10 15:19 ` [Qemu-devel] [RFC PATCH 1/3] cpus: protect queued_work_* with work_mutex fred.konrad
  2015-07-10 15:19 ` [Qemu-devel] [RFC PATCH 2/3] cpus: add a tcg_executing flag fred.konrad
@ 2015-07-10 15:19 ` fred.konrad
  2015-07-10 15:23   ` Paolo Bonzini
  2 siblings, 1 reply; 9+ messages in thread
From: fred.konrad @ 2015-07-10 15:19 UTC (permalink / raw)
  To: qemu-devel, mttcg
  Cc: mark.burton, a.rigo, guillaume.delbergue, pbonzini, alex.bennee,
	fred.konrad

From: KONRAD Frederic <fred.konrad@greensocs.com>

We already had async_run_on_cpu but we need all VCPUs outside their execution
loop to execute some tb_flush/invalidate task:

async_run_safe_work_on_cpu schedules work on a VCPU, but the work starts only
when no VCPUs are executing code.
When a safe work is pending, cpu_has_work returns true, so cpu_exec returns and
the VCPUs can't enter the execution loop. cpu_thread_is_idle returns false, so
at the moment where all VCPUs are stop || stopped the safe work queue can be
flushed.

Signed-off-by: KONRAD Frederic <fred.konrad@greensocs.com>

Changes V1 -> V2:
  * Move qemu_cpu_kick_thread to avoid prototype declaration.
  * Use the work_mutex lock to protect the queued_safe_work_* structures.
---
 cpu-exec.c        |   5 ++
 cpus.c            | 151 +++++++++++++++++++++++++++++++++++++++---------------
 include/qom/cpu.h |  24 ++++++++-
 3 files changed, 139 insertions(+), 41 deletions(-)

diff --git a/cpu-exec.c b/cpu-exec.c
index 2fdf89d..7581f76 100644
--- a/cpu-exec.c
+++ b/cpu-exec.c
@@ -363,6 +363,11 @@ int cpu_exec(CPUState *cpu)
     /* This must be volatile so it is not trashed by longjmp() */
     volatile bool have_tb_lock = false;
 
+    if (async_safe_work_pending()) {
+        cpu->exit_request = 1;
+        return 0;
+    }
+
     if (cpu->halted) {
         if (!cpu_has_work(cpu)) {
             return EXCP_HALTED;
diff --git a/cpus.c b/cpus.c
index 3d95dbb..d912314 100644
--- a/cpus.c
+++ b/cpus.c
@@ -76,7 +76,7 @@ bool cpu_is_stopped(CPUState *cpu)
 
 static bool cpu_thread_is_idle(CPUState *cpu)
 {
-    if (cpu->stop || cpu->queued_work_first) {
+    if (cpu->stop || cpu->queued_work_first || cpu->queued_safe_work_first) {
         return false;
     }
     if (cpu_is_stopped(cpu)) {
@@ -833,6 +833,45 @@ void qemu_init_cpu_loop(void)
     qemu_thread_get_self(&io_thread);
 }
 
+static void qemu_cpu_kick_thread(CPUState *cpu)
+{
+#ifndef _WIN32
+    int err;
+
+    err = pthread_kill(cpu->thread->thread, SIG_IPI);
+    if (err) {
+        fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
+        exit(1);
+    }
+#else /* _WIN32 */
+    if (!qemu_cpu_is_self(cpu)) {
+        CONTEXT tcgContext;
+
+        if (SuspendThread(cpu->hThread) == (DWORD)-1) {
+            fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
+                    GetLastError());
+            exit(1);
+        }
+
+        /* On multi-core systems, we are not sure that the thread is actually
+         * suspended until we can get the context.
+         */
+        tcgContext.ContextFlags = CONTEXT_CONTROL;
+        while (GetThreadContext(cpu->hThread, &tcgContext) != 0) {
+            continue;
+        }
+
+        cpu_signal(0);
+
+        if (ResumeThread(cpu->hThread) == (DWORD)-1) {
+            fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
+                    GetLastError());
+            exit(1);
+        }
+    }
+#endif
+}
+
 void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
 {
     struct qemu_work_item wi;
@@ -894,6 +933,74 @@ void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
     qemu_cpu_kick(cpu);
 }
 
+void async_run_safe_work_on_cpu(CPUState *cpu, void (*func)(void *data),
+                                void *data)
+{
+    struct qemu_work_item *wi;
+
+    wi = g_malloc0(sizeof(struct qemu_work_item));
+    wi->func = func;
+    wi->data = data;
+    wi->free = true;
+
+    qemu_mutex_lock(&cpu->work_mutex);
+    if (cpu->queued_safe_work_first == NULL) {
+        cpu->queued_safe_work_first = wi;
+    } else {
+        cpu->queued_safe_work_last->next = wi;
+    }
+    cpu->queued_safe_work_last = wi;
+    wi->next = NULL;
+    wi->done = false;
+    qemu_mutex_unlock(&cpu->work_mutex);
+
+    CPU_FOREACH(cpu) {
+        qemu_cpu_kick_thread(cpu);
+    }
+}
+
+static void flush_queued_safe_work(CPUState *cpu)
+{
+    struct qemu_work_item *wi;
+    CPUState *other_cpu;
+
+    if (cpu->queued_safe_work_first == NULL) {
+        return;
+    }
+
+    CPU_FOREACH(other_cpu) {
+        if (other_cpu->tcg_executing != 0) {
+            return;
+        }
+    }
+
+    qemu_mutex_lock(&cpu->work_mutex);
+    while ((wi = cpu->queued_safe_work_first)) {
+        cpu->queued_safe_work_first = wi->next;
+        wi->func(wi->data);
+        wi->done = true;
+        if (wi->free) {
+            g_free(wi);
+        }
+    }
+    cpu->queued_safe_work_last = NULL;
+    qemu_mutex_unlock(&cpu->work_mutex);
+    qemu_cond_broadcast(&qemu_work_cond);
+}
+
+/* FIXME: add a counter to avoid this CPU_FOREACH() */
+bool async_safe_work_pending(void)
+{
+    CPUState *cpu;
+
+    CPU_FOREACH(cpu) {
+        if (cpu->queued_safe_work_first) {
+            return true;
+        }
+    }
+    return false;
+}
+
 static void flush_queued_work(CPUState *cpu)
 {
     struct qemu_work_item *wi;
@@ -924,6 +1031,9 @@ static void qemu_wait_io_event_common(CPUState *cpu)
         cpu->stopped = true;
         qemu_cond_signal(&qemu_pause_cond);
     }
+    qemu_mutex_unlock_iothread();
+    flush_queued_safe_work(cpu);
+    qemu_mutex_lock_iothread();
     flush_queued_work(cpu);
     cpu->thread_kicked = false;
 }
@@ -1083,45 +1193,6 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
     return NULL;
 }
 
-static void qemu_cpu_kick_thread(CPUState *cpu)
-{
-#ifndef _WIN32
-    int err;
-
-    err = pthread_kill(cpu->thread->thread, SIG_IPI);
-    if (err) {
-        fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
-        exit(1);
-    }
-#else /* _WIN32 */
-    if (!qemu_cpu_is_self(cpu)) {
-        CONTEXT tcgContext;
-
-        if (SuspendThread(cpu->hThread) == (DWORD)-1) {
-            fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
-                    GetLastError());
-            exit(1);
-        }
-
-        /* On multi-core systems, we are not sure that the thread is actually
-         * suspended until we can get the context.
-         */
-        tcgContext.ContextFlags = CONTEXT_CONTROL;
-        while (GetThreadContext(cpu->hThread, &tcgContext) != 0) {
-            continue;
-        }
-
-        cpu_signal(0);
-
-        if (ResumeThread(cpu->hThread) == (DWORD)-1) {
-            fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
-                    GetLastError());
-            exit(1);
-        }
-    }
-#endif
-}
-
 void qemu_cpu_kick(CPUState *cpu)
 {
     qemu_cond_broadcast(cpu->halt_cond);
diff --git a/include/qom/cpu.h b/include/qom/cpu.h
index a2de536..692d0d3 100644
--- a/include/qom/cpu.h
+++ b/include/qom/cpu.h
@@ -243,8 +243,9 @@ struct kvm_run;
  * @mem_io_pc: Host Program Counter at which the memory was accessed.
  * @mem_io_vaddr: Target virtual address at which the memory was accessed.
  * @kvm_fd: vCPU file descriptor for KVM.
- * @work_mutex: Lock to prevent multiple access to queued_work_*.
+ * @work_mutex: Lock to prevent multiple access to queued_* qemu_work_item.
  * @queued_work_first: First asynchronous work pending.
+ * @queued_safe_work_first: First item of safe work pending.
  *
  * State of one CPU core or thread.
  */
@@ -267,6 +268,7 @@ struct CPUState {
     struct QemuCond *halt_cond;
     QemuMutex work_mutex;
     struct qemu_work_item *queued_work_first, *queued_work_last;
+    struct qemu_work_item *queued_safe_work_first, *queued_safe_work_last;
     bool thread_kicked;
     bool created;
     bool stop;
@@ -546,6 +548,26 @@ void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data);
 void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data);
 
 /**
+ * async_run_safe_work_on_cpu:
+ * @cpu: The vCPU to run on.
+ * @func: The function to be executed.
+ * @data: Data to pass to the function.
+ *
+ * Schedules the function @func for execution on the vCPU @cpu asynchronously
+ * when all the VCPUs are outside their loop.
+ */
+void async_run_safe_work_on_cpu(CPUState *cpu, void (*func)(void *data),
+                                void *data);
+
+/**
+ * async_safe_work_pending:
+ *
+ * Check whether any safe work is pending on any VCPUs.
+ * Returns: @true if a safe work is pending, @false otherwise.
+ */
+bool async_safe_work_pending(void);
+
+/**
  * qemu_get_cpu:
  * @index: The CPUState@cpu_index value of the CPU to obtain.
  *
-- 
1.9.0

^ permalink raw reply related	[flat|nested] 9+ messages in thread

* Re: [Qemu-devel] [RFC PATCH 1/3] cpus: protect queued_work_* with work_mutex.
  2015-07-10 15:19 ` [Qemu-devel] [RFC PATCH 1/3] cpus: protect queued_work_* with work_mutex fred.konrad
@ 2015-07-10 15:22   ` Paolo Bonzini
  2015-07-10 15:32     ` Frederic Konrad
  0 siblings, 1 reply; 9+ messages in thread
From: Paolo Bonzini @ 2015-07-10 15:22 UTC (permalink / raw)
  To: fred.konrad, qemu-devel, mttcg
  Cc: mark.burton, alex.bennee, a.rigo, guillaume.delbergue



On 10/07/2015 17:19, fred.konrad@greensocs.com wrote:
> +    qemu_mutex_lock(&cpu->work_mutex);
>      while ((wi = cpu->queued_work_first)) {
>          cpu->queued_work_first = wi->next;
>          wi->func(wi->data);

Please unlock the mutex while calling the callback.

Paolo

> @@ -905,6 +912,8 @@ static void flush_queued_work(CPUState *cpu)
>          }
>      }
>      cpu->queued_work_last = NULL;
> +    qemu_mutex_unlock(&cpu->work_mutex);
> +
>      qemu_cond_broadcast(&qemu_work_cond);

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [Qemu-devel] [RFC PATCH 3/3] cpus: introduce async_run_safe_work_on_cpu.
  2015-07-10 15:19 ` [Qemu-devel] [RFC PATCH 3/3] cpus: introduce async_run_safe_work_on_cpu fred.konrad
@ 2015-07-10 15:23   ` Paolo Bonzini
  0 siblings, 0 replies; 9+ messages in thread
From: Paolo Bonzini @ 2015-07-10 15:23 UTC (permalink / raw)
  To: fred.konrad, qemu-devel, mttcg
  Cc: mark.burton, alex.bennee, a.rigo, guillaume.delbergue



On 10/07/2015 17:19, fred.konrad@greensocs.com wrote:
> +static void flush_queued_safe_work(CPUState *cpu)
> +{
> +    struct qemu_work_item *wi;
> +    CPUState *other_cpu;
> +
> +    if (cpu->queued_safe_work_first == NULL) {
> +        return;
> +    }
> +
> +    CPU_FOREACH(other_cpu) {
> +        if (other_cpu->tcg_executing != 0) {
> +            return;
> +        }
> +    }
> +
> +    qemu_mutex_lock(&cpu->work_mutex);
> +    while ((wi = cpu->queued_safe_work_first)) {
> +        cpu->queued_safe_work_first = wi->next;
> +        wi->func(wi->data);

Same here.

Paolo

> +        wi->done = true;
> +        if (wi->free) {
> +            g_free(wi);
> +        }
> +    }
> +    cpu->queued_safe_work_last = NULL;
> +    qemu_mutex_unlock(&cpu->work_mutex);
> +    qemu_cond_broadcast(&qemu_work_cond);
> +}

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [Qemu-devel] [RFC PATCH 1/3] cpus: protect queued_work_* with work_mutex.
  2015-07-10 15:22   ` Paolo Bonzini
@ 2015-07-10 15:32     ` Frederic Konrad
  2015-07-10 15:34       ` Paolo Bonzini
  0 siblings, 1 reply; 9+ messages in thread
From: Frederic Konrad @ 2015-07-10 15:32 UTC (permalink / raw)
  To: Paolo Bonzini, qemu-devel, mttcg
  Cc: mark.burton, alex.bennee, a.rigo, guillaume.delbergue

On 10/07/2015 17:22, Paolo Bonzini wrote:
>
> On 10/07/2015 17:19, fred.konrad@greensocs.com wrote:
>> +    qemu_mutex_lock(&cpu->work_mutex);
>>       while ((wi = cpu->queued_work_first)) {
>>           cpu->queued_work_first = wi->next;
>>           wi->func(wi->data);
> Please unlock the mutex while calling the callback.
>
> Paolo
>
>> @@ -905,6 +912,8 @@ static void flush_queued_work(CPUState *cpu)
>>           }
>>       }
>>       cpu->queued_work_last = NULL;
>> +    qemu_mutex_unlock(&cpu->work_mutex);
>> +
>>       qemu_cond_broadcast(&qemu_work_cond);

I think something like that can work because we don't have two
flush_queued_work at the same time on the same CPU?

static void flush_queued_work(CPUState *cpu)
{
     struct qemu_work_item *wi;

     if (cpu->queued_work_first == NULL) {
         return;
     }

     qemu_mutex_lock(&cpu->work_mutex);
     while ((wi = cpu->queued_work_first)) {
         cpu->queued_work_first = wi->next;
         qemu_mutex_unlock(&cpu->work_mutex);
         wi->func(wi->data);
         qemu_mutex_lock(&cpu->work_mutex);
         wi->done = true;
         if (wi->free) {
             g_free(wi);
         }
     }
     cpu->queued_work_last = NULL;
     qemu_mutex_unlock(&cpu->work_mutex);

     qemu_cond_broadcast(&qemu_work_cond);
}

Fred

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [Qemu-devel] [RFC PATCH 1/3] cpus: protect queued_work_* with work_mutex.
  2015-07-10 15:32     ` Frederic Konrad
@ 2015-07-10 15:34       ` Paolo Bonzini
  2015-07-10 15:43         ` Frederic Konrad
  0 siblings, 1 reply; 9+ messages in thread
From: Paolo Bonzini @ 2015-07-10 15:34 UTC (permalink / raw)
  To: Frederic Konrad, qemu-devel, mttcg
  Cc: mark.burton, alex.bennee, a.rigo, guillaume.delbergue



On 10/07/2015 17:32, Frederic Konrad wrote:
>>>
> 
> I think something like that can work because we don't have two
> flush_queued_work at the same time on the same CPU?

Yes, this works; there is only one consumer.

Holding locks within a callback can be very painful, especially if there
is a chance that the callback will take a very coarse lock such as big
QEMU lock.  It can cause AB-BA deadlocks.

Paolo

> static void flush_queued_work(CPUState *cpu)
> {
>     struct qemu_work_item *wi;
> 
>     if (cpu->queued_work_first == NULL) {
>         return;
>     }
> 
>     qemu_mutex_lock(&cpu->work_mutex);
>     while ((wi = cpu->queued_work_first)) {
>         cpu->queued_work_first = wi->next;
>         qemu_mutex_unlock(&cpu->work_mutex);
>         wi->func(wi->data);
>         qemu_mutex_lock(&cpu->work_mutex);
>         wi->done = true;
>         if (wi->free) {
>             g_free(wi);
>         }
>     }
>     cpu->queued_work_last = NULL;
>     qemu_mutex_unlock(&cpu->work_mutex);
> 
>     qemu_cond_broadcast(&qemu_work_cond);
> }

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [Qemu-devel] [RFC PATCH 1/3] cpus: protect queued_work_* with work_mutex.
  2015-07-10 15:34       ` Paolo Bonzini
@ 2015-07-10 15:43         ` Frederic Konrad
  0 siblings, 0 replies; 9+ messages in thread
From: Frederic Konrad @ 2015-07-10 15:43 UTC (permalink / raw)
  To: Paolo Bonzini, qemu-devel, mttcg
  Cc: mark.burton, alex.bennee, a.rigo, guillaume.delbergue

On 10/07/2015 17:34, Paolo Bonzini wrote:
>
> On 10/07/2015 17:32, Frederic Konrad wrote:
>> I think something like that can work because we don't have two
>> flush_queued_work at the same time on the same CPU?
> Yes, this works; there is only one consumer.
>
> Holding locks within a callback can be very painful, especially if there
> is a chance that the callback will take a very coarse lock such as big
> QEMU lock.  It can cause AB-BA deadlocks.
>
> Paolo

Ok fine I'll change that.

Fred
>
>> static void flush_queued_work(CPUState *cpu)
>> {
>>      struct qemu_work_item *wi;
>>
>>      if (cpu->queued_work_first == NULL) {
>>          return;
>>      }
>>
>>      qemu_mutex_lock(&cpu->work_mutex);
>>      while ((wi = cpu->queued_work_first)) {
>>          cpu->queued_work_first = wi->next;
>>          qemu_mutex_unlock(&cpu->work_mutex);
>>          wi->func(wi->data);
>>          qemu_mutex_lock(&cpu->work_mutex);
>>          wi->done = true;
>>          if (wi->free) {
>>              g_free(wi);
>>          }
>>      }
>>      cpu->queued_work_last = NULL;
>>      qemu_mutex_unlock(&cpu->work_mutex);
>>
>>      qemu_cond_broadcast(&qemu_work_cond);
>> }

^ permalink raw reply	[flat|nested] 9+ messages in thread

end of thread, other threads:[~2015-07-10 15:43 UTC | newest]

Thread overview: 9+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2015-07-10 15:19 [Qemu-devel] [RFC PATCH 0/3] Multithread TCG async_safe_work part fred.konrad
2015-07-10 15:19 ` [Qemu-devel] [RFC PATCH 1/3] cpus: protect queued_work_* with work_mutex fred.konrad
2015-07-10 15:22   ` Paolo Bonzini
2015-07-10 15:32     ` Frederic Konrad
2015-07-10 15:34       ` Paolo Bonzini
2015-07-10 15:43         ` Frederic Konrad
2015-07-10 15:19 ` [Qemu-devel] [RFC PATCH 2/3] cpus: add a tcg_executing flag fred.konrad
2015-07-10 15:19 ` [Qemu-devel] [RFC PATCH 3/3] cpus: introduce async_run_safe_work_on_cpu fred.konrad
2015-07-10 15:23   ` Paolo Bonzini

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).