qemu-devel.nongnu.org archive mirror
 help / color / mirror / Atom feed
From: "Emilio G. Cota" <cota@braap.org>
To: qemu-devel@nongnu.org
Cc: Richard Henderson <richard.henderson@linaro.org>,
	Paolo Bonzini <pbonzini@redhat.com>
Subject: [Qemu-devel] [PATCH v6 04/73] cpu: make qemu_work_cond per-cpu
Date: Tue, 29 Jan 2019 19:47:02 -0500	[thread overview]
Message-ID: <20190130004811.27372-5-cota@braap.org> (raw)
In-Reply-To: <20190130004811.27372-1-cota@braap.org>

This eliminates the need to use the BQL (Big QEMU Lock) to queue CPU work.

While at it, give the per-cpu field a generic name ("cond") since
it will soon be used for more than just queueing CPU work.

Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Emilio G. Cota <cota@braap.org>
---
 include/qom/cpu.h |  6 ++--
 cpus-common.c     | 72 ++++++++++++++++++++++++++++++++++++++---------
 cpus.c            |  2 +-
 qom/cpu.c         |  1 +
 4 files changed, 63 insertions(+), 18 deletions(-)

diff --git a/include/qom/cpu.h b/include/qom/cpu.h
index 403c48695b..46e3c164aa 100644
--- a/include/qom/cpu.h
+++ b/include/qom/cpu.h
@@ -322,6 +322,7 @@ struct qemu_work_item;
  * @mem_io_vaddr: Target virtual address at which the memory was accessed.
  * @kvm_fd: vCPU file descriptor for KVM.
  * @lock: Lock to prevent multiple access to per-CPU fields.
+ * @cond: Condition variable for per-CPU events.
  * @work_list: List of pending asynchronous work.
  * @trace_dstate_delayed: Delayed changes to trace_dstate (includes all changes
  *                        to @trace_dstate).
@@ -364,6 +365,7 @@ struct CPUState {
 
     QemuMutex lock;
     /* fields below protected by @lock */
+    QemuCond cond;
     QSIMPLEQ_HEAD(, qemu_work_item) work_list;
 
     CPUAddressSpace *cpu_ases;
@@ -771,12 +773,10 @@ bool cpu_is_stopped(CPUState *cpu);
  * @cpu: The vCPU to run on.
  * @func: The function to be executed.
  * @data: Data to pass to the function.
- * @mutex: Mutex to release while waiting for @func to run.
  *
  * Used internally in the implementation of run_on_cpu.
  */
-void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data,
-                   QemuMutex *mutex);
+void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data);
 
 /**
  * run_on_cpu:
diff --git a/cpus-common.c b/cpus-common.c
index d0e619f149..daf1531868 100644
--- a/cpus-common.c
+++ b/cpus-common.c
@@ -26,7 +26,6 @@
 static QemuMutex qemu_cpu_list_lock;
 static QemuCond exclusive_cond;
 static QemuCond exclusive_resume;
-static QemuCond qemu_work_cond;
 
 /* >= 1 if a thread is inside start_exclusive/end_exclusive.  Written
  * under qemu_cpu_list_lock, read with atomic operations.
@@ -42,7 +41,6 @@ void qemu_init_cpu_list(void)
     qemu_mutex_init(&qemu_cpu_list_lock);
     qemu_cond_init(&exclusive_cond);
     qemu_cond_init(&exclusive_resume);
-    qemu_cond_init(&qemu_work_cond);
 }
 
 void cpu_list_lock(void)
@@ -113,23 +111,37 @@ struct qemu_work_item {
     bool free, exclusive, done;
 };
 
-static void queue_work_on_cpu(CPUState *cpu, struct qemu_work_item *wi)
+/* Called with the CPU's lock held */
+static void queue_work_on_cpu_locked(CPUState *cpu, struct qemu_work_item *wi)
 {
-    qemu_mutex_lock(&cpu->lock);
     QSIMPLEQ_INSERT_TAIL(&cpu->work_list, wi, node);
     wi->done = false;
-    qemu_mutex_unlock(&cpu->lock);
 
     qemu_cpu_kick(cpu);
 }
 
-void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data,
-                   QemuMutex *mutex)
+static void queue_work_on_cpu(CPUState *cpu, struct qemu_work_item *wi)
+{
+    cpu_mutex_lock(cpu);
+    queue_work_on_cpu_locked(cpu, wi);
+    cpu_mutex_unlock(cpu);
+}
+
+void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data)
 {
     struct qemu_work_item wi;
+    bool has_bql = qemu_mutex_iothread_locked();
+
+    g_assert(no_cpu_mutex_locked());
 
     if (qemu_cpu_is_self(cpu)) {
-        func(cpu, data);
+        if (has_bql) {
+            func(cpu, data);
+        } else {
+            qemu_mutex_lock_iothread();
+            func(cpu, data);
+            qemu_mutex_unlock_iothread();
+        }
         return;
     }
 
@@ -139,13 +151,34 @@ void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data,
     wi.free = false;
     wi.exclusive = false;
 
-    queue_work_on_cpu(cpu, &wi);
+    cpu_mutex_lock(cpu);
+    queue_work_on_cpu_locked(cpu, &wi);
+
+    /*
+     * We are going to sleep on the CPU lock, so release the BQL.
+     *
+     * During the transition to per-CPU locks, we release the BQL _after_
+     * having kicked the destination CPU (from queue_work_on_cpu_locked above).
+     * This makes sure that the enqueued work will be seen by the CPU
+     * after being woken up from the kick, since the CPU sleeps on the BQL.
+     * Once we complete the transition to per-CPU locks, we will release
+     * the BQL earlier in this function.
+     */
+    if (has_bql) {
+        qemu_mutex_unlock_iothread();
+    }
+
     while (!atomic_mb_read(&wi.done)) {
         CPUState *self_cpu = current_cpu;
 
-        qemu_cond_wait(&qemu_work_cond, mutex);
+        qemu_cond_wait(&cpu->cond, &cpu->lock);
         current_cpu = self_cpu;
     }
+    cpu_mutex_unlock(cpu);
+
+    if (has_bql) {
+        qemu_mutex_lock_iothread();
+    }
 }
 
 void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data)
@@ -307,6 +340,7 @@ void async_safe_run_on_cpu(CPUState *cpu, run_on_cpu_func func,
 void process_queued_cpu_work(CPUState *cpu)
 {
     struct qemu_work_item *wi;
+    bool has_bql = qemu_mutex_iothread_locked();
 
     qemu_mutex_lock(&cpu->lock);
     if (QSIMPLEQ_EMPTY(&cpu->work_list)) {
@@ -324,13 +358,23 @@ void process_queued_cpu_work(CPUState *cpu)
              * BQL, so it goes to sleep; start_exclusive() is sleeping too, so
              * neither CPU can proceed.
              */
-            qemu_mutex_unlock_iothread();
+            if (has_bql) {
+                qemu_mutex_unlock_iothread();
+            }
             start_exclusive();
             wi->func(cpu, wi->data);
             end_exclusive();
-            qemu_mutex_lock_iothread();
+            if (has_bql) {
+                qemu_mutex_lock_iothread();
+            }
         } else {
-            wi->func(cpu, wi->data);
+            if (has_bql) {
+                wi->func(cpu, wi->data);
+            } else {
+                qemu_mutex_lock_iothread();
+                wi->func(cpu, wi->data);
+                qemu_mutex_unlock_iothread();
+            }
         }
         qemu_mutex_lock(&cpu->lock);
         if (wi->free) {
@@ -340,5 +384,5 @@ void process_queued_cpu_work(CPUState *cpu)
         }
     }
     qemu_mutex_unlock(&cpu->lock);
-    qemu_cond_broadcast(&qemu_work_cond);
+    qemu_cond_broadcast(&cpu->cond);
 }
diff --git a/cpus.c b/cpus.c
index 187aed2533..42ea8cfbb5 100644
--- a/cpus.c
+++ b/cpus.c
@@ -1236,7 +1236,7 @@ void qemu_init_cpu_loop(void)
 
 void run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data)
 {
-    do_run_on_cpu(cpu, func, data, &qemu_global_mutex);
+    do_run_on_cpu(cpu, func, data);
 }
 
 static void qemu_kvm_destroy_vcpu(CPUState *cpu)
diff --git a/qom/cpu.c b/qom/cpu.c
index a2964e53c6..be8393e589 100644
--- a/qom/cpu.c
+++ b/qom/cpu.c
@@ -372,6 +372,7 @@ static void cpu_common_initfn(Object *obj)
     cpu->nr_threads = 1;
 
     qemu_mutex_init(&cpu->lock);
+    qemu_cond_init(&cpu->cond);
     QSIMPLEQ_INIT(&cpu->work_list);
     QTAILQ_INIT(&cpu->breakpoints);
     QTAILQ_INIT(&cpu->watchpoints);
-- 
2.17.1

  parent reply	other threads:[~2019-01-30  0:48 UTC|newest]

Thread overview: 109+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2019-01-30  0:46 [Qemu-devel] [PATCH v6 00/73] per-CPU locks Emilio G. Cota
2019-01-30  0:46 ` [Qemu-devel] [PATCH v6 01/73] cpu: convert queued work to a QSIMPLEQ Emilio G. Cota
2019-01-30  0:47 ` [Qemu-devel] [PATCH v6 02/73] cpu: rename cpu->work_mutex to cpu->lock Emilio G. Cota
2019-01-30  0:47 ` [Qemu-devel] [PATCH v6 03/73] cpu: introduce cpu_mutex_lock/unlock Emilio G. Cota
2019-02-06 17:21   ` Alex Bennée
2019-02-06 20:02     ` Emilio G. Cota
2019-01-30  0:47 ` Emilio G. Cota [this message]
2019-01-30  0:47 ` [Qemu-devel] [PATCH v6 05/73] cpu: move run_on_cpu to cpus-common Emilio G. Cota
2019-02-06 17:22   ` Alex Bennée
2019-01-30  0:47 ` [Qemu-devel] [PATCH v6 06/73] cpu: introduce process_queued_cpu_work_locked Emilio G. Cota
2019-01-30  0:47 ` [Qemu-devel] [PATCH v6 07/73] cpu: make per-CPU locks an alias of the BQL in TCG rr mode Emilio G. Cota
2019-02-07 12:40   ` Alex Bennée
2019-02-20 16:12   ` Richard Henderson
2019-01-30  0:47 ` [Qemu-devel] [PATCH v6 08/73] tcg-runtime: define helper_cpu_halted_set Emilio G. Cota
2019-02-07 12:40   ` Alex Bennée
2019-01-30  0:47 ` [Qemu-devel] [PATCH v6 09/73] ppc: convert to helper_cpu_halted_set Emilio G. Cota
2019-01-30  0:47 ` [Qemu-devel] [PATCH v6 10/73] cris: " Emilio G. Cota
2019-01-30  0:47 ` [Qemu-devel] [PATCH v6 11/73] hppa: " Emilio G. Cota
2019-01-30  0:47 ` [Qemu-devel] [PATCH v6 12/73] m68k: " Emilio G. Cota
2019-01-30  0:47 ` [Qemu-devel] [PATCH v6 13/73] alpha: " Emilio G. Cota
2019-01-30  0:47 ` [Qemu-devel] [PATCH v6 14/73] microblaze: " Emilio G. Cota
2019-01-30  0:47 ` [Qemu-devel] [PATCH v6 15/73] cpu: define cpu_halted helpers Emilio G. Cota
2019-01-30  0:47 ` [Qemu-devel] [PATCH v6 16/73] tcg-runtime: convert to cpu_halted_set Emilio G. Cota
2019-01-30  0:47 ` [Qemu-devel] [PATCH v6 17/73] arm: convert to cpu_halted Emilio G. Cota
2019-01-30  0:47 ` [Qemu-devel] [PATCH v6 18/73] ppc: " Emilio G. Cota
2019-01-30  0:47 ` [Qemu-devel] [PATCH v6 19/73] sh4: " Emilio G. Cota
2019-01-30  0:47 ` [Qemu-devel] [PATCH v6 20/73] i386: " Emilio G. Cota
2019-01-30  0:47 ` [Qemu-devel] [PATCH v6 21/73] lm32: " Emilio G. Cota
2019-01-30  0:47 ` [Qemu-devel] [PATCH v6 22/73] m68k: " Emilio G. Cota
2019-01-30  0:47 ` [Qemu-devel] [PATCH v6 23/73] mips: " Emilio G. Cota
2019-01-30  0:47 ` [Qemu-devel] [PATCH v6 24/73] riscv: " Emilio G. Cota
2019-02-06 23:50   ` Alistair Francis
2019-01-30  0:47 ` [Qemu-devel] [PATCH v6 25/73] s390x: " Emilio G. Cota
2019-01-30 10:30   ` Cornelia Huck
2019-01-30  0:47 ` [Qemu-devel] [PATCH v6 26/73] sparc: " Emilio G. Cota
2019-01-30  0:47 ` [Qemu-devel] [PATCH v6 27/73] xtensa: " Emilio G. Cota
2019-01-30  0:47 ` [Qemu-devel] [PATCH v6 28/73] gdbstub: " Emilio G. Cota
2019-01-30  0:47 ` [Qemu-devel] [PATCH v6 29/73] openrisc: " Emilio G. Cota
2019-01-30  0:47 ` [Qemu-devel] [PATCH v6 30/73] cpu-exec: " Emilio G. Cota
2019-02-07 12:44   ` Alex Bennée
2019-01-30  0:47 ` [Qemu-devel] [PATCH v6 31/73] cpu: " Emilio G. Cota
2019-02-07 20:39   ` Alex Bennée
2019-02-20 16:21   ` Richard Henderson
2019-01-30  0:47 ` [Qemu-devel] [PATCH v6 32/73] cpu: define cpu_interrupt_request helpers Emilio G. Cota
2019-01-30  0:47 ` [Qemu-devel] [PATCH v6 33/73] ppc: use cpu_reset_interrupt Emilio G. Cota
2019-01-30  0:47 ` [Qemu-devel] [PATCH v6 34/73] exec: " Emilio G. Cota
2019-01-30  0:47 ` [Qemu-devel] [PATCH v6 35/73] i386: " Emilio G. Cota
2019-01-30  0:47 ` [Qemu-devel] [PATCH v6 36/73] s390x: " Emilio G. Cota
2019-01-30  0:47 ` [Qemu-devel] [PATCH v6 37/73] openrisc: " Emilio G. Cota
2019-01-30  0:47 ` [Qemu-devel] [PATCH v6 38/73] arm: convert to cpu_interrupt_request Emilio G. Cota
2019-02-07 20:55   ` Alex Bennée
2019-01-30  0:47 ` [Qemu-devel] [PATCH v6 39/73] i386: " Emilio G. Cota
2019-02-08 11:00   ` Alex Bennée
2019-03-02 22:48     ` Emilio G. Cota
2019-01-30  0:47 ` [Qemu-devel] [PATCH v6 40/73] i386/kvm: " Emilio G. Cota
2019-02-08 11:15   ` Alex Bennée
2019-03-02 23:14     ` Emilio G. Cota
2019-01-30  0:47 ` [Qemu-devel] [PATCH v6 41/73] i386/hax-all: " Emilio G. Cota
2019-02-08 11:20   ` Alex Bennée
2019-01-30  0:47 ` [Qemu-devel] [PATCH v6 42/73] i386/whpx-all: " Emilio G. Cota
2019-01-30  0:47 ` [Qemu-devel] [PATCH v6 43/73] i386/hvf: convert to cpu_request_interrupt Emilio G. Cota
2019-01-30  0:47 ` [Qemu-devel] [PATCH v6 44/73] ppc: convert to cpu_interrupt_request Emilio G. Cota
2019-01-30  0:47 ` [Qemu-devel] [PATCH v6 45/73] sh4: " Emilio G. Cota
2019-01-30  0:47 ` [Qemu-devel] [PATCH v6 46/73] cris: " Emilio G. Cota
2019-01-30  0:47 ` [Qemu-devel] [PATCH v6 47/73] hppa: " Emilio G. Cota
2019-01-30  0:47 ` [Qemu-devel] [PATCH v6 48/73] lm32: " Emilio G. Cota
2019-01-30  0:47 ` [Qemu-devel] [PATCH v6 49/73] m68k: " Emilio G. Cota
2019-01-30  0:47 ` [Qemu-devel] [PATCH v6 50/73] mips: " Emilio G. Cota
2019-01-30  0:47 ` [Qemu-devel] [PATCH v6 51/73] nios: " Emilio G. Cota
2019-01-30  0:47 ` [Qemu-devel] [PATCH v6 52/73] s390x: " Emilio G. Cota
2019-01-30  0:47 ` [Qemu-devel] [PATCH v6 53/73] alpha: " Emilio G. Cota
2019-01-30  0:47 ` [Qemu-devel] [PATCH v6 54/73] moxie: " Emilio G. Cota
2019-01-30  0:47 ` [Qemu-devel] [PATCH v6 55/73] sparc: " Emilio G. Cota
2019-01-30  0:47 ` [Qemu-devel] [PATCH v6 56/73] openrisc: " Emilio G. Cota
2019-01-30  0:47 ` [Qemu-devel] [PATCH v6 57/73] unicore32: " Emilio G. Cota
2019-01-30  0:47 ` [Qemu-devel] [PATCH v6 58/73] microblaze: " Emilio G. Cota
2019-01-30  0:47 ` [Qemu-devel] [PATCH v6 59/73] accel/tcg: " Emilio G. Cota
2019-01-30  0:47 ` [Qemu-devel] [PATCH v6 60/73] cpu: convert to interrupt_request Emilio G. Cota
2019-02-08 11:21   ` Alex Bennée
2019-02-20 16:55   ` Richard Henderson
2019-01-30  0:47 ` [Qemu-devel] [PATCH v6 61/73] cpu: call .cpu_has_work with the CPU lock held Emilio G. Cota
2019-02-08 11:22   ` Alex Bennée
2019-01-30  0:48 ` [Qemu-devel] [PATCH v6 62/73] cpu: introduce cpu_has_work_with_iothread_lock Emilio G. Cota
2019-02-08 11:33   ` Alex Bennée
2019-03-03 19:52     ` Emilio G. Cota
2019-01-30  0:48 ` [Qemu-devel] [PATCH v6 63/73] ppc: convert to cpu_has_work_with_iothread_lock Emilio G. Cota
2019-01-30  0:48 ` [Qemu-devel] [PATCH v6 64/73] mips: " Emilio G. Cota
2019-01-30  0:48 ` [Qemu-devel] [PATCH v6 65/73] s390x: " Emilio G. Cota
2019-01-30 10:35   ` Cornelia Huck
2019-01-30  0:48 ` [Qemu-devel] [PATCH v6 66/73] riscv: " Emilio G. Cota
2019-02-06 23:51   ` Alistair Francis
2019-01-30  0:48 ` [Qemu-devel] [PATCH v6 67/73] sparc: " Emilio G. Cota
2019-01-30  0:48 ` [Qemu-devel] [PATCH v6 68/73] xtensa: " Emilio G. Cota
2019-01-30  0:48 ` [Qemu-devel] [PATCH v6 69/73] cpu: rename all_cpu_threads_idle to qemu_tcg_rr_all_cpu_threads_idle Emilio G. Cota
2019-02-08 11:34   ` Alex Bennée
2019-02-20 17:01   ` Richard Henderson
2019-01-30  0:48 ` [Qemu-devel] [PATCH v6 70/73] cpu: protect CPU state with cpu->lock instead of the BQL Emilio G. Cota
2019-02-08 14:33   ` Alex Bennée
2019-02-20 17:25   ` Richard Henderson
2019-01-30  0:48 ` [Qemu-devel] [PATCH v6 71/73] cpus-common: release BQL earlier in run_on_cpu Emilio G. Cota
2019-02-08 14:34   ` Alex Bennée
2019-01-30  0:48 ` [Qemu-devel] [PATCH v6 72/73] cpu: add async_run_on_cpu_no_bql Emilio G. Cota
2019-02-08 14:58   ` Alex Bennée
2019-03-03 20:47     ` Emilio G. Cota
2019-01-30  0:48 ` [Qemu-devel] [PATCH v6 73/73] cputlb: queue async flush jobs without the BQL Emilio G. Cota
2019-02-08 15:58   ` Alex Bennée
2019-02-20 17:18   ` Richard Henderson
2019-02-20 17:27 ` [Qemu-devel] [PATCH v6 00/73] per-CPU locks Richard Henderson
2019-02-20 22:50   ` Emilio G. Cota

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20190130004811.27372-5-cota@braap.org \
    --to=cota@braap.org \
    --cc=pbonzini@redhat.com \
    --cc=qemu-devel@nongnu.org \
    --cc=richard.henderson@linaro.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).