* [Qemu-devel] [PATCH] rcu: actually register threads that have RCU read-side critical sections
@ 2015-07-22 14:18 Paolo Bonzini
2015-07-23 2:56 ` Wen Congyang
2015-07-23 10:30 ` Christian Borntraeger
0 siblings, 2 replies; 13+ messages in thread
From: Paolo Bonzini @ 2015-07-22 14:18 UTC (permalink / raw)
To: qemu-devel
Otherwise, grace periods are detected too early!
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
cpus.c | 6 ++++++
iothread.c | 5 +++++
migration/migration.c | 4 ++++
tests/test-rcu-list.c | 4 ++++
util/rcu.c | 2 ++
5 files changed, 21 insertions(+)
diff --git a/cpus.c b/cpus.c
index b00a423..a822ce3 100644
--- a/cpus.c
+++ b/cpus.c
@@ -954,6 +954,8 @@ static void *qemu_kvm_cpu_thread_fn(void *arg)
CPUState *cpu = arg;
int r;
+ rcu_register_thread();
+
qemu_mutex_lock_iothread();
qemu_thread_get_self(cpu->thread);
cpu->thread_id = qemu_get_thread_id();
@@ -995,6 +997,8 @@ static void *qemu_dummy_cpu_thread_fn(void *arg)
sigset_t waitset;
int r;
+ rcu_register_thread();
+
qemu_mutex_lock_iothread();
qemu_thread_get_self(cpu->thread);
cpu->thread_id = qemu_get_thread_id();
@@ -1034,6 +1038,8 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
{
CPUState *cpu = arg;
+ rcu_register_thread();
+
qemu_mutex_lock_iothread();
qemu_tcg_init_cpu_signals();
qemu_thread_get_self(cpu->thread);
diff --git a/iothread.c b/iothread.c
index 6d2a33f..da6ce7b 100644
--- a/iothread.c
+++ b/iothread.c
@@ -18,6 +18,7 @@
#include "sysemu/iothread.h"
#include "qmp-commands.h"
#include "qemu/error-report.h"
+#include "qemu/rcu.h"
typedef ObjectClass IOThreadClass;
@@ -31,6 +32,8 @@ static void *iothread_run(void *opaque)
IOThread *iothread = opaque;
bool blocking;
+ rcu_register_thread();
+
qemu_mutex_lock(&iothread->init_done_lock);
iothread->thread_id = qemu_get_thread_id();
qemu_cond_signal(&iothread->init_done_cond);
@@ -45,6 +48,8 @@ static void *iothread_run(void *opaque)
}
aio_context_release(iothread->ctx);
}
+
+ rcu_unregister_thread();
return NULL;
}
diff --git a/migration/migration.c b/migration/migration.c
index 86ca099..fd4f99b 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -22,6 +22,7 @@
#include "block/block.h"
#include "qapi/qmp/qerror.h"
#include "qemu/sockets.h"
+#include "qemu/rcu.h"
#include "migration/block.h"
#include "qemu/thread.h"
#include "qmp-commands.h"
@@ -917,6 +918,8 @@ static void *migration_thread(void *opaque)
int64_t start_time = initial_time;
bool old_vm_running = false;
+ rcu_register_thread();
+
qemu_savevm_state_header(s->file);
qemu_savevm_state_begin(s->file, &s->params);
@@ -1016,6 +1019,7 @@ static void *migration_thread(void *opaque)
qemu_bh_schedule(s->cleanup_bh);
qemu_mutex_unlock_iothread();
+ rcu_unregister_thread();
return NULL;
}
diff --git a/tests/test-rcu-list.c b/tests/test-rcu-list.c
index 4c5f62e..daa8bf4 100644
--- a/tests/test-rcu-list.c
+++ b/tests/test-rcu-list.c
@@ -108,6 +108,8 @@ static void *rcu_q_reader(void *arg)
long long n_reads_local = 0;
struct list_element *el;
+ rcu_register_thread();
+
*(struct rcu_reader_data **)arg = &rcu_reader;
atomic_inc(&nthreadsrunning);
while (goflag == GOFLAG_INIT) {
@@ -129,6 +131,8 @@ static void *rcu_q_reader(void *arg)
qemu_mutex_lock(&counts_mutex);
n_reads += n_reads_local;
qemu_mutex_unlock(&counts_mutex);
+
+ rcu_unregister_thread();
return NULL;
}
diff --git a/util/rcu.c b/util/rcu.c
index 7270151..cdcad67 100644
--- a/util/rcu.c
+++ b/util/rcu.c
@@ -216,6 +216,8 @@ static void *call_rcu_thread(void *opaque)
{
struct rcu_head *node;
+ rcu_register_thread();
+
for (;;) {
int tries = 0;
int n = atomic_read(&rcu_call_count);
--
2.4.3
^ permalink raw reply related [flat|nested] 13+ messages in thread* Re: [Qemu-devel] [PATCH] rcu: actually register threads that have RCU read-side critical sections
2015-07-22 14:18 [Qemu-devel] [PATCH] rcu: actually register threads that have RCU read-side critical sections Paolo Bonzini
@ 2015-07-23 2:56 ` Wen Congyang
2015-07-23 5:35 ` Paolo Bonzini
2015-07-23 10:30 ` Christian Borntraeger
1 sibling, 1 reply; 13+ messages in thread
From: Wen Congyang @ 2015-07-23 2:56 UTC (permalink / raw)
To: Paolo Bonzini, qemu-devel
On 07/22/2015 10:18 PM, Paolo Bonzini wrote:
> Otherwise, grace periods are detected too early!
We always use qemu_thread_create() in qemu. So I think we can do it like this:
wrapped_fn()
{
rcu_register_thread();
call thread_fn() here
rcu_unregister_thread();
}
So we will never forget to call rcu_register_thread() when creating a new thread.
Thanks
Wen Congyang
>
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> ---
> cpus.c | 6 ++++++
> iothread.c | 5 +++++
> migration/migration.c | 4 ++++
> tests/test-rcu-list.c | 4 ++++
> util/rcu.c | 2 ++
> 5 files changed, 21 insertions(+)
>
> diff --git a/cpus.c b/cpus.c
> index b00a423..a822ce3 100644
> --- a/cpus.c
> +++ b/cpus.c
> @@ -954,6 +954,8 @@ static void *qemu_kvm_cpu_thread_fn(void *arg)
> CPUState *cpu = arg;
> int r;
>
> + rcu_register_thread();
> +
> qemu_mutex_lock_iothread();
> qemu_thread_get_self(cpu->thread);
> cpu->thread_id = qemu_get_thread_id();
> @@ -995,6 +997,8 @@ static void *qemu_dummy_cpu_thread_fn(void *arg)
> sigset_t waitset;
> int r;
>
> + rcu_register_thread();
> +
> qemu_mutex_lock_iothread();
> qemu_thread_get_self(cpu->thread);
> cpu->thread_id = qemu_get_thread_id();
> @@ -1034,6 +1038,8 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
> {
> CPUState *cpu = arg;
>
> + rcu_register_thread();
> +
> qemu_mutex_lock_iothread();
> qemu_tcg_init_cpu_signals();
> qemu_thread_get_self(cpu->thread);
> diff --git a/iothread.c b/iothread.c
> index 6d2a33f..da6ce7b 100644
> --- a/iothread.c
> +++ b/iothread.c
> @@ -18,6 +18,7 @@
> #include "sysemu/iothread.h"
> #include "qmp-commands.h"
> #include "qemu/error-report.h"
> +#include "qemu/rcu.h"
>
> typedef ObjectClass IOThreadClass;
>
> @@ -31,6 +32,8 @@ static void *iothread_run(void *opaque)
> IOThread *iothread = opaque;
> bool blocking;
>
> + rcu_register_thread();
> +
> qemu_mutex_lock(&iothread->init_done_lock);
> iothread->thread_id = qemu_get_thread_id();
> qemu_cond_signal(&iothread->init_done_cond);
> @@ -45,6 +48,8 @@ static void *iothread_run(void *opaque)
> }
> aio_context_release(iothread->ctx);
> }
> +
> + rcu_unregister_thread();
> return NULL;
> }
>
> diff --git a/migration/migration.c b/migration/migration.c
> index 86ca099..fd4f99b 100644
> --- a/migration/migration.c
> +++ b/migration/migration.c
> @@ -22,6 +22,7 @@
> #include "block/block.h"
> #include "qapi/qmp/qerror.h"
> #include "qemu/sockets.h"
> +#include "qemu/rcu.h"
> #include "migration/block.h"
> #include "qemu/thread.h"
> #include "qmp-commands.h"
> @@ -917,6 +918,8 @@ static void *migration_thread(void *opaque)
> int64_t start_time = initial_time;
> bool old_vm_running = false;
>
> + rcu_register_thread();
> +
> qemu_savevm_state_header(s->file);
> qemu_savevm_state_begin(s->file, &s->params);
>
> @@ -1016,6 +1019,7 @@ static void *migration_thread(void *opaque)
> qemu_bh_schedule(s->cleanup_bh);
> qemu_mutex_unlock_iothread();
>
> + rcu_unregister_thread();
> return NULL;
> }
>
> diff --git a/tests/test-rcu-list.c b/tests/test-rcu-list.c
> index 4c5f62e..daa8bf4 100644
> --- a/tests/test-rcu-list.c
> +++ b/tests/test-rcu-list.c
> @@ -108,6 +108,8 @@ static void *rcu_q_reader(void *arg)
> long long n_reads_local = 0;
> struct list_element *el;
>
> + rcu_register_thread();
> +
> *(struct rcu_reader_data **)arg = &rcu_reader;
> atomic_inc(&nthreadsrunning);
> while (goflag == GOFLAG_INIT) {
> @@ -129,6 +131,8 @@ static void *rcu_q_reader(void *arg)
> qemu_mutex_lock(&counts_mutex);
> n_reads += n_reads_local;
> qemu_mutex_unlock(&counts_mutex);
> +
> + rcu_unregister_thread();
> return NULL;
> }
>
> diff --git a/util/rcu.c b/util/rcu.c
> index 7270151..cdcad67 100644
> --- a/util/rcu.c
> +++ b/util/rcu.c
> @@ -216,6 +216,8 @@ static void *call_rcu_thread(void *opaque)
> {
> struct rcu_head *node;
>
> + rcu_register_thread();
> +
> for (;;) {
> int tries = 0;
> int n = atomic_read(&rcu_call_count);
>
^ permalink raw reply [flat|nested] 13+ messages in thread* Re: [Qemu-devel] [PATCH] rcu: actually register threads that have RCU read-side critical sections
2015-07-23 2:56 ` Wen Congyang
@ 2015-07-23 5:35 ` Paolo Bonzini
0 siblings, 0 replies; 13+ messages in thread
From: Paolo Bonzini @ 2015-07-23 5:35 UTC (permalink / raw)
To: Wen Congyang, qemu-devel
On 23/07/2015 04:56, Wen Congyang wrote:
>> > Otherwise, grace periods are detected too early!
> We always use qemu_thread_create() in qemu. So I think we can do it like this:
> wrapped_fn()
> {
> rcu_register_thread();
> call thread_fn() here
> rcu_unregister_thread();
> }
>
> So we will never forget to call rcu_register_thread() when creating a new thread.
That's a good idea. Would you like to propose a patch for 2.5? Then we
can also use it to run the thread_atexit notifiers and avoid the bug
that Peter reported for OS X.
Paolo
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [Qemu-devel] [PATCH] rcu: actually register threads that have RCU read-side critical sections
2015-07-22 14:18 [Qemu-devel] [PATCH] rcu: actually register threads that have RCU read-side critical sections Paolo Bonzini
2015-07-23 2:56 ` Wen Congyang
@ 2015-07-23 10:30 ` Christian Borntraeger
2015-07-23 10:42 ` Paolo Bonzini
1 sibling, 1 reply; 13+ messages in thread
From: Christian Borntraeger @ 2015-07-23 10:30 UTC (permalink / raw)
To: Paolo Bonzini, qemu-devel
Am 22.07.2015 um 16:18 schrieb Paolo Bonzini:
> Otherwise, grace periods are detected too early!
I guess this or Wen's proposal is still necessary for 2.4?
>
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> ---
> cpus.c | 6 ++++++
> iothread.c | 5 +++++
> migration/migration.c | 4 ++++
> tests/test-rcu-list.c | 4 ++++
> util/rcu.c | 2 ++
> 5 files changed, 21 insertions(+)
>
> diff --git a/cpus.c b/cpus.c
> index b00a423..a822ce3 100644
> --- a/cpus.c
> +++ b/cpus.c
> @@ -954,6 +954,8 @@ static void *qemu_kvm_cpu_thread_fn(void *arg)
> CPUState *cpu = arg;
> int r;
>
> + rcu_register_thread();
> +
> qemu_mutex_lock_iothread();
> qemu_thread_get_self(cpu->thread);
> cpu->thread_id = qemu_get_thread_id();
> @@ -995,6 +997,8 @@ static void *qemu_dummy_cpu_thread_fn(void *arg)
> sigset_t waitset;
> int r;
>
> + rcu_register_thread();
> +
> qemu_mutex_lock_iothread();
> qemu_thread_get_self(cpu->thread);
> cpu->thread_id = qemu_get_thread_id();
> @@ -1034,6 +1038,8 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
> {
> CPUState *cpu = arg;
>
> + rcu_register_thread();
> +
> qemu_mutex_lock_iothread();
> qemu_tcg_init_cpu_signals();
> qemu_thread_get_self(cpu->thread);
> diff --git a/iothread.c b/iothread.c
> index 6d2a33f..da6ce7b 100644
> --- a/iothread.c
> +++ b/iothread.c
> @@ -18,6 +18,7 @@
> #include "sysemu/iothread.h"
> #include "qmp-commands.h"
> #include "qemu/error-report.h"
> +#include "qemu/rcu.h"
>
> typedef ObjectClass IOThreadClass;
>
> @@ -31,6 +32,8 @@ static void *iothread_run(void *opaque)
> IOThread *iothread = opaque;
> bool blocking;
>
> + rcu_register_thread();
> +
> qemu_mutex_lock(&iothread->init_done_lock);
> iothread->thread_id = qemu_get_thread_id();
> qemu_cond_signal(&iothread->init_done_cond);
> @@ -45,6 +48,8 @@ static void *iothread_run(void *opaque)
> }
> aio_context_release(iothread->ctx);
> }
> +
> + rcu_unregister_thread();
> return NULL;
> }
>
> diff --git a/migration/migration.c b/migration/migration.c
> index 86ca099..fd4f99b 100644
> --- a/migration/migration.c
> +++ b/migration/migration.c
> @@ -22,6 +22,7 @@
> #include "block/block.h"
> #include "qapi/qmp/qerror.h"
> #include "qemu/sockets.h"
> +#include "qemu/rcu.h"
> #include "migration/block.h"
> #include "qemu/thread.h"
> #include "qmp-commands.h"
> @@ -917,6 +918,8 @@ static void *migration_thread(void *opaque)
> int64_t start_time = initial_time;
> bool old_vm_running = false;
>
> + rcu_register_thread();
> +
> qemu_savevm_state_header(s->file);
> qemu_savevm_state_begin(s->file, &s->params);
>
> @@ -1016,6 +1019,7 @@ static void *migration_thread(void *opaque)
> qemu_bh_schedule(s->cleanup_bh);
> qemu_mutex_unlock_iothread();
>
> + rcu_unregister_thread();
> return NULL;
> }
>
> diff --git a/tests/test-rcu-list.c b/tests/test-rcu-list.c
> index 4c5f62e..daa8bf4 100644
> --- a/tests/test-rcu-list.c
> +++ b/tests/test-rcu-list.c
> @@ -108,6 +108,8 @@ static void *rcu_q_reader(void *arg)
> long long n_reads_local = 0;
> struct list_element *el;
>
> + rcu_register_thread();
> +
> *(struct rcu_reader_data **)arg = &rcu_reader;
> atomic_inc(&nthreadsrunning);
> while (goflag == GOFLAG_INIT) {
> @@ -129,6 +131,8 @@ static void *rcu_q_reader(void *arg)
> qemu_mutex_lock(&counts_mutex);
> n_reads += n_reads_local;
> qemu_mutex_unlock(&counts_mutex);
> +
> + rcu_unregister_thread();
> return NULL;
> }
>
> diff --git a/util/rcu.c b/util/rcu.c
> index 7270151..cdcad67 100644
> --- a/util/rcu.c
> +++ b/util/rcu.c
> @@ -216,6 +216,8 @@ static void *call_rcu_thread(void *opaque)
> {
> struct rcu_head *node;
>
> + rcu_register_thread();
> +
> for (;;) {
> int tries = 0;
> int n = atomic_read(&rcu_call_count);
>
^ permalink raw reply [flat|nested] 13+ messages in thread* Re: [Qemu-devel] [PATCH] rcu: actually register threads that have RCU read-side critical sections
2015-07-23 10:30 ` Christian Borntraeger
@ 2015-07-23 10:42 ` Paolo Bonzini
2015-07-23 11:04 ` Wen Congyang
0 siblings, 1 reply; 13+ messages in thread
From: Paolo Bonzini @ 2015-07-23 10:42 UTC (permalink / raw)
To: Christian Borntraeger, qemu-devel
On 23/07/2015 12:30, Christian Borntraeger wrote:
> Am 22.07.2015 um 16:18 schrieb Paolo Bonzini:
>> Otherwise, grace periods are detected too early!
>
> I guess this or Wen's proposal is still necessary for 2.4?
Yes. I think this is better for 2.4. There are threads that do not
need RCU, for example the thread-pool.c worker threads, so it may just
be simpler to add an assertion in rcu_register_thread. I'm just a bit
wary of doing little more than the bare minimum in 2.4, because of the
OS X failure that I didn't quite understand.
Paolo
>
>>
>> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
>> ---
>> cpus.c | 6 ++++++
>> iothread.c | 5 +++++
>> migration/migration.c | 4 ++++
>> tests/test-rcu-list.c | 4 ++++
>> util/rcu.c | 2 ++
>> 5 files changed, 21 insertions(+)
>>
>> diff --git a/cpus.c b/cpus.c
>> index b00a423..a822ce3 100644
>> --- a/cpus.c
>> +++ b/cpus.c
>> @@ -954,6 +954,8 @@ static void *qemu_kvm_cpu_thread_fn(void *arg)
>> CPUState *cpu = arg;
>> int r;
>>
>> + rcu_register_thread();
>> +
>> qemu_mutex_lock_iothread();
>> qemu_thread_get_self(cpu->thread);
>> cpu->thread_id = qemu_get_thread_id();
>> @@ -995,6 +997,8 @@ static void *qemu_dummy_cpu_thread_fn(void *arg)
>> sigset_t waitset;
>> int r;
>>
>> + rcu_register_thread();
>> +
>> qemu_mutex_lock_iothread();
>> qemu_thread_get_self(cpu->thread);
>> cpu->thread_id = qemu_get_thread_id();
>> @@ -1034,6 +1038,8 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
>> {
>> CPUState *cpu = arg;
>>
>> + rcu_register_thread();
>> +
>> qemu_mutex_lock_iothread();
>> qemu_tcg_init_cpu_signals();
>> qemu_thread_get_self(cpu->thread);
>> diff --git a/iothread.c b/iothread.c
>> index 6d2a33f..da6ce7b 100644
>> --- a/iothread.c
>> +++ b/iothread.c
>> @@ -18,6 +18,7 @@
>> #include "sysemu/iothread.h"
>> #include "qmp-commands.h"
>> #include "qemu/error-report.h"
>> +#include "qemu/rcu.h"
>>
>> typedef ObjectClass IOThreadClass;
>>
>> @@ -31,6 +32,8 @@ static void *iothread_run(void *opaque)
>> IOThread *iothread = opaque;
>> bool blocking;
>>
>> + rcu_register_thread();
>> +
>> qemu_mutex_lock(&iothread->init_done_lock);
>> iothread->thread_id = qemu_get_thread_id();
>> qemu_cond_signal(&iothread->init_done_cond);
>> @@ -45,6 +48,8 @@ static void *iothread_run(void *opaque)
>> }
>> aio_context_release(iothread->ctx);
>> }
>> +
>> + rcu_unregister_thread();
>> return NULL;
>> }
>>
>> diff --git a/migration/migration.c b/migration/migration.c
>> index 86ca099..fd4f99b 100644
>> --- a/migration/migration.c
>> +++ b/migration/migration.c
>> @@ -22,6 +22,7 @@
>> #include "block/block.h"
>> #include "qapi/qmp/qerror.h"
>> #include "qemu/sockets.h"
>> +#include "qemu/rcu.h"
>> #include "migration/block.h"
>> #include "qemu/thread.h"
>> #include "qmp-commands.h"
>> @@ -917,6 +918,8 @@ static void *migration_thread(void *opaque)
>> int64_t start_time = initial_time;
>> bool old_vm_running = false;
>>
>> + rcu_register_thread();
>> +
>> qemu_savevm_state_header(s->file);
>> qemu_savevm_state_begin(s->file, &s->params);
>>
>> @@ -1016,6 +1019,7 @@ static void *migration_thread(void *opaque)
>> qemu_bh_schedule(s->cleanup_bh);
>> qemu_mutex_unlock_iothread();
>>
>> + rcu_unregister_thread();
>> return NULL;
>> }
>>
>> diff --git a/tests/test-rcu-list.c b/tests/test-rcu-list.c
>> index 4c5f62e..daa8bf4 100644
>> --- a/tests/test-rcu-list.c
>> +++ b/tests/test-rcu-list.c
>> @@ -108,6 +108,8 @@ static void *rcu_q_reader(void *arg)
>> long long n_reads_local = 0;
>> struct list_element *el;
>>
>> + rcu_register_thread();
>> +
>> *(struct rcu_reader_data **)arg = &rcu_reader;
>> atomic_inc(&nthreadsrunning);
>> while (goflag == GOFLAG_INIT) {
>> @@ -129,6 +131,8 @@ static void *rcu_q_reader(void *arg)
>> qemu_mutex_lock(&counts_mutex);
>> n_reads += n_reads_local;
>> qemu_mutex_unlock(&counts_mutex);
>> +
>> + rcu_unregister_thread();
>> return NULL;
>> }
>>
>> diff --git a/util/rcu.c b/util/rcu.c
>> index 7270151..cdcad67 100644
>> --- a/util/rcu.c
>> +++ b/util/rcu.c
>> @@ -216,6 +216,8 @@ static void *call_rcu_thread(void *opaque)
>> {
>> struct rcu_head *node;
>>
>> + rcu_register_thread();
>> +
>> for (;;) {
>> int tries = 0;
>> int n = atomic_read(&rcu_call_count);
>>
>
>
>
^ permalink raw reply [flat|nested] 13+ messages in thread* Re: [Qemu-devel] [PATCH] rcu: actually register threads that have RCU read-side critical sections
2015-07-23 10:42 ` Paolo Bonzini
@ 2015-07-23 11:04 ` Wen Congyang
2015-07-23 11:08 ` Paolo Bonzini
0 siblings, 1 reply; 13+ messages in thread
From: Wen Congyang @ 2015-07-23 11:04 UTC (permalink / raw)
To: Paolo Bonzini, Christian Borntraeger, qemu-devel
On 07/23/2015 06:42 PM, Paolo Bonzini wrote:
>
>
> On 23/07/2015 12:30, Christian Borntraeger wrote:
>> Am 22.07.2015 um 16:18 schrieb Paolo Bonzini:
>>> Otherwise, grace periods are detected too early!
>>
>> I guess this or Wen's proposal is still necessary for 2.4?
>
> Yes. I think this is better for 2.4. There are threads that do not
> need RCU, for example the thread-pool.c worker threads, so it may just
If the thread doesn't use RCU, rcu_register_thread() is harmless, is it right?
> be simpler to add an assertion in rcu_register_thread. I'm just a bit
> wary of doing little more than the bare minimum in 2.4, because of the
> OS X failure that I didn't quite understand.
Which problem? I don't find it in the maillist. Do I miss something?
Thanks
Wen Congyang
>
> Paolo
>
>>
>>>
>>> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
>>> ---
>>> cpus.c | 6 ++++++
>>> iothread.c | 5 +++++
>>> migration/migration.c | 4 ++++
>>> tests/test-rcu-list.c | 4 ++++
>>> util/rcu.c | 2 ++
>>> 5 files changed, 21 insertions(+)
>>>
>>> diff --git a/cpus.c b/cpus.c
>>> index b00a423..a822ce3 100644
>>> --- a/cpus.c
>>> +++ b/cpus.c
>>> @@ -954,6 +954,8 @@ static void *qemu_kvm_cpu_thread_fn(void *arg)
>>> CPUState *cpu = arg;
>>> int r;
>>>
>>> + rcu_register_thread();
>>> +
>>> qemu_mutex_lock_iothread();
>>> qemu_thread_get_self(cpu->thread);
>>> cpu->thread_id = qemu_get_thread_id();
>>> @@ -995,6 +997,8 @@ static void *qemu_dummy_cpu_thread_fn(void *arg)
>>> sigset_t waitset;
>>> int r;
>>>
>>> + rcu_register_thread();
>>> +
>>> qemu_mutex_lock_iothread();
>>> qemu_thread_get_self(cpu->thread);
>>> cpu->thread_id = qemu_get_thread_id();
>>> @@ -1034,6 +1038,8 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
>>> {
>>> CPUState *cpu = arg;
>>>
>>> + rcu_register_thread();
>>> +
>>> qemu_mutex_lock_iothread();
>>> qemu_tcg_init_cpu_signals();
>>> qemu_thread_get_self(cpu->thread);
>>> diff --git a/iothread.c b/iothread.c
>>> index 6d2a33f..da6ce7b 100644
>>> --- a/iothread.c
>>> +++ b/iothread.c
>>> @@ -18,6 +18,7 @@
>>> #include "sysemu/iothread.h"
>>> #include "qmp-commands.h"
>>> #include "qemu/error-report.h"
>>> +#include "qemu/rcu.h"
>>>
>>> typedef ObjectClass IOThreadClass;
>>>
>>> @@ -31,6 +32,8 @@ static void *iothread_run(void *opaque)
>>> IOThread *iothread = opaque;
>>> bool blocking;
>>>
>>> + rcu_register_thread();
>>> +
>>> qemu_mutex_lock(&iothread->init_done_lock);
>>> iothread->thread_id = qemu_get_thread_id();
>>> qemu_cond_signal(&iothread->init_done_cond);
>>> @@ -45,6 +48,8 @@ static void *iothread_run(void *opaque)
>>> }
>>> aio_context_release(iothread->ctx);
>>> }
>>> +
>>> + rcu_unregister_thread();
>>> return NULL;
>>> }
>>>
>>> diff --git a/migration/migration.c b/migration/migration.c
>>> index 86ca099..fd4f99b 100644
>>> --- a/migration/migration.c
>>> +++ b/migration/migration.c
>>> @@ -22,6 +22,7 @@
>>> #include "block/block.h"
>>> #include "qapi/qmp/qerror.h"
>>> #include "qemu/sockets.h"
>>> +#include "qemu/rcu.h"
>>> #include "migration/block.h"
>>> #include "qemu/thread.h"
>>> #include "qmp-commands.h"
>>> @@ -917,6 +918,8 @@ static void *migration_thread(void *opaque)
>>> int64_t start_time = initial_time;
>>> bool old_vm_running = false;
>>>
>>> + rcu_register_thread();
>>> +
>>> qemu_savevm_state_header(s->file);
>>> qemu_savevm_state_begin(s->file, &s->params);
>>>
>>> @@ -1016,6 +1019,7 @@ static void *migration_thread(void *opaque)
>>> qemu_bh_schedule(s->cleanup_bh);
>>> qemu_mutex_unlock_iothread();
>>>
>>> + rcu_unregister_thread();
>>> return NULL;
>>> }
>>>
>>> diff --git a/tests/test-rcu-list.c b/tests/test-rcu-list.c
>>> index 4c5f62e..daa8bf4 100644
>>> --- a/tests/test-rcu-list.c
>>> +++ b/tests/test-rcu-list.c
>>> @@ -108,6 +108,8 @@ static void *rcu_q_reader(void *arg)
>>> long long n_reads_local = 0;
>>> struct list_element *el;
>>>
>>> + rcu_register_thread();
>>> +
>>> *(struct rcu_reader_data **)arg = &rcu_reader;
>>> atomic_inc(&nthreadsrunning);
>>> while (goflag == GOFLAG_INIT) {
>>> @@ -129,6 +131,8 @@ static void *rcu_q_reader(void *arg)
>>> qemu_mutex_lock(&counts_mutex);
>>> n_reads += n_reads_local;
>>> qemu_mutex_unlock(&counts_mutex);
>>> +
>>> + rcu_unregister_thread();
>>> return NULL;
>>> }
>>>
>>> diff --git a/util/rcu.c b/util/rcu.c
>>> index 7270151..cdcad67 100644
>>> --- a/util/rcu.c
>>> +++ b/util/rcu.c
>>> @@ -216,6 +216,8 @@ static void *call_rcu_thread(void *opaque)
>>> {
>>> struct rcu_head *node;
>>>
>>> + rcu_register_thread();
>>> +
>>> for (;;) {
>>> int tries = 0;
>>> int n = atomic_read(&rcu_call_count);
>>>
>>
>>
>>
>
> .
>
^ permalink raw reply [flat|nested] 13+ messages in thread* Re: [Qemu-devel] [PATCH] rcu: actually register threads that have RCU read-side critical sections
2015-07-23 11:04 ` Wen Congyang
@ 2015-07-23 11:08 ` Paolo Bonzini
2015-07-23 12:59 ` Wen Congyang
0 siblings, 1 reply; 13+ messages in thread
From: Paolo Bonzini @ 2015-07-23 11:08 UTC (permalink / raw)
To: Wen Congyang, Christian Borntraeger, qemu-devel
On 23/07/2015 13:04, Wen Congyang wrote:
> > Yes. I think this is better for 2.4. There are threads that do not
> > need RCU, for example the thread-pool.c worker threads, so it may just
>
> If the thread doesn't use RCU, rcu_register_thread() is harmless, is it right?
Every rcu_register_thread() makes synchronize_rcu() a little slower.
>> > be simpler to add an assertion in rcu_register_thread. I'm just a bit
>> > wary of doing little more than the bare minimum in 2.4, because of the
>> > OS X failure that I didn't quite understand.
> Which problem? I don't find it in the maillist.
http://article.gmane.org/gmane.comp.emulators.qemu/351548
Paolo
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [Qemu-devel] [PATCH] rcu: actually register threads that have RCU read-side critical sections
2015-07-23 11:08 ` Paolo Bonzini
@ 2015-07-23 12:59 ` Wen Congyang
2015-07-23 16:58 ` Paolo Bonzini
0 siblings, 1 reply; 13+ messages in thread
From: Wen Congyang @ 2015-07-23 12:59 UTC (permalink / raw)
To: Paolo Bonzini, Wen Congyang, Christian Borntraeger, qemu-devel
At 2015/7/23 19:08, Paolo Bonzini Wrote:
>
>
> On 23/07/2015 13:04, Wen Congyang wrote:
>>> Yes. I think this is better for 2.4. There are threads that do not
>>> need RCU, for example the thread-pool.c worker threads, so it may just
>>
>> If the thread doesn't use RCU, rcu_register_thread() is harmless, is it right?
>
> Every rcu_register_thread() makes synchronize_rcu() a little slower.
Yes, but synchronize_rcu() is very slow...
>
>>>> be simpler to add an assertion in rcu_register_thread. I'm just a bit
>>>> wary of doing little more than the bare minimum in 2.4, because of the
>>>> OS X failure that I didn't quite understand.
>> Which problem? I don't find it in the maillist.
>
> http://article.gmane.org/gmane.comp.emulators.qemu/351548
Hmm, I guess rcu_reader is invalid when pthread key is destroyed.
pthread key and __thread
variable, which is destroyed first? I don't find any document to
describe it.
Thanks
Wen Congyang
>
> Paolo
>
>
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [Qemu-devel] [PATCH] rcu: actually register threads that have RCU read-side critical sections
2015-07-23 12:59 ` Wen Congyang
@ 2015-07-23 16:58 ` Paolo Bonzini
2015-07-24 3:55 ` Wen Congyang
2015-07-24 5:56 ` Wen Congyang
0 siblings, 2 replies; 13+ messages in thread
From: Paolo Bonzini @ 2015-07-23 16:58 UTC (permalink / raw)
To: Wen Congyang, Wen Congyang, Christian Borntraeger, qemu-devel
On 23/07/2015 14:59, Wen Congyang wrote:
>>>
>>> If the thread doesn't use RCU, rcu_register_thread() is harmless, is
>>> it right?
>>
>> Every rcu_register_thread() makes synchronize_rcu() a little slower.
>
> Yes, but synchronize_rcu() is very slow...
Hmm, worse, rcu_register_thread() if called together with
synchronize_rcu() it waits for the synchronize_rcu() to finish. :/
Paolo
>>
>>>>> be simpler to add an assertion in rcu_register_thread. I'm just a bit
>>>>> wary of doing little more than the bare minimum in 2.4, because of the
>>>>> OS X failure that I didn't quite understand.
>>> Which problem? I don't find it in the maillist.
>>
>> http://article.gmane.org/gmane.comp.emulators.qemu/351548
>
> Hmm, I guess rcu_reader is invalid when pthread key is destroyed.
> pthread key and __thread
> variable, which is destroyed first? I don't find any document to
> describe it.
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [Qemu-devel] [PATCH] rcu: actually register threads that have RCU read-side critical sections
2015-07-23 16:58 ` Paolo Bonzini
@ 2015-07-24 3:55 ` Wen Congyang
2015-07-24 5:56 ` Wen Congyang
1 sibling, 0 replies; 13+ messages in thread
From: Wen Congyang @ 2015-07-24 3:55 UTC (permalink / raw)
To: Paolo Bonzini, Wen Congyang, Christian Borntraeger, qemu-devel
On 07/24/2015 12:58 AM, Paolo Bonzini wrote:
>
>
> On 23/07/2015 14:59, Wen Congyang wrote:
>>>>
>>>> If the thread doesn't use RCU, rcu_register_thread() is harmless, is
>>>> it right?
>>>
>>> Every rcu_register_thread() makes synchronize_rcu() a little slower.
>>
>> Yes, but synchronize_rcu() is very slow...
>
> Hmm, worse, rcu_register_thread() if called together with
> synchronize_rcu() it waits for the synchronize_rcu() to finish. :/
Yes, it is a problem.
Thanks
Wen Congyang
>
> Paolo
>
>>>
>>>>>> be simpler to add an assertion in rcu_register_thread. I'm just a bit
>>>>>> wary of doing little more than the bare minimum in 2.4, because of the
>>>>>> OS X failure that I didn't quite understand.
>>>> Which problem? I don't find it in the maillist.
>>>
>>> http://article.gmane.org/gmane.comp.emulators.qemu/351548
>>
>> Hmm, I guess rcu_reader is invalid when pthread key is destroyed.
>> pthread key and __thread
>> variable, which is destroyed first? I don't find any document to
>> describe it.
>
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [Qemu-devel] [PATCH] rcu: actually register threads that have RCU read-side critical sections
2015-07-23 16:58 ` Paolo Bonzini
2015-07-24 3:55 ` Wen Congyang
@ 2015-07-24 5:56 ` Wen Congyang
2015-07-24 6:22 ` Paolo Bonzini
1 sibling, 1 reply; 13+ messages in thread
From: Wen Congyang @ 2015-07-24 5:56 UTC (permalink / raw)
To: Paolo Bonzini, Wen Congyang, Christian Borntraeger, qemu-devel
On 07/24/2015 12:58 AM, Paolo Bonzini wrote:
>
>
> On 23/07/2015 14:59, Wen Congyang wrote:
>>>>
>>>> If the thread doesn't use RCU, rcu_register_thread() is harmless, is
>>>> it right?
>>>
>>> Every rcu_register_thread() makes synchronize_rcu() a little slower.
>>
>> Yes, but synchronize_rcu() is very slow...
>
> Hmm, worse, rcu_register_thread() if called together with
> synchronize_rcu() it waits for the synchronize_rcu() to finish. :/
What about this modification:
diff --git a/util/rcu.c b/util/rcu.c
index 7270151..ccf8cfa 100644
--- a/util/rcu.c
+++ b/util/rcu.c
@@ -48,6 +48,7 @@ unsigned long rcu_gp_ctr = RCU_GP_LOCKED;
QemuEvent rcu_gp_event;
static QemuMutex rcu_gp_lock;
+static QemuMutex rcu_sync_lock;
/*
* Check whether a quiescent state was crossed between the beginning of
@@ -115,9 +116,12 @@ static void wait_for_readers(void)
}
/* Wait for one thread to report a quiescent state and
- * try again.
+ * try again. Release rcu_gp_lock, so rcu_(un)register_thread()
+ * doesn't wait too much time.
*/
+ qemu_mutex_unlock(&rcu_gp_lock);
qemu_event_wait(&rcu_gp_event);
+ qemu_mutex_lock(&rcu_gp_lock);
}
/* put back the reader list in the registry */
@@ -126,6 +130,7 @@ static void wait_for_readers(void)
void synchronize_rcu(void)
{
+ qemu_mutex_lock(&rcu_sync_lock);
qemu_mutex_lock(&rcu_gp_lock);
if (!QLIST_EMPTY(&registry)) {
@@ -150,6 +155,7 @@ void synchronize_rcu(void)
}
qemu_mutex_unlock(&rcu_gp_lock);
+ qemu_mutex_unlock(&rcu_sync_lock);
}
@@ -288,6 +294,7 @@ static void rcu_init_complete(void)
QemuThread thread;
qemu_mutex_init(&rcu_gp_lock);
+ qemu_mutex_init(&rcu_sync_lock);
qemu_event_init(&rcu_gp_event, true);
qemu_event_init(&rcu_call_ready_event, false);
@@ -304,12 +311,14 @@ static void rcu_init_complete(void)
#ifdef CONFIG_POSIX
static void rcu_init_lock(void)
{
+ qemu_mutex_lock(&rcu_sync_lock);
qemu_mutex_lock(&rcu_gp_lock);
}
static void rcu_init_unlock(void)
{
qemu_mutex_unlock(&rcu_gp_lock);
+ qemu_mutex_unlock(&rcu_sync_lock);
}
#endif
rcu_register_thread() will be a little slower when it is
called together with synchronize_rcu().
Thanks
Wen Congyang
>
> Paolo
>
>>>
>>>>>> be simpler to add an assertion in rcu_register_thread. I'm just a bit
>>>>>> wary of doing little more than the bare minimum in 2.4, because of the
>>>>>> OS X failure that I didn't quite understand.
>>>> Which problem? I don't find it in the maillist.
>>>
>>> http://article.gmane.org/gmane.comp.emulators.qemu/351548
>>
>> Hmm, I guess rcu_reader is invalid when pthread key is destroyed.
>> pthread key and __thread
>> variable, which is destroyed first? I don't find any document to
>> describe it.
>
^ permalink raw reply related [flat|nested] 13+ messages in thread* Re: [Qemu-devel] [PATCH] rcu: actually register threads that have RCU read-side critical sections
2015-07-24 5:56 ` Wen Congyang
@ 2015-07-24 6:22 ` Paolo Bonzini
2015-07-24 6:30 ` Wen Congyang
0 siblings, 1 reply; 13+ messages in thread
From: Paolo Bonzini @ 2015-07-24 6:22 UTC (permalink / raw)
To: Wen Congyang, Wen Congyang, Christian Borntraeger, qemu-devel
On 24/07/2015 07:56, Wen Congyang wrote:
> @@ -115,9 +116,12 @@ static void wait_for_readers(void)
> }
>
> /* Wait for one thread to report a quiescent state and
> - * try again.
> + * try again. Release rcu_gp_lock, so rcu_(un)register_thread()
> + * doesn't wait too much time.
> */
> + qemu_mutex_unlock(&rcu_gp_lock);
> qemu_event_wait(&rcu_gp_event);
> + qemu_mutex_lock(&rcu_gp_lock);
> }
>
So in this case rcu_unregister_thread could actually remove the node
from synchronize_rcu's qsreaders, not just from registry. That's a bit
tricky, but it should work. Please add a comment, however.
Also, please rename "rcu_gp_lock" as well to rcu_registry_lock. We'll
get the patches in QEMU 2.5.
Paolo
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [Qemu-devel] [PATCH] rcu: actually register threads that have RCU read-side critical sections
2015-07-24 6:22 ` Paolo Bonzini
@ 2015-07-24 6:30 ` Wen Congyang
0 siblings, 0 replies; 13+ messages in thread
From: Wen Congyang @ 2015-07-24 6:30 UTC (permalink / raw)
To: Paolo Bonzini, Wen Congyang, Christian Borntraeger, qemu-devel
On 07/24/2015 02:22 PM, Paolo Bonzini wrote:
>
>
> On 24/07/2015 07:56, Wen Congyang wrote:
>> @@ -115,9 +116,12 @@ static void wait_for_readers(void)
>> }
>>
>> /* Wait for one thread to report a quiescent state and
>> - * try again.
>> + * try again. Release rcu_gp_lock, so rcu_(un)register_thread()
>> + * doesn't wait too much time.
>> */
>> + qemu_mutex_unlock(&rcu_gp_lock);
>> qemu_event_wait(&rcu_gp_event);
>> + qemu_mutex_lock(&rcu_gp_lock);
>> }
>>
>
> So in this case rcu_unregister_thread could actually remove the node
> from synchronize_rcu's qsreaders, not just from registry. That's a bit
> tricky, but it should work. Please add a comment, however.
>
> Also, please rename "rcu_gp_lock" as well to rcu_registry_lock. We'll
> get the patches in QEMU 2.5.
OK, I will do it.
Thanks
Wen Congyang
>
> Paolo
> .
>
^ permalink raw reply [flat|nested] 13+ messages in thread
end of thread, other threads:[~2015-07-24 6:30 UTC | newest]
Thread overview: 13+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2015-07-22 14:18 [Qemu-devel] [PATCH] rcu: actually register threads that have RCU read-side critical sections Paolo Bonzini
2015-07-23 2:56 ` Wen Congyang
2015-07-23 5:35 ` Paolo Bonzini
2015-07-23 10:30 ` Christian Borntraeger
2015-07-23 10:42 ` Paolo Bonzini
2015-07-23 11:04 ` Wen Congyang
2015-07-23 11:08 ` Paolo Bonzini
2015-07-23 12:59 ` Wen Congyang
2015-07-23 16:58 ` Paolo Bonzini
2015-07-24 3:55 ` Wen Congyang
2015-07-24 5:56 ` Wen Congyang
2015-07-24 6:22 ` Paolo Bonzini
2015-07-24 6:30 ` Wen Congyang
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).