From: Christian Borntraeger <borntraeger@de.ibm.com>
To: Paolo Bonzini <pbonzini@redhat.com>, qemu-devel@nongnu.org
Subject: Re: [Qemu-devel] [PATCH] rcu: actually register threads that have RCU read-side critical sections
Date: Thu, 23 Jul 2015 12:30:36 +0200
Message-ID: <55B0C24C.7020401@de.ibm.com>
In-Reply-To: <1437574681-18362-2-git-send-email-pbonzini@redhat.com>

On 22.07.2015 at 16:18, Paolo Bonzini wrote:
> Otherwise, grace periods are detected too early!

I guess this or Wen's proposal is still necessary for 2.4?
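
For reference, the pattern the patch adds to each thread function is
roughly the following (a minimal sketch against QEMU's
include/qemu/rcu.h; the thread function and its body are hypothetical):

    #include "qemu/rcu.h"

    static void *example_thread_fn(void *opaque)
    {
        /* Announce this thread to the RCU machinery before its first
         * read-side critical section; without this, synchronize_rcu()
         * can declare a grace period over while the thread is still
         * reading.
         */
        rcu_register_thread();

        rcu_read_lock();
        /* ... dereference RCU-protected data here ... */
        rcu_read_unlock();

        /* Leave the reader set again before the thread exits. */
        rcu_unregister_thread();
        return NULL;
    }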


> 
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> ---
>  cpus.c                | 6 ++++++
>  iothread.c            | 5 +++++
>  migration/migration.c | 4 ++++
>  tests/test-rcu-list.c | 4 ++++
>  util/rcu.c            | 2 ++
>  5 files changed, 21 insertions(+)
> 
> diff --git a/cpus.c b/cpus.c
> index b00a423..a822ce3 100644
> --- a/cpus.c
> +++ b/cpus.c
> @@ -954,6 +954,8 @@ static void *qemu_kvm_cpu_thread_fn(void *arg)
>      CPUState *cpu = arg;
>      int r;
> 
> +    rcu_register_thread();
> +
>      qemu_mutex_lock_iothread();
>      qemu_thread_get_self(cpu->thread);
>      cpu->thread_id = qemu_get_thread_id();
> @@ -995,6 +997,8 @@ static void *qemu_dummy_cpu_thread_fn(void *arg)
>      sigset_t waitset;
>      int r;
> 
> +    rcu_register_thread();
> +
>      qemu_mutex_lock_iothread();
>      qemu_thread_get_self(cpu->thread);
>      cpu->thread_id = qemu_get_thread_id();
> @@ -1034,6 +1038,8 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
>  {
>      CPUState *cpu = arg;
> 
> +    rcu_register_thread();
> +
>      qemu_mutex_lock_iothread();
>      qemu_tcg_init_cpu_signals();
>      qemu_thread_get_self(cpu->thread);
> diff --git a/iothread.c b/iothread.c
> index 6d2a33f..da6ce7b 100644
> --- a/iothread.c
> +++ b/iothread.c
> @@ -18,6 +18,7 @@
>  #include "sysemu/iothread.h"
>  #include "qmp-commands.h"
>  #include "qemu/error-report.h"
> +#include "qemu/rcu.h"
> 
>  typedef ObjectClass IOThreadClass;
> 
> @@ -31,6 +32,8 @@ static void *iothread_run(void *opaque)
>      IOThread *iothread = opaque;
>      bool blocking;
> 
> +    rcu_register_thread();
> +
>      qemu_mutex_lock(&iothread->init_done_lock);
>      iothread->thread_id = qemu_get_thread_id();
>      qemu_cond_signal(&iothread->init_done_cond);
> @@ -45,6 +48,8 @@ static void *iothread_run(void *opaque)
>          }
>          aio_context_release(iothread->ctx);
>      }
> +
> +    rcu_unregister_thread();
>      return NULL;
>  }
> 
> diff --git a/migration/migration.c b/migration/migration.c
> index 86ca099..fd4f99b 100644
> --- a/migration/migration.c
> +++ b/migration/migration.c
> @@ -22,6 +22,7 @@
>  #include "block/block.h"
>  #include "qapi/qmp/qerror.h"
>  #include "qemu/sockets.h"
> +#include "qemu/rcu.h"
>  #include "migration/block.h"
>  #include "qemu/thread.h"
>  #include "qmp-commands.h"
> @@ -917,6 +918,8 @@ static void *migration_thread(void *opaque)
>      int64_t start_time = initial_time;
>      bool old_vm_running = false;
> 
> +    rcu_register_thread();
> +
>      qemu_savevm_state_header(s->file);
>      qemu_savevm_state_begin(s->file, &s->params);
> 
> @@ -1016,6 +1019,7 @@ static void *migration_thread(void *opaque)
>      qemu_bh_schedule(s->cleanup_bh);
>      qemu_mutex_unlock_iothread();
> 
> +    rcu_unregister_thread();
>      return NULL;
>  }
> 
> diff --git a/tests/test-rcu-list.c b/tests/test-rcu-list.c
> index 4c5f62e..daa8bf4 100644
> --- a/tests/test-rcu-list.c
> +++ b/tests/test-rcu-list.c
> @@ -108,6 +108,8 @@ static void *rcu_q_reader(void *arg)
>      long long n_reads_local = 0;
>      struct list_element *el;
> 
> +    rcu_register_thread();
> +
>      *(struct rcu_reader_data **)arg = &rcu_reader;
>      atomic_inc(&nthreadsrunning);
>      while (goflag == GOFLAG_INIT) {
> @@ -129,6 +131,8 @@ static void *rcu_q_reader(void *arg)
>      qemu_mutex_lock(&counts_mutex);
>      n_reads += n_reads_local;
>      qemu_mutex_unlock(&counts_mutex);
> +
> +    rcu_unregister_thread();
>      return NULL;
>  }
> 
> diff --git a/util/rcu.c b/util/rcu.c
> index 7270151..cdcad67 100644
> --- a/util/rcu.c
> +++ b/util/rcu.c
> @@ -216,6 +216,8 @@ static void *call_rcu_thread(void *opaque)
>  {
>      struct rcu_head *node;
> 
> +    rcu_register_thread();
> +
>      for (;;) {
>          int tries = 0;
>          int n = atomic_read(&rcu_call_count);
> 

Thread overview: 13+ messages
2015-07-22 14:18 [Qemu-devel] [PATCH] rcu: actually register threads that have RCU read-side critical sections Paolo Bonzini
2015-07-23  2:56 ` Wen Congyang
2015-07-23  5:35   ` Paolo Bonzini
2015-07-23 10:30 ` Christian Borntraeger [this message]
2015-07-23 10:42   ` Paolo Bonzini
2015-07-23 11:04     ` Wen Congyang
2015-07-23 11:08       ` Paolo Bonzini
2015-07-23 12:59         ` Wen Congyang
2015-07-23 16:58           ` Paolo Bonzini
2015-07-24  3:55             ` Wen Congyang
2015-07-24  5:56             ` Wen Congyang
2015-07-24  6:22               ` Paolo Bonzini
2015-07-24  6:30                 ` Wen Congyang
