From: Paolo Bonzini <pbonzini@redhat.com>
To: qemu-devel@nongnu.org
Cc: afaerber@suse.de
Subject: [Qemu-devel] [PATCH] rcutorture: fix compilation on 32-bit ppc
Date: Sat, 21 Mar 2015 16:34:51 +0100
Message-ID: <1426952091-18019-1-git-send-email-pbonzini@redhat.com>

32-bit PPC cannot do atomic operations on long long. Inside the loops
we already use local counters that are summed at the end of the run;
the one exception is rcu_read_stress_test, so fix it to use the same
technique. Then use a mutex to protect the global counts. Performance
does not matter there, because each thread enters the critical section
only once.
Remaining uses of atomic instructions are for ints or pointers.
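
For reference, the pattern in isolation looks like the sketch below. It
is illustrative only: it uses plain pthreads rather than qemu-thread so
that it builds outside the QEMU tree, and the thread count, iteration
count and the "reader" name are made up for the example.

    /*
     * Per-thread local counters, folded into the global count under a
     * mutex once per thread, so no 64-bit atomic is ever required.
     */
    #include <pthread.h>
    #include <stdio.h>

    #define N_THREADS 4
    #define N_ITERS   1000000

    static pthread_mutex_t counts_mutex = PTHREAD_MUTEX_INITIALIZER;
    static long long n_reads;          /* global count, mutex-protected */

    static void *reader(void *arg)
    {
        long long n_reads_local = 0;
        int i;

        for (i = 0; i < N_ITERS; i++) {
            n_reads_local++;           /* no atomics in the hot loop */
        }

        /* Each thread enters the critical section exactly once. */
        pthread_mutex_lock(&counts_mutex);
        n_reads += n_reads_local;
        pthread_mutex_unlock(&counts_mutex);
        return NULL;
    }

    int main(void)
    {
        pthread_t threads[N_THREADS];
        int i;

        for (i = 0; i < N_THREADS; i++) {
            pthread_create(&threads[i], NULL, reader, NULL);
        }
        for (i = 0; i < N_THREADS; i++) {
            pthread_join(threads[i], NULL);
        }
        printf("n_reads = %lld\n", n_reads);   /* expect 4000000 */
        return 0;
    }
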
Reported-by: Andreas Färber <afaerber@suse.de>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
tests/rcutorture.c | 20 ++++++++++++++++----
1 file changed, 16 insertions(+), 4 deletions(-)
diff --git a/tests/rcutorture.c b/tests/rcutorture.c
index 60a2ccf..d6b304d 100644
--- a/tests/rcutorture.c
+++ b/tests/rcutorture.c
@@ -82,6 +82,7 @@ static volatile int goflag = GOFLAG_INIT;
 #define RCU_READ_RUN 1000
 
 #define NR_THREADS 100
+static QemuMutex counts_mutex;
 static QemuThread threads[NR_THREADS];
 static struct rcu_reader_data *data[NR_THREADS];
 static int n_threads;
@@ -130,7 +131,9 @@ static void *rcu_read_perf_test(void *arg)
         }
         n_reads_local += RCU_READ_RUN;
     }
-    atomic_add(&n_reads, n_reads_local);
+    qemu_mutex_lock(&counts_mutex);
+    n_reads += n_reads_local;
+    qemu_mutex_unlock(&counts_mutex);
 
     rcu_unregister_thread();
     return NULL;
@@ -151,7 +154,9 @@ static void *rcu_update_perf_test(void *arg)
         synchronize_rcu();
         n_updates_local++;
     }
-    atomic_add(&n_updates, n_updates_local);
+    qemu_mutex_lock(&counts_mutex);
+    n_updates += n_updates_local;
+    qemu_mutex_unlock(&counts_mutex);
 
     rcu_unregister_thread();
     return NULL;
@@ -241,6 +246,7 @@ static void *rcu_read_stress_test(void *arg)
     struct rcu_stress *p;
     int pc;
     long long n_reads_local = 0;
+    long long rcu_stress_local[RCU_STRESS_PIPE_LEN + 1] = { 0 };
     volatile int garbage = 0;
 
     rcu_register_thread();
@@ -265,13 +271,18 @@ static void *rcu_read_stress_test(void *arg)
         if ((pc > RCU_STRESS_PIPE_LEN) || (pc < 0)) {
             pc = RCU_STRESS_PIPE_LEN;
         }
-        atomic_inc(&rcu_stress_count[pc]);
+        rcu_stress_local[pc]++;
         n_reads_local++;
         if ((++itercnt % 0x1000) == 0) {
             synchronize_rcu();
         }
     }
-    atomic_add(&n_reads, n_reads_local);
+    qemu_mutex_lock(&counts_mutex);
+    n_reads += n_reads_local;
+    for (i = 0; i <= RCU_STRESS_PIPE_LEN; i++) {
+        rcu_stress_count[i] += rcu_stress_local[i];
+    }
+    qemu_mutex_unlock(&counts_mutex);
 
     rcu_unregister_thread();
     return NULL;
@@ -419,6 +430,7 @@ int main(int argc, char *argv[])
     int nreaders = 1;
     int duration = 1;
 
+    qemu_mutex_init(&counts_mutex);
     if (argc >= 2 && argv[1][0] == '-') {
         g_test_init(&argc, &argv, NULL);
         if (g_test_quick()) {
--
2.3.0