From: Paolo Bonzini <pbonzini@redhat.com>
To: Fam Zheng <famz@redhat.com>
Cc: qemu-devel@nongnu.org, stefanha@redhat.com, kwolf@redhat.com,
	berto@igalia.com
Subject: Re: [Qemu-devel] [PATCH 08/11] test-aio-multithread: add performance comparison with thread-based mutexes
Date: Thu, 12 May 2016 12:49:56 -0400 (EDT)	[thread overview]
Message-ID: <1173754502.13650897.1463071796291.JavaMail.zimbra@redhat.com> (raw)
In-Reply-To: <20160429065227.GK1421@ad.usersys.redhat.com>

> >  tests/test-aio-multithread.c | 152 +++++++++++++++++++++++++++++++++++++++++++
> >  1 file changed, 152 insertions(+)
> > 
> > diff --git a/tests/test-aio-multithread.c b/tests/test-aio-multithread.c
> > index 18b3548..d7bc1bf 100644
> > --- a/tests/test-aio-multithread.c
> > +++ b/tests/test-aio-multithread.c
> > @@ -279,6 +279,150 @@ static void test_multi_co_mutex_2_30(void)
> >      test_multi_co_mutex(2, 30);
> >  }
> >  
> > +/* Same test with fair mutexes, for performance comparison.  */
> > +
> > +#ifdef CONFIG_LINUX
> > +#include "qemu/futex.h"
> 
> Do we have qemu/futex.h?

It must be somewhere in the previous 50 patches...  QemuLockCnt adds it.
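
(For reference, here is a minimal sketch of roughly what such a header
would provide on Linux, built directly on the futex(2) syscall.  This is
an illustration of the expected interface, not the actual contents of
qemu/futex.h from that series:)

    #include <errno.h>
    #include <linux/futex.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Sleep while *f still holds 'val'.  EWOULDBLOCK means the value
     * already changed before we could sleep; EINTR means the sleep was
     * interrupted and must be retried.  */
    static inline void futex_wait(void *f, unsigned val)
    {
        while (syscall(__NR_futex, f, FUTEX_WAIT, (int)val, NULL, NULL, 0)) {
            if (errno == EWOULDBLOCK) {
                return;
            }
            if (errno != EINTR) {
                abort();
            }
        }
    }

    /* Wake up to 'n' waiters blocked on *f.  */
    static inline void futex_wake(void *f, int n)
    {
        syscall(__NR_futex, f, FUTEX_WAKE, n, NULL, NULL, 0);
    }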

Paolo


> > +
> > +/* The nodes for the mutex reside in this structure (on which we try to avoid
> > + * false sharing).  The head of the mutex is in the "mutex_head" variable.
> > + */
> > +static struct {
> > +    int next, locked;
> > +    int padding[14];
> > +} nodes[NUM_CONTEXTS] __attribute__((__aligned__(64)));
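
(Assuming 4-byte ints, the sizing works out exactly: 2 live ints plus 14
padding ints is 16 * 4 = 64 bytes per element, so together with the
64-byte alignment every node occupies its own cache line -- which is what
the "avoid false sharing" comment above is after.)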
> > +
> > +static int mutex_head = -1;
> > +
> > +static void mcs_mutex_lock(void)
> > +{
> > +    int prev;
> > +
> > +    nodes[id].next = -1;
> > +    nodes[id].locked = 1;
> > +    prev = atomic_xchg(&mutex_head, id);
> > +    if (prev != -1) {
> > +        atomic_set(&nodes[prev].next, id);
> > +        futex_wait(&nodes[id].locked, 1);
> > +    }
> > +}
> > +
> > +static void mcs_mutex_unlock(void)
> > +{
> > +    int next;
> > +    if (nodes[id].next == -1) {
> > +        if (atomic_read(&mutex_head) == id &&
> > +            atomic_cmpxchg(&mutex_head, id, -1) == id) {
> > +            /* Last item in the list, exit.  */
> > +            return;
> > +        }
> > +        while (atomic_read(&nodes[id].next) == -1) {
> > +            /* Spin... */
> > +        }
> > +    }
> > +
> > +    /* Wake up the next in line.  */
> > +    next = nodes[id].next;
> > +    nodes[next].locked = 0;
> > +    futex_wake(&nodes[next].locked, 1);
> > +}
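
(The algorithm may be easier to study outside the QEMU atomics helpers.
Below is a hypothetical standalone translation using C11 atomics plus the
futex_wait()/futex_wake() helpers sketched earlier -- same logic as the
patch, but not part of it; NUM_CONTEXTS and the c11_mcs_* names are made
up for the example:)

    #include <stdatomic.h>

    #define NUM_CONTEXTS 8          /* hypothetical; one node per thread */

    static _Atomic int mcs_head = -1;
    static struct {
        _Atomic int next;           /* successor's index, -1 if none yet */
        _Atomic int locked;         /* 1 while this node must block      */
        int pad[14];                /* pad the node to one cache line    */
    } mcs_nodes[NUM_CONTEXTS] __attribute__((__aligned__(64)));

    static void c11_mcs_lock(int id)
    {
        atomic_store(&mcs_nodes[id].next, -1);
        atomic_store(&mcs_nodes[id].locked, 1);
        /* Atomically become the tail; if there was an old tail, link
         * ourselves behind it and sleep until it hands the lock off.  */
        int prev = atomic_exchange(&mcs_head, id);
        if (prev != -1) {
            atomic_store(&mcs_nodes[prev].next, id);
            while (atomic_load(&mcs_nodes[id].locked)) {
                futex_wait(&mcs_nodes[id].locked, 1);
            }
        }
    }

    static void c11_mcs_unlock(int id)
    {
        if (atomic_load(&mcs_nodes[id].next) == -1) {
            /* No successor visible: try to swing the tail back to empty. */
            int expected = id;
            if (atomic_compare_exchange_strong(&mcs_head, &expected, -1)) {
                return;             /* we were last in line */
            }
            /* A locker has already done its atomic exchange on the head
             * but has not stored its index into our 'next' yet; spin
             * through that short window.  */
            while (atomic_load(&mcs_nodes[id].next) == -1) {
                /* spin */
            }
        }
        /* Hand the lock to the next in line and wake it.  */
        int next = atomic_load(&mcs_nodes[id].next);
        atomic_store(&mcs_nodes[next].locked, 0);
        futex_wake(&mcs_nodes[next].locked, 1);
    }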
> > +
> > +static void test_multi_fair_mutex_entry(void *opaque)
> > +{
> > +    while (!atomic_mb_read(&now_stopping)) {
> > +        mcs_mutex_lock();
> > +        counter++;
> > +        mcs_mutex_unlock();
> > +        atomic_inc(&atomic_counter);
> > +    }
> > +
> > +}
> > +
> > +static void test_multi_fair_mutex(int threads, int seconds)
> > +{
> > +    int i;
> > +
> > +    assert(mutex_head == -1);
> > +    counter = 0;
> > +    atomic_counter = 0;
> > +    now_stopping = false;
> > +
> > +    create_aio_contexts();
> > +    assert(threads <= NUM_CONTEXTS);
> > +    for (i = 0; i < threads; i++) {
> > +        Coroutine *co1 = qemu_coroutine_create(test_multi_fair_mutex_entry);
> > +        aio_co_schedule(ctx[i], co1);
> > +    }
> > +
> > +    g_usleep(seconds * 1000000);
> > +
> > +    atomic_mb_set(&now_stopping, true);
> > +    join_aio_contexts();
> > +    g_test_message("%d iterations/second\n", counter / seconds);
> > +    g_assert_cmpint(counter, ==, atomic_counter);
> > +}
> > +
> > +static void test_multi_fair_mutex_1(void)
> > +{
> > +    test_multi_fair_mutex(NUM_CONTEXTS, 1);
> > +}
> > +
> > +static void test_multi_fair_mutex_10(void)
> > +{
> > +    test_multi_fair_mutex(NUM_CONTEXTS, 10);
> > +}
> > +#endif
> > +
> > +/* Same test with pthread mutexes, for performance comparison and
> > + * portability.  */
> > +
> > +static QemuMutex mutex;
> > +
> > +static void test_multi_mutex_entry(void *opaque)
> > +{
> > +    while (!atomic_mb_read(&now_stopping)) {
> > +        qemu_mutex_lock(&mutex);
> > +        counter++;
> > +        qemu_mutex_unlock(&mutex);
> > +        atomic_inc(&atomic_counter);
> > +    }
> > +
> > +}
> > +
> > +static void test_multi_mutex(int threads, int seconds)
> > +{
> > +    int i;
> > +
> > +    qemu_mutex_init(&mutex);
> > +    counter = 0;
> > +    atomic_counter = 0;
> > +    now_stopping = false;
> > +
> > +    create_aio_contexts();
> > +    assert(threads <= NUM_CONTEXTS);
> > +    for (i = 0; i < threads; i++) {
> > +        Coroutine *co1 = qemu_coroutine_create(test_multi_mutex_entry);
> > +        aio_co_schedule(ctx[i], co1);
> > +    }
> > +
> > +    g_usleep(seconds * 1000000);
> > +
> > +    atomic_mb_set(&now_stopping, true);
> > +    join_aio_contexts();
> > +    g_test_message("%d iterations/second\n", counter / seconds);
> > +    g_assert_cmpint(counter, ==, atomic_counter);
> > +}
> > +
> > +static void test_multi_mutex_1(void)
> > +{
> > +    test_multi_mutex(NUM_CONTEXTS, 1);
> > +}
> > +
> > +static void test_multi_mutex_10(void)
> > +{
> > +    test_multi_mutex(NUM_CONTEXTS, 10);
> > +}
> > +
> >  /* End of tests.  */
> >  
> >  int main(int argc, char **argv)
> > @@ -291,10 +435,18 @@ int main(int argc, char **argv)
> >          g_test_add_func("/aio/multi/schedule", test_multi_co_schedule_1);
> >          g_test_add_func("/aio/multi/mutex", test_multi_co_mutex_1);
> >          g_test_add_func("/aio/multi/mutex/handoff", test_multi_co_mutex_2_3);
> > +#ifdef CONFIG_LINUX
> > +        g_test_add_func("/aio/multi/mutex/mcs", test_multi_fair_mutex_1);
> > +#endif
> > +        g_test_add_func("/aio/multi/mutex/pthread", test_multi_mutex_1);
> >      } else {
> >          g_test_add_func("/aio/multi/schedule", test_multi_co_schedule_10);
> >          g_test_add_func("/aio/multi/mutex", test_multi_co_mutex_10);
> >          g_test_add_func("/aio/multi/mutex/handoff", test_multi_co_mutex_2_30);
> > +#ifdef CONFIG_LINUX
> > +        g_test_add_func("/aio/multi/mutex/mcs", test_multi_fair_mutex_10);
> > +#endif
> > +        g_test_add_func("/aio/multi/mutex/pthread", test_multi_mutex_10);
> >      }
> >      return g_test_run();
> >  }
> > --
> > 2.5.5
> > 
> > 
> 
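(For completeness, the individual benchmarks can be selected through the
standard GLib test options -- a hypothetical invocation, assuming the
binary is built as tests/test-aio-multithread and that main() uses the
usual g_test_quick() switch between the two sets of registrations; the
exact path depends on the build tree:

    # quick mode (default): runs the 1-second variants
    tests/test-aio-multithread -p /aio/multi/mutex/mcs
    tests/test-aio-multithread -p /aio/multi/mutex/pthread

    # slow mode: runs the 10-second variants instead
    tests/test-aio-multithread -m slow -p /aio/multi/mutex/mcs

The g_test_message() throughput lines may only be shown when the test
binary is run with --verbose.)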

