From: Rik van Riel <riel@redhat.com>
To: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Davidlohr Bueso <davidlohr.bueso@hp.com>,
Linux Kernel Mailing List <linux-kernel@vger.kernel.org>,
Thomas Gleixner <tglx@linutronix.de>,
Steven Rostedt <rostedt@goodmis.org>,
"Vinod, Chegu" <chegu_vinod@hp.com>,
"Low, Jason" <jason.low2@hp.com>,
linux-tip-commits@vger.kernel.org,
Peter Zijlstra <a.p.zijlstra@chello.nl>,
"H. Peter Anvin" <hpa@zytor.com>,
Andrew Morton <akpm@linux-foundation.org>,
aquini@redhat.com, Michel Lespinasse <walken@google.com>,
Ingo Molnar <mingo@kernel.org>,
Larry Woodman <lwoodman@redhat.com>
Subject: Re: [tip:core/locking] x86/smp: Move waiting on contended ticket lock out of line
Date: Thu, 28 Feb 2013 16:14:03 -0500 [thread overview]
Message-ID: <512FC89B.6030507@redhat.com> (raw)
In-Reply-To: <CA+55aFwEshukBGU5YCtKL=7c-43guMuGSHTnXo02rLxZWGQWTw@mail.gmail.com>
[-- Attachment #1: Type: text/plain, Size: 1124 bytes --]
On 02/28/2013 03:26 PM, Linus Torvalds wrote:
> On Thu, Feb 28, 2013 at 10:22 AM, Linus Torvalds
> <torvalds@linux-foundation.org> wrote:
>>
>> I'm sure there are other things we could do to improve ipc lock times
>> even if we don't actually split the lock, but the security one might
>> be a good first step.
>
> Btw, if somebody has a benchmark for threads using multiple ipc
> semaphores (from the same semget() allocation) concurrently, and we
> could have a simple way to see the contention without having to run
> some big DB thing, that would also be nice. Maybe there is something
> out there already? Google didn't find any, and the normal benchmarks
> I'm aware of all just do one single (private) ipc semaphore per
> process.
>
> Nothing gets some people going like just having a nice benchmark to
> show the effect.
I have modified one of the semop tests to use multiple semaphores.
To run the test, specify the number of threads. If you want the
number of semaphores to be different from the number of threads,
specify it as a second command-line argument.
$ ./semop-multi
usage: ./semop-multi <threads> [nsems]
[-- Attachment #2: semop-multi.c --]
[-- Type: text/x-csrc, Size: 4046 bytes --]
#define _GNU_SOURCE
#include <sched.h>
#include <pthread.h>
#include <unistd.h>
#include <stdlib.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <string.h>
#include <malloc.h>
#include <sys/ipc.h>
#include <sys/msg.h>
#include <sys/sem.h>
#define TEST_TIME 30		/* benchmark duration in seconds */
#define SEMMNI 128		/* cap on semaphores per set (mirrors kernel SEMMNI) */

int semid;			/* System V semaphore set id from semget() */
/*
 * Run/stop flag: workers loop while this is nonzero; main() clears it
 * after TEST_TIME seconds.  Declared volatile so the per-iteration read
 * in worker() cannot legally be hoisted out of the loop — this is a
 * best-effort benchmark stop flag, not precise synchronization.
 */
volatile int state = 1;
unsigned long *results_array;	/* per-thread operation counts, indexed by thread id */
int threads_starting;		/* startup rendezvous countdown, protected by thread_lock */
pthread_cond_t thread_parent;	/* signaled when the last worker is ready */
pthread_cond_t thread_worker;	/* broadcast by main() to release all workers */
pthread_mutex_t thread_lock;	/* protects the rendezvous state above */
int nsems;			/* number of semaphores in the set */

/* Callers of semctl() must define union semun themselves (see semctl(2)) */
union semun {
	int val;
	struct semid_ds *buf;
	unsigned short int *array;
	struct seminfo *__buf;
	void *__pad;
};
/*
 * Benchmark worker thread.  After a startup rendezvous with main(),
 * repeatedly raises and lowers a semaphore in the shared set until
 * main() clears the global "state" flag, then records how many semop()
 * calls it completed in results_array[id].
 *
 * arg carries the thread's integer id, smuggled through the void *.
 */
void *
worker(void *arg)
{
	int id = (int)(unsigned long)arg;
	unsigned long ops = 0;
	struct sembuf op;

	op.sem_num = 0;
	op.sem_flg = 0;

	/* Rendezvous: report ready, then wait for main() to release us. */
	pthread_mutex_lock(&thread_lock);
	if (--threads_starting == 0)
		pthread_cond_signal(&thread_parent);
	pthread_cond_wait(&thread_worker, &thread_lock);
	pthread_mutex_unlock(&thread_lock);

	while (state) {
		/* Step "id" semaphores ahead so threads spread across the set */
		op.sem_num = (op.sem_num + id) % nsems;

		/* Raise the semaphore */
		op.sem_op = 1;
		if (semop(semid, &op, 1) < 0) {
			perror("semop");
			exit(1);
		}

		/* Lower it back down */
		op.sem_op = -1;
		if (semop(semid, &op, 1) < 0) {
			perror("semop");
			exit(1);
		}

		ops += 2;
	}

	results_array[id] = ops;

	return NULL;
}
/*
 * Set up a System V semaphore set and a pool of CPU-pinned worker
 * threads, run them for TEST_TIME seconds, then report the total
 * number of semop() operations and the ops/sec rate.
 *
 * usage: semop-multi <threads> [nsems]
 *   threads == 0 means one thread per online CPU;
 *   nsems defaults to the thread count and is clamped to [1, SEMMNI].
 */
int
main(int argc, char **argv)
{
	pthread_t *thread_array;
	pthread_attr_t thread_attr;
	int thread_count;
	unsigned short seminit[SEMMNI];
	union semun sem_un;
	cpu_set_t cpu;
	unsigned long total = 0;
	int i, ret;
	long cpus;

	cpus = sysconf(_SC_NPROCESSORS_ONLN);
	/* sysconf() can fail (-1); fall back to a single CPU */
	if (cpus < 1)
		cpus = 1;

	if (argc < 2) {
		printf("usage: %s <threads> [nsems]\n", argv[0]);
		exit(1);
	}

	thread_count = atoi(argv[1]);

	if (thread_count < 0) {
		printf("threads must be >= 0\n");
		exit(1);
	}

	/* 0 threads means "one per online CPU" */
	if (thread_count == 0)
		thread_count = cpus;

	if (argc > 2)
		nsems = atoi(argv[2]);
	else
		nsems = thread_count;
	if (nsems > SEMMNI)
		nsems = SEMMNI;
	/* worker() computes "% nsems"; zero or negative would be UB */
	if (nsems < 1)
		nsems = 1;

	printf("cpus %ld, threads: %d, semaphores: %d, test duration: %d secs\n", cpus, thread_count, nsems, TEST_TIME);

	thread_array = malloc(thread_count * sizeof(pthread_t));

	if (!thread_array) {
		perror("malloc(thread_array)");
		exit(1);
	}

	results_array = malloc(thread_count * sizeof(unsigned long));

	if (!results_array) {
		perror("malloc(results_array)");
		exit(1);
	}

	/* Fixed key so a crashed run can be cleaned up with ipcrm */
	semid = semget(0x12345, nsems, 0777|IPC_CREAT);

	if (semid < 0) {
		perror("semget");
		exit(1);
	}

	/* Start every semaphore well above zero so semop() never blocks */
	for (i = 0; i < SEMMNI; i++)
		seminit[i] = 200;
	sem_un.array = seminit;

	/* semnum is ignored for SETALL; pass 0 by convention */
	if (semctl(semid, 0, SETALL, sem_un) < 0) {
		perror("semctl(setall)");
		exit(1);
	}

	pthread_mutex_init(&thread_lock, NULL);
	pthread_cond_init(&thread_parent, NULL);
	pthread_cond_init(&thread_worker, NULL);
	pthread_attr_init(&thread_attr);

	threads_starting = thread_count;

	for (i = 0; i < thread_count; i++) {
		/* Pin each thread to a CPU, round-robin */
		CPU_ZERO(&cpu);
		CPU_SET(i % cpus, &cpu);

		ret = pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpu);
		if (ret) {
			printf("pthread_attr_setaffinity_np: %s\n", strerror(ret));
			exit(1);
		}

		ret = pthread_create(&thread_array[i], &thread_attr, worker, (void *)(unsigned long)i);
		if (ret) {
			printf("pthread_create: %s\n", strerror(ret));
			exit(1);
		}
	}

	pthread_attr_destroy(&thread_attr);

	/* Wait until every worker has reached the rendezvous... */
	pthread_mutex_lock(&thread_lock);
	while (threads_starting)
		pthread_cond_wait(&thread_parent, &thread_lock);
	/* ...then release them all at once */
	pthread_cond_broadcast(&thread_worker);
	pthread_mutex_unlock(&thread_lock);

	sleep(TEST_TIME);
	state = 0;

	for (i = 0; i < thread_count; i++)
		pthread_join(thread_array[i], NULL);

	pthread_cond_destroy(&thread_parent);
	pthread_cond_destroy(&thread_worker);
	pthread_mutex_destroy(&thread_lock);

	/* semnum is ignored for IPC_RMID; pass 0 by convention */
	if (semctl(semid, 0, IPC_RMID) < 0)
		perror("semctl(rmid)");

	for (i = 0; i < thread_count; i++)
		total += results_array[i];

	/* total is unsigned long: %lu, not %ld */
	printf("total operations: %lu, ops/sec %lu\n", total, total / TEST_TIME);

	free(thread_array);
	free(results_array);

	return 0;
}
next prev parent reply other threads:[~2013-02-28 21:15 UTC|newest]
Thread overview: 54+ messages / expand[flat|nested] mbox.gz Atom feed top
2013-02-06 20:03 [PATCH -v5 0/5] x86,smp: make ticket spinlock proportional backoff w/ auto tuning Rik van Riel
2013-02-06 20:04 ` [PATCH -v5 1/5] x86,smp: move waiting on contended ticket lock out of line Rik van Riel
2013-02-13 12:06 ` [tip:core/locking] x86/smp: Move " tip-bot for Rik van Riel
2013-02-13 16:20 ` Linus Torvalds
2013-02-13 18:30 ` Linus Torvalds
2013-02-14 0:54 ` H. Peter Anvin
2013-02-14 1:31 ` Linus Torvalds
2013-02-14 1:56 ` H. Peter Anvin
2013-02-14 10:50 ` Ingo Molnar
2013-02-14 16:10 ` Linus Torvalds
2013-02-15 15:57 ` Ingo Molnar
2013-02-15 6:48 ` Benjamin Herrenschmidt
2013-02-13 19:08 ` Rik van Riel
2013-02-13 19:36 ` Linus Torvalds
2013-02-13 22:21 ` Rik van Riel
2013-02-13 22:40 ` Linus Torvalds
2013-02-13 23:41 ` Rik van Riel
2013-02-14 1:21 ` Linus Torvalds
2013-02-14 1:46 ` Linus Torvalds
2013-02-14 10:43 ` Ingo Molnar
2013-02-27 16:42 ` Rik van Riel
2013-02-27 17:10 ` Linus Torvalds
2013-02-27 19:53 ` Rik van Riel
2013-02-27 20:18 ` Linus Torvalds
2013-02-27 21:55 ` Rik van Riel
[not found] ` <CA+55aFwa0EjGG2NUDYVLVBmXJa2k81YiuNO2yggk=GLRQxhhUQ@mail.gmail.com>
2013-02-28 2:58 ` Rik van Riel
2013-02-28 3:19 ` Linus Torvalds
2013-02-28 4:06 ` Davidlohr Bueso
2013-02-28 4:49 ` Linus Torvalds
2013-02-28 15:13 ` Rik van Riel
2013-02-28 18:22 ` Linus Torvalds
2013-02-28 20:26 ` Linus Torvalds
2013-02-28 21:14 ` Rik van Riel [this message]
2013-02-28 21:58 ` Linus Torvalds
2013-02-28 22:38 ` Rik van Riel
2013-02-28 23:09 ` Linus Torvalds
2013-03-01 6:42 ` Rik van Riel
2013-03-01 18:18 ` Davidlohr Bueso
2013-03-01 18:50 ` Rik van Riel
2013-03-01 18:52 ` Linus Torvalds
2013-02-06 20:04 ` [PATCH -v5 2/5] x86,smp: proportional backoff for ticket spinlocks Rik van Riel
2013-02-13 12:07 ` [tip:core/locking] x86/smp: Implement " tip-bot for Rik van Riel
2013-02-06 20:05 ` [PATCH -v5 3/5] x86,smp: auto tune spinlock backoff delay factor Rik van Riel
2013-02-13 12:08 ` [tip:core/locking] x86/smp: Auto " tip-bot for Rik van Riel
2013-02-06 20:06 ` [PATCH -v5 4/5] x86,smp: keep spinlock delay values per hashed spinlock address Rik van Riel
2013-02-13 12:09 ` [tip:core/locking] x86/smp: Keep " tip-bot for Eric Dumazet
2013-02-06 20:07 ` [PATCH -v5 5/5] x86,smp: limit spinlock delay on virtual machines Rik van Riel
2013-02-07 11:11 ` Ingo Molnar
2013-02-07 21:24 ` [PATCH fix " Rik van Riel
2013-02-13 12:10 ` [tip:core/locking] x86/smp: Limit " tip-bot for Rik van Riel
2013-02-07 11:25 ` [PATCH -v5 5/5] x86,smp: limit " Stefano Stabellini
2013-02-07 11:59 ` Raghavendra K T
2013-02-07 13:28 ` Rik van Riel
2013-02-06 20:08 ` [PATCH -v5 6/5] x86,smp: add debugging code to track spinlock delay value Rik van Riel
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=512FC89B.6030507@redhat.com \
--to=riel@redhat.com \
--cc=a.p.zijlstra@chello.nl \
--cc=akpm@linux-foundation.org \
--cc=aquini@redhat.com \
--cc=chegu_vinod@hp.com \
--cc=davidlohr.bueso@hp.com \
--cc=hpa@zytor.com \
--cc=jason.low2@hp.com \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-tip-commits@vger.kernel.org \
--cc=lwoodman@redhat.com \
--cc=mingo@kernel.org \
--cc=rostedt@goodmis.org \
--cc=tglx@linutronix.de \
--cc=torvalds@linux-foundation.org \
--cc=walken@google.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).