* [GIT pull] (hr)timer updates feed from Andrew
@ 2008-04-19 20:51 Thomas Gleixner
2008-04-20 17:22 ` Roman Zippel
0 siblings, 1 reply; 4+ messages in thread
From: Thomas Gleixner @ 2008-04-19 20:51 UTC (permalink / raw)
To: Linus Torvalds; +Cc: LKML, Andrew Morton
Linus,
please pull the (hr)timer updates which came via Andrew from:
ssh://master.kernel.org/pub/scm/linux/kernel/git/tglx/linux-2.6-hrt.git master
NB: John's time race window patch touches ia64/x86/ppc as those are
the affected architectures of the vsyscall race.
Thanks,
tglx
---
Dave Young (1):
jiffies: add time_is_after_jiffies and others which compare with jiffies
Dimitri Sivanich (1):
hrtimer: reduce calls to hrtimer_get_softirq_time()
Glauber Costa (1):
clockevents: fix typo in tick-broadcast.c
John Stultz (1):
time: close small window for vsyscall time inconsistencies
Thomas Gleixner (1):
hrtimer: optimize the softirq time optimization
arch/ia64/kernel/time.c | 19 +++++++++---
arch/powerpc/kernel/time.c | 23 +++++++++++----
arch/x86/kernel/vsyscall_64.c | 18 +++++++++--
include/linux/clocksource.h | 10 ++++++
include/linux/jiffies.h | 16 ++++++++++
kernel/hrtimer.c | 63 ++++++++++++++++++++---------------------
kernel/time/tick-broadcast.c | 2 +-
kernel/time/timekeeping.c | 8 ++++-
8 files changed, 110 insertions(+), 49 deletions(-)
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 48e15a5..efd64b6 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -427,11 +427,22 @@ void update_vsyscall_tz(void)
{
}
-void update_vsyscall(struct timespec *wall, struct clocksource *c)
+/* update_vsyscall_lock/unlock:
+ * methods for timekeeping core to block vsyscalls during update
+ */
+void update_vsyscall_lock(unsigned long *flags)
{
- unsigned long flags;
+ write_seqlock_irqsave(&fsyscall_gtod_data.lock, *flags);
+}
- write_seqlock_irqsave(&fsyscall_gtod_data.lock, flags);
+void update_vsyscall_unlock(unsigned long *flags)
+{
+ write_sequnlock_irqrestore(&fsyscall_gtod_data.lock, *flags);
+}
+
+/* Assumes fsyscall_gtod_data.lock has been taken via update_vsyscall_lock() */
+void update_vsyscall(struct timespec *wall, struct clocksource *c)
+{
/* copy fsyscall clock data */
fsyscall_gtod_data.clk_mask = c->mask;
@@ -453,7 +464,5 @@ void update_vsyscall(struct timespec *wall, struct clocksource *c)
fsyscall_gtod_data.monotonic_time.tv_nsec -= NSEC_PER_SEC;
fsyscall_gtod_data.monotonic_time.tv_sec++;
}
-
- write_sequnlock_irqrestore(&fsyscall_gtod_data.lock, flags);
}
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 3b26fbd..c51d2f8 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -456,8 +456,6 @@ static inline void update_gtod(u64 new_tb_stamp, u64 new_stamp_xsec,
vdso_data->tb_to_xs = new_tb_to_xs;
vdso_data->wtom_clock_sec = wall_to_monotonic.tv_sec;
vdso_data->wtom_clock_nsec = wall_to_monotonic.tv_nsec;
- smp_wmb();
- ++(vdso_data->tb_update_count);
}
#ifdef CONFIG_SMP
@@ -801,6 +799,23 @@ static cycle_t timebase_read(void)
return (cycle_t)get_tb();
}
+/* update_vsyscall_lock/unlock:
+ * methods for timekeeping core to block vsyscalls during update
+ */
+void update_vsyscall_lock(unsigned long *flags)
+{
+ /* Make userspace gettimeofday spin until we're done. */
+ ++vdso_data->tb_update_count;
+ smp_mb();
+}
+
+void update_vsyscall_unlock(unsigned long *flags)
+{
+ smp_wmb();
+ ++(vdso_data->tb_update_count);
+}
+
+/* Assumes update_vsyscall_lock() has been called */
void update_vsyscall(struct timespec *wall_time, struct clocksource *clock)
{
u64 t2x, stamp_xsec;
@@ -808,10 +823,6 @@ void update_vsyscall(struct timespec *wall_time, struct clocksource *clock)
if (clock != &clocksource_timebase)
return;
- /* Make userspace gettimeofday spin until we're done. */
- ++vdso_data->tb_update_count;
- smp_mb();
-
/* XXX this assumes clock->shift == 22 */
/* 4611686018 ~= 2^(20+64-22) / 1e9 */
t2x = (u64) clock->mult * 4611686018ULL;
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index edff4c9..8a2eb77 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -69,11 +69,22 @@ void update_vsyscall_tz(void)
write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
}
-void update_vsyscall(struct timespec *wall_time, struct clocksource *clock)
+/* update_vsyscall_lock/unlock:
+ * methods for timekeeping core to block vsyscalls during update
+ */
+void update_vsyscall_lock(unsigned long *flags)
{
- unsigned long flags;
+ write_seqlock_irqsave(&vsyscall_gtod_data.lock, *flags);
+}
- write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
+void update_vsyscall_unlock(unsigned long *flags)
+{
+ write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, *flags);
+}
+
+/* Assumes vsyscall_gtod_data.lock has been taken via update_vsyscall_lock() */
+void update_vsyscall(struct timespec *wall_time, struct clocksource *clock)
+{
/* copy vsyscall data */
vsyscall_gtod_data.clock.vread = clock->vread;
vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
@@ -83,7 +94,6 @@ void update_vsyscall(struct timespec *wall_time, struct clocksource *clock)
vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec;
vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec;
vsyscall_gtod_data.wall_to_monotonic = wall_to_monotonic;
- write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
}
/* RED-PEN may want to readd seq locking, but then the variable should be
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 3509447..3677ef7 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -222,9 +222,19 @@ extern void clocksource_change_rating(struct clocksource *cs, int rating);
extern void clocksource_resume(void);
#ifdef CONFIG_GENERIC_TIME_VSYSCALL
+void update_vsyscall_lock(unsigned long *flags);
+void update_vsyscall_unlock(unsigned long *flags);
extern void update_vsyscall(struct timespec *ts, struct clocksource *c);
extern void update_vsyscall_tz(void);
#else
+static inline void update_vsyscall_lock(unsigned long *flags)
+{
+}
+
+static inline void update_vsyscall_unlock(unsigned long *flags)
+{
+}
+
static inline void update_vsyscall(struct timespec *ts, struct clocksource *c)
{
}
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index e0b5b68..e377e34 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -135,6 +135,22 @@ static inline u64 get_jiffies_64(void)
#define time_before_eq64(a,b) time_after_eq64(b,a)
/*
+ * These four macros compare jiffies and 'a' for convenience.
+ */
+
+/* time_is_before_jiffies(a) return true if a is before jiffies */
+#define time_is_before_jiffies(a) time_after(jiffies, a)
+
+/* time_is_after_jiffies(a) return true if a is after jiffies */
+#define time_is_after_jiffies(a) time_before(jiffies, a)
+
+/* time_is_before_eq_jiffies(a) return true if a is before or equal to jiffies*/
+#define time_is_before_eq_jiffies(a) time_after_eq(jiffies, a)
+
+/* time_is_after_eq_jiffies(a) return true if a is after or equal to jiffies*/
+#define time_is_after_eq_jiffies(a) time_before_eq(jiffies, a)
+
+/*
* Have the 32 bit jiffies value wrap 5 minutes after boot
* so jiffies wrap bugs show up earlier.
*/
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index c642ef7..f78777a 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -1238,51 +1238,50 @@ void hrtimer_run_pending(void)
/*
* Called from hardirq context every jiffy
*/
-static inline void run_hrtimer_queue(struct hrtimer_cpu_base *cpu_base,
- int index)
+void hrtimer_run_queues(void)
{
struct rb_node *node;
- struct hrtimer_clock_base *base = &cpu_base->clock_base[index];
+ struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
+ struct hrtimer_clock_base *base;
+ int index, gettime = 1;
- if (!base->first)
+ if (hrtimer_hres_active())
return;
- if (base->get_softirq_time)
- base->softirq_time = base->get_softirq_time();
-
- spin_lock(&cpu_base->lock);
+ for (index = 0; index < HRTIMER_MAX_CLOCK_BASES; index++) {
+ base = &cpu_base->clock_base[index];
- while ((node = base->first)) {
- struct hrtimer *timer;
-
- timer = rb_entry(node, struct hrtimer, node);
- if (base->softirq_time.tv64 <= timer->expires.tv64)
- break;
-
- if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) {
- __remove_hrtimer(timer, base, HRTIMER_STATE_PENDING, 0);
- list_add_tail(&timer->cb_entry,
- &base->cpu_base->cb_pending);
+ if (!base->first)
continue;
+
+ if (base->get_softirq_time)
+ base->softirq_time = base->get_softirq_time();
+ else if (gettime) {
+ hrtimer_get_softirq_time(cpu_base);
+ gettime = 0;
}
- __run_hrtimer(timer);
- }
- spin_unlock(&cpu_base->lock);
-}
+ spin_lock(&cpu_base->lock);
-void hrtimer_run_queues(void)
-{
- struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
- int i;
+ while ((node = base->first)) {
+ struct hrtimer *timer;
- if (hrtimer_hres_active())
- return;
+ timer = rb_entry(node, struct hrtimer, node);
+ if (base->softirq_time.tv64 <= timer->expires.tv64)
+ break;
- hrtimer_get_softirq_time(cpu_base);
+ if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) {
+ __remove_hrtimer(timer, base,
+ HRTIMER_STATE_PENDING, 0);
+ list_add_tail(&timer->cb_entry,
+ &base->cpu_base->cb_pending);
+ continue;
+ }
- for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
- run_hrtimer_queue(cpu_base, i);
+ __run_hrtimer(timer);
+ }
+ spin_unlock(&cpu_base->lock);
+ }
}
/*
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index fdfa0c7..57a1f02 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -262,7 +262,7 @@ out:
void tick_broadcast_on_off(unsigned long reason, int *oncpu)
{
if (!cpu_isset(*oncpu, cpu_online_map))
- printk(KERN_ERR "tick-braodcast: ignoring broadcast for "
+ printk(KERN_ERR "tick-broadcast: ignoring broadcast for "
"offline CPU #%d\n", *oncpu);
else
smp_call_function_single(*oncpu, tick_do_broadcast_on_off,
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index a3fa587..47ca292 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -129,7 +129,7 @@ EXPORT_SYMBOL(do_gettimeofday);
*/
int do_settimeofday(struct timespec *tv)
{
- unsigned long flags;
+ unsigned long flags, vflags;
time_t wtm_sec, sec = tv->tv_sec;
long wtm_nsec, nsec = tv->tv_nsec;
@@ -137,6 +137,7 @@ int do_settimeofday(struct timespec *tv)
return -EINVAL;
write_seqlock_irqsave(&xtime_lock, flags);
+ update_vsyscall_lock(&vflags);
nsec -= __get_nsec_offset();
@@ -152,6 +153,7 @@ int do_settimeofday(struct timespec *tv)
update_vsyscall(&xtime, clock);
+ update_vsyscall_unlock(&vflags);
write_sequnlock_irqrestore(&xtime_lock, flags);
/* signal hrtimers about time change */
@@ -442,12 +444,15 @@ static void clocksource_adjust(s64 offset)
*/
void update_wall_time(void)
{
+ unsigned long flags;
cycle_t offset;
/* Make sure we're fully resumed: */
if (unlikely(timekeeping_suspended))
return;
+ /* grab the vsyscall lock to block vsyscalls during update */
+ update_vsyscall_lock(&flags);
#ifdef CONFIG_GENERIC_TIME
offset = (clocksource_read(clock) - clock->cycle_last) & clock->mask;
#else
@@ -487,6 +492,7 @@ void update_wall_time(void)
/* check to see if there is a new clocksource to use */
change_clocksource();
update_vsyscall(&xtime, clock);
+ update_vsyscall_unlock(&flags);
}
/**
^ permalink raw reply related [flat|nested] 4+ messages in thread
* Re: [GIT pull] (hr)timer updates feed from Andrew
2008-04-19 20:51 [GIT pull] (hr)timer updates feed from Andrew Thomas Gleixner
@ 2008-04-20 17:22 ` Roman Zippel
2008-04-20 21:57 ` Andrew Morton
2008-04-21 6:22 ` Thomas Gleixner
0 siblings, 2 replies; 4+ messages in thread
From: Roman Zippel @ 2008-04-20 17:22 UTC (permalink / raw)
To: Thomas Gleixner; +Cc: Linus Torvalds, LKML, Andrew Morton
Hi,
On Saturday 19. April 2008, Thomas Gleixner wrote:
> John Stultz (1):
> time: close small window for vsyscall time inconsistencies
Out of curiosity: why did you merge this patch despite my objections?
Why couldn't you wait a little while longer? You knew I was waiting for more
information to analyze this properly...
(Especially as it turns out that this patch doesn't really close the window,
it only makes it smaller.)
bye, Roman
^ permalink raw reply [flat|nested] 4+ messages in thread
* Re: [GIT pull] (hr)timer updates feed from Andrew
2008-04-20 17:22 ` Roman Zippel
@ 2008-04-20 21:57 ` Andrew Morton
2008-04-21 6:22 ` Thomas Gleixner
1 sibling, 0 replies; 4+ messages in thread
From: Andrew Morton @ 2008-04-20 21:57 UTC (permalink / raw)
To: Roman Zippel; +Cc: tglx, torvalds, linux-kernel
> On Sun, 20 Apr 2008 19:22:43 +0200 Roman Zippel <zippel@linux-m68k.org> wrote:
> Hi,
>
> On Saturday 19. April 2008, Thomas Gleixner wrote:
>
> > John Stultz (1):
> > time: close small window for vsyscall time inconsistencies
>
> Out of curiosity: why did you merge this patch despite my objections?
> Why couldn't you wait a little while longer? You knew I was waiting for more
> information to analyze this properly...
> (Especially as it turns out that this patch doesn't really close the window,
> it only makes it smaller.)
>
I have a note here that it's on hold, but I think I added that note after
sending the patch to Thomas on the 18th. If there was earlier controversy
about it then I guess I forgot to make the note at the time.
It happens sometimes. But I must say that it happens most frequently with
time-management things when you are the reviewer, because the review
discussion seems to happen a loooong time after I merge the patch and
proceeds at only a few emails per week.
Not that I'm saying "Roman, please review stuff faster". You review well,
and in detail and it's welcome. But that is the cause and effect.
There are two contentious time patches:
provide-u64-version-of-jiffies_to_usecs-in-kernel-tsacctc.patch
time-close-small-window-for-vsyscall-time-inconsistencies.patch
Possibly I got the discussions confused and assumed they all
concerned provide-u64-version-of-jiffies_to_usecs-in-kernel-tsacctc.patch
^ permalink raw reply [flat|nested] 4+ messages in thread
* Re: [GIT pull] (hr)timer updates feed from Andrew
2008-04-20 17:22 ` Roman Zippel
2008-04-20 21:57 ` Andrew Morton
@ 2008-04-21 6:22 ` Thomas Gleixner
1 sibling, 0 replies; 4+ messages in thread
From: Thomas Gleixner @ 2008-04-21 6:22 UTC (permalink / raw)
To: Roman Zippel; +Cc: Linus Torvalds, LKML, Andrew Morton
On Sun, 20 Apr 2008, Roman Zippel wrote:
> On Saturday 19. April 2008, Thomas Gleixner wrote:
>
> > John Stultz (1):
> > time: close small window for vsyscall time inconsistencies
>
> Out of curiosity: why did you merge this patch despite my objections?
> Why couldn't you wait a little while longer? You knew I was waiting for more
> information to analyze this properly...
> (Especially as it turns out that this patch doesn't really close the window,
> it only makes it smaller.)
I did not follow the discussion as I was buried in other work, so I
assumed that the contention was resolved, when Andrew forwarded a
bunch of patches from -mm.
As Linus did not pull yet, I dropped it. Can we please resolve this
issue ASAP ?
Find below the experimental clock source which made this easily
reproducible. Select the new clock source and run Ingo's time-warp test
on a SMP machine.
Thanks,
tglx
---
Subject: fast-gtod-hack.patch
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sat, 29 Mar 2008 12:03:26 +0100
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
drivers/clocksource/acpi_pm.c | 71 +++++++++++++++++++++++++++++++++++++++---
include/asm-x86/vgtod.h | 1
include/linux/seqlock.h | 46 +++++++++++++++++----------
3 files changed, 96 insertions(+), 22 deletions(-)
Index: linux-2.6/drivers/clocksource/acpi_pm.c
===================================================================
--- linux-2.6.orig/drivers/clocksource/acpi_pm.c
+++ linux-2.6/drivers/clocksource/acpi_pm.c
@@ -56,14 +56,72 @@ u32 acpi_pm_read_verified(void)
return v2;
}
+#ifdef CONFIG_X86_64
+#include <asm/vgtod.h>
+
+static cycle_t __vsyscall_fn vread_pm_timer(void)
+{
+ return (cycle_t) __vsyscall_gtod_data.clocksource_data;
+}
+
+static cycle_t acpi_pm_lowres_read_slow(void)
+{
+ unsigned long vdata, now, *p = &vsyscall_gtod_data.clocksource_data;
+
+ do {
+ vdata = vsyscall_gtod_data.clocksource_data;
+ now = acpi_pm_read_verified();
+ } while (cmpxchg(p, vdata, now) != vdata);
+
+ return (cycle_t) now;
+}
+
+static cycle_t acpi_pm_lowres_read(void)
+{
+ unsigned long vdata, now, *p = &vsyscall_gtod_data.clocksource_data;
+
+ do {
+ vdata = vsyscall_gtod_data.clocksource_data;
+ now = read_pmtmr();
+ } while (cmpxchg(p, vdata, now) != vdata);
+
+ return (cycle_t) now;
+}
+
+static struct clocksource clocksource_acpi_pm_lowres = {
+ .name = "acpi_pm-lowres",
+ .rating = 180,
+ .read = acpi_pm_lowres_read,
+ .mask = (cycle_t)ACPI_PM_MASK,
+ .mult = 0, /*to be calculated*/
+ .shift = 22,
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+ .vread = vread_pm_timer,
+};
+
+static int __init register_lowres(u32 mult, int use_slow)
+{
+ if (use_slow)
+ clocksource_acpi_pm_lowres.read = acpi_pm_lowres_read_slow;
+
+ clocksource_acpi_pm_lowres.mult = mult;
+ return clocksource_register(&clocksource_acpi_pm_lowres);
+};
+
+#else
+
+static inline int register_lowres(u32 mult, int use_slow) { return 0; }
+
+#endif
+
static cycle_t acpi_pm_read_slow(void)
{
- return (cycle_t)acpi_pm_read_verified();
+ return (cycle_t) acpi_pm_read_verified();
}
static cycle_t acpi_pm_read(void)
{
- return (cycle_t)read_pmtmr();
+ return (cycle_t) read_pmtmr();
}
static struct clocksource clocksource_acpi_pm = {
@@ -74,7 +132,6 @@ static struct clocksource clocksource_ac
.mult = 0, /*to be calculated*/
.shift = 22,
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
-
};
@@ -178,7 +235,7 @@ static int verify_pmtmr_rate(void)
static int __init init_acpi_pm_clocksource(void)
{
u32 value1, value2;
- unsigned int i;
+ unsigned int i, ret;
if (!pmtmr_ioport)
return -ENODEV;
@@ -208,7 +265,11 @@ pm_good:
if (verify_pmtmr_rate() != 0)
return -ENODEV;
- return clocksource_register(&clocksource_acpi_pm);
+ ret = clocksource_register(&clocksource_acpi_pm);
+ if (!ret)
+ ret = register_lowres(clocksource_acpi_pm.mult,
+ clocksource_acpi_pm.read != acpi_pm_read);
+ return ret;
}
/* We use fs_initcall because we want the PCI fixups to have run
Index: linux-2.6/include/asm-x86/vgtod.h
===================================================================
--- linux-2.6.orig/include/asm-x86/vgtod.h
+++ linux-2.6/include/asm-x86/vgtod.h
@@ -21,6 +21,7 @@ struct vsyscall_gtod_data {
u32 shift;
} clock;
struct timespec wall_to_monotonic;
+ unsigned long clocksource_data;
};
extern struct vsyscall_gtod_data __vsyscall_gtod_data
__section_vsyscall_gtod_data;
Index: linux-2.6/include/linux/seqlock.h
===================================================================
--- linux-2.6.orig/include/linux/seqlock.h
+++ linux-2.6/include/linux/seqlock.h
@@ -85,23 +85,29 @@ static inline int write_tryseqlock(seqlo
/* Start of read calculation -- fetch last complete writer token */
static __always_inline unsigned read_seqbegin(const seqlock_t *sl)
{
- unsigned ret = sl->sequence;
+ unsigned ret;
+
+repeat:
+ ret = sl->sequence;
smp_rmb();
+ if (unlikely(ret & 1)) {
+ cpu_relax();
+ goto repeat;
+ }
+
return ret;
}
-/* Test if reader processed invalid data.
- * If initial values is odd,
- * then writer had already started when section was entered
- * If sequence value changed
- * then writer changed data while in section
- *
- * Using xor saves one conditional branch.
+/*
+ * Test if reader processed invalid data.
+ *
+ * If sequence value changed then writer changed data while in section.
*/
-static __always_inline int read_seqretry(const seqlock_t *sl, unsigned iv)
+static __always_inline int read_seqretry(const seqlock_t *sl, unsigned start)
{
smp_rmb();
- return (iv & 1) | (sl->sequence ^ iv);
+
+ return (sl->sequence != start);
}
@@ -122,20 +128,26 @@ typedef struct seqcount {
/* Start of read using pointer to a sequence counter only. */
static inline unsigned read_seqcount_begin(const seqcount_t *s)
{
- unsigned ret = s->sequence;
+ unsigned ret;
+
+repeat:
+ ret = s->sequence;
smp_rmb();
+ if (unlikely(ret & 1)) {
+ cpu_relax();
+ goto repeat;
+ }
return ret;
}
-/* Test if reader processed invalid data.
- * Equivalent to: iv is odd or sequence number has changed.
- * (iv & 1) || (*s != iv)
- * Using xor saves one conditional branch.
+/*
+ * Test if reader processed invalid data because sequence number has changed.
*/
-static inline int read_seqcount_retry(const seqcount_t *s, unsigned iv)
+static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
{
smp_rmb();
- return (iv & 1) | (s->sequence ^ iv);
+
+ return s->sequence != start;
}
^ permalink raw reply [flat|nested] 4+ messages in thread
end of thread, other threads:[~2008-04-21 6:23 UTC | newest]
Thread overview: 4+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2008-04-19 20:51 [GIT pull] (hr)timer updates feed from Andrew Thomas Gleixner
2008-04-20 17:22 ` Roman Zippel
2008-04-20 21:57 ` Andrew Morton
2008-04-21 6:22 ` Thomas Gleixner
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox