--- linux/kernel/latency.c.orig2
+++ linux/kernel/latency.c
@@ -66,6 +66,18 @@ static unsigned long notrace cycles_to_u
 	return (unsigned long) delta;
 }
 
+static cycles_t notrace robust_get_cycles(void)
+{
+	cycles_t t0 = get_cycles(), t1;
+
+	for (;;) {
+		t1 = get_cycles();
+		if (t1 - t0 < 1000)
+			return t1;
+		t0 = t1;
+	}
+}
+
 #ifdef CONFIG_LATENCY_TRACE
 
 unsigned int trace_enabled = 1;
@@ -89,7 +101,7 @@ ____trace(struct cpu_trace *tr, unsigned
 		entry = tr->trace + tr->trace_idx;
 		entry->eip = eip;
 		entry->parent_eip = parent_eip;
-		entry->timestamp = get_cycles();
+		entry->timestamp = robust_get_cycles();
 		entry->preempt_count = preempt_count();
 	}
 	tr->trace_idx++;
@@ -295,7 +307,7 @@ check_preempt_timing(struct cpu_trace *t
 		return;
 #endif
 	atomic_inc(&tr->disabled);
-	latency = cycles_to_usecs(get_cycles() - tr->preempt_timestamp);
+	latency = cycles_to_usecs(robust_get_cycles() - tr->preempt_timestamp);
 
 	if (preempt_thresh) {
 		if (latency < preempt_thresh)
@@ -337,7 +349,7 @@ check_preempt_timing(struct cpu_trace *t
 out:
 #ifdef CONFIG_LATENCY_TRACE
 	tr->trace_idx = 0;
-	tr->preempt_timestamp = get_cycles();
+	tr->preempt_timestamp = robust_get_cycles();
 #endif
 	tr->critical_start = parent_eip;
 	__trace(eip, parent_eip);
@@ -376,7 +388,7 @@ void notrace add_preempt_count(int val)
 		struct cpu_trace *tr = &__get_cpu_var(trace);
 
 		local_irq_save(flags);
-		tr->preempt_timestamp = get_cycles();
+		tr->preempt_timestamp = robust_get_cycles();
 		tr->critical_start = eip;
#ifdef CONFIG_LATENCY_TRACE
 		tr->trace_idx = 0;
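
The idea behind robust_get_cycles() above: a single get_cycles() read can
be separated from the code around it by an SMI, NMI or other interruption,
which shows up as a spurious spike in the measured latency.  Reading the
counter back-to-back and only accepting a value once two consecutive reads
are less than 1000 cycles apart means any read that was split by such an
event is simply retried; the loop converges as soon as one pair of reads
goes undisturbed.

A minimal userspace sketch of the same filter, for experimenting outside
the kernel.  The robust_rdtsc() name is hypothetical; __rdtsc() is the
GCC/Clang x86 intrinsic, and the 1000-cycle threshold is copied from the
patch rather than tuned:

	/*
	 * Back-to-back read filter, modeled on robust_get_cycles()
	 * in the patch above.  Threshold is machine-dependent.
	 */
	#include <stdint.h>
	#include <stdio.h>
	#include <x86intrin.h>	/* __rdtsc() */

	static uint64_t robust_rdtsc(void)
	{
		uint64_t t0 = __rdtsc(), t1;

		for (;;) {
			t1 = __rdtsc();
			/* Two reads this close together were not split by
			 * an SMI/interrupt; trust the later one. */
			if (t1 - t0 < 1000)
				return t1;
			/* Something stole time between the reads; retry. */
			t0 = t1;
		}
	}

	int main(void)
	{
		printf("cycles: %llu\n",
		       (unsigned long long)robust_rdtsc());
		return 0;
	}

Note the unsigned subtraction: t1 - t0 stays correct across counter
wraparound, so no extra overflow handling is needed.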