public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
From: Ingo Molnar <mingo@elte.hu>
To: Andi Kleen <andi@firstfloor.org>
Cc: Satyam Sharma <satyam.sharma@gmail.com>,
	Andrew Morton <akpm@linux-foundation.org>,
	linux-kernel@vger.kernel.org,
	Peter Zijlstra <a.p.zijlstra@chello.nl>
Subject: Re: [patch] sched_clock(): cleanups
Date: Fri, 25 May 2007 09:49:15 +0200	[thread overview]
Message-ID: <20070525074915.GA18400@elte.hu> (raw)
In-Reply-To: <20070525074316.GA16821@elte.hu>


* Ingo Molnar <mingo@elte.hu> wrote:

> updated version of the cleanup patch below, renamed r_s_c to 
> resync_freq, and changed 'f' to 'freq'.

updated one below.

	Ingo

------------------->
Subject: [patch] sched_clock(): cleanups
From: Ingo Molnar <mingo@elte.hu>

clean up sched-clock.c - mostly comment style fixes.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
---
 arch/i386/kernel/sched-clock.c |  110 +++++++++++++++++++++++++----------------
 1 file changed, 69 insertions(+), 41 deletions(-)

Index: linux-cfs-2.6.22-rc2-mm1.q/arch/i386/kernel/sched-clock.c
===================================================================
--- linux-cfs-2.6.22-rc2-mm1.q.orig/arch/i386/kernel/sched-clock.c
+++ linux-cfs-2.6.22-rc2-mm1.q/arch/i386/kernel/sched-clock.c
@@ -60,18 +60,20 @@ DEFINE_PER_CPU(struct sc_data, sc_data) 
  */
 unsigned long long native_sched_clock(void)
 {
-	unsigned long long r;
 	struct sc_data *sc = &get_cpu_var(sc_data);
+	unsigned long long r;
+	unsigned long flags;
 
 	if (sc->unstable) {
-		unsigned long flags;
 		r = (jiffies_64 - sc->sync_base) * (1000000000 / HZ);
 		r += sc->ns_base;
 		local_irq_save(flags);
-		/* last_val is used to avoid non monotonity on a
-		   stable->unstable transition. Make sure the time
-		   never goes to before the last value returned by
-		   the TSC clock */
+	/*
+	 * last_val is used to avoid non-monotonicity on a
+	 * stable->unstable transition. Make sure the time
+	 * never goes back before the last value returned by
+	 * the TSC clock.
+	 */
 		if (r <= sc->last_val)
 			r = sc->last_val + 1;
 		sc->last_val = r;
@@ -87,8 +89,10 @@ unsigned long long native_sched_clock(vo
 	return r;
 }
 
-/* We need to define a real function for sched_clock, to override the
-   weak default version */
+/*
+ * We need to define a real function for sched_clock, to override the
+ * weak default version
+ */
 #ifdef CONFIG_PARAVIRT
 unsigned long long sched_clock(void)
 {
@@ -101,9 +105,11 @@ unsigned long long sched_clock(void)
 
 static int no_sc_for_printk;
 
-/* printk clock: when it is known the sc results are very non monotonic
-   fall back to jiffies for printk. Other sched_clock users are supposed
-   to handle this. */
+/*
+ * printk clock: when it is known the sc results are very non monotonic
+ * fall back to jiffies for printk. Other sched_clock users are supposed
+ * to handle this.
+ */
 unsigned long long printk_clock(void)
 {
 	if (unlikely(no_sc_for_printk))
@@ -119,35 +125,46 @@ static void resync_sc_freq(struct sc_dat
 		sc->unstable = 1;
 		return;
 	}
-	/* Handle nesting, but when we're zero multiple calls in a row
-	   are ok too and not a bug */
+	/*
+	 * Handle nesting, but when we're at zero, multiple calls in a row
+	 * are OK too and not a bug.
+	 */
 	if (sc->unstable > 0)
 		sc->unstable--;
 	if (sc->unstable)
 		return;
-	/* RED-PEN protect with seqlock? I hope that's not needed
-	   because sched_clock callers should be able to tolerate small
-	   errors. */
+	/*
+	 * RED-PEN protect with seqlock? I hope that's not needed
+	 * because sched_clock callers should be able to tolerate small
+	 * errors.
+	 */
 	sc->ns_base = ktime_to_ns(ktime_get());
 	rdtscll(sc->sync_base);
 	sc->cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR) / newfreq;
 }
 
-static void call_r_s_f(void *arg)
+static void __resync_freq(void *arg)
 {
-	struct cpufreq_freqs *freq = arg;
-	unsigned f = freq->new;
-	if (!f)
-		f = cpufreq_get(freq->cpu);
-	if (!f)
-		f = tsc_khz;
-	resync_sc_freq(&per_cpu(sc_data, freq->cpu), f);
+	struct cpufreq_freqs *freqp = arg;
+	unsigned freq = freqp->new;
+
+	if (!freq) {
+		freq = cpufreq_get(freqp->cpu);
+		/*
+		 * Still no frequency? Then fall back to tsc_khz:
+		 */
+		if (!freq)
+			freq = tsc_khz;
+	}
+	resync_sc_freq(&per_cpu(sc_data, freqp->cpu), freq);
 }
 
-static void call_r_s_f_here(void *arg)
+static void resync_freq(void *arg)
 {
-	struct cpufreq_freqs f = { .cpu = get_cpu(), .new = 0 };
-	call_r_s_f(&f);
+	struct cpufreq_freqs f = { .new = 0 };
+
+	f.cpu = get_cpu();
+	__resync_freq(&f);
 	put_cpu();
 }
 
@@ -155,9 +172,11 @@ static int sc_freq_event(struct notifier
 			 void *data)
 {
 	struct cpufreq_freqs *freq = data;
-	int cpu = get_cpu();
-	struct sc_data *sc = &per_cpu(sc_data, cpu);
+	struct sc_data *sc;
+	int cpu;
 
+	cpu = get_cpu();
+	sc = &per_cpu(sc_data, cpu);
 	if (cpu_has(&cpu_data[cpu], X86_FEATURE_CONSTANT_TSC))
 		goto out;
 	if (freq->old == freq->new)
@@ -167,25 +186,30 @@ static int sc_freq_event(struct notifier
 	case CPUFREQ_SUSPENDCHANGE:
 		/* Mark TSC unstable during suspend/resume */
 	case CPUFREQ_PRECHANGE:
-		/* Mark TSC as unstable until cpu frequency change is done
-		   because we don't know when exactly it will change.
-		   unstable in used as a counter to guard against races
-		   between the cpu frequency notifiers and normal resyncs */
+	/*
+	 * Mark TSC as unstable until the cpu frequency change is done,
+	 * because we don't know when exactly it will change.
+	 * unstable is used as a counter to guard against races
+	 * between the cpu frequency notifiers and normal resyncs.
+	 */
 		sc->unstable++;
 		/* FALL THROUGH */
 	case CPUFREQ_RESUMECHANGE:
 	case CPUFREQ_POSTCHANGE:
-		/* Frequency change or resume is done -- update everything and
-		   mark TSC as stable again. */
+		/*
+		 * Frequency change or resume is done -- update everything
+		 * and mark TSC as stable again.
+		 */
 		if (cpu == freq->cpu)
 			resync_sc_freq(sc, freq->new);
 		else
-			smp_call_function_single(freq->cpu, call_r_s_f,
+			smp_call_function_single(freq->cpu, __resync_freq,
 						 freq, 0, 1);
 		break;
 	}
 out:
 	put_cpu();
+
 	return NOTIFY_DONE;
 }
 
@@ -197,9 +221,11 @@ static int __cpuinit
 sc_cpu_event(struct notifier_block *self, unsigned long event, void *hcpu)
 {
 	long cpu = (long)hcpu;
+
 	if (event == CPU_ONLINE) {
 		struct cpufreq_freqs f = { .cpu = cpu, .new = 0 };
-		smp_call_function_single(cpu, call_r_s_f, &f, 0, 1);
+
+		smp_call_function_single(cpu, __resync_freq, &f, 0, 1);
 	}
 	return NOTIFY_DONE;
 }
@@ -209,13 +235,15 @@ static __init int init_sched_clock(void)
 	if (unsynchronized_tsc())
 		no_sc_for_printk = 1;
 
-	/* On a race between the various events the initialization might be
-	   done multiple times, but code is tolerant to this */
+	/*
+	 * On a race between the various events the initialization might be
+	 * done multiple times, but code is tolerant to this
+	 */
 	cpufreq_register_notifier(&sc_freq_notifier,
 				CPUFREQ_TRANSITION_NOTIFIER);
 	hotcpu_notifier(sc_cpu_event, 0);
-	on_each_cpu(call_r_s_f_here, NULL, 0, 0);
+	on_each_cpu(resync_freq, NULL, 0, 0);
+
 	return 0;
 }
 core_initcall(init_sched_clock);
-

  reply	other threads:[~2007-05-25  7:49 UTC|newest]

Thread overview: 59+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2007-05-25  7:10 [patch] sched_clock(): cleanups Ingo Molnar
2007-05-25  7:22 ` Satyam Sharma
2007-05-25  7:25   ` Ingo Molnar
2007-05-25  7:26     ` Ingo Molnar
2007-05-25  7:35       ` Satyam Sharma
2007-05-25  7:39         ` Peter Zijlstra
2007-05-25  7:58           ` Andi Kleen
2007-05-25  8:02             ` Ingo Molnar
2007-05-25  8:16             ` Peter Zijlstra
2007-05-25  8:21               ` Andi Kleen
2007-05-25  7:38       ` Andi Kleen
2007-05-25  7:31   ` Andi Kleen
2007-05-25  7:39     ` Ingo Molnar
2007-05-25  7:43       ` Ingo Molnar
2007-05-25  7:49         ` Ingo Molnar [this message]
2007-05-25  7:54           ` [patch] x86_64: fix sched_clock() Ingo Molnar
2007-05-25  8:02             ` Andi Kleen
2007-05-25  8:04               ` Ingo Molnar
2007-05-25  8:20                 ` Andi Kleen
2007-05-25  8:34                   ` Ingo Molnar
2007-05-25  8:41                     ` Andi Kleen
2007-05-25  8:44                       ` Ingo Molnar
2007-05-25  8:45                         ` Andi Kleen
2007-05-25  8:55                           ` Ingo Molnar
2007-05-25  8:55                           ` Andrew Morton
2007-05-25  9:03                             ` Andi Kleen
2007-05-25  9:19                               ` Ingo Molnar
2007-05-25  9:46                                 ` Andi Kleen
2007-05-25 10:12                                   ` Ingo Molnar
2007-05-25 11:20                                     ` Andi Kleen
2007-05-25 11:26                                       ` Ingo Molnar
2007-05-25 11:31                                         ` Ingo Molnar
2007-05-25 11:46                                         ` [patch] sched_clock: fix preempt count imbalance Ingo Molnar
2007-05-25 11:50                                         ` [patch] sched_clock(): cleanups, #2 Ingo Molnar
2007-05-25 11:55                                           ` Andi Kleen
2007-05-25 12:02                                             ` Ingo Molnar
2007-05-25 12:15                                               ` Andi Kleen
2007-05-25 16:17                                                 ` Andrew Morton
2007-05-25 16:26                                                   ` Daniel Walker
2007-05-25 16:33                                                     ` Andi Kleen
2007-05-25 16:49                                               ` Linus Torvalds
2007-05-25 18:08                                                 ` Andi Kleen
2007-05-25 19:14                                                 ` Ingo Molnar
2007-05-25 19:45                                                   ` Linus Torvalds
2007-05-25 10:27                                   ` [patch] x86_64: fix sched_clock() Ingo Molnar
2007-05-25 11:05                               ` Andi Kleen
2007-05-28  3:12                                 ` Rusty Russell
2007-05-25  8:08             ` [patch] i386, numaq: enable TSCs again Ingo Molnar
2007-05-25  8:19               ` William Lee Irwin III
2007-05-25  8:22                 ` Andi Kleen
2007-05-25  8:25                   ` William Lee Irwin III
2007-05-25  8:31                 ` Ingo Molnar
2007-05-25  8:38                   ` William Lee Irwin III
2007-05-25  8:41                     ` Ingo Molnar
2007-05-25 18:16                       ` Dave Hansen
2007-05-25 18:23                         ` john stultz
2007-05-25  8:15             ` [patch] x86_64: fix sched_clock() Peter Zijlstra
2007-05-25  8:16               ` Ingo Molnar
2007-05-25  8:22           ` [patch] sched_clock(): cleanups Peter Zijlstra

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20070525074915.GA18400@elte.hu \
    --to=mingo@elte.hu \
    --cc=a.p.zijlstra@chello.nl \
    --cc=akpm@linux-foundation.org \
    --cc=andi@firstfloor.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=satyam.sharma@gmail.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox