From: Viresh Kumar <viresh.kumar@linaro.org>
To: "Rafael J. Wysocki" <rjw@rjwysocki.net>
Cc: Linux PM list <linux-pm@vger.kernel.org>,
Juri Lelli <juri.lelli@arm.com>,
Steve Muckle <steve.muckle@linaro.org>,
ACPI Devel Mailing List <linux-acpi@vger.kernel.org>,
Linux Kernel Mailing List <linux-kernel@vger.kernel.org>,
Peter Zijlstra <peterz@infradead.org>,
Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>,
Vincent Guittot <vincent.guittot@linaro.org>,
Michael Turquette <mturquette@baylibre.com>,
Ingo Molnar <mingo@kernel.org>
Subject: Re: [PATCH v6 6/7][Resend] cpufreq: Support for fast frequency switching
Date: Mon, 28 Mar 2016 11:57:51 +0530 [thread overview]
Message-ID: <20160328062751.GI32495@vireshk-i7> (raw)
In-Reply-To: <25154681.B5BGJ94JlQ@vostro.rjw.lan>
Sorry for jumping in late, I was busy with other stuff and travel :(
On 22-03-16, 02:53, Rafael J. Wysocki wrote:
> Index: linux-pm/drivers/cpufreq/acpi-cpufreq.c
> ===================================================================
> --- linux-pm.orig/drivers/cpufreq/acpi-cpufreq.c
> +++ linux-pm/drivers/cpufreq/acpi-cpufreq.c
> @@ -458,6 +458,43 @@ static int acpi_cpufreq_target(struct cp
> return result;
> }
>
> +unsigned int acpi_cpufreq_fast_switch(struct cpufreq_policy *policy,
> + unsigned int target_freq)
> +{
> + struct acpi_cpufreq_data *data = policy->driver_data;
> + struct acpi_processor_performance *perf;
> + struct cpufreq_frequency_table *entry;
> + unsigned int next_perf_state, next_freq, freq;
> +
> + /*
> + * Find the closest frequency above target_freq.
> + *
> + * The table is sorted in the reverse order with respect to the
> + * frequency and all of the entries are valid (see the initialization).
> + */
> + entry = data->freq_table;
> + do {
> + entry++;
> + freq = entry->frequency;
> + } while (freq >= target_freq && freq != CPUFREQ_TABLE_END);
> + entry--;
> + next_freq = entry->frequency;
> + next_perf_state = entry->driver_data;
> +
> + perf = to_perf_data(data);
> + if (perf->state == next_perf_state) {
> + if (unlikely(data->resume))
> + data->resume = 0;
> + else
> + return next_freq;
> + }
> +
> + data->cpu_freq_write(&perf->control_register,
> + perf->states[next_perf_state].control);
> + perf->state = next_perf_state;
> + return next_freq;
> +}
> +
> static unsigned long
> acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
> {
> @@ -740,6 +777,9 @@ static int acpi_cpufreq_cpu_init(struct
> goto err_unreg;
> }
>
> + policy->fast_switch_possible = !acpi_pstate_strict &&
> + !(policy_is_shared(policy) && policy->shared_type != CPUFREQ_SHARED_TYPE_ANY);
> +
> data->freq_table = kzalloc(sizeof(*data->freq_table) *
> (perf->state_count+1), GFP_KERNEL);
> if (!data->freq_table) {
> @@ -874,6 +914,7 @@ static struct freq_attr *acpi_cpufreq_at
> static struct cpufreq_driver acpi_cpufreq_driver = {
> .verify = cpufreq_generic_frequency_table_verify,
> .target_index = acpi_cpufreq_target,
> + .fast_switch = acpi_cpufreq_fast_switch,
> .bios_limit = acpi_processor_get_bios_limit,
> .init = acpi_cpufreq_cpu_init,
> .exit = acpi_cpufreq_cpu_exit,
> Index: linux-pm/include/linux/cpufreq.h
> ===================================================================
> --- linux-pm.orig/include/linux/cpufreq.h
> +++ linux-pm/include/linux/cpufreq.h
> @@ -102,6 +102,10 @@ struct cpufreq_policy {
> */
> struct rw_semaphore rwsem;
>
> + /* Fast switch flags */
> + bool fast_switch_possible; /* Set by the driver. */
> + bool fast_switch_enabled;
> +
> /* Synchronization for frequency transitions */
> bool transition_ongoing; /* Tracks transition status */
> spinlock_t transition_lock;
> @@ -156,6 +160,7 @@ int cpufreq_get_policy(struct cpufreq_po
> int cpufreq_update_policy(unsigned int cpu);
> bool have_governor_per_policy(void);
> struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy);
> +void cpufreq_enable_fast_switch(struct cpufreq_policy *policy);
> #else
> static inline unsigned int cpufreq_get(unsigned int cpu)
> {
> @@ -236,6 +241,8 @@ struct cpufreq_driver {
> unsigned int relation); /* Deprecated */
> int (*target_index)(struct cpufreq_policy *policy,
> unsigned int index);
> + unsigned int (*fast_switch)(struct cpufreq_policy *policy,
> + unsigned int target_freq);
> /*
> * Only for drivers with target_index() and CPUFREQ_ASYNC_NOTIFICATION
> * unset.
> @@ -464,6 +471,8 @@ struct cpufreq_governor {
> };
>
> /* Pass a target to the cpufreq driver */
> +unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
> + unsigned int target_freq);
> int cpufreq_driver_target(struct cpufreq_policy *policy,
> unsigned int target_freq,
> unsigned int relation);
> Index: linux-pm/drivers/cpufreq/cpufreq.c
> ===================================================================
> --- linux-pm.orig/drivers/cpufreq/cpufreq.c
> +++ linux-pm/drivers/cpufreq/cpufreq.c
> @@ -428,6 +428,57 @@ void cpufreq_freq_transition_end(struct
> }
> EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
>
> +/*
> + * Fast frequency switching status count. Positive means "enabled", negative
> + * means "disabled" and 0 means "not decided yet".
> + */
> +static int cpufreq_fast_switch_count;
> +static DEFINE_MUTEX(cpufreq_fast_switch_lock);
> +
> +static void cpufreq_list_transition_notifiers(void)
> +{
> + struct notifier_block *nb;
> +
> + pr_info("cpufreq: Registered transition notifiers:\n");
> +
> + mutex_lock(&cpufreq_transition_notifier_list.mutex);
> +
> + for (nb = cpufreq_transition_notifier_list.head; nb; nb = nb->next)
> + pr_info("cpufreq: %pF\n", nb->notifier_call);
> +
> + mutex_unlock(&cpufreq_transition_notifier_list.mutex);
This will get printed as:
cpufreq: cpufreq: Registered transition notifiers:
cpufreq: cpufreq: <func>+0x0/0x<address>
cpufreq: cpufreq: <func>+0x0/0x<address>
cpufreq: cpufreq: <func>+0x0/0x<address>
Maybe we want something like:
cpufreq: Registered transition notifiers:
cpufreq: <func>+0x0/0x<address>
cpufreq: <func>+0x0/0x<address>
cpufreq: <func>+0x0/0x<address>
?
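If pr_fmt() in cpufreq.c already prepends the subsystem name (which is
what the duplicated prefix above suggests), a minimal fix could be to
just drop the literal "cpufreq: " from the format strings, e.g.:

	static void cpufreq_list_transition_notifiers(void)
	{
		struct notifier_block *nb;

		/* pr_fmt() is assumed to add the "cpufreq: " prefix already. */
		pr_info("Registered transition notifiers:\n");

		mutex_lock(&cpufreq_transition_notifier_list.mutex);

		for (nb = cpufreq_transition_notifier_list.head; nb; nb = nb->next)
			pr_info("%pF\n", nb->notifier_call);

		mutex_unlock(&cpufreq_transition_notifier_list.mutex);
	}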
> +}
> +
> +/**
> + * cpufreq_enable_fast_switch - Enable fast frequency switching for policy.
> + * @policy: cpufreq policy to enable fast frequency switching for.
> + *
> + * Try to enable fast frequency switching for @policy.
> + *
> + * The attempt will fail if there is at least one transition notifier registered
> + * at this point, as fast frequency switching is quite fundamentally at odds
> + * with transition notifiers. Thus if successful, it will make registration of
> + * transition notifiers fail going forward.
> + */
> +void cpufreq_enable_fast_switch(struct cpufreq_policy *policy)
> +{
> + lockdep_assert_held(&policy->rwsem);
> +
> + if (!policy->fast_switch_possible)
> + return;
> +
> + mutex_lock(&cpufreq_fast_switch_lock);
> + if (cpufreq_fast_switch_count >= 0) {
> + cpufreq_fast_switch_count++;
> + policy->fast_switch_enabled = true;
> + } else {
> + pr_warn("cpufreq: CPU%u: Fast frequency switching not enabled\n",
> + policy->cpu);
> + cpufreq_list_transition_notifiers();
> + }
> + mutex_unlock(&cpufreq_fast_switch_lock);
> +}
> +EXPORT_SYMBOL_GPL(cpufreq_enable_fast_switch);
And why don't we have support for disabling fast switching? What if we
switch to the schedutil governor (from userspace) and then back to
ondemand? We don't call policy->exit in that case.
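Something along these lines could perhaps be called on such a governor
change, so that transition notifiers can be registered again afterwards
(just a sketch, the helper name is illustrative and not part of this
patch):

	void cpufreq_disable_fast_switch(struct cpufreq_policy *policy)
	{
		mutex_lock(&cpufreq_fast_switch_lock);
		if (policy->fast_switch_enabled) {
			/* Undo what cpufreq_enable_fast_switch() did. */
			policy->fast_switch_enabled = false;
			if (!WARN_ON(cpufreq_fast_switch_count <= 0))
				cpufreq_fast_switch_count--;
		}
		mutex_unlock(&cpufreq_fast_switch_lock);
	}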
> /*********************************************************************
> * SYSFS INTERFACE *
> @@ -1083,6 +1134,24 @@ static void cpufreq_policy_free(struct c
> kfree(policy);
> }
>
> +static void cpufreq_driver_exit_policy(struct cpufreq_policy *policy)
> +{
> + if (policy->fast_switch_enabled) {
Shouldn't this be accessed from within the lock as well?
> + mutex_lock(&cpufreq_fast_switch_lock);
> +
> + policy->fast_switch_enabled = false;
> + if (!WARN_ON(cpufreq_fast_switch_count <= 0))
> + cpufreq_fast_switch_count--;
Shouldn't we make it more efficient and write it as:
WARN_ON(cpufreq_fast_switch_count <= 0);
policy->fast_switch_enabled = false;
cpufreq_fast_switch_count--;
The WARN condition will hold true only if there is a major bug somewhere
in the core, and we should *never* hit it.
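IOW, together with the locking comment above, the helper could look
roughly like this (just a sketch):

	static void cpufreq_driver_exit_policy(struct cpufreq_policy *policy)
	{
		mutex_lock(&cpufreq_fast_switch_lock);
		if (policy->fast_switch_enabled) {
			/* Going wrong here would mean a serious core bug. */
			WARN_ON(cpufreq_fast_switch_count <= 0);
			policy->fast_switch_enabled = false;
			cpufreq_fast_switch_count--;
		}
		mutex_unlock(&cpufreq_fast_switch_lock);

		if (cpufreq_driver->exit) {
			cpufreq_driver->exit(policy);
			policy->freq_table = NULL;
		}
	}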
> + mutex_unlock(&cpufreq_fast_switch_lock);
> + }
> +
> + if (cpufreq_driver->exit) {
> + cpufreq_driver->exit(policy);
> + policy->freq_table = NULL;
> + }
> +}
> +
> static int cpufreq_online(unsigned int cpu)
> {
> struct cpufreq_policy *policy;
> @@ -1236,8 +1305,7 @@ static int cpufreq_online(unsigned int c
> out_exit_policy:
> up_write(&policy->rwsem);
>
> - if (cpufreq_driver->exit)
> - cpufreq_driver->exit(policy);
> + cpufreq_driver_exit_policy(policy);
> out_free_policy:
> cpufreq_policy_free(policy, !new_policy);
> return ret;
> @@ -1334,10 +1402,7 @@ static void cpufreq_offline(unsigned int
> * since this is a core component, and is essential for the
> * subsequent light-weight ->init() to succeed.
> */
> - if (cpufreq_driver->exit) {
> - cpufreq_driver->exit(policy);
> - policy->freq_table = NULL;
> - }
> + cpufreq_driver_exit_policy(policy);
>
> unlock:
> up_write(&policy->rwsem);
> @@ -1452,8 +1517,12 @@ static unsigned int __cpufreq_get(struct
>
> ret_freq = cpufreq_driver->get(policy->cpu);
>
> - /* Updating inactive policies is invalid, so avoid doing that. */
> - if (unlikely(policy_is_inactive(policy)))
> + /*
> + * Updating inactive policies is invalid, so avoid doing that. Also
> + * if fast frequency switching is used with the given policy, the check
> + * against policy->cur is pointless, so skip it in that case too.
> + */
> + if (unlikely(policy_is_inactive(policy)) || policy->fast_switch_enabled)
> return ret_freq;
>
> if (ret_freq && policy->cur &&
> @@ -1465,7 +1534,6 @@ static unsigned int __cpufreq_get(struct
> schedule_work(&policy->update);
> }
> }
> -
Unrelated change? And to me it looks better with the blank line.
> return ret_freq;
> }
>
> @@ -1672,8 +1740,18 @@ int cpufreq_register_notifier(struct not
>
> switch (list) {
> case CPUFREQ_TRANSITION_NOTIFIER:
> + mutex_lock(&cpufreq_fast_switch_lock);
> +
> + if (cpufreq_fast_switch_count > 0) {
> + mutex_unlock(&cpufreq_fast_switch_lock);
> + return -EBUSY;
> + }
> ret = srcu_notifier_chain_register(
> &cpufreq_transition_notifier_list, nb);
> + if (!ret)
> + cpufreq_fast_switch_count--;
> +
> + mutex_unlock(&cpufreq_fast_switch_lock);
> break;
> case CPUFREQ_POLICY_NOTIFIER:
> ret = blocking_notifier_chain_register(
> @@ -1706,8 +1784,14 @@ int cpufreq_unregister_notifier(struct n
>
> switch (list) {
> case CPUFREQ_TRANSITION_NOTIFIER:
> + mutex_lock(&cpufreq_fast_switch_lock);
> +
> ret = srcu_notifier_chain_unregister(
> &cpufreq_transition_notifier_list, nb);
> + if (!ret && !WARN_ON(cpufreq_fast_switch_count >= 0))
> + cpufreq_fast_switch_count++;
Again here, why shouldn't we write it as:
WARN_ON(cpufreq_fast_switch_count >= 0);
if (!ret)
cpufreq_fast_switch_count++;
> +
> + mutex_unlock(&cpufreq_fast_switch_lock);
> break;
> case CPUFREQ_POLICY_NOTIFIER:
> ret = blocking_notifier_chain_unregister(
> @@ -1726,6 +1810,34 @@ EXPORT_SYMBOL(cpufreq_unregister_notifie
> * GOVERNORS *
> *********************************************************************/
>
> +/**
> + * cpufreq_driver_fast_switch - Carry out a fast CPU frequency switch.
> + * @policy: cpufreq policy to switch the frequency for.
> + * @target_freq: New frequency to set (may be approximate).
> + *
> + * Carry out a fast frequency switch from interrupt context.
> + *
> + * The driver's ->fast_switch() callback invoked by this function is expected to
> + * select the minimum available frequency greater than or equal to @target_freq
> + * (CPUFREQ_RELATION_L).
> + *
> + * This function must not be called if policy->fast_switch_enabled is unset.
> + *
> + * Governors calling this function must guarantee that it will never be invoked
> + * twice in parallel for the same policy and that it will never be called in
> + * parallel with either ->target() or ->target_index() for the same policy.
> + *
> + * If CPUFREQ_ENTRY_INVALID is returned by the driver's ->fast_switch()
> + * callback to indicate an error condition, the hardware configuration must be
> + * preserved.
> + */
> +unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
> + unsigned int target_freq)
> +{
> + return cpufreq_driver->fast_switch(policy, target_freq);
> +}
> +EXPORT_SYMBOL_GPL(cpufreq_driver_fast_switch);
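FWIW, just to illustrate the contract described in the kerneldoc, a
governor-side caller might look roughly like below (only a sketch with
made-up names; the governor is assumed to have checked
policy->fast_switch_enabled and to serialize calls per policy):

	static void example_fast_switch(struct cpufreq_policy *policy,
					unsigned int next_freq)
	{
		unsigned int freq;

		/* The driver picks the lowest available frequency >= next_freq. */
		freq = cpufreq_driver_fast_switch(policy, next_freq);
		if (freq == CPUFREQ_ENTRY_INVALID)
			return;	/* Error: hardware configuration unchanged. */

		policy->cur = freq;
	}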
> +
> /* Must set freqs->new to intermediate frequency */
> static int __target_intermediate(struct cpufreq_policy *policy,
> struct cpufreq_freqs *freqs, int index)
--
viresh