* [PATCH v3 5/6] arm: exynos: Add MCPM call-back functions
2014-04-26 16:05 ` [PATCH v3 5/6] arm: exynos: Add MCPM call-back functions Abhilash Kesavan
@ 2014-04-28 17:44 ` Lorenzo Pieralisi
2014-04-29 3:32 ` Abhilash Kesavan
2014-04-30 14:43 ` Nicolas Pitre
` (2 subsequent siblings)
3 siblings, 1 reply; 30+ messages in thread
From: Lorenzo Pieralisi @ 2014-04-28 17:44 UTC (permalink / raw)
To: linux-arm-kernel
On Sat, Apr 26, 2014 at 05:05:47PM +0100, Abhilash Kesavan wrote:
> Add machine-dependent MCPM call-backs for Exynos5420. These are used
> to power up/down the secondary CPUs during boot, shutdown, s2r and
> switching.
>
> Signed-off-by: Thomas Abraham <thomas.ab@samsung.com>
> Signed-off-by: Inderpal Singh <inderpal.s@samsung.com>
> Signed-off-by: Andrew Bresticker <abrestic@chromium.org>
> Signed-off-by: Abhilash Kesavan <a.kesavan@samsung.com>
> ---
> arch/arm/mach-exynos/Kconfig | 8 +
> arch/arm/mach-exynos/Makefile | 2 +
> arch/arm/mach-exynos/mcpm-exynos.c | 321 +++++++++++++++++++++++++++++++++++++
> arch/arm/mach-exynos/regs-pmu.h | 3 +
> 4 files changed, 334 insertions(+)
> create mode 100644 arch/arm/mach-exynos/mcpm-exynos.c
>
> diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
> index fc8bf18..1602abc 100644
> --- a/arch/arm/mach-exynos/Kconfig
> +++ b/arch/arm/mach-exynos/Kconfig
> @@ -110,4 +110,12 @@ config SOC_EXYNOS5440
>
> endmenu
>
> +config EXYNOS5420_MCPM
> + bool "Exynos5420 Multi-Cluster PM support"
> + depends on MCPM && SOC_EXYNOS5420
> + select ARM_CCI
> + help
> + This is needed to provide CPU and cluster power management
> + on Exynos5420 implementing big.LITTLE.
> +
> endif
> diff --git a/arch/arm/mach-exynos/Makefile b/arch/arm/mach-exynos/Makefile
> index a656dbe..01bc9b9 100644
> --- a/arch/arm/mach-exynos/Makefile
> +++ b/arch/arm/mach-exynos/Makefile
> @@ -29,3 +29,5 @@ obj-$(CONFIG_ARCH_EXYNOS) += firmware.o
>
> plus_sec := $(call as-instr,.arch_extension sec,+sec)
> AFLAGS_exynos-smc.o :=-Wa,-march=armv7-a$(plus_sec)
> +
> +obj-$(CONFIG_EXYNOS5420_MCPM) += mcpm-exynos.o
> diff --git a/arch/arm/mach-exynos/mcpm-exynos.c b/arch/arm/mach-exynos/mcpm-exynos.c
> new file mode 100644
> index 0000000..f9977bf
> --- /dev/null
> +++ b/arch/arm/mach-exynos/mcpm-exynos.c
> @@ -0,0 +1,321 @@
> +/*
> + * Copyright (c) 2014 Samsung Electronics Co., Ltd.
> + * http://www.samsung.com
> + *
> + * arch/arm/mach-exynos/mcpm-exynos.c
> + *
> + * Based on arch/arm/mach-vexpress/dcscb.c
> + *
> + * This program is free software; you can redistribute it and/or modify
> + * it under the terms of the GNU General Public License version 2 as
> + * published by the Free Software Foundation.
> + */
> +
> +#include <linux/kernel.h>
> +#include <linux/io.h>
> +#include <linux/spinlock.h>
> +#include <linux/uaccess.h>
> +#include <linux/miscdevice.h>
> +#include <linux/fs.h>
> +#include <linux/delay.h>
> +#include <linux/arm-cci.h>
Alphabetical order please.
> +
> +#include <asm/mcpm.h>
> +#include <asm/cputype.h>
> +#include <asm/cp15.h>
> +
> +#include <plat/cpu.h>
> +
> +#include "regs-pmu.h"
> +#include "common.h"
> +
> +#define EXYNOS5420_CPUS_PER_CLUSTER 4
> +#define EXYNOS5420_NR_CLUSTERS 2
> +
> +/* Secondary CPU entry point */
> +#define REG_ENTRY_ADDR (S5P_VA_SYSRAM_NS + 0x1C)
> +
> +/*
> + * The common v7_exit_coherency_flush API could not be used because of the
> + * Erratum 799270 workaround. This macro is the same as the common one (in
> + * arch/arm/include/asm/cacheflush.h) except for the erratum handling.
> + */
> +#define exynos_v7_exit_coherency_flush(level) \
> + asm volatile( \
> + "stmfd sp!, {fp, ip}\n\t"\
> + "mrc p15, 0, r0, c1, c0, 0 @ get SCTLR\n\t" \
> + "bic r0, r0, #"__stringify(CR_C)"\n\t" \
> + "mcr p15, 0, r0, c1, c0, 0 @ set SCTLR\n\t" \
> + "isb\n\t"\
> + "bl v7_flush_dcache_"__stringify(level)"\n\t" \
> + "clrex\n\t"\
> + "mrc p15, 0, r0, c1, c0, 1 @ get ACTLR\n\t" \
> + "bic r0, r0, #(1 << 6) @ disable local coherency\n\t" \
> + /* Dummy Load of a device register to avoid Erratum 799270 */ \
> + "ldr r4, [%0]\n\t" \
> + "and r4, r4, #0\n\t" \
> + "orr r0, r0, r4\n\t" \
> + "mcr p15, 0, r0, c1, c0, 1 @ set ACTLR\n\t" \
> + "isb\n\t" \
> + "dsb\n\t" \
> + "ldmfd sp!, {fp, ip}" \
> + : \
> + : "Ir" (S5P_INFORM0) \
> + : "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", \
> + "r9", "r10", "lr", "memory")
> +
Still investigating to see if we can work around erratum 799270 in a
different fashion; will keep you posted.
> +/*
> + * We can't use regular spinlocks. In the switcher case, it is possible
> + * for an outbound CPU to call power_down() after its inbound counterpart
> + * is already live using the same logical CPU number which trips lockdep
> + * debugging.
> + */
> +static arch_spinlock_t exynos_mcpm_lock = __ARCH_SPIN_LOCK_UNLOCKED;
> +static int
> +cpu_use_count[EXYNOS5420_CPUS_PER_CLUSTER][EXYNOS5420_NR_CLUSTERS];
> +
> +static int exynos_cluster_power_control(unsigned int cluster, int enable)
> +{
> + unsigned int tries = 100;
> + unsigned int val = 0;
> +
> + if (enable) {
> + exynos_cluster_powerup(cluster);
> + val = S5P_CORE_LOCAL_PWR_EN;
> + } else {
> + exynos_cluster_powerdown(cluster);
> + }
> +
> + /* Wait until cluster power control is applied */
> + while (tries--) {
> + if (exynos_cluster_power_state(cluster) == val)
Do you really need val ?
> + return 0;
> +
> + cpu_relax();
> + }
> + pr_warn("timed out waiting for cluster %u to power %s\n", cluster,
> + enable ? "on" : "off");
> +
> + return -ETIMEDOUT;
> +}
> +
> +static int exynos_power_up(unsigned int cpu, unsigned int cluster)
> +{
> + unsigned int cpunr = cpu + (cluster * EXYNOS5420_CPUS_PER_CLUSTER);
> + unsigned int err;
> +
> + pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
> + if (cpu >= EXYNOS5420_CPUS_PER_CLUSTER ||
> + cluster >= EXYNOS5420_NR_CLUSTERS)
> + return -EINVAL;
> +
> + /*
> + * Since this is called with IRQs enabled, and no arch_spin_lock_irq
> + * variant exists, we need to disable IRQs manually here.
> + */
> + local_irq_disable();
> + arch_spin_lock(&exynos_mcpm_lock);
> +
> + cpu_use_count[cpu][cluster]++;
> + if (cpu_use_count[cpu][cluster] == 1) {
> + bool was_cluster_down =
> + __mcpm_cluster_state(cluster) == CLUSTER_DOWN;
> +
> + /*
> + * Turn on the cluster (L2/COMMON) and then power on the cores.
> + * TODO: Turn off the clusters when all cores in the cluster
> + * are down to achieve significant power savings.
> + */
> + if (was_cluster_down) {
> + err = exynos_cluster_power_control(cluster, 1);
> + if (err) {
> + exynos_cluster_power_control(cluster, 0);
> + return err;
You must not return without undoing what you should (irqs and locking).
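For what it's worth, one way to do that (only a rough, untested sketch) is to
make err an int initialised to 0, drop the early return, and let the error
fall through to the existing unlock/irq-enable path:

	if (was_cluster_down)
		err = exynos_cluster_power_control(cluster, 1);

	if (!err)
		exynos_cpu_powerup(cpunr);
	else
		/* undo the cluster power-up and report the failure */
		exynos_cluster_power_control(cluster, 0);

and then, at the end of exynos_power_up():

	arch_spin_unlock(&exynos_mcpm_lock);
	local_irq_enable();

	return err;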
> + }
> + }
> +
> + exynos_cpu_powerup(cpunr);
> +
> + /* CPU should be powered up there */
> + /* Also setup Cluster Power Sequence here */
Stale comment ?
> + } else if (cpu_use_count[cpu][cluster] != 2) {
> + /*
> + * The only possible values are:
> + * 0 = CPU down
> + * 1 = CPU (still) up
> + * 2 = CPU requested to be up before it had a chance
> + * to actually make itself down.
> + * Any other value is a bug.
> + */
> + BUG();
> + }
> +
> + arch_spin_unlock(&exynos_mcpm_lock);
> + local_irq_enable();
> +
> + return 0;
> +}
> +
> +static void exynos_power_down(void)
> +{
> + unsigned int mpidr, cpu, cluster;
> + bool last_man = false, skip_wfi = false;
> + unsigned int cpunr;
> +
> + mpidr = read_cpuid_mpidr();
> + cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
> + cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
> + cpunr = cpu + (cluster * EXYNOS5420_CPUS_PER_CLUSTER);
> +
> + pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
> + BUG_ON(cpu >= EXYNOS5420_CPUS_PER_CLUSTER ||
> + cluster >= EXYNOS5420_NR_CLUSTERS);
> +
> + __mcpm_cpu_going_down(cpu, cluster);
> +
> + arch_spin_lock(&exynos_mcpm_lock);
> + BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);
> + cpu_use_count[cpu][cluster]--;
> + if (cpu_use_count[cpu][cluster] == 0) {
> + int cnt = 0, i;
> +
> + exynos_cpu_powerdown(cpunr);
> + for (i = 0; i < EXYNOS5420_CPUS_PER_CLUSTER; i++)
> + cnt += cpu_use_count[i][cluster];
> + if (cnt == 0)
> + last_man = true;
for (i = 0; i < EXYNOS5420_CPUS_PER_CLUSTER; i++)
if (cpu_use_count[i][cluster])
break;
last_man = i == EXYNOS5420_CPUS_PER_CLUSTER;
and get rid of cnt.
> + } else if (cpu_use_count[cpu][cluster] == 1) {
> + /*
> + * A power_up request went ahead of us.
> + * Even if we do not want to shut this CPU down,
> + * the caller expects a certain state as if the WFI
> + * was aborted. So let's continue with cache cleaning.
> + */
> + skip_wfi = true;
> + } else {
> + BUG();
> + }
> +
> + if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
> + arch_spin_unlock(&exynos_mcpm_lock);
> +
> + /* Flush all cache levels for this cluster. */
> + exynos_v7_exit_coherency_flush(all);
> +
> + /*
> + * Disable cluster-level coherency by masking
> + * incoming snoops and DVM messages:
> + */
> + cci_disable_port_by_cpu(mpidr);
> +
> + __mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
> + } else {
> + arch_spin_unlock(&exynos_mcpm_lock);
> +
> + /* Disable and flush the local CPU cache. */
> + exynos_v7_exit_coherency_flush(louis);
> + }
> +
> + __mcpm_cpu_down(cpu, cluster);
> +
> + /* Now we are prepared for power-down, do it: */
> + if (!skip_wfi)
> + wfi();
> +
> + /* Not dead at this point? Let our caller cope. */
> +}
> +
> +static int exynos_power_down_finish(unsigned int cpu, unsigned int cluster)
> +{
> + unsigned int tries = 100;
> + unsigned int cpunr = cpu + (cluster * EXYNOS5420_CPUS_PER_CLUSTER);
> +
> + pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
> + BUG_ON(cpu >= EXYNOS5420_CPUS_PER_CLUSTER ||
> + cluster >= EXYNOS5420_NR_CLUSTERS);
> +
> + /* Wait for the core state to be OFF */
> + while (tries--) {
> + if (ACCESS_ONCE(cpu_use_count[cpu][cluster]) == 0) {
> + if ((exynos_cpu_power_state(cpunr) == 0))
> + return 0; /* success: the CPU is halted */
> + }
> +
> + /* Otherwise, wait and retry: */
> + msleep(1);
> + }
> +
> + return -ETIMEDOUT; /* timeout */
> +}
> +
> +static const struct mcpm_platform_ops exynos_power_ops = {
> + .power_up = exynos_power_up,
> + .power_down = exynos_power_down,
> + .power_down_finish = exynos_power_down_finish,
> +};
> +
> +static void __init exynos_mcpm_usage_count_init(void)
> +{
> + unsigned int mpidr, cpu, cluster;
> +
> + mpidr = read_cpuid_mpidr();
> + cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
> + cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
> +
> + pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
> + BUG_ON(cpu >= EXYNOS5420_CPUS_PER_CLUSTER ||
> + cluster >= EXYNOS5420_NR_CLUSTERS);
> +
> + cpu_use_count[cpu][cluster] = 1;
> +}
> +
> +/*
> + * Enable cluster-level coherency, in preparation for turning on the MMU.
> + */
> +static void __naked exynos_pm_power_up_setup(unsigned int affinity_level)
> +{
> + asm volatile ("\n"
> + "cmp r0, #1\n"
> + "bxne lr\n"
> + "b cci_enable_port_for_self");
> +}
How many times are we going to duplicate this function before we decide
to move it to a common header ?
> +static int __init exynos_mcpm_init(void)
> +{
> + int ret = 0;
> +
> + if (!soc_is_exynos5420())
> + return -ENODEV;
I do not particularly like these soc_is* stubs, but it is just a matter
of taste.
Thanks,
Lorenzo
* [PATCH v3 5/6] arm: exynos: Add MCPM call-back functions
2014-04-28 17:44 ` Lorenzo Pieralisi
@ 2014-04-29 3:32 ` Abhilash Kesavan
2014-04-29 18:49 ` Nicolas Pitre
0 siblings, 1 reply; 30+ messages in thread
From: Abhilash Kesavan @ 2014-04-29 3:32 UTC (permalink / raw)
To: linux-arm-kernel
Hi Lorenzo,
Thanks for the review.
[...]
>> +#include <linux/kernel.h>
>> +#include <linux/io.h>
>> +#include <linux/spinlock.h>
>> +#include <linux/uaccess.h>
>> +#include <linux/miscdevice.h>
>> +#include <linux/fs.h>
>> +#include <linux/delay.h>
>> +#include <linux/arm-cci.h>
>
> Alphabetical order please.
Will fix.
[...]
>> +#define exynos_v7_exit_coherency_flush(level) \
>> + asm volatile( \
>> + "stmfd sp!, {fp, ip}\n\t"\
>> + "mrc p15, 0, r0, c1, c0, 0 @ get SCTLR\n\t" \
>> + "bic r0, r0, #"__stringify(CR_C)"\n\t" \
>> + "mcr p15, 0, r0, c1, c0, 0 @ set SCTLR\n\t" \
>> + "isb\n\t"\
>> + "bl v7_flush_dcache_"__stringify(level)"\n\t" \
>> + "clrex\n\t"\
>> + "mrc p15, 0, r0, c1, c0, 1 @ get ACTLR\n\t" \
>> + "bic r0, r0, #(1 << 6) @ disable local coherency\n\t" \
>> + /* Dummy Load of a device register to avoid Erratum 799270 */ \
>> + "ldr r4, [%0]\n\t" \
>> + "and r4, r4, #0\n\t" \
>> + "orr r0, r0, r4\n\t" \
>> + "mcr p15, 0, r0, c1, c0, 1 @ set ACTLR\n\t" \
>> + "isb\n\t" \
>> + "dsb\n\t" \
>> + "ldmfd sp!, {fp, ip}" \
>> + : \
>> + : "Ir" (S5P_INFORM0) \
>> + : "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", \
>> + "r9", "r10", "lr", "memory")
>> +
>
> Still investigating to see if we can work around erratum 799270
> in a different fashion; will keep you posted.
Thanks.
>
>> +/*
>> + * We can't use regular spinlocks. In the switcher case, it is possible
>> + * for an outbound CPU to call power_down() after its inbound counterpart
>> + * is already live using the same logical CPU number which trips lockdep
>> + * debugging.
>> + */
>> +static arch_spinlock_t exynos_mcpm_lock = __ARCH_SPIN_LOCK_UNLOCKED;
>> +static int
>> +cpu_use_count[EXYNOS5420_CPUS_PER_CLUSTER][EXYNOS5420_NR_CLUSTERS];
>> +
>> +static int exynos_cluster_power_control(unsigned int cluster, int enable)
>> +{
>> + unsigned int tries = 100;
>> + unsigned int val = 0;
>> +
>> + if (enable) {
>> + exynos_cluster_powerup(cluster);
>> + val = S5P_CORE_LOCAL_PWR_EN;
>> + } else {
>> + exynos_cluster_powerdown(cluster);
>> + }
>> +
>> + /* Wait until cluster power control is applied */
>> + while (tries--) {
>> + if (exynos_cluster_power_state(cluster) == val)
>
> Do you really need val ?
Yes, val can be 0x0 or 0x3 depending on whether we want to enable or
disable the cluster.
>
>> + return 0;
>> +
>> + cpu_relax();
>> + }
>> + pr_warn("timed out waiting for cluster %u to power %s\n", cluster,
>> + enable ? "on" : "off");
>> +
>> + return -ETIMEDOUT;
>> +}
>> +
>> +static int exynos_power_up(unsigned int cpu, unsigned int cluster)
>> +{
>> + unsigned int cpunr = cpu + (cluster * EXYNOS5420_CPUS_PER_CLUSTER);
>> + unsigned int err;
>> +
>> + pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
>> + if (cpu >= EXYNOS5420_CPUS_PER_CLUSTER ||
>> + cluster >= EXYNOS5420_NR_CLUSTERS)
>> + return -EINVAL;
>> +
>> + /*
>> + * Since this is called with IRQs enabled, and no arch_spin_lock_irq
>> + * variant exists, we need to disable IRQs manually here.
>> + */
>> + local_irq_disable();
>> + arch_spin_lock(&exynos_mcpm_lock);
>> +
>> + cpu_use_count[cpu][cluster]++;
>> + if (cpu_use_count[cpu][cluster] == 1) {
>> + bool was_cluster_down =
>> + __mcpm_cluster_state(cluster) == CLUSTER_DOWN;
>> +
>> + /*
>> + * Turn on the cluster (L2/COMMON) and then power on the cores.
>> + * TODO: Turn off the clusters when all cores in the cluster
>> + * are down to achieve significant power savings.
>> + */
>> + if (was_cluster_down) {
>> + err = exynos_cluster_power_control(cluster, 1);
>> + if (err) {
>> + exynos_cluster_power_control(cluster, 0);
>> + return err;
>
> You must not return without undoing what you should (irqs and locking).
Will do so.
>
>> + }
>> + }
>> +
>> + exynos_cpu_powerup(cpunr);
>> +
>> + /* CPU should be powered up there */
>> + /* Also setup Cluster Power Sequence here */
>
> Stale comment ?
Will remove.
>
>> + } else if (cpu_use_count[cpu][cluster] != 2) {
>> + /*
>> + * The only possible values are:
>> + * 0 = CPU down
>> + * 1 = CPU (still) up
>> + * 2 = CPU requested to be up before it had a chance
>> + * to actually make itself down.
>> + * Any other value is a bug.
>> + */
>> + BUG();
>> + }
>> +
>> + arch_spin_unlock(&exynos_mcpm_lock);
>> + local_irq_enable();
>> +
>> + return 0;
>> +}
>> +
>> +static void exynos_power_down(void)
>> +{
>> + unsigned int mpidr, cpu, cluster;
>> + bool last_man = false, skip_wfi = false;
>> + unsigned int cpunr;
>> +
>> + mpidr = read_cpuid_mpidr();
>> + cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
>> + cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
>> + cpunr = cpu + (cluster * EXYNOS5420_CPUS_PER_CLUSTER);
>> +
>> + pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
>> + BUG_ON(cpu >= EXYNOS5420_CPUS_PER_CLUSTER ||
>> + cluster >= EXYNOS5420_NR_CLUSTERS);
>> +
>> + __mcpm_cpu_going_down(cpu, cluster);
>> +
>> + arch_spin_lock(&exynos_mcpm_lock);
>> + BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);
>> + cpu_use_count[cpu][cluster]--;
>> + if (cpu_use_count[cpu][cluster] == 0) {
>> + int cnt = 0, i;
>> +
>> + exynos_cpu_powerdown(cpunr);
>> + for (i = 0; i < EXYNOS5420_CPUS_PER_CLUSTER; i++)
>> + cnt += cpu_use_count[i][cluster];
>> + if (cnt == 0)
>> + last_man = true;
>
> for (i = 0; i < EXYNOS5420_CPUS_PER_CLUSTER; i++)
> if (cpu_use_count[i][cluster])
> break;
> last_man = i == EXYNOS5420_CPUS_PER_CLUSTER;
>
> and get rid of cnt.
OK.
>
>> + } else if (cpu_use_count[cpu][cluster] == 1) {
>> + /*
>> + * A power_up request went ahead of us.
>> + * Even if we do not want to shut this CPU down,
>> + * the caller expects a certain state as if the WFI
>> + * was aborted. So let's continue with cache cleaning.
>> + */
>> + skip_wfi = true;
>> + } else {
>> + BUG();
>> + }
>> +
>> + if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
>> + arch_spin_unlock(&exynos_mcpm_lock);
>> +
>> + /* Flush all cache levels for this cluster. */
>> + exynos_v7_exit_coherency_flush(all);
>> +
>> + /*
>> + * Disable cluster-level coherency by masking
>> + * incoming snoops and DVM messages:
>> + */
>> + cci_disable_port_by_cpu(mpidr);
>> +
>> + __mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
>> + } else {
>> + arch_spin_unlock(&exynos_mcpm_lock);
>> +
>> + /* Disable and flush the local CPU cache. */
>> + exynos_v7_exit_coherency_flush(louis);
>> + }
>> +
>> + __mcpm_cpu_down(cpu, cluster);
>> +
>> + /* Now we are prepared for power-down, do it: */
>> + if (!skip_wfi)
>> + wfi();
>> +
>> + /* Not dead at this point? Let our caller cope. */
>> +}
>> +
>> +static int exynos_power_down_finish(unsigned int cpu, unsigned int cluster)
>> +{
>> + unsigned int tries = 100;
>> + unsigned int cpunr = cpu + (cluster * EXYNOS5420_CPUS_PER_CLUSTER);
>> +
>> + pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
>> + BUG_ON(cpu >= EXYNOS5420_CPUS_PER_CLUSTER ||
>> + cluster >= EXYNOS5420_NR_CLUSTERS);
>> +
>> + /* Wait for the core state to be OFF */
>> + while (tries--) {
>> + if (ACCESS_ONCE(cpu_use_count[cpu][cluster]) == 0) {
>> + if ((exynos_cpu_power_state(cpunr) == 0))
>> + return 0; /* success: the CPU is halted */
>> + }
>> +
>> + /* Otherwise, wait and retry: */
>> + msleep(1);
>> + }
>> +
>> + return -ETIMEDOUT; /* timeout */
>> +}
>> +
>> +static const struct mcpm_platform_ops exynos_power_ops = {
>> + .power_up = exynos_power_up,
>> + .power_down = exynos_power_down,
>> + .power_down_finish = exynos_power_down_finish,
>> +};
>> +
>> +static void __init exynos_mcpm_usage_count_init(void)
>> +{
>> + unsigned int mpidr, cpu, cluster;
>> +
>> + mpidr = read_cpuid_mpidr();
>> + cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
>> + cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
>> +
>> + pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
>> + BUG_ON(cpu >= EXYNOS5420_CPUS_PER_CLUSTER ||
>> + cluster >= EXYNOS5420_NR_CLUSTERS);
>> +
>> + cpu_use_count[cpu][cluster] = 1;
>> +}
>> +
>> +/*
>> + * Enable cluster-level coherency, in preparation for turning on the MMU.
>> + */
>> +static void __naked exynos_pm_power_up_setup(unsigned int affinity_level)
>> +{
>> + asm volatile ("\n"
>> + "cmp r0, #1\n"
>> + "bxne lr\n"
>> + "b cci_enable_port_for_self");
>> +}
>
> How many times are we going to duplicate this function before we decide
> to move it to a common header ?
I see this being used in arch/arm/mach-vexpress/tc2_pm.c (where I
copied it from for exynos) and arch/arm/mach-vexpress/dcscb.c. A
common function named "mcpm_default_power_up_setup" in the mcpm header
would be acceptable ?
>
>> +static int __init exynos_mcpm_init(void)
>> +{
>> + int ret = 0;
>> +
>> + if (!soc_is_exynos5420())
>> + return -ENODEV;
>
> I do not particularly like these soc_is* stubs, but it is just a matter
> of taste.
Would you prefer an of_find_compatible_node check for exynos5420?
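Something roughly like this at the top of exynos_mcpm_init() (sketch only,
matching what I have locally for v4):

	struct device_node *node;

	node = of_find_compatible_node(NULL, NULL, "samsung,exynos5420");
	if (!node)
		return -ENODEV;
	of_node_put(node);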
Will wait a day or two for more comments and then re-post.
Regards,
Abhilash
>
> Thanks,
> Lorenzo
>
* [PATCH v3 5/6] arm: exynos: Add MCPM call-back functions
2014-04-29 3:32 ` Abhilash Kesavan
@ 2014-04-29 18:49 ` Nicolas Pitre
2014-04-30 3:01 ` Abhilash Kesavan
0 siblings, 1 reply; 30+ messages in thread
From: Nicolas Pitre @ 2014-04-29 18:49 UTC (permalink / raw)
To: linux-arm-kernel
On Tue, 29 Apr 2014, Abhilash Kesavan wrote:
> >> +/*
> >> + * Enable cluster-level coherency, in preparation for turning on the MMU.
> >> + */
> >> +static void __naked exynos_pm_power_up_setup(unsigned int affinity_level)
> >> +{
> >> + asm volatile ("\n"
> >> + "cmp r0, #1\n"
> >> + "bxne lr\n"
> >> + "b cci_enable_port_for_self");
> >> +}
> >
> > How many times are we going to duplicate this function before we decide
> > to move it to a common header ?
> I see this being used in arch/arm/mach-vexpress/tc2_pm.c (where I
> copied it from for exynos) and arch/arm/mach-vexpress/dcscb.c. A
> common function named "mcpm_default_power_up_setup" in the mcpm header
> would be acceptable ?
Not necessarily.
First of all, this can't be a static inline as we need a pointer to it.
And moving this to a header would create multiple instances of the same
function in a multiplatform build for example.
Furthermore, this can't be called "default_power_up_setup" as this is
specific to systems with a CCI.
It is true that the code in dcscb_setup.S does the same thing as this
3-line inline assembly, but the former is heavily commented and may
serve as an example template for platforms that may require something
more sophisticated here.
There are other patterns that seem to emerge as more MCPM backends are
being submitted. I'm making a list of them and I intend to address such
duplications after those backends do hit mainline.
Nicolas
* [PATCH v3 5/6] arm: exynos: Add MCPM call-back functions
2014-04-29 18:49 ` Nicolas Pitre
@ 2014-04-30 3:01 ` Abhilash Kesavan
2014-04-30 10:24 ` Dave Martin
0 siblings, 1 reply; 30+ messages in thread
From: Abhilash Kesavan @ 2014-04-30 3:01 UTC (permalink / raw)
To: linux-arm-kernel
Hi Nicolas,
On Wed, Apr 30, 2014 at 12:19 AM, Nicolas Pitre
<nicolas.pitre@linaro.org> wrote:
> On Tue, 29 Apr 2014, Abhilash Kesavan wrote:
>
>> >> +/*
>> >> + * Enable cluster-level coherency, in preparation for turning on the MMU.
>> >> + */
>> >> +static void __naked exynos_pm_power_up_setup(unsigned int affinity_level)
>> >> +{
>> >> + asm volatile ("\n"
>> >> + "cmp r0, #1\n"
>> >> + "bxne lr\n"
>> >> + "b cci_enable_port_for_self");
>> >> +}
>> >
>> > How many times are we going to duplicate this function before we decide
>> > to move it to a common header ?
>> I see this being used in arch/arm/mach-vexpress/tc2_pm.c (where I
>> copied it from for exynos) and arch/arm/mach-vexpress/dcscb.c. A
>> common function named "mcpm_default_power_up_setup" in the mcpm header
>> would be acceptable ?
>
> Not necessarily.
>
> First of all, this can't be a static inline as we need a pointer to it.
> And moving this to a header would create multiple instances of the same
> function in a multiplatform build for example.
>
> Furthermore, this can't be called "default_power_up_setup" as this is
> specific to systems with a CCI.
>
> It is true that the code in dcscb_setup.S does the same thing as this
> 3-line inline assembly, but the former is heavily commented and may
> serve as an example template for platforms that may require something
> more sophisticated here.
>
> There are other patterns that seem to emerge as more MCPM backends are
> being submitted. I'm making a list of them and I intend to address such
> duplications after those backends do hit mainline.
OK. I'll keep the power_up_setup code in the exynos mcpm backend as it is.
Are there any other comments that you have, or should I re-post a new
version that handles Lorenzo's comments?
Regards,
Abhilash
>
>
> Nicolas
* [PATCH v3 5/6] arm: exynos: Add MCPM call-back functions
2014-04-30 3:01 ` Abhilash Kesavan
@ 2014-04-30 10:24 ` Dave Martin
0 siblings, 0 replies; 30+ messages in thread
From: Dave Martin @ 2014-04-30 10:24 UTC (permalink / raw)
To: linux-arm-kernel
On Wed, Apr 30, 2014 at 04:01:24AM +0100, Abhilash Kesavan wrote:
> Hi Nicolas,
>
> On Wed, Apr 30, 2014 at 12:19 AM, Nicolas Pitre
> <nicolas.pitre@linaro.org> wrote:
> > On Tue, 29 Apr 2014, Abhilash Kesavan wrote:
> >
> >> >> +/*
> >> >> + * Enable cluster-level coherency, in preparation for turning on the MMU.
> >> >> + */
> >> >> +static void __naked exynos_pm_power_up_setup(unsigned int affinity_level)
> >> >> +{
> >> >> + asm volatile ("\n"
> >> >> + "cmp r0, #1\n"
> >> >> + "bxne lr\n"
> >> >> + "b cci_enable_port_for_self");
> >> >> +}
> >> >
> >> > How many times are we going to duplicate this function before we decide
> >> > to move it to a common header ?
> >> I see this being used in arch/arm/mach-vexpress/tc2_pm.c (where I
> >> copied it from for exynos) and arch/arm/mach-vexpress/dcscb.c. A
> >> common function named "mcpm_default_power_up_setup" in the mcpm header
> >> would be acceptable ?
> >
> > Not necessarily.
> >
> > First of all, this can't be a static inline as we need a pointer to it.
> > And moving this to a header would create multiple instances of the same
> > function in a multiplatform build for example.
> >
> > Furthermore, this can't be called "default_power_up_setup" as this is
> > specific to systems with a CCI.
On some SoCs the argument to cmp might not be #1, or the constant required
might not even be the same on all CPUs. Some SoCs might need to do other
setup too, such as invalidating a cluster-level cache that doesn't power
up into a clean state.
So, the above function is going to be common but it's not really generic.
For such a tiny function, I agree that factoring it may not be that
beneficial.
Cheers
---Dave
* [PATCH v3 5/6] arm: exynos: Add MCPM call-back functions
2014-04-26 16:05 ` [PATCH v3 5/6] arm: exynos: Add MCPM call-back functions Abhilash Kesavan
2014-04-28 17:44 ` Lorenzo Pieralisi
@ 2014-04-30 14:43 ` Nicolas Pitre
2014-05-01 9:39 ` Abhilash Kesavan
2014-04-30 14:59 ` Lorenzo Pieralisi
2014-05-02 15:25 ` [PATCH v4 5/5] " Abhilash Kesavan
3 siblings, 1 reply; 30+ messages in thread
From: Nicolas Pitre @ 2014-04-30 14:43 UTC (permalink / raw)
To: linux-arm-kernel
On Sat, 26 Apr 2014, Abhilash Kesavan wrote:
> Add machine-dependent MCPM call-backs for Exynos5420. These are used
> to power up/down the secondary CPUs during boot, shutdown, s2r and
> switching.
>
> Signed-off-by: Thomas Abraham <thomas.ab@samsung.com>
> Signed-off-by: Inderpal Singh <inderpal.s@samsung.com>
> Signed-off-by: Andrew Bresticker <abrestic@chromium.org>
> Signed-off-by: Abhilash Kesavan <a.kesavan@samsung.com>
Small comments below.
> +static int exynos_cluster_power_control(unsigned int cluster, int enable)
> +{
> + unsigned int tries = 100;
> + unsigned int val = 0;
> +
> + if (enable) {
> + exynos_cluster_powerup(cluster);
> + val = S5P_CORE_LOCAL_PWR_EN;
> + } else {
> + exynos_cluster_powerdown(cluster);
It would be clearer if you had "val = 0" here instead so it looks
symmetric.
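I.e., just to illustrate, roughly:

	if (enable) {
		exynos_cluster_powerup(cluster);
		val = S5P_CORE_LOCAL_PWR_EN;
	} else {
		exynos_cluster_powerdown(cluster);
		val = 0;
	}

with the "= 0" initialiser on the declaration dropped.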
> + }
> +
> + /* Wait until cluster power control is applied */
> + while (tries--) {
> + if (exynos_cluster_power_state(cluster) == val)
> + return 0;
> +
> + cpu_relax();
> + }
> + pr_warn("timed out waiting for cluster %u to power %s\n", cluster,
> + enable ? "on" : "off");
> +
> + return -ETIMEDOUT;
> +}
> +
> +static int exynos_power_up(unsigned int cpu, unsigned int cluster)
> +{
> + unsigned int cpunr = cpu + (cluster * EXYNOS5420_CPUS_PER_CLUSTER);
> + unsigned int err;
> +
> + pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
> + if (cpu >= EXYNOS5420_CPUS_PER_CLUSTER ||
> + cluster >= EXYNOS5420_NR_CLUSTERS)
> + return -EINVAL;
> +
> + /*
> + * Since this is called with IRQs enabled, and no arch_spin_lock_irq
> + * variant exists, we need to disable IRQs manually here.
> + */
> + local_irq_disable();
> + arch_spin_lock(&exynos_mcpm_lock);
> +
> + cpu_use_count[cpu][cluster]++;
> + if (cpu_use_count[cpu][cluster] == 1) {
> + bool was_cluster_down =
> + __mcpm_cluster_state(cluster) == CLUSTER_DOWN;
> +
> + /*
> + * Turn on the cluster (L2/COMMON) and then power on the cores.
> + * TODO: Turn off the clusters when all cores in the cluster
> + * are down to achieve significant power savings.
> + */
This TODO comment should be moved into exynos_power_down(), where last_man
is determined to be true.
> + if (was_cluster_down) {
> + err = exynos_cluster_power_control(cluster, 1);
> + if (err) {
> + exynos_cluster_power_control(cluster, 0);
> + return err;
The lock is still held.
To make the code clearer, you should do:
if (!err)
exynos_cpu_powerup(cpunr);
and return err at the end.
> + }
> + }
> +
> + exynos_cpu_powerup(cpunr);
> +
> + /* CPU should be powered up there */
> + /* Also setup Cluster Power Sequence here */
Both comments are wrong. The CPU is not necessarily powered up yet at
this point, and "Cluster Power Sequence" (whatever that is) should not
happen here but in the callback provided to mcpm_sync_init().
> + } else if (cpu_use_count[cpu][cluster] != 2) {
> + /*
> + * The only possible values are:
> + * 0 = CPU down
> + * 1 = CPU (still) up
> + * 2 = CPU requested to be up before it had a chance
> + * to actually make itself down.
> + * Any other value is a bug.
> + */
> + BUG();
> + }
> +
> + arch_spin_unlock(&exynos_mcpm_lock);
> + local_irq_enable();
> +
> + return 0;
> +}
> +
> +static void exynos_power_down(void)
> +{
> + unsigned int mpidr, cpu, cluster;
> + bool last_man = false, skip_wfi = false;
> + unsigned int cpunr;
> +
> + mpidr = read_cpuid_mpidr();
> + cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
> + cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
> + cpunr = cpu + (cluster * EXYNOS5420_CPUS_PER_CLUSTER);
> +
> + pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
> + BUG_ON(cpu >= EXYNOS5420_CPUS_PER_CLUSTER ||
> + cluster >= EXYNOS5420_NR_CLUSTERS);
> +
> + __mcpm_cpu_going_down(cpu, cluster);
> +
> + arch_spin_lock(&exynos_mcpm_lock);
> + BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);
> + cpu_use_count[cpu][cluster]--;
> + if (cpu_use_count[cpu][cluster] == 0) {
> + int cnt = 0, i;
> +
> + exynos_cpu_powerdown(cpunr);
> + for (i = 0; i < EXYNOS5420_CPUS_PER_CLUSTER; i++)
> + cnt += cpu_use_count[i][cluster];
> + if (cnt == 0)
> + last_man = true;
I think Lorenzo commented on this code block already.
Otherwise it is almost there.
Nicolas
* [PATCH v3 5/6] arm: exynos: Add MCPM call-back functions
2014-04-30 14:43 ` Nicolas Pitre
@ 2014-05-01 9:39 ` Abhilash Kesavan
0 siblings, 0 replies; 30+ messages in thread
From: Abhilash Kesavan @ 2014-05-01 9:39 UTC (permalink / raw)
To: linux-arm-kernel
Hi Nicolas,
On Wed, Apr 30, 2014 at 8:13 PM, Nicolas Pitre <nicolas.pitre@linaro.org> wrote:
> On Sat, 26 Apr 2014, Abhilash Kesavan wrote:
>
>> Add machine-dependent MCPM call-backs for Exynos5420. These are used
>> to power up/down the secondary CPUs during boot, shutdown, s2r and
>> switching.
>>
>> Signed-off-by: Thomas Abraham <thomas.ab@samsung.com>
>> Signed-off-by: Inderpal Singh <inderpal.s@samsung.com>
>> Signed-off-by: Andrew Bresticker <abrestic@chromium.org>
>> Signed-off-by: Abhilash Kesavan <a.kesavan@samsung.com>
>
> Small comments below.
>
>> +static int exynos_cluster_power_control(unsigned int cluster, int enable)
>> +{
>> + unsigned int tries = 100;
>> + unsigned int val = 0;
>> +
>> + if (enable) {
>> + exynos_cluster_powerup(cluster);
>> + val = S5P_CORE_LOCAL_PWR_EN;
>> + } else {
>> + exynos_cluster_powerdown(cluster);
>
> It would be clearer if you had "val = 0" here instead so it looks
> symmetric.
OK.
>
>> + }
>> +
>> + /* Wait until cluster power control is applied */
>> + while (tries--) {
>> + if (exynos_cluster_power_state(cluster) == val)
>> + return 0;
>> +
>> + cpu_relax();
>> + }
>> + pr_warn("timed out waiting for cluster %u to power %s\n", cluster,
>> + enable ? "on" : "off");
>> +
>> + return -ETIMEDOUT;
>> +}
>> +
>> +static int exynos_power_up(unsigned int cpu, unsigned int cluster)
>> +{
>> + unsigned int cpunr = cpu + (cluster * EXYNOS5420_CPUS_PER_CLUSTER);
>> + unsigned int err;
>> +
>> + pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
>> + if (cpu >= EXYNOS5420_CPUS_PER_CLUSTER ||
>> + cluster >= EXYNOS5420_NR_CLUSTERS)
>> + return -EINVAL;
>> +
>> + /*
>> + * Since this is called with IRQs enabled, and no arch_spin_lock_irq
>> + * variant exists, we need to disable IRQs manually here.
>> + */
>> + local_irq_disable();
>> + arch_spin_lock(&exynos_mcpm_lock);
>> +
>> + cpu_use_count[cpu][cluster]++;
>> + if (cpu_use_count[cpu][cluster] == 1) {
>> + bool was_cluster_down =
>> + __mcpm_cluster_state(cluster) == CLUSTER_DOWN;
>> +
>> + /*
>> + * Turn on the cluster (L2/COMMON) and then power on the cores.
>> + * TODO: Turn off the clusters when all cores in the cluster
>> + * are down to achieve significant power savings.
>> + */
>
> This TODO comment should be moved into exynos_power_down(), where last_man
> is determined to be true.
Will move the comment.
>
>> + if (was_cluster_down) {
>> + err = exynos_cluster_power_control(cluster, 1);
>> + if (err) {
>> + exynos_cluster_power_control(cluster, 0);
>> + return err;
>
> The lock is still held.
>
> To make the code clearer, you should do:
>
> if (!err)
> exynos_cpu_powerup(cpunr);
>
> and return err at the end.
OK.
>
>> + }
>> + }
>> +
>> + exynos_cpu_powerup(cpunr);
>> +
>> + /* CPU should be powered up there */
>> + /* Also setup Cluster Power Sequence here */
>
> Both comments are wrong. The CPU is not necessarily powered up yet at
> this point, and "Cluster Power Sequence" (whatever that is) should not
> happen here but in the callback provided to mcpm_sync_init().
Have removed these.
>
>> + } else if (cpu_use_count[cpu][cluster] != 2) {
>> + /*
>> + * The only possible values are:
>> + * 0 = CPU down
>> + * 1 = CPU (still) up
>> + * 2 = CPU requested to be up before it had a chance
>> + * to actually make itself down.
>> + * Any other value is a bug.
>> + */
>> + BUG();
>> + }
>> +
>> + arch_spin_unlock(&exynos_mcpm_lock);
>> + local_irq_enable();
>> +
>> + return 0;
>> +}
>> +
>> +static void exynos_power_down(void)
>> +{
>> + unsigned int mpidr, cpu, cluster;
>> + bool last_man = false, skip_wfi = false;
>> + unsigned int cpunr;
>> +
>> + mpidr = read_cpuid_mpidr();
>> + cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
>> + cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
>> + cpunr = cpu + (cluster * EXYNOS5420_CPUS_PER_CLUSTER);
>> +
>> + pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
>> + BUG_ON(cpu >= EXYNOS5420_CPUS_PER_CLUSTER ||
>> + cluster >= EXYNOS5420_NR_CLUSTERS);
>> +
>> + __mcpm_cpu_going_down(cpu, cluster);
>> +
>> + arch_spin_lock(&exynos_mcpm_lock);
>> + BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);
>> + cpu_use_count[cpu][cluster]--;
>> + if (cpu_use_count[cpu][cluster] == 0) {
>> + int cnt = 0, i;
>> +
>> + exynos_cpu_powerdown(cpunr);
>> + for (i = 0; i < EXYNOS5420_CPUS_PER_CLUSTER; i++)
>> + cnt += cpu_use_count[i][cluster];
>> + if (cnt == 0)
>> + last_man = true;
>
> I think Lorenzo commented on this code block already.
>
> Otherwise it is almost there.
>
Thanks,
Abhilash
>
> Nicolas
* [PATCH v3 5/6] arm: exynos: Add MCPM call-back functions
2014-04-26 16:05 ` [PATCH v3 5/6] arm: exynos: Add MCPM call-back functions Abhilash Kesavan
2014-04-28 17:44 ` Lorenzo Pieralisi
2014-04-30 14:43 ` Nicolas Pitre
@ 2014-04-30 14:59 ` Lorenzo Pieralisi
2014-05-01 9:39 ` Abhilash Kesavan
2014-05-02 15:25 ` [PATCH v4 5/5] " Abhilash Kesavan
3 siblings, 1 reply; 30+ messages in thread
From: Lorenzo Pieralisi @ 2014-04-30 14:59 UTC (permalink / raw)
To: linux-arm-kernel
On Sat, Apr 26, 2014 at 05:05:47PM +0100, Abhilash Kesavan wrote:
[...]
> + if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
> + arch_spin_unlock(&exynos_mcpm_lock);
> +
I missed it on first review, but you do need to disable L2 prefetching
on the A15 here as we do in TC2 (tc2_pm.c); the power-down procedure is
not compliant otherwise.
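For reference, tc2_pm.c does roughly the following on the A15 cores just
before the cache flush:

	if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A15) {
		/* Disable L2 prefetching before flushing the cache. */
		asm volatile(
		"mcr	p15, 1, %0, c15, c0, 3\n\t"
		"isb\n\t"
		"dsb"
		: : "r" (0x400));
	}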
Thanks,
Lorenzo
> + /* Flush all cache levels for this cluster. */
> + exynos_v7_exit_coherency_flush(all);
> +
> + /*
> + * Disable cluster-level coherency by masking
> + * incoming snoops and DVM messages:
> + */
> + cci_disable_port_by_cpu(mpidr);
> +
> + __mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
> + } else {
> + arch_spin_unlock(&exynos_mcpm_lock);
> +
> + /* Disable and flush the local CPU cache. */
> + exynos_v7_exit_coherency_flush(louis);
> + }
> +
> + __mcpm_cpu_down(cpu, cluster);
> +
> + /* Now we are prepared for power-down, do it: */
> + if (!skip_wfi)
> + wfi();
> +
> + /* Not dead at this point? Let our caller cope. */
> +}
> +
> +static int exynos_power_down_finish(unsigned int cpu, unsigned int cluster)
> +{
> + unsigned int tries = 100;
> + unsigned int cpunr = cpu + (cluster * EXYNOS5420_CPUS_PER_CLUSTER);
> +
> + pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
> + BUG_ON(cpu >= EXYNOS5420_CPUS_PER_CLUSTER ||
> + cluster >= EXYNOS5420_NR_CLUSTERS);
> +
> + /* Wait for the core state to be OFF */
> + while (tries--) {
> + if (ACCESS_ONCE(cpu_use_count[cpu][cluster]) == 0) {
> + if ((exynos_cpu_power_state(cpunr) == 0))
> + return 0; /* success: the CPU is halted */
> + }
> +
> + /* Otherwise, wait and retry: */
> + msleep(1);
> + }
> +
> + return -ETIMEDOUT; /* timeout */
> +}
> +
> +static const struct mcpm_platform_ops exynos_power_ops = {
> + .power_up = exynos_power_up,
> + .power_down = exynos_power_down,
> + .power_down_finish = exynos_power_down_finish,
> +};
> +
> +static void __init exynos_mcpm_usage_count_init(void)
> +{
> + unsigned int mpidr, cpu, cluster;
> +
> + mpidr = read_cpuid_mpidr();
> + cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
> + cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
> +
> + pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
> + BUG_ON(cpu >= EXYNOS5420_CPUS_PER_CLUSTER ||
> + cluster >= EXYNOS5420_NR_CLUSTERS);
> +
> + cpu_use_count[cpu][cluster] = 1;
> +}
> +
> +/*
> + * Enable cluster-level coherency, in preparation for turning on the MMU.
> + */
> +static void __naked exynos_pm_power_up_setup(unsigned int affinity_level)
> +{
> + asm volatile ("\n"
> + "cmp r0, #1\n"
> + "bxne lr\n"
> + "b cci_enable_port_for_self");
> +}
> +
> +static int __init exynos_mcpm_init(void)
> +{
> + int ret = 0;
> +
> + if (!soc_is_exynos5420())
> + return -ENODEV;
> +
> + if (!cci_probed())
> + return -ENODEV;
> +
> + /*
> + * To increase the stability of KFC reset we need to program
> + * the PMU SPARE3 register
> + */
> + __raw_writel(EXYNOS5420_SWRESET_KFC_SEL, S5P_PMU_SPARE3);
> +
> + exynos_mcpm_usage_count_init();
> +
> + ret = mcpm_platform_register(&exynos_power_ops);
> + if (!ret)
> + ret = mcpm_sync_init(exynos_pm_power_up_setup);
> + if (ret)
> + return ret;
> +
> + mcpm_smp_set_ops();
> +
> + pr_info("Exynos MCPM support installed\n");
> +
> + /*
> + * Future entries into the kernel can now go
> + * through the cluster entry vectors.
> + */
> + __raw_writel(virt_to_phys(mcpm_entry_point),
> + REG_ENTRY_ADDR);
> +
> + return ret;
> +}
> +
> +early_initcall(exynos_mcpm_init);
> diff --git a/arch/arm/mach-exynos/regs-pmu.h b/arch/arm/mach-exynos/regs-pmu.h
> index 6685ebf..f44d318 100644
> --- a/arch/arm/mach-exynos/regs-pmu.h
> +++ b/arch/arm/mach-exynos/regs-pmu.h
> @@ -38,6 +38,7 @@
> #define S5P_INFORM5 S5P_PMUREG(0x0814)
> #define S5P_INFORM6 S5P_PMUREG(0x0818)
> #define S5P_INFORM7 S5P_PMUREG(0x081C)
> +#define S5P_PMU_SPARE3 S5P_PMUREG(0x090C)
>
> #define S5P_ARM_CORE0_LOWPWR S5P_PMUREG(0x1000)
> #define S5P_DIS_IRQ_CORE0 S5P_PMUREG(0x1004)
> @@ -325,4 +326,6 @@
>
> #define EXYNOS5_OPTION_USE_RETENTION (1 << 4)
>
> +#define EXYNOS5420_SWRESET_KFC_SEL 0x3
> +
> #endif /* __ASM_ARCH_REGS_PMU_H */
> --
> 1.8.3.2
>
>
* [PATCH v3 5/6] arm: exynos: Add MCPM call-back functions
2014-04-30 14:59 ` Lorenzo Pieralisi
@ 2014-05-01 9:39 ` Abhilash Kesavan
0 siblings, 0 replies; 30+ messages in thread
From: Abhilash Kesavan @ 2014-05-01 9:39 UTC (permalink / raw)
To: linux-arm-kernel
Hi Lorenzo,
On Wed, Apr 30, 2014 at 8:29 PM, Lorenzo Pieralisi
<lorenzo.pieralisi@arm.com> wrote:
> On Sat, Apr 26, 2014 at 05:05:47PM +0100, Abhilash Kesavan wrote:
>
> [...]
>
>> + if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
>> + arch_spin_unlock(&exynos_mcpm_lock);
>> +
>
> I missed it on first review, but you do need to disable L2 prefetching
> on the A15 here as we do in TC2 (tc2_pm.c); the power-down procedure is
> not compliant otherwise.
OK, I see this as being part of the power-down sequence described in
the A15 TRM; will add.
Regards,
Abhilash
>
> Thanks,
> Lorenzo
>
>> + /* Flush all cache levels for this cluster. */
>> + exynos_v7_exit_coherency_flush(all);
>> +
>> + /*
>> + * Disable cluster-level coherency by masking
>> + * incoming snoops and DVM messages:
>> + */
>> + cci_disable_port_by_cpu(mpidr);
>> +
>> + __mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
>> + } else {
>> + arch_spin_unlock(&exynos_mcpm_lock);
>> +
>> + /* Disable and flush the local CPU cache. */
>> + exynos_v7_exit_coherency_flush(louis);
>> + }
>> +
>> + __mcpm_cpu_down(cpu, cluster);
>> +
>> + /* Now we are prepared for power-down, do it: */
>> + if (!skip_wfi)
>> + wfi();
>> +
>> + /* Not dead at this point? Let our caller cope. */
>> +}
>> +
>> +static int exynos_power_down_finish(unsigned int cpu, unsigned int cluster)
>> +{
>> + unsigned int tries = 100;
>> + unsigned int cpunr = cpu + (cluster * EXYNOS5420_CPUS_PER_CLUSTER);
>> +
>> + pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
>> + BUG_ON(cpu >= EXYNOS5420_CPUS_PER_CLUSTER ||
>> + cluster >= EXYNOS5420_NR_CLUSTERS);
>> +
>> + /* Wait for the core state to be OFF */
>> + while (tries--) {
>> + if (ACCESS_ONCE(cpu_use_count[cpu][cluster]) == 0) {
>> + if ((exynos_cpu_power_state(cpunr) == 0))
>> + return 0; /* success: the CPU is halted */
>> + }
>> +
>> + /* Otherwise, wait and retry: */
>> + msleep(1);
>> + }
>> +
>> + return -ETIMEDOUT; /* timeout */
>> +}
>> +
>> +static const struct mcpm_platform_ops exynos_power_ops = {
>> + .power_up = exynos_power_up,
>> + .power_down = exynos_power_down,
>> + .power_down_finish = exynos_power_down_finish,
>> +};
>> +
>> +static void __init exynos_mcpm_usage_count_init(void)
>> +{
>> + unsigned int mpidr, cpu, cluster;
>> +
>> + mpidr = read_cpuid_mpidr();
>> + cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
>> + cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
>> +
>> + pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
>> + BUG_ON(cpu >= EXYNOS5420_CPUS_PER_CLUSTER ||
>> + cluster >= EXYNOS5420_NR_CLUSTERS);
>> +
>> + cpu_use_count[cpu][cluster] = 1;
>> +}
>> +
>> +/*
>> + * Enable cluster-level coherency, in preparation for turning on the MMU.
>> + */
>> +static void __naked exynos_pm_power_up_setup(unsigned int affinity_level)
>> +{
>> + asm volatile ("\n"
>> + "cmp r0, #1\n"
>> + "bxne lr\n"
>> + "b cci_enable_port_for_self");
>> +}
>> +
>> +static int __init exynos_mcpm_init(void)
>> +{
>> + int ret = 0;
>> +
>> + if (!soc_is_exynos5420())
>> + return -ENODEV;
>> +
>> + if (!cci_probed())
>> + return -ENODEV;
>> +
>> + /*
>> + * To increase the stability of KFC reset we need to program
>> + * the PMU SPARE3 register
>> + */
>> + __raw_writel(EXYNOS5420_SWRESET_KFC_SEL, S5P_PMU_SPARE3);
>> +
>> + exynos_mcpm_usage_count_init();
>> +
>> + ret = mcpm_platform_register(&exynos_power_ops);
>> + if (!ret)
>> + ret = mcpm_sync_init(exynos_pm_power_up_setup);
>> + if (ret)
>> + return ret;
>> +
>> + mcpm_smp_set_ops();
>> +
>> + pr_info("Exynos MCPM support installed\n");
>> +
>> + /*
>> + * Future entries into the kernel can now go
>> + * through the cluster entry vectors.
>> + */
>> + __raw_writel(virt_to_phys(mcpm_entry_point),
>> + REG_ENTRY_ADDR);
>> +
>> + return ret;
>> +}
>> +
>> +early_initcall(exynos_mcpm_init);
>> diff --git a/arch/arm/mach-exynos/regs-pmu.h b/arch/arm/mach-exynos/regs-pmu.h
>> index 6685ebf..f44d318 100644
>> --- a/arch/arm/mach-exynos/regs-pmu.h
>> +++ b/arch/arm/mach-exynos/regs-pmu.h
>> @@ -38,6 +38,7 @@
>> #define S5P_INFORM5 S5P_PMUREG(0x0814)
>> #define S5P_INFORM6 S5P_PMUREG(0x0818)
>> #define S5P_INFORM7 S5P_PMUREG(0x081C)
>> +#define S5P_PMU_SPARE3 S5P_PMUREG(0x090C)
>>
>> #define S5P_ARM_CORE0_LOWPWR S5P_PMUREG(0x1000)
>> #define S5P_DIS_IRQ_CORE0 S5P_PMUREG(0x1004)
>> @@ -325,4 +326,6 @@
>>
>> #define EXYNOS5_OPTION_USE_RETENTION (1 << 4)
>>
>> +#define EXYNOS5420_SWRESET_KFC_SEL 0x3
>> +
>> #endif /* __ASM_ARCH_REGS_PMU_H */
>> --
>> 1.8.3.2
>>
>>
>
* [PATCH v4 5/5] arm: exynos: Add MCPM call-back functions
2014-04-26 16:05 ` [PATCH v3 5/6] arm: exynos: Add MCPM call-back functions Abhilash Kesavan
` (2 preceding siblings ...)
2014-04-30 14:59 ` Lorenzo Pieralisi
@ 2014-05-02 15:25 ` Abhilash Kesavan
2014-05-02 18:16 ` Nicolas Pitre
3 siblings, 1 reply; 30+ messages in thread
From: Abhilash Kesavan @ 2014-05-02 15:25 UTC (permalink / raw)
To: linux-arm-kernel
Add machine-dependent MCPM call-backs for Exynos5420. These are used
to power up/down the secondary CPUs during boot, shutdown, s2r and
switching.
Signed-off-by: Thomas Abraham <thomas.ab@samsung.com>
Signed-off-by: Inderpal Singh <inderpal.s@samsung.com>
Signed-off-by: Andrew Bresticker <abrestic@chromium.org>
Signed-off-by: Abhilash Kesavan <a.kesavan@samsung.com>
---
arch/arm/mach-exynos/Kconfig | 8 +
arch/arm/mach-exynos/Makefile | 2 +
arch/arm/mach-exynos/mcpm-exynos.c | 345 ++++++++++++++++++++++++++++++++++++
arch/arm/mach-exynos/regs-pmu.h | 3 +
4 files changed, 358 insertions(+)
create mode 100644 arch/arm/mach-exynos/mcpm-exynos.c
diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
index 5c34dc2..138070e 100644
--- a/arch/arm/mach-exynos/Kconfig
+++ b/arch/arm/mach-exynos/Kconfig
@@ -73,4 +73,12 @@ config SOC_EXYNOS5440
endmenu
+config EXYNOS5420_MCPM
+ bool "Exynos5420 Multi-Cluster PM support"
+ depends on MCPM && SOC_EXYNOS5420
+ select ARM_CCI
+ help
+ This is needed to provide CPU and cluster power management
+ on Exynos5420 implementing big.LITTLE.
+
endif
diff --git a/arch/arm/mach-exynos/Makefile b/arch/arm/mach-exynos/Makefile
index a656dbe..01bc9b9 100644
--- a/arch/arm/mach-exynos/Makefile
+++ b/arch/arm/mach-exynos/Makefile
@@ -29,3 +29,5 @@ obj-$(CONFIG_ARCH_EXYNOS) += firmware.o
plus_sec := $(call as-instr,.arch_extension sec,+sec)
AFLAGS_exynos-smc.o :=-Wa,-march=armv7-a$(plus_sec)
+
+obj-$(CONFIG_EXYNOS5420_MCPM) += mcpm-exynos.o
diff --git a/arch/arm/mach-exynos/mcpm-exynos.c b/arch/arm/mach-exynos/mcpm-exynos.c
new file mode 100644
index 0000000..d0f7461
--- /dev/null
+++ b/arch/arm/mach-exynos/mcpm-exynos.c
@@ -0,0 +1,345 @@
+/*
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * arch/arm/mach-exynos/mcpm-exynos.c
+ *
+ * Based on arch/arm/mach-vexpress/dcscb.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/arm-cci.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of_address.h>
+
+#include <asm/cputype.h>
+#include <asm/cp15.h>
+#include <asm/mcpm.h>
+
+#include "regs-pmu.h"
+#include "common.h"
+
+#define EXYNOS5420_CPUS_PER_CLUSTER 4
+#define EXYNOS5420_NR_CLUSTERS 2
+
+/* Non-secure iRAM base address */
+static void __iomem *ns_sram_base_addr;
+
+/*
+ * The common v7_exit_coherency_flush API could not be used because of the
+ * Erratum 799270 workaround. This macro is the same as the common one (in
+ * arch/arm/include/asm/cacheflush.h) except for the erratum handling.
+ */
+#define exynos_v7_exit_coherency_flush(level) \
+ asm volatile( \
+ "stmfd sp!, {fp, ip}\n\t"\
+ "mrc p15, 0, r0, c1, c0, 0 @ get SCTLR\n\t" \
+ "bic r0, r0, #"__stringify(CR_C)"\n\t" \
+ "mcr p15, 0, r0, c1, c0, 0 @ set SCTLR\n\t" \
+ "isb\n\t"\
+ "bl v7_flush_dcache_"__stringify(level)"\n\t" \
+ "clrex\n\t"\
+ "mrc p15, 0, r0, c1, c0, 1 @ get ACTLR\n\t" \
+ "bic r0, r0, #(1 << 6) @ disable local coherency\n\t" \
+ /* Dummy Load of a device register to avoid Erratum 799270 */ \
+ "ldr r4, [%0]\n\t" \
+ "and r4, r4, #0\n\t" \
+ "orr r0, r0, r4\n\t" \
+ "mcr p15, 0, r0, c1, c0, 1 @ set ACTLR\n\t" \
+ "isb\n\t" \
+ "dsb\n\t" \
+ "ldmfd sp!, {fp, ip}" \
+ : \
+ : "Ir" (S5P_INFORM0) \
+ : "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", \
+ "r9", "r10", "lr", "memory")
+
+/*
+ * We can't use regular spinlocks. In the switcher case, it is possible
+ * for an outbound CPU to call power_down() after its inbound counterpart
+ * is already live using the same logical CPU number which trips lockdep
+ * debugging.
+ */
+static arch_spinlock_t exynos_mcpm_lock = __ARCH_SPIN_LOCK_UNLOCKED;
+static int
+cpu_use_count[EXYNOS5420_CPUS_PER_CLUSTER][EXYNOS5420_NR_CLUSTERS];
+
+#define exynos_cluster_unused(cluster) \
+ (!cpu_use_count[0][cluster] && \
+ !cpu_use_count[1][cluster] && \
+ !cpu_use_count[2][cluster] && \
+ !cpu_use_count[3][cluster])
+
+static int exynos_cluster_power_control(unsigned int cluster, int enable)
+{
+ unsigned int tries = 100;
+ unsigned int val;
+
+ if (enable) {
+ exynos_cluster_powerup(cluster);
+ val = S5P_CORE_LOCAL_PWR_EN;
+ } else {
+ exynos_cluster_powerdown(cluster);
+ val = 0;
+ }
+
+ /* Wait until cluster power control is applied */
+ while (tries--) {
+ if (exynos_cluster_power_state(cluster) == val)
+ return 0;
+
+ cpu_relax();
+ }
+ pr_warn("timed out waiting for cluster %u to power %s\n", cluster,
+ enable ? "on" : "off");
+
+ return -ETIMEDOUT;
+}
+
+static int exynos_power_up(unsigned int cpu, unsigned int cluster)
+{
+ unsigned int cpunr = cpu + (cluster * EXYNOS5420_CPUS_PER_CLUSTER);
+ int err = 0;
+
+ pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
+ if (cpu >= EXYNOS5420_CPUS_PER_CLUSTER ||
+ cluster >= EXYNOS5420_NR_CLUSTERS)
+ return -EINVAL;
+
+ /*
+ * Since this is called with IRQs enabled, and no arch_spin_lock_irq
+ * variant exists, we need to disable IRQs manually here.
+ */
+ local_irq_disable();
+ arch_spin_lock(&exynos_mcpm_lock);
+
+ cpu_use_count[cpu][cluster]++;
+ if (cpu_use_count[cpu][cluster] == 1) {
+ bool was_cluster_down =
+ __mcpm_cluster_state(cluster) == CLUSTER_DOWN;
+
+ /*
+ * Turn on the cluster (L2/COMMON) and then power on the
+ * cores.
+ */
+ if (was_cluster_down)
+ err = exynos_cluster_power_control(cluster, 1);
+
+ if (!err)
+ exynos_cpu_powerup(cpunr);
+ else
+ exynos_cluster_power_control(cluster, 0);
+ } else if (cpu_use_count[cpu][cluster] != 2) {
+ /*
+ * The only possible values are:
+ * 0 = CPU down
+ * 1 = CPU (still) up
+ * 2 = CPU requested to be up before it had a chance
+ * to actually make itself down.
+ * Any other value is a bug.
+ */
+ BUG();
+ }
+
+ arch_spin_unlock(&exynos_mcpm_lock);
+ local_irq_enable();
+
+ return err;
+}
+
+static void exynos_power_down(void)
+{
+ unsigned int mpidr, cpu, cluster;
+ bool last_man = false, skip_wfi = false;
+ unsigned int cpunr;
+
+ mpidr = read_cpuid_mpidr();
+ cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+ cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+ cpunr = cpu + (cluster * EXYNOS5420_CPUS_PER_CLUSTER);
+
+ pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
+ BUG_ON(cpu >= EXYNOS5420_CPUS_PER_CLUSTER ||
+ cluster >= EXYNOS5420_NR_CLUSTERS);
+
+ __mcpm_cpu_going_down(cpu, cluster);
+
+ arch_spin_lock(&exynos_mcpm_lock);
+ BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);
+ cpu_use_count[cpu][cluster]--;
+ if (cpu_use_count[cpu][cluster] == 0) {
+ exynos_cpu_powerdown(cpunr);
+
+ if (exynos_cluster_unused(cluster))
+ last_man = true;
+ } else if (cpu_use_count[cpu][cluster] == 1) {
+ /*
+ * A power_up request went ahead of us.
+ * Even if we do not want to shut this CPU down,
+ * the caller expects a certain state as if the WFI
+ * was aborted. So let's continue with cache cleaning.
+ */
+ skip_wfi = true;
+ } else {
+ BUG();
+ }
+
+ /*
+ * TODO: Turn off the clusters when all cores in the cluster
+ * are down to achieve significant power savings.
+ */
+ if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
+ arch_spin_unlock(&exynos_mcpm_lock);
+
+ /* Flush all cache levels for this cluster. */
+ exynos_v7_exit_coherency_flush(all);
+
+ /*
+ * Disable cluster-level coherency by masking
+ * incoming snoops and DVM messages:
+ */
+ cci_disable_port_by_cpu(mpidr);
+
+ __mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
+ } else {
+ arch_spin_unlock(&exynos_mcpm_lock);
+
+ if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A15) {
+ /*
+ * On the Cortex-A15 we need to disable
+ * L2 prefetching before flushing the cache.
+ */
+ asm volatile(
+ "mcr p15, 1, %0, c15, c0, 3\n\t"
+ "isb\n\t"
+ "dsb"
+ : : "r" (0x400));
+ }
+
+ /* Disable and flush the local CPU cache. */
+ exynos_v7_exit_coherency_flush(louis);
+ }
+
+ __mcpm_cpu_down(cpu, cluster);
+
+ /* Now we are prepared for power-down, do it: */
+ if (!skip_wfi)
+ wfi();
+
+ /* Not dead at this point? Let our caller cope. */
+}
+
+static int exynos_power_down_finish(unsigned int cpu, unsigned int cluster)
+{
+ unsigned int tries = 100;
+ unsigned int cpunr = cpu + (cluster * EXYNOS5420_CPUS_PER_CLUSTER);
+
+ pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
+ BUG_ON(cpu >= EXYNOS5420_CPUS_PER_CLUSTER ||
+ cluster >= EXYNOS5420_NR_CLUSTERS);
+
+ /* Wait for the core state to be OFF */
+ while (tries--) {
+ if (ACCESS_ONCE(cpu_use_count[cpu][cluster]) == 0) {
+ if ((exynos_cpu_power_state(cpunr) == 0))
+ return 0; /* success: the CPU is halted */
+ }
+
+ /* Otherwise, wait and retry: */
+ msleep(1);
+ }
+
+ return -ETIMEDOUT; /* timeout */
+}
+
+static const struct mcpm_platform_ops exynos_power_ops = {
+ .power_up = exynos_power_up,
+ .power_down = exynos_power_down,
+ .power_down_finish = exynos_power_down_finish,
+};
+
+static void __init exynos_mcpm_usage_count_init(void)
+{
+ unsigned int mpidr, cpu, cluster;
+
+ mpidr = read_cpuid_mpidr();
+ cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+ cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+
+ pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
+ BUG_ON(cpu >= EXYNOS5420_CPUS_PER_CLUSTER ||
+ cluster >= EXYNOS5420_NR_CLUSTERS);
+
+ cpu_use_count[cpu][cluster] = 1;
+}
+
+/*
+ * Enable cluster-level coherency, in preparation for turning on the MMU.
+ */
+static void __naked exynos_pm_power_up_setup(unsigned int affinity_level)
+{
+ asm volatile ("\n"
+ "cmp r0, #1\n"
+ "bxne lr\n"
+ "b cci_enable_port_for_self");
+}
+
+static int __init exynos_mcpm_init(void)
+{
+ struct device_node *node;
+ int ret = 0;
+
+ node = of_find_compatible_node(NULL, NULL, "samsung,exynos5420");
+ if (!node)
+ return -ENODEV;
+ of_node_put(node);
+
+ if (!cci_probed())
+ return -ENODEV;
+
+ node = of_find_compatible_node(NULL, NULL,
+ "samsung,exynos4210-sram-ns");
+ if (!node)
+ return -ENODEV;
+
+ ns_sram_base_addr = of_iomap(node, 0);
+ of_node_put(node);
+ if (!ns_sram_base_addr) {
+ pr_err("failed to map non-secure iRAM base address\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * To increase the stability of KFC reset we need to program
+ * the PMU SPARE3 register
+ */
+ __raw_writel(EXYNOS5420_SWRESET_KFC_SEL, S5P_PMU_SPARE3);
+
+ exynos_mcpm_usage_count_init();
+
+ ret = mcpm_platform_register(&exynos_power_ops);
+ if (!ret)
+ ret = mcpm_sync_init(exynos_pm_power_up_setup);
+ if (ret) {
+ iounmap(ns_sram_base_addr);
+ return ret;
+ }
+
+ mcpm_smp_set_ops();
+
+ pr_info("Exynos MCPM support installed\n");
+
+ /*
+ * Future entries into the kernel can now go
+ * through the cluster entry vectors.
+ */
+ __raw_writel(virt_to_phys(mcpm_entry_point), ns_sram_base_addr + 0x1c);
+
+ return ret;
+}
+
+early_initcall(exynos_mcpm_init);
diff --git a/arch/arm/mach-exynos/regs-pmu.h b/arch/arm/mach-exynos/regs-pmu.h
index 6685ebf..f44d318 100644
--- a/arch/arm/mach-exynos/regs-pmu.h
+++ b/arch/arm/mach-exynos/regs-pmu.h
@@ -38,6 +38,7 @@
#define S5P_INFORM5 S5P_PMUREG(0x0814)
#define S5P_INFORM6 S5P_PMUREG(0x0818)
#define S5P_INFORM7 S5P_PMUREG(0x081C)
+#define S5P_PMU_SPARE3 S5P_PMUREG(0x090C)
#define S5P_ARM_CORE0_LOWPWR S5P_PMUREG(0x1000)
#define S5P_DIS_IRQ_CORE0 S5P_PMUREG(0x1004)
@@ -325,4 +326,6 @@
#define EXYNOS5_OPTION_USE_RETENTION (1 << 4)
+#define EXYNOS5420_SWRESET_KFC_SEL 0x3
+
#endif /* __ASM_ARCH_REGS_PMU_H */
--
1.7.9.5
* [PATCH v4 5/5] arm: exynos: Add MCPM call-back functions
2014-05-02 15:25 ` [PATCH v4 5/5] " Abhilash Kesavan
@ 2014-05-02 18:16 ` Nicolas Pitre
2014-05-02 18:23 ` Andrew Bresticker
2014-05-05 16:25 ` Abhilash Kesavan
0 siblings, 2 replies; 30+ messages in thread
From: Nicolas Pitre @ 2014-05-02 18:16 UTC (permalink / raw)
To: linux-arm-kernel
On Fri, 2 May 2014, Abhilash Kesavan wrote:
> Add machine-dependent MCPM call-backs for Exynos5420. These are used
> to power up/down the secondary CPUs during boot, shutdown, s2r and
> switching.
>
> Signed-off-by: Thomas Abraham <thomas.ab@samsung.com>
> Signed-off-by: Inderpal Singh <inderpal.s@samsung.com>
> Signed-off-by: Andrew Bresticker <abrestic@chromium.org>
> Signed-off-by: Abhilash Kesavan <a.kesavan@samsung.com>
OK.... There is still a detail wrong. At least we are converging.
> ---
> arch/arm/mach-exynos/Kconfig | 8 +
> arch/arm/mach-exynos/Makefile | 2 +
> arch/arm/mach-exynos/mcpm-exynos.c | 345 ++++++++++++++++++++++++++++++++++++
> arch/arm/mach-exynos/regs-pmu.h | 3 +
> 4 files changed, 358 insertions(+)
> create mode 100644 arch/arm/mach-exynos/mcpm-exynos.c
>
> diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
> index 5c34dc2..138070e 100644
> --- a/arch/arm/mach-exynos/Kconfig
> +++ b/arch/arm/mach-exynos/Kconfig
> @@ -73,4 +73,12 @@ config SOC_EXYNOS5440
>
> endmenu
>
> +config EXYNOS5420_MCPM
> + bool "Exynos5420 Multi-Cluster PM support"
> + depends on MCPM && SOC_EXYNOS5420
> + select ARM_CCI
> + help
> + This is needed to provide CPU and cluster power management
> + on Exynos5420 implementing big.LITTLE.
> +
> endif
> diff --git a/arch/arm/mach-exynos/Makefile b/arch/arm/mach-exynos/Makefile
> index a656dbe..01bc9b9 100644
> --- a/arch/arm/mach-exynos/Makefile
> +++ b/arch/arm/mach-exynos/Makefile
> @@ -29,3 +29,5 @@ obj-$(CONFIG_ARCH_EXYNOS) += firmware.o
>
> plus_sec := $(call as-instr,.arch_extension sec,+sec)
> AFLAGS_exynos-smc.o :=-Wa,-march=armv7-a$(plus_sec)
> +
> +obj-$(CONFIG_EXYNOS5420_MCPM) += mcpm-exynos.o
> diff --git a/arch/arm/mach-exynos/mcpm-exynos.c b/arch/arm/mach-exynos/mcpm-exynos.c
> new file mode 100644
> index 0000000..d0f7461
> --- /dev/null
> +++ b/arch/arm/mach-exynos/mcpm-exynos.c
> @@ -0,0 +1,345 @@
> +/*
> + * Copyright (c) 2014 Samsung Electronics Co., Ltd.
> + * http://www.samsung.com
> + *
> + * arch/arm/mach-exynos/mcpm-exynos.c
> + *
> + * Based on arch/arm/mach-vexpress/dcscb.c
> + *
> + * This program is free software; you can redistribute it and/or modify
> + * it under the terms of the GNU General Public License version 2 as
> + * published by the Free Software Foundation.
> + */
> +
> +#include <linux/arm-cci.h>
> +#include <linux/delay.h>
> +#include <linux/io.h>
> +#include <linux/of_address.h>
> +
> +#include <asm/cputype.h>
> +#include <asm/cp15.h>
> +#include <asm/mcpm.h>
> +
> +#include "regs-pmu.h"
> +#include "common.h"
> +
> +#define EXYNOS5420_CPUS_PER_CLUSTER 4
> +#define EXYNOS5420_NR_CLUSTERS 2
> +
> +/* Non-secure iRAM base address */
> +static void __iomem *ns_sram_base_addr;
> +
> +/*
> + * The common v7_exit_coherency_flush API could not be used because of the
> + * Erratum 799270 workaround. This macro is the same as the common one (in
> + * arch/arm/include/asm/cacheflush.h) except for the erratum handling.
> + */
> +#define exynos_v7_exit_coherency_flush(level) \
> + asm volatile( \
> + "stmfd sp!, {fp, ip}\n\t"\
> + "mrc p15, 0, r0, c1, c0, 0 @ get SCTLR\n\t" \
> + "bic r0, r0, #"__stringify(CR_C)"\n\t" \
> + "mcr p15, 0, r0, c1, c0, 0 @ set SCTLR\n\t" \
> + "isb\n\t"\
> + "bl v7_flush_dcache_"__stringify(level)"\n\t" \
> + "clrex\n\t"\
> + "mrc p15, 0, r0, c1, c0, 1 @ get ACTLR\n\t" \
> + "bic r0, r0, #(1 << 6) @ disable local coherency\n\t" \
> + /* Dummy Load of a device register to avoid Erratum 799270 */ \
> + "ldr r4, [%0]\n\t" \
> + "and r4, r4, #0\n\t" \
> + "orr r0, r0, r4\n\t" \
> + "mcr p15, 0, r0, c1, c0, 1 @ set ACTLR\n\t" \
> + "isb\n\t" \
> + "dsb\n\t" \
> + "ldmfd sp!, {fp, ip}" \
> + : \
> + : "Ir" (S5P_INFORM0) \
> + : "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", \
> + "r9", "r10", "lr", "memory")
> +
> +/*
> + * We can't use regular spinlocks. In the switcher case, it is possible
> + * for an outbound CPU to call power_down() after its inbound counterpart
> + * is already live using the same logical CPU number which trips lockdep
> + * debugging.
> + */
> +static arch_spinlock_t exynos_mcpm_lock = __ARCH_SPIN_LOCK_UNLOCKED;
> +static int
> +cpu_use_count[EXYNOS5420_CPUS_PER_CLUSTER][EXYNOS5420_NR_CLUSTERS];
> +
> +#define exynos_cluster_unused(cluster) \
> + (!cpu_use_count[0][cluster] && \
> + !cpu_use_count[1][cluster] && \
> + !cpu_use_count[2][cluster] && \
> + !cpu_use_count[3][cluster])
> +
> +static int exynos_cluster_power_control(unsigned int cluster, int enable)
> +{
> + unsigned int tries = 100;
> + unsigned int val;
> +
> + if (enable) {
> + exynos_cluster_powerup(cluster);
> + val = S5P_CORE_LOCAL_PWR_EN;
> + } else {
> + exynos_cluster_powerdown(cluster);
> + val = 0;
> + }
> +
> + /* Wait until cluster power control is applied */
> + while (tries--) {
> + if (exynos_cluster_power_state(cluster) == val)
> + return 0;
> +
> + cpu_relax();
> + }
> + pr_warn("timed out waiting for cluster %u to power %s\n", cluster,
> + enable ? "on" : "off");
> +
> + return -ETIMEDOUT;
> +}
> +
> +static int exynos_power_up(unsigned int cpu, unsigned int cluster)
> +{
> + unsigned int cpunr = cpu + (cluster * EXYNOS5420_CPUS_PER_CLUSTER);
> + int err = 0;
> +
> + pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
> + if (cpu >= EXYNOS5420_CPUS_PER_CLUSTER ||
> + cluster >= EXYNOS5420_NR_CLUSTERS)
> + return -EINVAL;
> +
> + /*
> + * Since this is called with IRQs enabled, and no arch_spin_lock_irq
> + * variant exists, we need to disable IRQs manually here.
> + */
> + local_irq_disable();
> + arch_spin_lock(&exynos_mcpm_lock);
> +
> + cpu_use_count[cpu][cluster]++;
> + if (cpu_use_count[cpu][cluster] == 1) {
> + bool was_cluster_down =
> + __mcpm_cluster_state(cluster) == CLUSTER_DOWN;
This is racy. I probably made this comment already. The MCPM cluster
state may change in mcpm-head.S where concurrency protection is achieved
with a different mechanism.
What you should do instead is to turn exynos_cluster_unused() into an
exynos_cluster_usecnt() helper that simply adds all the counts together.
You could even have:
#define exynos_cluster_unused(cluster) !exynos_cluster_usecnt(cluster)
Yet, here you should use:
bool was_cluster_down = (exynos_cluster_usecnt(cluster) == 1);
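For reference, a minimal sketch of what that helper could look like, reusing
the cpu_use_count[][] array and names from the patch above (the exact form in
the next revision may of course differ):

/* Sum of the per-CPU use counts for the given cluster (sketch only) */
static int exynos_cluster_usecnt(unsigned int cluster)
{
	return cpu_use_count[0][cluster] +
	       cpu_use_count[1][cluster] +
	       cpu_use_count[2][cluster] +
	       cpu_use_count[3][cluster];
}

#define exynos_cluster_unused(cluster)	!exynos_cluster_usecnt(cluster)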
> +
> + /*
> + * Turn on the cluster (L2/COMMON) and then power on the
> + * cores.
> + */
> + if (was_cluster_down)
> + err = exynos_cluster_power_control(cluster, 1);
> +
> + if (!err)
> + exynos_cpu_powerup(cpunr);
> + else
> + exynos_cluster_power_control(cluster, 0);
> + } else if (cpu_use_count[cpu][cluster] != 2) {
> + /*
> + * The only possible values are:
> + * 0 = CPU down
> + * 1 = CPU (still) up
> + * 2 = CPU requested to be up before it had a chance
> + * to actually make itself down.
> + * Any other value is a bug.
> + */
> + BUG();
> + }
> +
> + arch_spin_unlock(&exynos_mcpm_lock);
> + local_irq_enable();
> +
> + return err;
> +}
> +
> +static void exynos_power_down(void)
> +{
> + unsigned int mpidr, cpu, cluster;
> + bool last_man = false, skip_wfi = false;
> + unsigned int cpunr;
> +
> + mpidr = read_cpuid_mpidr();
> + cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
> + cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
> + cpunr = cpu + (cluster * EXYNOS5420_CPUS_PER_CLUSTER);
> +
> + pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
> + BUG_ON(cpu >= EXYNOS5420_CPUS_PER_CLUSTER ||
> + cluster >= EXYNOS5420_NR_CLUSTERS);
> +
> + __mcpm_cpu_going_down(cpu, cluster);
> +
> + arch_spin_lock(&exynos_mcpm_lock);
> + BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);
> + cpu_use_count[cpu][cluster]--;
> + if (cpu_use_count[cpu][cluster] == 0) {
> + exynos_cpu_powerdown(cpunr);
> +
> + if (exynos_cluster_unused(cluster))
> + last_man = true;
> + } else if (cpu_use_count[cpu][cluster] == 1) {
> + /*
> + * A power_up request went ahead of us.
> + * Even if we do not want to shut this CPU down,
> + * the caller expects a certain state as if the WFI
> + * was aborted. So let's continue with cache cleaning.
> + */
> + skip_wfi = true;
> + } else {
> + BUG();
> + }
> +
> + /*
> + * TODO: Turn off the clusters when all cores in the cluster
> + * are down to achieve significant power savings.
> + */
This comment should actually be located right after the
"if (exynos_cluster_unused(cluster))" above. That is where the cluster
control should be applied, assuming it'll be effective only when WFI is
executed.
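Concretely, that placement amounts to something like the sketch below, using
the exynos_cluster_power_control() helper already defined in this patch and
assuming the PMU only applies the request once the last CPU executes WFI
(whether that assumption holds is discussed further down in the thread):

	cpu_use_count[cpu][cluster]--;
	if (cpu_use_count[cpu][cluster] == 0) {
		exynos_cpu_powerdown(cpunr);

		if (exynos_cluster_unused(cluster)) {
			/*
			 * Request cluster (L2/COMMON) power-down here; it is
			 * assumed to take effect only once this last CPU has
			 * executed WFI.
			 */
			exynos_cluster_power_control(cluster, 0);
			last_man = true;
		}
	}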
> + if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
> + arch_spin_unlock(&exynos_mcpm_lock);
> +
> + /* Flush all cache levels for this cluster. */
> + exynos_v7_exit_coherency_flush(all);
> +
> + /*
> + * Disable cluster-level coherency by masking
> + * incoming snoops and DVM messages:
> + */
> + cci_disable_port_by_cpu(mpidr);
> +
> + __mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
> + } else {
> + arch_spin_unlock(&exynos_mcpm_lock);
> +
> + if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A15) {
> + /*
> + * On the Cortex-A15 we need to disable
> + * L2 prefetching before flushing the cache.
> + */
> + asm volatile(
> + "mcr p15, 1, %0, c15, c0, 3\n\t"
> + "isb\n\t"
> + "dsb"
> + : : "r" (0x400));
> + }
This doesn't belong here. That is for the last_man only to do, right
before the "Flush all cache levels for this cluster" comment.
The rest looks fine to me.
Nicolas
* [PATCH v4 5/5] arm: exynos: Add MCPM call-back functions
2014-05-02 18:16 ` Nicolas Pitre
@ 2014-05-02 18:23 ` Andrew Bresticker
2014-05-02 18:37 ` Nicolas Pitre
2014-05-05 16:26 ` Abhilash Kesavan
2014-05-05 16:25 ` Abhilash Kesavan
1 sibling, 2 replies; 30+ messages in thread
From: Andrew Bresticker @ 2014-05-02 18:23 UTC (permalink / raw)
To: linux-arm-kernel
>> + /*
>> + * TODO: Turn off the clusters when all cores in the cluster
>> + * are down to achieve significant power savings.
>> + */
>
> This comment should actually be located right after the
> "if (exynos_cluster_unused(cluster))" above. That is where the cluster
> control should be applied, assuming it'll be effective only when WFI is
> executed.
Correct me if I'm wrong Samsung folks, but I thought it was not
possible to apply cluster power control from a CPU within the cluster
being powered down, i.e. a CPU in the other cluster must be the one to
apply the cluster power control to power down the outbound cluster.
-Andrew
* [PATCH v4 5/5] arm: exynos: Add MCPM call-back functions
2014-05-02 18:23 ` Andrew Bresticker
@ 2014-05-02 18:37 ` Nicolas Pitre
2014-05-05 16:26 ` Abhilash Kesavan
1 sibling, 0 replies; 30+ messages in thread
From: Nicolas Pitre @ 2014-05-02 18:37 UTC (permalink / raw)
To: linux-arm-kernel
On Fri, 2 May 2014, Andrew Bresticker wrote:
> >> + /*
> >> + * TODO: Turn off the clusters when all cores in the cluster
> >> + * are down to achieve significant power savings.
> >> + */
> >
> > This comment should actually be located right after the
> > "if (exynos_cluster_unused(cluster))" above. That is where the cluster
> > control should be applied, assuming it'll be effective only when WFI is
> > executed.
>
> Correct me if I'm wrong Samsung folks, but I thought it was not
> possible to apply cluster power control from a CPU within the cluster
> being powered down, i.e. a CPU in the other cluster must be the one to
> apply the cluster power control to power down the outbound cluster.
Is this true even for deep idle C-states?
Nicolas
* [PATCH v4 5/5] arm: exynos: Add MCPM call-back functions
2014-05-02 18:23 ` Andrew Bresticker
2014-05-02 18:37 ` Nicolas Pitre
@ 2014-05-05 16:26 ` Abhilash Kesavan
1 sibling, 0 replies; 30+ messages in thread
From: Abhilash Kesavan @ 2014-05-05 16:26 UTC (permalink / raw)
To: linux-arm-kernel
Hi Andrew,
On Fri, May 2, 2014 at 11:53 PM, Andrew Bresticker
<abrestic@chromium.org> wrote:
>>> + /*
>>> + * TODO: Turn off the clusters when all cores in the cluster
>>> + * are down to achieve significant power savings.
>>> + */
>>
>> This comment should actually be located right after the
>> "if (exynos_cluster_unused(cluster))" above. That is where the cluster
>> control should be applied, assuming it'll be effective only when WFI is
>> executed.
>
> Correct me if I'm wrong Samsung folks, but I thought it was not
> possible to apply cluster power control from a CPU within the cluster
> being powered down, i.e. a CPU in the other cluster must be the one to
> apply the cluster power control to power down the outbound cluster.
I was under the same impression until quite recently. However, based
on inputs from the hardware team, there are bits available (in the
*COMMON_OPTION register) that ensure "ARM_CORE0~3 are turned off
earlier and then ARM_COMMON_L2 is turned off finally". This allows us
to turn off the cluster from a CPU of the same cluster. We have used
these bits in our cluster power-down cpuidle state implementation as
well.
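For illustration only, programming such behaviour might look roughly like the
sketch below. The register offset and bit position are placeholders (they are
not taken from the posted patches or from Samsung documentation); only the
S5P_PMUREG()/__raw_writel() style follows the rest of this series:

/* Hypothetical names and values -- placeholders for illustration */
#define S5P_ARM_COMMON_OPTION(cluster)	S5P_PMUREG(0x2408 + (cluster) * 0x80)
#define EXYNOS5420_AUTO_CORE_DOWN	(1 << 9)

static void exynos_enable_auto_cluster_powerdown(unsigned int cluster)
{
	unsigned int val;

	/* Cores 0-3 power off first, then ARM_COMMON/L2 follows */
	val = __raw_readl(S5P_ARM_COMMON_OPTION(cluster));
	val |= EXYNOS5420_AUTO_CORE_DOWN;
	__raw_writel(val, S5P_ARM_COMMON_OPTION(cluster));
}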
Regards,
Abhilash
>
> -Andrew
* [PATCH v4 5/5] arm: exynos: Add MCPM call-back functions
2014-05-02 18:16 ` Nicolas Pitre
2014-05-02 18:23 ` Andrew Bresticker
@ 2014-05-05 16:25 ` Abhilash Kesavan
1 sibling, 0 replies; 30+ messages in thread
From: Abhilash Kesavan @ 2014-05-05 16:25 UTC (permalink / raw)
To: linux-arm-kernel
Hi Nicolas,
On Fri, May 2, 2014 at 11:46 PM, Nicolas Pitre <nicolas.pitre@linaro.org> wrote:
> On Fri, 2 May 2014, Abhilash Kesavan wrote:
>
>> Add machine-dependent MCPM call-backs for Exynos5420. These are used
>> to power up/down the secondary CPUs during boot, shutdown, s2r and
>> switching.
>>
>> Signed-off-by: Thomas Abraham <thomas.ab@samsung.com>
>> Signed-off-by: Inderpal Singh <inderpal.s@samsung.com>
>> Signed-off-by: Andrew Bresticker <abrestic@chromium.org>
>> Signed-off-by: Abhilash Kesavan <a.kesavan@samsung.com>
>
> OK.... There is still a detail wrong. At least we are converging.
>
>> ---
>> arch/arm/mach-exynos/Kconfig | 8 +
>> arch/arm/mach-exynos/Makefile | 2 +
>> arch/arm/mach-exynos/mcpm-exynos.c | 345 ++++++++++++++++++++++++++++++++++++
>> arch/arm/mach-exynos/regs-pmu.h | 3 +
>> 4 files changed, 358 insertions(+)
>> create mode 100644 arch/arm/mach-exynos/mcpm-exynos.c
>>
>> diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
>> index 5c34dc2..138070e 100644
>> --- a/arch/arm/mach-exynos/Kconfig
>> +++ b/arch/arm/mach-exynos/Kconfig
>> @@ -73,4 +73,12 @@ config SOC_EXYNOS5440
>>
>> endmenu
>>
>> +config EXYNOS5420_MCPM
>> + bool "Exynos5420 Multi-Cluster PM support"
>> + depends on MCPM && SOC_EXYNOS5420
>> + select ARM_CCI
>> + help
>> + This is needed to provide CPU and cluster power management
>> + on Exynos5420 implementing big.LITTLE.
>> +
>> endif
>> diff --git a/arch/arm/mach-exynos/Makefile b/arch/arm/mach-exynos/Makefile
>> index a656dbe..01bc9b9 100644
>> --- a/arch/arm/mach-exynos/Makefile
>> +++ b/arch/arm/mach-exynos/Makefile
>> @@ -29,3 +29,5 @@ obj-$(CONFIG_ARCH_EXYNOS) += firmware.o
>>
>> plus_sec := $(call as-instr,.arch_extension sec,+sec)
>> AFLAGS_exynos-smc.o :=-Wa,-march=armv7-a$(plus_sec)
>> +
>> +obj-$(CONFIG_EXYNOS5420_MCPM) += mcpm-exynos.o
>> diff --git a/arch/arm/mach-exynos/mcpm-exynos.c b/arch/arm/mach-exynos/mcpm-exynos.c
>> new file mode 100644
>> index 0000000..d0f7461
>> --- /dev/null
>> +++ b/arch/arm/mach-exynos/mcpm-exynos.c
>> @@ -0,0 +1,345 @@
>> +/*
>> + * Copyright (c) 2014 Samsung Electronics Co., Ltd.
>> + * http://www.samsung.com
>> + *
>> + * arch/arm/mach-exynos/mcpm-exynos.c
>> + *
>> + * Based on arch/arm/mach-vexpress/dcscb.c
>> + *
>> + * This program is free software; you can redistribute it and/or modify
>> + * it under the terms of the GNU General Public License version 2 as
>> + * published by the Free Software Foundation.
>> + */
>> +
>> +#include <linux/arm-cci.h>
>> +#include <linux/delay.h>
>> +#include <linux/io.h>
>> +#include <linux/of_address.h>
>> +
>> +#include <asm/cputype.h>
>> +#include <asm/cp15.h>
>> +#include <asm/mcpm.h>
>> +
>> +#include "regs-pmu.h"
>> +#include "common.h"
>> +
>> +#define EXYNOS5420_CPUS_PER_CLUSTER 4
>> +#define EXYNOS5420_NR_CLUSTERS 2
>> +
>> +/* Non-secure iRAM base address */
>> +static void __iomem *ns_sram_base_addr;
>> +
>> +/*
>> + * The common v7_exit_coherency_flush API could not be used because of the
>> + * Erratum 799270 workaround. This macro is the same as the common one (in
>> + * arch/arm/include/asm/cacheflush.h) except for the erratum handling.
>> + */
>> +#define exynos_v7_exit_coherency_flush(level) \
>> + asm volatile( \
>> + "stmfd sp!, {fp, ip}\n\t"\
>> + "mrc p15, 0, r0, c1, c0, 0 @ get SCTLR\n\t" \
>> + "bic r0, r0, #"__stringify(CR_C)"\n\t" \
>> + "mcr p15, 0, r0, c1, c0, 0 @ set SCTLR\n\t" \
>> + "isb\n\t"\
>> + "bl v7_flush_dcache_"__stringify(level)"\n\t" \
>> + "clrex\n\t"\
>> + "mrc p15, 0, r0, c1, c0, 1 @ get ACTLR\n\t" \
>> + "bic r0, r0, #(1 << 6) @ disable local coherency\n\t" \
>> + /* Dummy Load of a device register to avoid Erratum 799270 */ \
>> + "ldr r4, [%0]\n\t" \
>> + "and r4, r4, #0\n\t" \
>> + "orr r0, r0, r4\n\t" \
>> + "mcr p15, 0, r0, c1, c0, 1 @ set ACTLR\n\t" \
>> + "isb\n\t" \
>> + "dsb\n\t" \
>> + "ldmfd sp!, {fp, ip}" \
>> + : \
>> + : "Ir" (S5P_INFORM0) \
>> + : "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", \
>> + "r9", "r10", "lr", "memory")
>> +
>> +/*
>> + * We can't use regular spinlocks. In the switcher case, it is possible
>> + * for an outbound CPU to call power_down() after its inbound counterpart
>> + * is already live using the same logical CPU number which trips lockdep
>> + * debugging.
>> + */
>> +static arch_spinlock_t exynos_mcpm_lock = __ARCH_SPIN_LOCK_UNLOCKED;
>> +static int
>> +cpu_use_count[EXYNOS5420_CPUS_PER_CLUSTER][EXYNOS5420_NR_CLUSTERS];
>> +
>> +#define exynos_cluster_unused(cluster) \
>> + (!cpu_use_count[0][cluster] && \
>> + !cpu_use_count[1][cluster] && \
>> + !cpu_use_count[2][cluster] && \
>> + !cpu_use_count[3][cluster])
>> +
>> +static int exynos_cluster_power_control(unsigned int cluster, int enable)
>> +{
>> + unsigned int tries = 100;
>> + unsigned int val;
>> +
>> + if (enable) {
>> + exynos_cluster_powerup(cluster);
>> + val = S5P_CORE_LOCAL_PWR_EN;
>> + } else {
>> + exynos_cluster_powerdown(cluster);
>> + val = 0;
>> + }
>> +
>> + /* Wait until cluster power control is applied */
>> + while (tries--) {
>> + if (exynos_cluster_power_state(cluster) == val)
>> + return 0;
>> +
>> + cpu_relax();
>> + }
>> + pr_warn("timed out waiting for cluster %u to power %s\n", cluster,
>> + enable ? "on" : "off");
>> +
>> + return -ETIMEDOUT;
>> +}
>> +
>> +static int exynos_power_up(unsigned int cpu, unsigned int cluster)
>> +{
>> + unsigned int cpunr = cpu + (cluster * EXYNOS5420_CPUS_PER_CLUSTER);
>> + int err = 0;
>> +
>> + pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
>> + if (cpu >= EXYNOS5420_CPUS_PER_CLUSTER ||
>> + cluster >= EXYNOS5420_NR_CLUSTERS)
>> + return -EINVAL;
>> +
>> + /*
>> + * Since this is called with IRQs enabled, and no arch_spin_lock_irq
>> + * variant exists, we need to disable IRQs manually here.
>> + */
>> + local_irq_disable();
>> + arch_spin_lock(&exynos_mcpm_lock);
>> +
>> + cpu_use_count[cpu][cluster]++;
>> + if (cpu_use_count[cpu][cluster] == 1) {
>> + bool was_cluster_down =
>> + __mcpm_cluster_state(cluster) == CLUSTER_DOWN;
>
> This is racy. I probably made this comment already. The MCPM cluster
> state may change in mcpm-head.S where concurrency protection is achieved
> with a different mechanism.
>
> What you should do instead is to turn exynos_cluster_unused() into an
> exynos_cluster_usecnt() helper that simply adds all the counts together.
> You could even have:
>
> #define exynos_cluster_unused(cluster) !exynos_cluster_usecnt(cluster)
>
> Yet, here you should use:
>
> bool was_cluster_down = (exynos_cluster_usecnt(cluster) == 1);
Fixed as per suggestion.
>
>> +
>> + /*
>> + * Turn on the cluster (L2/COMMON) and then power on the
>> + * cores.
>> + */
>> + if (was_cluster_down)
>> + err = exynos_cluster_power_control(cluster, 1);
>> +
>> + if (!err)
>> + exynos_cpu_powerup(cpunr);
>> + else
>> + exynos_cluster_power_control(cluster, 0);
>> + } else if (cpu_use_count[cpu][cluster] != 2) {
>> + /*
>> + * The only possible values are:
>> + * 0 = CPU down
>> + * 1 = CPU (still) up
>> + * 2 = CPU requested to be up before it had a chance
>> + * to actually make itself down.
>> + * Any other value is a bug.
>> + */
>> + BUG();
>> + }
>> +
>> + arch_spin_unlock(&exynos_mcpm_lock);
>> + local_irq_enable();
>> +
>> + return err;
>> +}
>> +
>> +static void exynos_power_down(void)
>> +{
>> + unsigned int mpidr, cpu, cluster;
>> + bool last_man = false, skip_wfi = false;
>> + unsigned int cpunr;
>> +
>> + mpidr = read_cpuid_mpidr();
>> + cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
>> + cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
>> + cpunr = cpu + (cluster * EXYNOS5420_CPUS_PER_CLUSTER);
>> +
>> + pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
>> + BUG_ON(cpu >= EXYNOS5420_CPUS_PER_CLUSTER ||
>> + cluster >= EXYNOS5420_NR_CLUSTERS);
>> +
>> + __mcpm_cpu_going_down(cpu, cluster);
>> +
>> + arch_spin_lock(&exynos_mcpm_lock);
>> + BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);
>> + cpu_use_count[cpu][cluster]--;
>> + if (cpu_use_count[cpu][cluster] == 0) {
>> + exynos_cpu_powerdown(cpunr);
>> +
>> + if (exynos_cluster_unused(cluster))
>> + last_man = true;
>> + } else if (cpu_use_count[cpu][cluster] == 1) {
>> + /*
>> + * A power_up request went ahead of us.
>> + * Even if we do not want to shut this CPU down,
>> + * the caller expects a certain state as if the WFI
>> + * was aborted. So let's continue with cache cleaning.
>> + */
>> + skip_wfi = true;
>> + } else {
>> + BUG();
>> + }
>> +
>> + /*
>> + * TODO: Turn off the clusters when all cores in the cluster
>> + * are down to achieve significant power savings.
>> + */
>
> This comment should actually be located right after the
> "if (exynos_cluster_unused(cluster))" above. That is where the cluster
> control should be applied, assuming it'll be effective only when WFI is
> executed.
OK.
>
>
>> + if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
>> + arch_spin_unlock(&exynos_mcpm_lock);
>> +
>> + /* Flush all cache levels for this cluster. */
>> + exynos_v7_exit_coherency_flush(all);
>> +
>> + /*
>> + * Disable cluster-level coherency by masking
>> + * incoming snoops and DVM messages:
>> + */
>> + cci_disable_port_by_cpu(mpidr);
>> +
>> + __mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
>> + } else {
>> + arch_spin_unlock(&exynos_mcpm_lock);
>> +
>> + if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A15) {
>> + /*
>> + * On the Cortex-A15 we need to disable
>> + * L2 prefetching before flushing the cache.
>> + */
>> + asm volatile(
>> + "mcr p15, 1, %0, c15, c0, 3\n\t"
>> + "isb\n\t"
>> + "dsb"
>> + : : "r" (0x400));
>> + }
>
> This doesn't belong here. That is for the last_man only to do, right
> before the "Flush all cache levels for this cluster" comment.
This was a bad miss on my part. Will fix.
>
> The rest looks fine to me.
Will post v5 soon.
Regards,
Abhilash
>
>
> Nicolas