// SPDX-License-Identifier: GPL-2.0-only
/*
 * Created by:	Nicolas Pitre, October 2012
 * Copyright:	(C) 2012-2013 Linaro Limited
 *
 * Some portions of this file were originally written by Achin Gupta
 * Copyright:	(C) 2012 ARM Limited
 */

#include <linux/delay.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/errno.h>
#include <linux/irqchip/arm-gic.h>

#include <asm/mcpm.h>
#include <asm/proc-fns.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/cp15.h>

#include <linux/arm-cci.h>

#include "spc.h"

/* SCC conf registers */
#define RESET_CTRL			0x018
#define RESET_A15_NCORERESET(cpu)	(1 << (2 + (cpu)))
#define RESET_A7_NCORERESET(cpu)	(1 << (16 + (cpu)))

#define A15_CONF			0x400
#define A7_CONF				0x500
#define SYS_INFO			0x700
#define SPC_BASE			0xb00

static void __iomem *scc;

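/*
 * TC2 (the V2P-CA15_A7 CoreTile) is a big.LITTLE system: one cluster
 * of two Cortex-A15 cores and one cluster of three Cortex-A7 cores,
 * hence the per-cluster maximum of 3 below.
 */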
#define TC2_CLUSTERS			2
#define TC2_MAX_CPUS_PER_CLUSTER	3

static unsigned int tc2_nr_cpus[TC2_CLUSTERS];

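/*
 * MCPM cpu_powerup handler: program the SPC with the common MCPM entry
 * point as the CPU's resume address and enable its wake-up IRQ so the
 * power controller brings the core out of reset.
 */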
static int tc2_pm_cpu_powerup(unsigned int cpu, unsigned int cluster)
{
	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	if (cluster >= TC2_CLUSTERS || cpu >= tc2_nr_cpus[cluster])
		return -EINVAL;
	ve_spc_set_resume_addr(cluster, cpu,
			       __pa_symbol(mcpm_entry_point));
	ve_spc_cpu_wakeup_irq(cluster, cpu, true);
	return 0;
}

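/*
 * MCPM cluster_powerup handler: withdraw any pending power-down request
 * for the cluster from the SPC so it is kept powered.
 */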
static int tc2_pm_cluster_powerup(unsigned int cluster)
{
	pr_debug("%s: cluster %u\n", __func__, cluster);
	if (cluster >= TC2_CLUSTERS)
		return -EINVAL;
	ve_spc_powerdown(cluster, false);
	return 0;
}

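/*
 * MCPM cpu_powerdown_prepare handler: arm the SPC wake-up IRQ for this
 * CPU and hand wake-up duty over to the power controller.
 */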
static void tc2_pm_cpu_powerdown_prepare(unsigned int cpu, unsigned int cluster)
{
	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER);
	ve_spc_cpu_wakeup_irq(cluster, cpu, true);
	/*
	 * If the CPU is committed to power down, make sure the power
	 * controller will be in charge of waking it up upon IRQ: cut
	 * the IRQ lines to the CPU by disabling the GIC CPU interface,
	 * so that wfi cannot complete behind the power controller's
	 * back.
	 */
	gic_cpu_if_down(0);
}

static void tc2_pm_cluster_powerdown_prepare(unsigned int cluster)
{
	pr_debug("%s: cluster %u\n", __func__, cluster);
	BUG_ON(cluster >= TC2_CLUSTERS);
	ve_spc_powerdown(cluster, true);
	ve_spc_global_wakeup_irq(true);
}

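/*
 * Flush and disable this CPU's data cache up to the Level of
 * Unification Inner Shareable and exit coherency; cache levels shared
 * with other CPUs in the cluster are left alone.
 */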
static void tc2_pm_cpu_cache_disable(void)
{
	v7_exit_coherency_flush(louis);
}

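/*
 * Last-man teardown: flush and disable the whole cache hierarchy for
 * this cluster, then disconnect the cluster from the CCI interconnect.
 */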
static void tc2_pm_cluster_cache_disable(void)
{
	if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A15) {
		/*
		 * On the Cortex-A15 we need to disable
		 * L2 prefetching before flushing the cache.
		 */
		asm volatile(
		"mcr	p15, 1, %0, c15, c0, 3 \n\t"
		"isb	\n\t"
		"dsb	"
		: : "r" (0x400) );
	}

	v7_exit_coherency_flush(all);
	cci_disable_port_by_cpu(read_cpuid_mpidr());
}

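/*
 * The nCORERESET bits in RESET_CTRL are active-low: a cleared bit means
 * the core is held in reset. Cluster 0 holds the A15 cores, cluster 1
 * the A7 cores.
 */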
static int tc2_core_in_reset(unsigned int cpu, unsigned int cluster)
{
	u32 mask = cluster ?
		  RESET_A7_NCORERESET(cpu)
		: RESET_A15_NCORERESET(cpu);

	return !(readl_relaxed(scc + RESET_CTRL) & mask);
}

#define POLL_MSEC 10
#define TIMEOUT_MSEC 1000

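/*
 * MCPM wait_for_powerdown handler: poll for up to TIMEOUT_MSEC until
 * the CPU is either held in reset or sitting in WFI, whichever the
 * power controller reaches first.
 */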
static int tc2_pm_wait_for_powerdown(unsigned int cpu, unsigned int cluster)
{
	unsigned tries;

	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER);

	for (tries = 0; tries < TIMEOUT_MSEC / POLL_MSEC; ++tries) {
		pr_debug("%s(cpu=%u, cluster=%u): RESET_CTRL = 0x%08X\n",
			 __func__, cpu, cluster,
			 readl_relaxed(scc + RESET_CTRL));

		/*
		 * We need the CPU to reach WFI, but the power
		 * controller may put the cluster in reset and
		 * power it off as soon as that happens, before
		 * we have a chance to see STANDBYWFI.
		 *
		 * So we need to check for both conditions:
		 */
		if (tc2_core_in_reset(cpu, cluster) ||
		    ve_spc_cpu_in_wfi(cpu, cluster))
			return 0; /* success: the CPU is halted */

		/* Otherwise, wait and retry: */
		msleep(POLL_MSEC);
	}

	return -ETIMEDOUT; /* timeout */
}

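/*
 * MCPM cpu_suspend_prepare handler: the CPU will resume through the
 * common MCPM entry point, so program it as the SPC resume address.
 */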
static void tc2_pm_cpu_suspend_prepare(unsigned int cpu, unsigned int cluster)
{
	ve_spc_set_resume_addr(cluster, cpu, __pa_symbol(mcpm_entry_point));
}

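/*
 * MCPM cpu_is_up handler: the CPU is running again, so its wake-up IRQ
 * and resume address in the SPC are no longer needed.
 */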
static void tc2_pm_cpu_is_up(unsigned int cpu, unsigned int cluster)
{
	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	BUG_ON(cluster >= TC2_CLUSTERS || cpu >= TC2_MAX_CPUS_PER_CLUSTER);
	ve_spc_cpu_wakeup_irq(cluster, cpu, false);
	ve_spc_set_resume_addr(cluster, cpu, 0);
}

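/*
 * MCPM cluster_is_up handler: revert the cluster power-down request and
 * the global wake-up IRQ armed by tc2_pm_cluster_powerdown_prepare().
 */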
static void tc2_pm_cluster_is_up(unsigned int cluster)
{
	pr_debug("%s: cluster %u\n", __func__, cluster);
	BUG_ON(cluster >= TC2_CLUSTERS);
	ve_spc_powerdown(cluster, false);
	ve_spc_global_wakeup_irq(false);
}

static const struct mcpm_platform_ops tc2_pm_power_ops = {
	.cpu_powerup		= tc2_pm_cpu_powerup,
	.cluster_powerup	= tc2_pm_cluster_powerup,
	.cpu_suspend_prepare	= tc2_pm_cpu_suspend_prepare,
	.cpu_powerdown_prepare	= tc2_pm_cpu_powerdown_prepare,
	.cluster_powerdown_prepare = tc2_pm_cluster_powerdown_prepare,
	.cpu_cache_disable	= tc2_pm_cpu_cache_disable,
	.cluster_cache_disable	= tc2_pm_cluster_cache_disable,
	.wait_for_powerdown	= tc2_pm_wait_for_powerdown,
	.cpu_is_up		= tc2_pm_cpu_is_up,
	.cluster_is_up		= tc2_pm_cluster_is_up,
};

/*
 * Enable cluster-level coherency, in preparation for turning on the MMU.
 */
static void __naked tc2_pm_power_up_setup(unsigned int affinity_level)
{
	asm volatile (" \n"
"	cmp	r0, #1 \n"
"	bxne	lr \n"
"	b	cci_enable_port_for_self ");
}

static int __init tc2_pm_init(void)
{
	unsigned int mpidr, cpu, cluster;
	int ret, irq;
	u32 a15_cluster_id, a7_cluster_id, sys_info;
	struct device_node *np;

	/*
	 * The power management-related features are hidden behind
	 * SCC registers. We need to extract runtime information like
	 * cluster ids and number of CPUs really available in clusters.
	 */
	np = of_find_compatible_node(NULL, NULL,
				     "arm,vexpress-scc,v2p-ca15_a7");
	scc = of_iomap(np, 0);
	if (!scc)
		return -ENODEV;

	a15_cluster_id = readl_relaxed(scc + A15_CONF) & 0xf;
	a7_cluster_id = readl_relaxed(scc + A7_CONF) & 0xf;
	if (a15_cluster_id >= TC2_CLUSTERS || a7_cluster_id >= TC2_CLUSTERS)
		return -EINVAL;

	sys_info = readl_relaxed(scc + SYS_INFO);
	tc2_nr_cpus[a15_cluster_id] = (sys_info >> 16) & 0xf;
	tc2_nr_cpus[a7_cluster_id] = (sys_info >> 20) & 0xf;

	irq = irq_of_parse_and_map(np, 0);

	/*
	 * A subset of the SCC registers is also used to communicate
	 * with the SPC (power controller). We need to be able to
	 * drive it very early in the boot process to power up
	 * processors, so we initialize the SPC driver here.
	 */
	ret = ve_spc_init(scc + SPC_BASE, a15_cluster_id, irq);
	if (ret)
		return ret;

	if (!cci_probed())
		return -ENODEV;

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	if (cluster >= TC2_CLUSTERS || cpu >= tc2_nr_cpus[cluster]) {
		pr_err("%s: boot CPU is out of bounds!\n", __func__);
		return -EINVAL;
	}

	ret = mcpm_platform_register(&tc2_pm_power_ops);
	if (!ret) {
		mcpm_sync_init(tc2_pm_power_up_setup);
		/* test if we can (re)enable the CCI on our own */
		BUG_ON(mcpm_loopback(tc2_pm_cluster_cache_disable) != 0);
		pr_info("TC2 power management initialized\n");
	}
	return ret;
}

early_initcall(tc2_pm_init);