/*
 * arch/arm/common/mcpm_entry.c -- entry point for multi-cluster PM
 *
 * Created by:  Nicolas Pitre, March 2012
 * Copyright:   (C) 2012-2013  Linaro Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irqflags.h>

#include <asm/mcpm.h>
#include <asm/cacheflush.h>
#include <asm/idmap.h>
#include <asm/cputype.h>

extern unsigned long mcpm_entry_vectors[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER];

void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr)
{
	unsigned long val = ptr ? virt_to_phys(ptr) : 0;
	mcpm_entry_vectors[cluster][cpu] = val;
	sync_cache_w(&mcpm_entry_vectors[cluster][cpu]);
}

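/*
 * Usage sketch, modelled on the MCPM SMP boot path (mcpm_platsmp.c):
 * before a secondary CPU is powered up, its entry vector is pointed at
 * the kernel's secondary entry so mcpm_entry_point knows where to branch.
 * This illustrates the calling convention; it is not a verbatim copy of
 * that file.
 *
 *	static int mcpm_boot_secondary(unsigned int cpu, struct task_struct *idle)
 *	{
 *		unsigned int mpidr = cpu_logical_map(cpu);
 *		unsigned int pcpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
 *		unsigned int pcluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
 *
 *		mcpm_set_entry_vector(pcpu, pcluster, secondary_startup);
 *		return mcpm_cpu_power_up(pcpu, pcluster);
 *	}
 */
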
extern unsigned long mcpm_entry_early_pokes[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER][2];

void mcpm_set_early_poke(unsigned cpu, unsigned cluster,
			 unsigned long poke_phys_addr, unsigned long poke_val)
{
	unsigned long *poke = &mcpm_entry_early_pokes[cluster][cpu][0];
	poke[0] = poke_phys_addr;
	poke[1] = poke_val;
	__cpuc_flush_dcache_area((void *)poke, 8);
	outer_clean_range(__pa(poke), __pa(poke + 2));
}

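/*
 * Usage sketch (hypothetical platform): the pokes are consumed by the
 * low-level entry code while a CPU comes back up, so a platform whose
 * boot ROM parks CPUs on a mailbox can use one to release an inbound
 * CPU.  FOO_MBOX_PHYS and FOO_MBOX_GO are made-up names.
 *
 *	mcpm_set_early_poke(cpu, cluster, FOO_MBOX_PHYS(cpu), FOO_MBOX_GO);
 */
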
static const struct mcpm_platform_ops *platform_ops;

int __init mcpm_platform_register(const struct mcpm_platform_ops *ops)
{
	if (platform_ops)
		return -EBUSY;
	platform_ops = ops;
	return 0;
}

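/*
 * Registration sketch (hypothetical backend): a platform supplies its
 * power-control callbacks in an mcpm_platform_ops and registers them
 * once at init time; a second registration fails with -EBUSY.  The
 * foo_pm_* names are placeholders, not a real driver.
 *
 *	static const struct mcpm_platform_ops foo_pm_ops = {
 *		.power_up		= foo_pm_power_up,
 *		.power_down		= foo_pm_power_down,
 *		.power_down_finish	= foo_pm_power_down_finish,
 *		.suspend		= foo_pm_suspend,
 *		.powered_up		= foo_pm_powered_up,
 *	};
 *
 *	static int __init foo_pm_init(void)
 *	{
 *		return mcpm_platform_register(&foo_pm_ops);
 *	}
 */
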
int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster)
{
	if (!platform_ops)
		return -EUNATCH; /* try not to shadow power_up errors */
	might_sleep();
	return platform_ops->power_up(cpu, cluster);
}

typedef void (*phys_reset_t)(unsigned long);

void mcpm_cpu_power_down(void)
{
	phys_reset_t phys_reset;

	if (WARN_ON_ONCE(!platform_ops || !platform_ops->power_down))
		return;
	BUG_ON(!irqs_disabled());

	/*
	 * Do this before calling into the power_down method,
	 * as it might not always be safe to do afterwards.
	 */
	setup_mm_for_reboot();

	platform_ops->power_down();

	/*
	 * It is possible for a power_up request to happen concurrently
	 * with a power_down request for the same CPU. In this case the
	 * power_down method might not be able to actually enter a
	 * powered down state with the WFI instruction if the power_up
	 * method has removed the required reset condition.  The
	 * power_down method is then allowed to return. We must perform
	 * a re-entry into the kernel as if the power_up method had just
	 * deasserted reset on the CPU.
	 *
	 * To simplify race handling, the platform specific implementation
	 * must accommodate the possibility of unordered calls to
	 * power_down and power_up by means of a usage count. Therefore,
	 * if a call to power_up is issued for a CPU that is not down, then
	 * the next call to power_down must not attempt a full shutdown
	 * but only do the minimum (normally disabling the L1 cache and CPU
	 * coherency) and return just as if a concurrent power_up request
	 * had happened as described above.
	 */

	phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
	phys_reset(virt_to_phys(mcpm_entry_point));

	/* should never get here */
	BUG();
}

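/*
 * Backend sketch (hypothetical) of the usage-count rule described above:
 * power_up and power_down adjust a per-CPU count under a common lock,
 * and power_down commits to a full teardown only when the count really
 * drops to zero.  Cache and coherency details are elided.
 *
 *	static void foo_pm_power_down(void)
 *	{
 *		unsigned int mpidr = read_cpuid_mpidr();
 *		unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
 *		unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
 *		bool last_ref;
 *
 *		arch_spin_lock(&foo_pm_lock);
 *		last_ref = --foo_use_count[cluster][cpu] == 0;
 *		arch_spin_unlock(&foo_pm_lock);
 *
 *		if (last_ref) {
 *			...	// full teardown: caches off, coherency exit, wfi()
 *		} else {
 *			...	// a power_up raced with us: do the minimum,
 *				// then return and re-enter via mcpm_entry_point
 *		}
 *	}
 */
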
int mcpm_cpu_power_down_finish(unsigned int cpu, unsigned int cluster)
{
	int ret;

	if (WARN_ON_ONCE(!platform_ops || !platform_ops->power_down_finish))
		return -EUNATCH;

	ret = platform_ops->power_down_finish(cpu, cluster);
	if (ret)
		pr_warn("%s: cpu %u, cluster %u failed to power down (%d)\n",
			__func__, cpu, cluster, ret);

	return ret;
}

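/*
 * Usage sketch: the MCPM hotplug code (mcpm_platsmp.c) calls this from
 * its cpu_kill hook so another CPU can confirm the dying CPU is really
 * powered off before, e.g., cutting its power rail.  Roughly:
 *
 *	static int mcpm_cpu_kill(unsigned int cpu)
 *	{
 *		unsigned int mpidr = cpu_logical_map(cpu);
 *		unsigned int pcpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
 *		unsigned int pcluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
 *
 *		return !mcpm_cpu_power_down_finish(pcpu, pcluster);
 *	}
 */
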
void mcpm_cpu_suspend(u64 expected_residency)
{
	phys_reset_t phys_reset;

	if (WARN_ON_ONCE(!platform_ops || !platform_ops->suspend))
		return;
	BUG_ON(!irqs_disabled());

	/* Very similar to mcpm_cpu_power_down() */
	setup_mm_for_reboot();
	platform_ops->suspend(expected_residency);
	phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
	phys_reset(virt_to_phys(mcpm_entry_point));
	BUG();
}

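/*
 * Usage sketch: a cpuidle driver for a deep idle state would call this
 * from its enter() hook with interrupts already disabled, passing its
 * expected residency so the backend can pick a matching power state
 * (0 is conventionally used when the residency is unknown):
 *
 *	mcpm_cpu_suspend(0);	// resumes via mcpm_entry_point, not here
 */
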
int mcpm_cpu_powered_up(void)
{
	if (!platform_ops)
		return -EUNATCH;
	if (platform_ops->powered_up)
		platform_ops->powered_up();
	return 0;
}

struct sync_struct mcpm_sync;

/*
 * __mcpm_cpu_going_down: Indicates that the CPU is being torn down.
 *    This must be called at the point of committing to teardown of a CPU.
 *    The CPU cache (SCTLR.C bit) is expected to still be active.
 */
void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster)
{
	mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_GOING_DOWN;
	sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
}

/*
 * __mcpm_cpu_down: Indicates that CPU teardown is complete and that the
 *    cluster can be torn down without disrupting this CPU.
 *    To avoid deadlocks, this must be called before a CPU is powered down.
 *    The CPU cache (SCTLR.C bit) is expected to be off.
 *    However, the L2 cache might or might not be active.
 */
void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster)
{
	dmb();
	mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_DOWN;
	sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
	dsb_sev();
}

/*
 * __mcpm_outbound_leave_critical: Leave the cluster teardown critical section.
 * @state: the final state of the cluster:
 *     CLUSTER_UP: no destructive teardown was done and the cluster has been
 *         restored to the previous state (CPU cache still active); or
 *     CLUSTER_DOWN: the cluster has been torn down, ready for power-off
 *         (CPU cache disabled, L2 cache either enabled or disabled).
 */
void __mcpm_outbound_leave_critical(unsigned int cluster, int state)
{
	dmb();
	mcpm_sync.clusters[cluster].cluster = state;
	sync_cache_w(&mcpm_sync.clusters[cluster].cluster);
	dsb_sev();
}

/*
 * __mcpm_outbound_enter_critical: Enter the cluster teardown critical section.
 * This function should be called by the last man, after local CPU teardown
 * is complete.  The CPU cache is expected to be active.
 *
 * Returns:
 *     false: the critical section was not entered because an inbound CPU was
 *         observed, or the cluster is already being set up;
 *     true: the critical section was entered: it is now safe to tear down the
 *         cluster.
 */
bool __mcpm_outbound_enter_critical(unsigned int cpu, unsigned int cluster)
{
	unsigned int i;
	struct mcpm_sync_struct *c = &mcpm_sync.clusters[cluster];

	/* Warn inbound CPUs that the cluster is being torn down: */
	c->cluster = CLUSTER_GOING_DOWN;
	sync_cache_w(&c->cluster);

	/* Back out if the inbound cluster is already in the critical region: */
	sync_cache_r(&c->inbound);
	if (c->inbound == INBOUND_COMING_UP)
		goto abort;

	/*
	 * Wait for all CPUs to get out of the GOING_DOWN state, so that local
	 * teardown is complete on each CPU before tearing down the cluster.
	 *
	 * If any CPU has been woken up again from the DOWN state, then we
	 * shouldn't be taking the cluster down at all: abort in that case.
	 */
	sync_cache_r(&c->cpus);
	for (i = 0; i < MAX_CPUS_PER_CLUSTER; i++) {
		int cpustate;

		if (i == cpu)
			continue;

		while (1) {
			cpustate = c->cpus[i].cpu;
			if (cpustate != CPU_GOING_DOWN)
				break;

			wfe();
			sync_cache_r(&c->cpus[i].cpu);
		}

		switch (cpustate) {
		case CPU_DOWN:
			continue;

		default:
			goto abort;
		}
	}

	return true;

abort:
	__mcpm_outbound_leave_critical(cluster, CLUSTER_UP);
	return false;
}

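/*
 * Protocol sketch (hypothetical backend) showing how the helpers above
 * combine in a platform's power_down path.  Every CPU announces its own
 * teardown; only the last man may tear the cluster down, and only from
 * inside the critical section.  The last_man test and foo_* teardown
 * helpers are placeholders.
 *
 *	__mcpm_cpu_going_down(cpu, cluster);
 *	if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
 *		foo_cluster_teardown();		// caches off, coherency exit
 *		__mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
 *	} else {
 *		foo_cpu_teardown();		// local CPU only
 *	}
 *	__mcpm_cpu_down(cpu, cluster);
 *	wfi();		// may fall through if a power_up raced with us
 */
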
int __mcpm_cluster_state(unsigned int cluster)
{
	sync_cache_r(&mcpm_sync.clusters[cluster].cluster);
	return mcpm_sync.clusters[cluster].cluster;
}

extern unsigned long mcpm_power_up_setup_phys;

int __init mcpm_sync_init(
	void (*power_up_setup)(unsigned int affinity_level))
{
	unsigned int i, j, mpidr, this_cluster;

	BUILD_BUG_ON(MCPM_SYNC_CLUSTER_SIZE * MAX_NR_CLUSTERS != sizeof mcpm_sync);
	BUG_ON((unsigned long)&mcpm_sync & (__CACHE_WRITEBACK_GRANULE - 1));

	/*
	 * Set initial CPU and cluster states.
	 * Only one cluster is assumed to be active at this point.
	 */
	for (i = 0; i < MAX_NR_CLUSTERS; i++) {
		mcpm_sync.clusters[i].cluster = CLUSTER_DOWN;
		mcpm_sync.clusters[i].inbound = INBOUND_NOT_COMING_UP;
		for (j = 0; j < MAX_CPUS_PER_CLUSTER; j++)
			mcpm_sync.clusters[i].cpus[j].cpu = CPU_DOWN;
	}
	mpidr = read_cpuid_mpidr();
	this_cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	for_each_online_cpu(i)
		mcpm_sync.clusters[this_cluster].cpus[i].cpu = CPU_UP;
	mcpm_sync.clusters[this_cluster].cluster = CLUSTER_UP;
	sync_cache_w(&mcpm_sync);

	if (power_up_setup) {
		mcpm_power_up_setup_phys = virt_to_phys(power_up_setup);
		sync_cache_w(&mcpm_power_up_setup_phys);
	}

	return 0;
}
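
/*
 * Init sketch (hypothetical): a platform ties everything together by
 * registering its ops and handing its power_up_setup helper (typically
 * written in assembly, as it runs with the MMU off) to this function.
 * The foo_* names are placeholders.
 *
 *	extern void foo_pm_power_up_setup(unsigned int affinity_level);
 *
 *	static int __init foo_mcpm_init(void)
 *	{
 *		int ret = mcpm_platform_register(&foo_pm_ops);
 *		if (!ret)
 *			ret = mcpm_sync_init(foo_pm_power_up_setup);
 *		return ret;
 *	}
 *	early_initcall(foo_mcpm_init);
 */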