/*
 * arch/arm/common/mcpm_entry.c -- entry point for multi-cluster PM
 *
 * Created by:  Nicolas Pitre, March 2012
 * Copyright:   (C) 2012-2013  Linaro Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irqflags.h>

#include <asm/mcpm.h>
#include <asm/cacheflush.h>
#include <asm/idmap.h>
#include <asm/cputype.h>

extern unsigned long mcpm_entry_vectors[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER];

void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr)
{
	unsigned long val = ptr ? virt_to_phys(ptr) : 0;
	mcpm_entry_vectors[cluster][cpu] = val;
	sync_cache_w(&mcpm_entry_vectors[cluster][cpu]);
}
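
/*
 * Note: a NULL ptr stores a zero vector.  The low-level entry code in
 * mcpm_head.S is expected to treat a zero vector as "no entry point yet"
 * and to keep the CPU gated in a WFE loop until a non-zero address is
 * published here.
 */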

static const struct mcpm_platform_ops *platform_ops;

int __init mcpm_platform_register(const struct mcpm_platform_ops *ops)
{
	if (platform_ops)
		return -EBUSY;
	platform_ops = ops;
	return 0;
}
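
/*
 * Example (illustrative sketch): a platform back-end defines one set of
 * methods and registers it once during early init.  The "myplat" names
 * are hypothetical; arch/arm/mach-vexpress/tc2_pm.c is a real user.
 * Registration is typically done together with mcpm_sync_init(), see
 * the sketch after that function below.
 *
 *	static const struct mcpm_platform_ops myplat_pm_ops = {
 *		.power_up	= myplat_pm_power_up,
 *		.power_down	= myplat_pm_power_down,
 *		.suspend	= myplat_pm_suspend,
 *		.powered_up	= myplat_pm_powered_up,
 *	};
 */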

int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster)
{
	if (!platform_ops)
		return -EUNATCH; /* try not to shadow power_up errors */
	might_sleep();
	return platform_ops->power_up(cpu, cluster);
}
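
/*
 * Example (condensed sketch, modeled on arch/arm/common/mcpm_platsmp.c):
 * the SMP boot path publishes the secondary entry point and then asks
 * for power.  Note that MCPM deals in hardware CPU/cluster numbers
 * taken from the MPIDR, not logical CPU ids:
 *
 *	static int mcpm_boot_secondary(unsigned int cpu, struct task_struct *idle)
 *	{
 *		unsigned int mpidr, pcpu, pcluster;
 *		extern void secondary_startup(void);
 *
 *		mpidr = cpu_logical_map(cpu);
 *		pcpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
 *		pcluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
 *		mcpm_set_entry_vector(pcpu, pcluster, secondary_startup);
 *		return mcpm_cpu_power_up(pcpu, pcluster);
 *	}
 */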

typedef void (*phys_reset_t)(unsigned long);

void mcpm_cpu_power_down(void)
{
	phys_reset_t phys_reset;

	if (WARN_ON_ONCE(!platform_ops || !platform_ops->power_down))
		return;
	BUG_ON(!irqs_disabled());

	/*
	 * Do this before calling into the power_down method,
	 * as it might not always be safe to do afterwards.
	 */
	setup_mm_for_reboot();

	platform_ops->power_down();

	/*
	 * It is possible for a power_up request to happen concurrently
	 * with a power_down request for the same CPU. In this case the
	 * power_down method might not be able to actually enter a
	 * powered down state with the WFI instruction if the power_up
	 * method has removed the required reset condition.  The
	 * power_down method is then allowed to return. We must perform
	 * a re-entry into the kernel as if the power_up method had just
	 * deasserted reset on the CPU.
	 *
	 * To simplify race issues, the platform-specific implementation
	 * must accommodate the possibility of unordered calls to
	 * power_down and power_up with a usage count. Therefore, if a
	 * call to power_up is issued for a CPU that is not down, then
	 * the next call to power_down must not attempt a full shutdown
	 * but only do the minimum (normally disabling L1 cache and CPU
	 * coherency) and return just as if a concurrent power_up request
	 * had happened as described above.
	 */

	phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
	phys_reset(virt_to_phys(mcpm_entry_point));

	/* should never get here */
	BUG();
}
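
/*
 * Illustration of the usage-count contract described above, as a
 * minimal, hypothetical back-end (all "myplat" names are invented;
 * see arch/arm/mach-vexpress/tc2_pm.c for a real implementation).
 * Both paths do the minimal local teardown; only the zero-count path
 * proceeds to WFI, while the loser of a power_up/power_down race
 * returns so that the code above re-enters the kernel:
 *
 *	static void myplat_pm_power_down(void)
 *	{
 *		unsigned int mpidr = read_cpuid_mpidr();
 *		unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
 *		unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
 *		bool skip_wfi;
 *
 *		arch_spin_lock(&myplat_lock);
 *		skip_wfi = --myplat_use_count[cpu][cluster] != 0;
 *		arch_spin_unlock(&myplat_lock);
 *
 *		myplat_exit_coherency(cpu, cluster);
 *
 *		if (!skip_wfi)
 *			wfi();
 *	}
 */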

void mcpm_cpu_suspend(u64 expected_residency)
{
	phys_reset_t phys_reset;

	if (WARN_ON_ONCE(!platform_ops || !platform_ops->suspend))
		return;
	BUG_ON(!irqs_disabled());

	/* Very similar to mcpm_cpu_power_down() */
	setup_mm_for_reboot();
	platform_ops->suspend(expected_residency);
	phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
	phys_reset(virt_to_phys(mcpm_entry_point));
	BUG();
}
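
/*
 * Sketch of a typical caller: since this function does not return on the
 * normal path, cpuidle back-ends such as drivers/cpuidle/cpuidle-big_little.c
 * call it from a cpu_suspend() finisher, after pointing the entry vector
 * at cpu_resume so that the wakeup re-enters through the saved context:
 *
 *	static int bl_powerdown_finisher(unsigned long arg)
 *	{
 *		unsigned int mpidr = read_cpuid_mpidr();
 *		unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
 *		unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
 *
 *		mcpm_set_entry_vector(cpu, cluster, cpu_resume);
 *		mcpm_cpu_suspend(0);
 *		return 1;
 *	}
 */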

int mcpm_cpu_powered_up(void)
{
	if (!platform_ops)
		return -EUNATCH;
	if (platform_ops->powered_up)
		platform_ops->powered_up();
	return 0;
}

struct sync_struct mcpm_sync;

/*
 * __mcpm_cpu_going_down: Indicates that the cpu is being torn down.
 *    This must be called at the point of committing to teardown of a CPU.
 *    The CPU cache (SCTLR.C bit) is expected to still be active.
 */
void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster)
{
	mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_GOING_DOWN;
	sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
}

/*
 * __mcpm_cpu_down: Indicates that cpu teardown is complete and that the
 *    cluster can be torn down without disrupting this CPU.
 *    To avoid deadlocks, this must be called before a CPU is powered down.
 *    The CPU cache (SCTLR.C bit) is expected to be off.
 *    However L2 cache might or might not be active.
 */
void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster)
{
	dmb();
	mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_DOWN;
	sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
	dsb_sev();
}

/*
 * __mcpm_outbound_leave_critical: Leave the cluster teardown critical section.
 * @state: the final state of the cluster:
 *     CLUSTER_UP: no destructive teardown was done and the cluster has been
 *         restored to the previous state (CPU cache still active); or
 *     CLUSTER_DOWN: the cluster has been torn down, ready for power-off
 *         (CPU cache disabled, L2 cache either enabled or disabled).
 */
void __mcpm_outbound_leave_critical(unsigned int cluster, int state)
{
	dmb();
	mcpm_sync.clusters[cluster].cluster = state;
	sync_cache_w(&mcpm_sync.clusters[cluster].cluster);
	dsb_sev();
}

/*
 * __mcpm_outbound_enter_critical: Enter the cluster teardown critical section.
 * This function should be called by the last man, after local CPU teardown
 * is complete.  CPU cache expected to be active.
 *
 * Returns:
 *     false: the critical section was not entered because an inbound CPU was
 *         observed, or the cluster is already being set up;
 *     true: the critical section was entered: it is now safe to tear down the
 *         cluster.
 */
bool __mcpm_outbound_enter_critical(unsigned int cpu, unsigned int cluster)
{
	unsigned int i;
	struct mcpm_sync_struct *c = &mcpm_sync.clusters[cluster];

	/* Warn inbound CPUs that the cluster is being torn down: */
	c->cluster = CLUSTER_GOING_DOWN;
	sync_cache_w(&c->cluster);

	/* Back out if the inbound cluster is already in the critical region: */
	sync_cache_r(&c->inbound);
	if (c->inbound == INBOUND_COMING_UP)
		goto abort;

	/*
	 * Wait for all CPUs to get out of the GOING_DOWN state, so that local
	 * teardown is complete on each CPU before tearing down the cluster.
	 *
	 * If any CPU has been woken up again from the DOWN state, then we
	 * shouldn't be taking the cluster down at all: abort in that case.
	 */
	sync_cache_r(&c->cpus);
	for (i = 0; i < MAX_CPUS_PER_CLUSTER; i++) {
		int cpustate;

		if (i == cpu)
			continue;

		while (1) {
			cpustate = c->cpus[i].cpu;
			if (cpustate != CPU_GOING_DOWN)
				break;

			wfe();
			sync_cache_r(&c->cpus[i].cpu);
		}

		switch (cpustate) {
		case CPU_DOWN:
			continue;

		default:
			goto abort;
		}
	}

	return true;

abort:
	__mcpm_outbound_leave_critical(cluster, CLUSTER_UP);
	return false;
}
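
/*
 * Illustration: a platform's power_down/suspend method is expected to
 * drive the state machine above roughly as follows (hypothetical helpers,
 * cluster cache maintenance omitted; see
 * Documentation/arm/cluster-pm-race-avoidance.txt for the full protocol):
 *
 *	__mcpm_cpu_going_down(cpu, cluster);
 *
 *	if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
 *		myplat_cluster_teardown(cpu, cluster);
 *		__mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
 *	} else {
 *		myplat_cpu_teardown(cpu, cluster);
 *	}
 *
 *	__mcpm_cpu_down(cpu, cluster);
 *	wfi();
 */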

int __mcpm_cluster_state(unsigned int cluster)
{
	sync_cache_r(&mcpm_sync.clusters[cluster].cluster);
	return mcpm_sync.clusters[cluster].cluster;
}

extern unsigned long mcpm_power_up_setup_phys;

int __init mcpm_sync_init(
	void (*power_up_setup)(unsigned int affinity_level))
{
	unsigned int i, j, mpidr, this_cluster;

	BUILD_BUG_ON(MCPM_SYNC_CLUSTER_SIZE * MAX_NR_CLUSTERS != sizeof mcpm_sync);
	BUG_ON((unsigned long)&mcpm_sync & (__CACHE_WRITEBACK_GRANULE - 1));

	/*
	 * Set initial CPU and cluster states.
	 * Only one cluster is assumed to be active at this point.
	 */
	for (i = 0; i < MAX_NR_CLUSTERS; i++) {
		mcpm_sync.clusters[i].cluster = CLUSTER_DOWN;
		mcpm_sync.clusters[i].inbound = INBOUND_NOT_COMING_UP;
		for (j = 0; j < MAX_CPUS_PER_CLUSTER; j++)
			mcpm_sync.clusters[i].cpus[j].cpu = CPU_DOWN;
	}
	mpidr = read_cpuid_mpidr();
	this_cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	for_each_online_cpu(i)
		mcpm_sync.clusters[this_cluster].cpus[i].cpu = CPU_UP;
	mcpm_sync.clusters[this_cluster].cluster = CLUSTER_UP;
	sync_cache_w(&mcpm_sync);

	if (power_up_setup) {
		mcpm_power_up_setup_phys = virt_to_phys(power_up_setup);
		sync_cache_w(&mcpm_power_up_setup_phys);
	}

	return 0;
}
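
/*
 * Example (sketch): a platform calls this once from its early machine
 * init, alongside mcpm_platform_register().  The power_up_setup argument
 * is run by mcpm_head.S on the way back in, by physical address with the
 * MMU and caches still off, so it must be written accordingly (usually
 * in assembly).  "myplat" names are hypothetical:
 *
 *	static int __init myplat_mcpm_init(void)
 *	{
 *		int ret = mcpm_platform_register(&myplat_pm_ops);
 *		if (!ret)
 *			ret = mcpm_sync_init(myplat_power_up_setup);
 *		return ret;
 *	}
 *	early_initcall(myplat_mcpm_init);
 */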