/*
 * arch/arm/common/mcpm_entry.c -- entry point for multi-cluster PM
 *
 * Created by:  Nicolas Pitre, March 2012
 * Copyright:   (C) 2012-2013  Linaro Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irqflags.h>

#include <asm/mcpm.h>
#include <asm/cacheflush.h>
#include <asm/idmap.h>
#include <asm/cputype.h>

extern unsigned long mcpm_entry_vectors[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER];

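/*
 * mcpm_set_entry_vector: set the kernel re-entry address for a CPU.
 *    Records the physical address of @ptr (or 0 to clear the vector) in
 *    this CPU's slot of mcpm_entry_vectors, then cleans it from the cache
 *    so that a CPU coming out of reset with its cache still disabled
 *    observes the updated value.
 */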
void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr)
{
	unsigned long val = ptr ? virt_to_phys(ptr) : 0;
	mcpm_entry_vectors[cluster][cpu] = val;
	sync_cache_w(&mcpm_entry_vectors[cluster][cpu]);
}

static const struct mcpm_platform_ops *platform_ops;

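/*
 * mcpm_platform_register: register the platform specific backend.
 *    Only one set of platform operations may be registered; any
 *    subsequent caller gets -EBUSY.
 */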
int __init mcpm_platform_register(const struct mcpm_platform_ops *ops)
{
	if (platform_ops)
		return -EBUSY;
	platform_ops = ops;
	return 0;
}

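/*
 * mcpm_cpu_power_up: power up the given CPU in the given cluster.
 *    May sleep.  Returns -EUNATCH when no backend is registered, a
 *    deliberately unusual errno so it is not mistaken for an error
 *    coming from the backend's power_up method itself.
 */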
int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster)
{
	if (!platform_ops)
		return -EUNATCH; /* try not to shadow power_up errors */
	might_sleep();
	return platform_ops->power_up(cpu, cluster);
}

typedef void (*phys_reset_t)(unsigned long);

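/*
 * mcpm_cpu_power_down: power down the calling CPU.
 *    Must be called with IRQs disabled.  This never returns to the
 *    caller: either the CPU is powered off inside the platform
 *    power_down method, or, if a racing power_up request kept it
 *    alive, it resets itself back into the kernel through
 *    mcpm_entry_point (see the comment below).
 */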
void mcpm_cpu_power_down(void)
{
	phys_reset_t phys_reset;

	BUG_ON(!platform_ops);
	BUG_ON(!irqs_disabled());

	/*
	 * Do this before calling into the power_down method,
	 * as it might not always be safe to do afterwards.
	 */
	setup_mm_for_reboot();

	platform_ops->power_down();

	/*
	 * It is possible for a power_up request to happen concurrently
	 * with a power_down request for the same CPU. In this case the
	 * power_down method might not be able to actually enter a
	 * powered down state with the WFI instruction if the power_up
	 * method has removed the required reset condition.  The
	 * power_down method is then allowed to return. We must perform
	 * a re-entry into the kernel as if the power_up method had just
	 * deasserted reset on the CPU.
	 *
	 * To simplify race issues, the platform specific implementation
	 * must accommodate the possibility of unordered calls to
	 * power_down and power_up with a usage count. Therefore, if a
	 * call to power_up is issued for a CPU that is not down, then
	 * the next call to power_down must not attempt a full shutdown
	 * but only do the minimum (normally disabling L1 cache and CPU
	 * coherency) and return just as if a concurrent power_up request
	 * had happened as described above.
	 */

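	/*
	 * cpu_reset() disables the MMU, so it is entered through its
	 * physical address, which stays valid across the transition
	 * thanks to the identity mapping installed by
	 * setup_mm_for_reboot() above.  It then branches to
	 * mcpm_entry_point, re-entering the kernel as if this CPU had
	 * just been released from reset.
	 */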
	phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
	phys_reset(virt_to_phys(mcpm_entry_point));

	/* should never get here */
	BUG();
}

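/*
 * mcpm_cpu_suspend: put the calling CPU into a suspended state.
 *    Must be called with IRQs disabled.  @expected_residency is passed
 *    through to the platform suspend method as a hint of how long the
 *    CPU is expected to stay down.  As with mcpm_cpu_power_down(),
 *    this never returns to the caller: a wakeup re-enters the kernel
 *    at mcpm_entry_point.
 */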
void mcpm_cpu_suspend(u64 expected_residency)
{
	phys_reset_t phys_reset;

	BUG_ON(!platform_ops);
	BUG_ON(!irqs_disabled());

	/* Very similar to mcpm_cpu_power_down() */
	setup_mm_for_reboot();
	platform_ops->suspend(expected_residency);
	phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
	phys_reset(virt_to_phys(mcpm_entry_point));
	BUG();
}

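/*
 * mcpm_cpu_powered_up: notify the platform that this CPU is back up.
 *    The powered_up method is optional in the backend and is invoked
 *    only if provided.  Returns -EUNATCH if no backend is registered.
 */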
int mcpm_cpu_powered_up(void)
{
	if (!platform_ops)
		return -EUNATCH;
	if (platform_ops->powered_up)
		platform_ops->powered_up();
	return 0;
}

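/*
 * mcpm_sync holds the first man / last man coordination state.  It is
 * shared with CPUs whose caches may be off, so every access below goes
 * through the explicit sync_cache_w()/sync_cache_r() helpers.
 */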
struct sync_struct mcpm_sync;

/*
 * __mcpm_cpu_going_down: Indicates that the CPU is being torn down.
 *    This must be called at the point of committing to teardown of a CPU.
 *    The CPU cache (SCTLR.C bit) is expected to still be active.
 */
void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster)
{
	mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_GOING_DOWN;
	sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
}

/*
 * __mcpm_cpu_down: Indicates that CPU teardown is complete and that the
 *    cluster can be torn down without disrupting this CPU.
 *    To avoid deadlocks, this must be called before a CPU is powered down.
 *    The CPU cache (SCTLR.C bit) is expected to be off.
 *    However, the L2 cache might or might not be active.
 */
void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster)
{
	dmb();
	mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_DOWN;
	sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
	dsb_sev();
}

/*
 * __mcpm_outbound_leave_critical: Leave the cluster teardown critical section.
 * @state: the final state of the cluster:
 *     CLUSTER_UP: no destructive teardown was done and the cluster has been
 *         restored to the previous state (CPU cache still active); or
 *     CLUSTER_DOWN: the cluster has been torn down, ready for power-off
 *         (CPU cache disabled, L2 cache either enabled or disabled).
 */
void __mcpm_outbound_leave_critical(unsigned int cluster, int state)
{
	dmb();
	mcpm_sync.clusters[cluster].cluster = state;
	sync_cache_w(&mcpm_sync.clusters[cluster].cluster);
	dsb_sev();
}

/*
 * __mcpm_outbound_enter_critical: Enter the cluster teardown critical section.
 * This function should be called by the last man, after local CPU teardown
 * is complete.  The CPU cache is expected to be active.
 *
 * Returns:
 *     false: the critical section was not entered because an inbound CPU was
 *         observed, or the cluster is already being set up;
 *     true: the critical section was entered: it is now safe to tear down the
 *         cluster.
 */
bool __mcpm_outbound_enter_critical(unsigned int cpu, unsigned int cluster)
{
	unsigned int i;
	struct mcpm_sync_struct *c = &mcpm_sync.clusters[cluster];

	/* Warn inbound CPUs that the cluster is being torn down: */
	c->cluster = CLUSTER_GOING_DOWN;
	sync_cache_w(&c->cluster);

	/* Back out if the inbound cluster is already in the critical region: */
	sync_cache_r(&c->inbound);
	if (c->inbound == INBOUND_COMING_UP)
		goto abort;

	/*
	 * Wait for all CPUs to get out of the GOING_DOWN state, so that local
	 * teardown is complete on each CPU before tearing down the cluster.
	 *
	 * If any CPU has been woken up again from the DOWN state, then we
	 * shouldn't be taking the cluster down at all: abort in that case.
	 */
	sync_cache_r(&c->cpus);
	for (i = 0; i < MAX_CPUS_PER_CLUSTER; i++) {
		int cpustate;

		if (i == cpu)
			continue;

		while (1) {
			cpustate = c->cpus[i].cpu;
			if (cpustate != CPU_GOING_DOWN)
				break;

			wfe();
			sync_cache_r(&c->cpus[i].cpu);
		}

		switch (cpustate) {
		case CPU_DOWN:
			continue;

		default:
			goto abort;
		}
	}

	return true;

abort:
	__mcpm_outbound_leave_critical(cluster, CLUSTER_UP);
	return false;
}

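/*
 * __mcpm_cluster_state: Return the current state of the given cluster
 *    (CLUSTER_UP, CLUSTER_GOING_DOWN or CLUSTER_DOWN), re-reading it
 *    from memory in case another CPU updated it behind our cache.
 */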
int __mcpm_cluster_state(unsigned int cluster)
{
	sync_cache_r(&mcpm_sync.clusters[cluster].cluster);
	return mcpm_sync.clusters[cluster].cluster;
}

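/* Written here at init time; defined in and consumed by the low-level
   entry code (mcpm_head.S), which reads it with caches off. */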
extern unsigned long mcpm_power_up_setup_phys;

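/*
 * mcpm_sync_init: Set up the first man / last man synchronisation state.
 *    All clusters and CPUs start out DOWN, except for the boot cluster
 *    and its currently online CPUs, which are marked UP.  If a
 *    power_up_setup hook is given, its physical address is published
 *    for the early entry code to call before the MMU is enabled.
 */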
int __init mcpm_sync_init(
	void (*power_up_setup)(unsigned int affinity_level))
{
	unsigned int i, j, mpidr, this_cluster;

	BUILD_BUG_ON(MCPM_SYNC_CLUSTER_SIZE * MAX_NR_CLUSTERS != sizeof(mcpm_sync));
	BUG_ON((unsigned long)&mcpm_sync & (__CACHE_WRITEBACK_GRANULE - 1));

	/*
	 * Set initial CPU and cluster states.
	 * Only one cluster is assumed to be active at this point.
	 */
	for (i = 0; i < MAX_NR_CLUSTERS; i++) {
		mcpm_sync.clusters[i].cluster = CLUSTER_DOWN;
		mcpm_sync.clusters[i].inbound = INBOUND_NOT_COMING_UP;
		for (j = 0; j < MAX_CPUS_PER_CLUSTER; j++)
			mcpm_sync.clusters[i].cpus[j].cpu = CPU_DOWN;
	}
	mpidr = read_cpuid_mpidr();
	this_cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	for_each_online_cpu(i)
		mcpm_sync.clusters[this_cluster].cpus[i].cpu = CPU_UP;
	mcpm_sync.clusters[this_cluster].cluster = CLUSTER_UP;
	sync_cache_w(&mcpm_sync);

	if (power_up_setup) {
		mcpm_power_up_setup_phys = virt_to_phys(power_up_setup);
		sync_cache_w(&mcpm_power_up_setup_phys);
	}

	return 0;
}
264