xref: /openbmc/linux/arch/arm/mach-tegra/pm.c (revision 15e3ae36)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * CPU complex suspend & resume functions for Tegra SoCs
4  *
5  * Copyright (c) 2009-2012, NVIDIA Corporation. All rights reserved.
6  */
7 
8 #include <linux/clk/tegra.h>
9 #include <linux/cpumask.h>
10 #include <linux/cpu_pm.h>
11 #include <linux/delay.h>
12 #include <linux/err.h>
13 #include <linux/io.h>
14 #include <linux/kernel.h>
15 #include <linux/slab.h>
16 #include <linux/spinlock.h>
17 #include <linux/suspend.h>
18 
19 #include <linux/firmware/trusted_foundations.h>
20 
21 #include <soc/tegra/flowctrl.h>
22 #include <soc/tegra/fuse.h>
23 #include <soc/tegra/pm.h>
24 #include <soc/tegra/pmc.h>
25 
26 #include <asm/cacheflush.h>
27 #include <asm/firmware.h>
28 #include <asm/idmap.h>
29 #include <asm/proc-fns.h>
30 #include <asm/smp_plat.h>
31 #include <asm/suspend.h>
32 #include <asm/tlbflush.h>
33 
34 #include "iomap.h"
35 #include "pm.h"
36 #include "reset.h"
37 #include "sleep.h"
38 
39 #ifdef CONFIG_PM_SLEEP
/* Serializes updates to the LP2 CPU bitmask (tegra_cpu_lp2_mask). */
static DEFINE_SPINLOCK(tegra_lp2_lock);
/* Size of, and SDRAM-side backup buffer for, the IRAM region that the
 * LP0/LP1 resume code overwrites; allocated in tegra_lp1_iram_hook(). */
static u32 iram_save_size;
static void *iram_save_addr;
/* Bounds of the LP1 reset-vector/self-refresh code copied into IRAM;
 * expected to be filled in by the SoC-specific *_lp1_iram_hook() calls. */
struct tegra_lp1_iram tegra_lp1_iram;
/* SoC-specific CPU power-down routine, chosen in tegra_tear_down_cpu_init(). */
void (*tegra_tear_down_cpu)(void);
/* SoC-specific LP1 core-sleep finisher; tegra_sleep_core_init() treats a
 * NULL value here as "LP1 unsupported". */
void (*tegra_sleep_core_finish)(unsigned long v2p);
/* Sleep routine handed to cpu_suspend(); selected per suspend mode in
 * tegra_init_suspend(). */
static int (*tegra_sleep_func)(unsigned long v2p);
47 
48 static void tegra_tear_down_cpu_init(void)
49 {
50 	switch (tegra_get_chip_id()) {
51 	case TEGRA20:
52 		if (IS_ENABLED(CONFIG_ARCH_TEGRA_2x_SOC))
53 			tegra_tear_down_cpu = tegra20_tear_down_cpu;
54 		break;
55 	case TEGRA30:
56 	case TEGRA114:
57 	case TEGRA124:
58 		if (IS_ENABLED(CONFIG_ARCH_TEGRA_3x_SOC) ||
59 		    IS_ENABLED(CONFIG_ARCH_TEGRA_114_SOC) ||
60 		    IS_ENABLED(CONFIG_ARCH_TEGRA_124_SOC))
61 			tegra_tear_down_cpu = tegra30_tear_down_cpu;
62 		break;
63 	}
64 }
65 
/*
 * restore_cpu_complex - undo the work of suspend_cpu_complex()
 *
 * Re-applies the saved CPU clock configuration and clears the flow
 * controller's suspend state for this CPU. Must always run on CPU 0.
 */
static void restore_cpu_complex(void)
{
	int cpuid = smp_processor_id();

	BUG_ON(cpuid != 0);

#ifdef CONFIG_SMP
	/* The flow controller is indexed by the physical CPU id. */
	cpuid = cpu_logical_map(cpuid);
#endif

	/* Restore the CPU clock settings saved on suspend entry. */
	tegra_cpu_clock_resume();

	flowctrl_cpu_suspend_exit(cpuid);
}
88 
/*
 * suspend_cpu_complex - prepare the CPU complex for suspend
 *
 * Saves the CPU clock/PLL state for later restoration and arms the flow
 * controller for the transition to the suspend state. Must always run
 * on CPU 0.
 */
static void suspend_cpu_complex(void)
{
	int cpuid = smp_processor_id();

	BUG_ON(cpuid != 0);

#ifdef CONFIG_SMP
	/* The flow controller is indexed by the physical CPU id. */
	cpuid = cpu_logical_map(cpuid);
#endif

	/* Save the CPU clock settings for restore_cpu_complex(). */
	tegra_cpu_clock_suspend();

	flowctrl_cpu_suspend_enter(cpuid);
}
112 
113 void tegra_pm_clear_cpu_in_lp2(void)
114 {
115 	int phy_cpu_id = cpu_logical_map(smp_processor_id());
116 	u32 *cpu_in_lp2 = tegra_cpu_lp2_mask;
117 
118 	spin_lock(&tegra_lp2_lock);
119 
120 	BUG_ON(!(*cpu_in_lp2 & BIT(phy_cpu_id)));
121 	*cpu_in_lp2 &= ~BIT(phy_cpu_id);
122 
123 	spin_unlock(&tegra_lp2_lock);
124 }
125 
126 void tegra_pm_set_cpu_in_lp2(void)
127 {
128 	int phy_cpu_id = cpu_logical_map(smp_processor_id());
129 	u32 *cpu_in_lp2 = tegra_cpu_lp2_mask;
130 
131 	spin_lock(&tegra_lp2_lock);
132 
133 	BUG_ON((*cpu_in_lp2 & BIT(phy_cpu_id)));
134 	*cpu_in_lp2 |= BIT(phy_cpu_id);
135 
136 	spin_unlock(&tegra_lp2_lock);
137 }
138 
/*
 * tegra_sleep_cpu - cpu_suspend() sleep routine for LP2
 * @v2p: virtual-to-physical offset passed through to the finisher
 *
 * Runs with the MMU still on. Disables the outer cache (when maintained
 * via Trusted Foundations), notifies the firmware, then hands off to the
 * assembly finisher, which must not return. Returns -EBUSY only when the
 * CPU rail is not ready to be turned off.
 */
static int tegra_sleep_cpu(unsigned long v2p)
{
	if (tegra_cpu_car_ops->rail_off_ready &&
	    WARN_ON(!tegra_cpu_rail_off_ready()))
		return -EBUSY;

	/*
	 * L2 cache disabling using kernel API only allowed when all
	 * secondary CPU's are offline. Cache have to be disabled with
	 * MMU-on if cache maintenance is done via Trusted Foundations
	 * firmware. Note that CPUIDLE won't ever enter powergate on Tegra30
	 * if any of secondary CPU's is online and this is the LP2-idle
	 * code-path only for Tegra20/30.
	 */
#ifdef CONFIG_OUTER_CACHE
	if (trusted_foundations_registered() && outer_cache.disable)
		outer_cache.disable();
#endif
	/*
	 * Note that besides of setting up CPU reset vector this firmware
	 * call may also do the following, depending on the FW version:
	 *  1) Disable L2. But this doesn't matter since we already
	 *     disabled the L2.
	 *  2) Disable D-cache. This need to be taken into account in
	 *     particular by the tegra_disable_clean_inv_dcache() which
	 *     shall avoid the re-disable.
	 */
	call_firmware_op(prepare_idle, TF_PM_MODE_LP2);

	setup_mm_for_reboot();
	tegra_sleep_cpu_finish(v2p);

	/* The finisher must not return; reaching this point is fatal. */
	BUG();

	return 0;
}
176 
177 static void tegra_pm_set(enum tegra_suspend_mode mode)
178 {
179 	u32 value;
180 
181 	switch (tegra_get_chip_id()) {
182 	case TEGRA20:
183 	case TEGRA30:
184 		break;
185 	default:
186 		/* Turn off CRAIL */
187 		value = flowctrl_read_cpu_csr(0);
188 		value &= ~FLOW_CTRL_CSR_ENABLE_EXT_MASK;
189 		value |= FLOW_CTRL_CSR_ENABLE_EXT_CRAIL;
190 		flowctrl_write_cpu_csr(0, value);
191 		break;
192 	}
193 
194 	tegra_pmc_enter_suspend_mode(mode);
195 }
196 
/*
 * tegra_pm_enter_lp2 - enter the LP2 (CPU power-gated) state
 *
 * Configures the PMC for LP2, notifies cluster PM listeners, saves the
 * CPU clock/flow-controller state, then suspends via tegra_sleep_cpu().
 * On wakeup the inverse sequence restores the CPU complex.
 *
 * Returns 0 on success or the error from cpu_suspend() (e.g. -EBUSY
 * when the CPU rail was not ready to go off).
 */
int tegra_pm_enter_lp2(void)
{
	int err;

	tegra_pm_set(TEGRA_SUSPEND_LP2);

	cpu_cluster_pm_enter();
	suspend_cpu_complex();

	/* v2p offset lets the sleep finisher run with the MMU off. */
	err = cpu_suspend(PHYS_OFFSET - PAGE_OFFSET, &tegra_sleep_cpu);

	/*
	 * Resume L2 cache if it wasn't re-enabled early during resume,
	 * which is the case for Tegra30 that has to re-enable the cache
	 * via firmware call. In other cases cache is already enabled and
	 * hence re-enabling is a no-op. This is always a no-op on Tegra114+.
	 */
	outer_resume();

	restore_cpu_complex();
	cpu_cluster_pm_exit();

	return err;
}
221 
222 enum tegra_suspend_mode tegra_pm_validate_suspend_mode(
223 				enum tegra_suspend_mode mode)
224 {
225 	/*
226 	 * The Tegra devices support suspending to LP1 or lower currently.
227 	 */
228 	if (mode > TEGRA_SUSPEND_LP1)
229 		return TEGRA_SUSPEND_LP1;
230 
231 	return mode;
232 }
233 
/*
 * tegra_sleep_core - cpu_suspend() sleep routine for LP0/LP1
 * @v2p: virtual-to-physical offset passed through to the finisher
 *
 * Runs with the MMU still on. Disables the outer cache when maintained
 * via Trusted Foundations, notifies the firmware, then hands off to the
 * SoC-specific tegra_sleep_core_finish(), which must not return.
 */
static int tegra_sleep_core(unsigned long v2p)
{
	/*
	 * Cache have to be disabled with MMU-on if cache maintenance is done
	 * via Trusted Foundations firmware. This is a no-op on Tegra114+.
	 */
	if (trusted_foundations_registered())
		outer_disable();

	call_firmware_op(prepare_idle, TF_PM_MODE_LP1);

	setup_mm_for_reboot();
	tegra_sleep_core_finish(v2p);

	/* The finisher must not return; reaching this point is fatal. */
	BUG();

	return 0;
}
253 
254 /*
255  * tegra_lp1_iram_hook
256  *
257  * Hooking the address of LP1 reset vector and SDRAM self-refresh code in
258  * SDRAM. These codes not be copied to IRAM in this fuction. We need to
259  * copy these code to IRAM before LP0/LP1 suspend and restore the content
260  * of IRAM after resume.
261  */
262 static bool tegra_lp1_iram_hook(void)
263 {
264 	switch (tegra_get_chip_id()) {
265 	case TEGRA20:
266 		if (IS_ENABLED(CONFIG_ARCH_TEGRA_2x_SOC))
267 			tegra20_lp1_iram_hook();
268 		break;
269 	case TEGRA30:
270 	case TEGRA114:
271 	case TEGRA124:
272 		if (IS_ENABLED(CONFIG_ARCH_TEGRA_3x_SOC) ||
273 		    IS_ENABLED(CONFIG_ARCH_TEGRA_114_SOC) ||
274 		    IS_ENABLED(CONFIG_ARCH_TEGRA_124_SOC))
275 			tegra30_lp1_iram_hook();
276 		break;
277 	default:
278 		break;
279 	}
280 
281 	if (!tegra_lp1_iram.start_addr || !tegra_lp1_iram.end_addr)
282 		return false;
283 
284 	iram_save_size = tegra_lp1_iram.end_addr - tegra_lp1_iram.start_addr;
285 	iram_save_addr = kmalloc(iram_save_size, GFP_KERNEL);
286 	if (!iram_save_addr)
287 		return false;
288 
289 	return true;
290 }
291 
292 static bool tegra_sleep_core_init(void)
293 {
294 	switch (tegra_get_chip_id()) {
295 	case TEGRA20:
296 		if (IS_ENABLED(CONFIG_ARCH_TEGRA_2x_SOC))
297 			tegra20_sleep_core_init();
298 		break;
299 	case TEGRA30:
300 	case TEGRA114:
301 	case TEGRA124:
302 		if (IS_ENABLED(CONFIG_ARCH_TEGRA_3x_SOC) ||
303 		    IS_ENABLED(CONFIG_ARCH_TEGRA_114_SOC) ||
304 		    IS_ENABLED(CONFIG_ARCH_TEGRA_124_SOC))
305 			tegra30_sleep_core_init();
306 		break;
307 	default:
308 		break;
309 	}
310 
311 	if (!tegra_sleep_core_finish)
312 		return false;
313 
314 	return true;
315 }
316 
/*
 * tegra_suspend_enter_lp1 - stage the LP1 resume code in IRAM
 *
 * Backs up the IRAM resume area into iram_save_addr, copies the LP1
 * reset vector and SDRAM self-refresh code into IRAM in its place, and
 * flags LP1 entry via tegra_cpu_lp1_mask.
 */
static void tegra_suspend_enter_lp1(void)
{
	/* copy the reset vector & SDRAM shutdown code into IRAM */
	memcpy(iram_save_addr, IO_ADDRESS(TEGRA_IRAM_LPx_RESUME_AREA),
		iram_save_size);
	memcpy(IO_ADDRESS(TEGRA_IRAM_LPx_RESUME_AREA),
		tegra_lp1_iram.start_addr, iram_save_size);

	*((u32 *)tegra_cpu_lp1_mask) = 1;
}
327 
/*
 * tegra_suspend_exit_lp1 - undo tegra_suspend_enter_lp1()
 *
 * Restores the original IRAM contents from the backup buffer and clears
 * the LP1 flag.
 */
static void tegra_suspend_exit_lp1(void)
{
	/* restore IRAM */
	memcpy(IO_ADDRESS(TEGRA_IRAM_LPx_RESUME_AREA), iram_save_addr,
		iram_save_size);

	*(u32 *)tegra_cpu_lp1_mask = 0;
}
336 
/* Human-readable suspend state names, indexed by enum tegra_suspend_mode;
 * used only for the pr_info() in tegra_suspend_enter(). */
static const char *lp_state[TEGRA_MAX_SUSPEND_MODE] = {
	[TEGRA_SUSPEND_NONE] = "none",
	[TEGRA_SUSPEND_LP2] = "LP2",
	[TEGRA_SUSPEND_LP1] = "LP1",
	[TEGRA_SUSPEND_LP0] = "LP0",
};
343 
344 static int tegra_suspend_enter(suspend_state_t state)
345 {
346 	enum tegra_suspend_mode mode = tegra_pmc_get_suspend_mode();
347 
348 	if (WARN_ON(mode < TEGRA_SUSPEND_NONE ||
349 		    mode >= TEGRA_MAX_SUSPEND_MODE))
350 		return -EINVAL;
351 
352 	pr_info("Entering suspend state %s\n", lp_state[mode]);
353 
354 	tegra_pm_set(mode);
355 
356 	local_fiq_disable();
357 
358 	suspend_cpu_complex();
359 	switch (mode) {
360 	case TEGRA_SUSPEND_LP1:
361 		tegra_suspend_enter_lp1();
362 		break;
363 	case TEGRA_SUSPEND_LP2:
364 		tegra_pm_set_cpu_in_lp2();
365 		break;
366 	default:
367 		break;
368 	}
369 
370 	cpu_suspend(PHYS_OFFSET - PAGE_OFFSET, tegra_sleep_func);
371 
372 	/*
373 	 * Resume L2 cache if it wasn't re-enabled early during resume,
374 	 * which is the case for Tegra30 that has to re-enable the cache
375 	 * via firmware call. In other cases cache is already enabled and
376 	 * hence re-enabling is a no-op.
377 	 */
378 	outer_resume();
379 
380 	switch (mode) {
381 	case TEGRA_SUSPEND_LP1:
382 		tegra_suspend_exit_lp1();
383 		break;
384 	case TEGRA_SUSPEND_LP2:
385 		tegra_pm_clear_cpu_in_lp2();
386 		break;
387 	default:
388 		break;
389 	}
390 	restore_cpu_complex();
391 
392 	local_fiq_enable();
393 
394 	return 0;
395 }
396 
/* Registered with suspend_set_ops() from tegra_init_suspend(). */
static const struct platform_suspend_ops tegra_suspend_ops = {
	.valid		= suspend_valid_only_mem,
	.enter		= tegra_suspend_enter,
};
401 
402 void __init tegra_init_suspend(void)
403 {
404 	enum tegra_suspend_mode mode = tegra_pmc_get_suspend_mode();
405 
406 	if (mode == TEGRA_SUSPEND_NONE)
407 		return;
408 
409 	tegra_tear_down_cpu_init();
410 
411 	if (mode >= TEGRA_SUSPEND_LP1) {
412 		if (!tegra_lp1_iram_hook() || !tegra_sleep_core_init()) {
413 			pr_err("%s: unable to allocate memory for SDRAM"
414 			       "self-refresh -- LP0/LP1 unavailable\n",
415 			       __func__);
416 			tegra_pmc_set_suspend_mode(TEGRA_SUSPEND_LP2);
417 			mode = TEGRA_SUSPEND_LP2;
418 		}
419 	}
420 
421 	/* set up sleep function for cpu_suspend */
422 	switch (mode) {
423 	case TEGRA_SUSPEND_LP1:
424 		tegra_sleep_func = tegra_sleep_core;
425 		break;
426 	case TEGRA_SUSPEND_LP2:
427 		tegra_sleep_func = tegra_sleep_cpu;
428 		break;
429 	default:
430 		break;
431 	}
432 
433 	suspend_set_ops(&tegra_suspend_ops);
434 }
435 
/*
 * tegra_pm_park_secondary_cpu - power down a non-boot CPU
 * @cpu: physical id of the CPU to park
 *
 * Cleans this CPU's local caches and jumps to the SoC-specific hotplug
 * shutdown routine, which is not expected to return. Reaching the final
 * return (or being called for CPU 0) is therefore an error.
 */
int tegra_pm_park_secondary_cpu(unsigned long cpu)
{
	if (cpu > 0) {
		/* Flush only this CPU's cache levels before power-off. */
		tegra_disable_clean_inv_dcache(TEGRA_FLUSH_CACHE_LOUIS);

		if (tegra_get_chip_id() == TEGRA20)
			tegra20_hotplug_shutdown();
		else
			tegra30_hotplug_shutdown();
	}

	return -EINVAL;
}
449 #endif
450