xref: /openbmc/linux/arch/arm/mach-tegra/pm.c (revision 5a1ea477)
1 /*
2  * CPU complex suspend & resume functions for Tegra SoCs
3  *
4  * Copyright (c) 2009-2012, NVIDIA Corporation. All rights reserved.
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms and conditions of the GNU General Public License,
8  * version 2, as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
17  */
18 
19 #include <linux/clk/tegra.h>
20 #include <linux/cpumask.h>
21 #include <linux/cpu_pm.h>
22 #include <linux/delay.h>
23 #include <linux/err.h>
24 #include <linux/io.h>
25 #include <linux/kernel.h>
26 #include <linux/slab.h>
27 #include <linux/spinlock.h>
28 #include <linux/suspend.h>
29 
30 #include <linux/firmware/trusted_foundations.h>
31 
32 #include <soc/tegra/flowctrl.h>
33 #include <soc/tegra/fuse.h>
34 #include <soc/tegra/pm.h>
35 #include <soc/tegra/pmc.h>
36 
37 #include <asm/cacheflush.h>
38 #include <asm/firmware.h>
39 #include <asm/idmap.h>
40 #include <asm/proc-fns.h>
41 #include <asm/smp_plat.h>
42 #include <asm/suspend.h>
43 #include <asm/tlbflush.h>
44 
45 #include "iomap.h"
46 #include "pm.h"
47 #include "reset.h"
48 #include "sleep.h"
49 
50 #ifdef CONFIG_PM_SLEEP
51 static DEFINE_SPINLOCK(tegra_lp2_lock);
52 static u32 iram_save_size;
53 static void *iram_save_addr;
54 struct tegra_lp1_iram tegra_lp1_iram;
55 void (*tegra_tear_down_cpu)(void);
56 void (*tegra_sleep_core_finish)(unsigned long v2p);
57 static int (*tegra_sleep_func)(unsigned long v2p);
58 
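/*
 * tegra_tear_down_cpu_init
 *
 * Picks the per-SoC CPU tear-down routine matching the running chip;
 * tegra_tear_down_cpu is left NULL when no supported SoC is configured.
 */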
59 static void tegra_tear_down_cpu_init(void)
60 {
61 	switch (tegra_get_chip_id()) {
62 	case TEGRA20:
63 		if (IS_ENABLED(CONFIG_ARCH_TEGRA_2x_SOC))
64 			tegra_tear_down_cpu = tegra20_tear_down_cpu;
65 		break;
66 	case TEGRA30:
67 	case TEGRA114:
68 	case TEGRA124:
69 		if (IS_ENABLED(CONFIG_ARCH_TEGRA_3x_SOC) ||
70 		    IS_ENABLED(CONFIG_ARCH_TEGRA_114_SOC) ||
71 		    IS_ENABLED(CONFIG_ARCH_TEGRA_124_SOC))
72 			tegra_tear_down_cpu = tegra30_tear_down_cpu;
73 		break;
74 	}
75 }
76 
77 /*
78  * restore_cpu_complex
79  *
80  * Restores the CPU clock settings and clears the flow controller state.
81  *
82  * Always called on CPU 0.
83  */
84 static void restore_cpu_complex(void)
85 {
86 	int cpu = smp_processor_id();
87 
88 	BUG_ON(cpu != 0);
89 
90 #ifdef CONFIG_SMP
91 	cpu = cpu_logical_map(cpu);
92 #endif
93 
94 	/* Restore the CPU clock settings */
95 	tegra_cpu_clock_resume();
96 
97 	flowctrl_cpu_suspend_exit(cpu);
98 }
99 
100 /*
101  * suspend_cpu_complex
102  *
103  * Saves the PLL state for use by restart_plls and prepares the flow
104  * controller for the transition to the suspend state.
105  *
106  * Must always be called on CPU 0.
107  */
108 static void suspend_cpu_complex(void)
109 {
110 	int cpu = smp_processor_id();
111 
112 	BUG_ON(cpu != 0);
113 
114 #ifdef CONFIG_SMP
115 	cpu = cpu_logical_map(cpu);
116 #endif
117 
118 	/* Save the CPU clock settings */
119 	tegra_cpu_clock_suspend();
120 
121 	flowctrl_cpu_suspend_enter(cpu);
122 }
123 
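/*
 * tegra_clear_cpu_in_lp2
 *
 * Clears this CPU's bit in the shared LP2 bitmap under tegra_lp2_lock.
 * The CPU must previously have marked itself via tegra_set_cpu_in_lp2().
 */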
124 void tegra_clear_cpu_in_lp2(void)
125 {
126 	int phy_cpu_id = cpu_logical_map(smp_processor_id());
127 	u32 *cpu_in_lp2 = tegra_cpu_lp2_mask;
128 
129 	spin_lock(&tegra_lp2_lock);
130 
131 	BUG_ON(!(*cpu_in_lp2 & BIT(phy_cpu_id)));
132 	*cpu_in_lp2 &= ~BIT(phy_cpu_id);
133 
134 	spin_unlock(&tegra_lp2_lock);
135 }
136 
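/*
 * tegra_set_cpu_in_lp2
 *
 * Sets this CPU's bit in the shared LP2 bitmap (tegra_cpu_lp2_mask is
 * viewed both as a cpumask and as a raw u32 of physical CPU IDs). Returns
 * true only on CPU 0 and only once every online CPU is in LP2, i.e. when
 * the caller is the last CPU down. On Tegra20, CPU 1 additionally marks
 * itself as resettable soon.
 */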
137 bool tegra_set_cpu_in_lp2(void)
138 {
139 	int phy_cpu_id = cpu_logical_map(smp_processor_id());
140 	bool last_cpu = false;
141 	cpumask_t *cpu_lp2_mask = tegra_cpu_lp2_mask;
142 	u32 *cpu_in_lp2 = tegra_cpu_lp2_mask;
143 
144 	spin_lock(&tegra_lp2_lock);
145 
146 	BUG_ON((*cpu_in_lp2 & BIT(phy_cpu_id)));
147 	*cpu_in_lp2 |= BIT(phy_cpu_id);
148 
149 	if ((phy_cpu_id == 0) && cpumask_equal(cpu_lp2_mask, cpu_online_mask))
150 		last_cpu = true;
151 	else if (tegra_get_chip_id() == TEGRA20 && phy_cpu_id == 1)
152 		tegra20_cpu_set_resettable_soon();
153 
154 	spin_unlock(&tegra_lp2_lock);
155 	return last_cpu;
156 }
157 
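/* Thin wrapper around the generic ARM cpu_do_idle() (typically a WFI). */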
158 int tegra_cpu_do_idle(void)
159 {
160 	return cpu_do_idle();
161 }
162 
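/*
 * tegra_sleep_cpu
 *
 * cpu_suspend() finisher for LP2: runs with the MMU still on, with v2p
 * being the virtual-to-physical offset passed by the caller. It switches
 * to the identity mapping via setup_mm_for_reboot() and hands off to
 * tegra_sleep_cpu_finish(), which never returns; resume comes back through
 * the CPU reset vector instead.
 */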
163 static int tegra_sleep_cpu(unsigned long v2p)
164 {
165 	/*
166 	 * Disabling the L2 cache via the kernel API is only allowed when all
167 	 * secondary CPUs are offline. The cache has to be disabled with the
168 	 * MMU on if cache maintenance is done via Trusted Foundations
169 	 * firmware. Note that CPUIDLE never enters the power gate on Tegra30
170 	 * while any secondary CPU is online, and this is the LP2-idle
171 	 * code path for Tegra20/30 only.
172 	 */
173 	if (trusted_foundations_registered())
174 		outer_disable();
175 
176 	/*
177 	 * Note that besides setting up the CPU reset vector, this firmware
178 	 * call may also do the following, depending on the FW version:
179 	 *  1) Disable the L2 cache. This doesn't matter, since we have
180 	 *     already disabled it above.
181 	 *  2) Disable the D-cache. This has to be taken into account, in
182 	 *     particular by tegra_disable_clean_inv_dcache(), which must
183 	 *     avoid disabling it a second time.
184 	 */
185 	call_firmware_op(prepare_idle, TF_PM_MODE_LP2);
186 
187 	setup_mm_for_reboot();
188 	tegra_sleep_cpu_finish(v2p);
189 
190 	/* should never get here */
191 	BUG();
192 
193 	return 0;
194 }
195 
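/*
 * tegra_pm_set
 *
 * Programs the PMC for the given suspend mode. On chips other than
 * Tegra20/30 (Tegra114/124 among those handled here) the CPU rail (CRAIL)
 * is switched off first through the CPU 0 flow-controller CSR.
 */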
196 static void tegra_pm_set(enum tegra_suspend_mode mode)
197 {
198 	u32 value;
199 
200 	switch (tegra_get_chip_id()) {
201 	case TEGRA20:
202 	case TEGRA30:
203 		break;
204 	default:
205 		/* Turn off CRAIL */
206 		value = flowctrl_read_cpu_csr(0);
207 		value &= ~FLOW_CTRL_CSR_ENABLE_EXT_MASK;
208 		value |= FLOW_CTRL_CSR_ENABLE_EXT_CRAIL;
209 		flowctrl_write_cpu_csr(0, value);
210 		break;
211 	}
212 
213 	tegra_pmc_enter_suspend_mode(mode);
214 }
215 
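/*
 * tegra_idle_lp2_last
 *
 * Called on the last CPU going down into LP2: programs the PMC, saves the
 * CPU clock and flow-controller state and suspends through
 * tegra_sleep_cpu(), powering off the CPU complex. On wake-up the L2
 * cache, CPU clocks and flow controller are restored before returning.
 */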
216 void tegra_idle_lp2_last(void)
217 {
218 	tegra_pm_set(TEGRA_SUSPEND_LP2);
219 
220 	cpu_cluster_pm_enter();
221 	suspend_cpu_complex();
222 
223 	cpu_suspend(PHYS_OFFSET - PAGE_OFFSET, &tegra_sleep_cpu);
224 
225 	/*
226 	 * Resume the L2 cache if it wasn't re-enabled earlier during resume,
227 	 * which is the case for Tegra30, where the cache has to be re-enabled
228 	 * via a firmware call. In all other cases the cache is already enabled
229 	 * and re-enabling it is a no-op. This is always a no-op on Tegra114+.
230 	 */
231 	outer_resume();
232 
233 	restore_cpu_complex();
234 	cpu_cluster_pm_exit();
235 }
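
/*
 * A minimal, illustrative sketch (not part of this file) of how the LP2
 * helpers above are meant to be combined by a cpuidle ->enter callback;
 * the function name and the exact flow are hypothetical:
 *
 *	static int tegra_idle_enter_lp2(struct cpuidle_device *dev,
 *					struct cpuidle_driver *drv, int index)
 *	{
 *		local_fiq_disable();
 *
 *		if (tegra_set_cpu_in_lp2())
 *			tegra_idle_lp2_last();	// last CPU: power off complex
 *		else
 *			tegra_cpu_do_idle();	// secondary CPU: plain idle
 *
 *		tegra_clear_cpu_in_lp2();
 *		local_fiq_enable();
 *
 *		return index;
 *	}
 */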
236 
237 enum tegra_suspend_mode tegra_pm_validate_suspend_mode(
238 				enum tegra_suspend_mode mode)
239 {
240 	/*
241 	 * Tegra devices currently support suspending to LP1 or a shallower state.
242 	 */
243 	if (mode > TEGRA_SUSPEND_LP1)
244 		return TEGRA_SUSPEND_LP1;
245 
246 	return mode;
247 }
248 
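/*
 * tegra_sleep_core
 *
 * cpu_suspend() finisher for LP0/LP1: mirrors tegra_sleep_cpu(), but
 * notifies the firmware of LP1 entry and jumps to the per-SoC
 * tegra_sleep_core_finish() routine installed by tegra_sleep_core_init().
 */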
249 static int tegra_sleep_core(unsigned long v2p)
250 {
251 	/*
252 	 * The cache has to be disabled with the MMU on if cache maintenance
253 	 * is done via Trusted Foundations firmware. This is a no-op on Tegra114+.
254 	 */
255 	if (trusted_foundations_registered())
256 		outer_disable();
257 
258 	call_firmware_op(prepare_idle, TF_PM_MODE_LP1);
259 
260 	setup_mm_for_reboot();
261 	tegra_sleep_core_finish(v2p);
262 
263 	/* should never get here */
264 	BUG();
265 
266 	return 0;
267 }
268 
269 /*
270  * tegra_lp1_iram_hook
271  *
272  * Hooks up the addresses of the LP1 reset vector and the SDRAM self-refresh
273  * code, both of which live in SDRAM. The code is not copied to IRAM in this
274  * function; it has to be copied to IRAM before LP0/LP1 suspend, and the
275  * original IRAM contents have to be restored after resume.
276  */
277 static bool tegra_lp1_iram_hook(void)
278 {
279 	switch (tegra_get_chip_id()) {
280 	case TEGRA20:
281 		if (IS_ENABLED(CONFIG_ARCH_TEGRA_2x_SOC))
282 			tegra20_lp1_iram_hook();
283 		break;
284 	case TEGRA30:
285 	case TEGRA114:
286 	case TEGRA124:
287 		if (IS_ENABLED(CONFIG_ARCH_TEGRA_3x_SOC) ||
288 		    IS_ENABLED(CONFIG_ARCH_TEGRA_114_SOC) ||
289 		    IS_ENABLED(CONFIG_ARCH_TEGRA_124_SOC))
290 			tegra30_lp1_iram_hook();
291 		break;
292 	default:
293 		break;
294 	}
295 
296 	if (!tegra_lp1_iram.start_addr || !tegra_lp1_iram.end_addr)
297 		return false;
298 
299 	iram_save_size = tegra_lp1_iram.end_addr - tegra_lp1_iram.start_addr;
300 	iram_save_addr = kmalloc(iram_save_size, GFP_KERNEL);
301 	if (!iram_save_addr)
302 		return false;
303 
304 	return true;
305 }
306 
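/*
 * tegra_sleep_core_init
 *
 * Installs the per-SoC tegra_sleep_core_finish() handler. Returns false
 * if no handler is available for the running chip, in which case LP0/LP1
 * cannot be used and the caller falls back to LP2.
 */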
307 static bool tegra_sleep_core_init(void)
308 {
309 	switch (tegra_get_chip_id()) {
310 	case TEGRA20:
311 		if (IS_ENABLED(CONFIG_ARCH_TEGRA_2x_SOC))
312 			tegra20_sleep_core_init();
313 		break;
314 	case TEGRA30:
315 	case TEGRA114:
316 	case TEGRA124:
317 		if (IS_ENABLED(CONFIG_ARCH_TEGRA_3x_SOC) ||
318 		    IS_ENABLED(CONFIG_ARCH_TEGRA_114_SOC) ||
319 		    IS_ENABLED(CONFIG_ARCH_TEGRA_124_SOC))
320 			tegra30_sleep_core_init();
321 		break;
322 	default:
323 		break;
324 	}
325 
326 	if (!tegra_sleep_core_finish)
327 		return false;
328 
329 	return true;
330 }
331 
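/*
 * tegra_suspend_enter_lp1
 *
 * Saves the IRAM resume area into the buffer allocated by
 * tegra_lp1_iram_hook(), copies the LP1 reset vector and SDRAM
 * self-refresh code into IRAM and flags LP1 entry via tegra_cpu_lp1_mask;
 * tegra_suspend_exit_lp1() undoes both steps on resume.
 */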
332 static void tegra_suspend_enter_lp1(void)
333 {
334 	/* copy the reset vector & SDRAM shutdown code into IRAM */
335 	memcpy(iram_save_addr, IO_ADDRESS(TEGRA_IRAM_LPx_RESUME_AREA),
336 		iram_save_size);
337 	memcpy(IO_ADDRESS(TEGRA_IRAM_LPx_RESUME_AREA),
338 		tegra_lp1_iram.start_addr, iram_save_size);
339 
340 	*((u32 *)tegra_cpu_lp1_mask) = 1;
341 }
342 
343 static void tegra_suspend_exit_lp1(void)
344 {
345 	/* restore IRAM */
346 	memcpy(IO_ADDRESS(TEGRA_IRAM_LPx_RESUME_AREA), iram_save_addr,
347 		iram_save_size);
348 
349 	*(u32 *)tegra_cpu_lp1_mask = 0;
350 }
351 
352 static const char *lp_state[TEGRA_MAX_SUSPEND_MODE] = {
353 	[TEGRA_SUSPEND_NONE] = "none",
354 	[TEGRA_SUSPEND_LP2] = "LP2",
355 	[TEGRA_SUSPEND_LP1] = "LP1",
356 	[TEGRA_SUSPEND_LP0] = "LP0",
357 };
358 
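/*
 * tegra_suspend_enter
 *
 * The platform_suspend_ops ->enter callback: programs the PMC for the
 * validated mode, does the LP1/LP2-specific preparation, suspends through
 * the sleep function selected in tegra_init_suspend() and unwinds the same
 * steps in reverse order on resume.
 */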
359 static int tegra_suspend_enter(suspend_state_t state)
360 {
361 	enum tegra_suspend_mode mode = tegra_pmc_get_suspend_mode();
362 
363 	if (WARN_ON(mode < TEGRA_SUSPEND_NONE ||
364 		    mode >= TEGRA_MAX_SUSPEND_MODE))
365 		return -EINVAL;
366 
367 	pr_info("Entering suspend state %s\n", lp_state[mode]);
368 
369 	tegra_pm_set(mode);
370 
371 	local_fiq_disable();
372 
373 	suspend_cpu_complex();
374 	switch (mode) {
375 	case TEGRA_SUSPEND_LP1:
376 		tegra_suspend_enter_lp1();
377 		break;
378 	case TEGRA_SUSPEND_LP2:
379 		tegra_set_cpu_in_lp2();
380 		break;
381 	default:
382 		break;
383 	}
384 
385 	cpu_suspend(PHYS_OFFSET - PAGE_OFFSET, tegra_sleep_func);
386 
387 	/*
388 	 * Resume the L2 cache if it wasn't re-enabled earlier during resume,
389 	 * which is the case for Tegra30, where the cache has to be re-enabled
390 	 * via a firmware call. In all other cases the cache is already enabled
391 	 * and re-enabling it is a no-op.
392 	 */
393 	outer_resume();
394 
395 	switch (mode) {
396 	case TEGRA_SUSPEND_LP1:
397 		tegra_suspend_exit_lp1();
398 		break;
399 	case TEGRA_SUSPEND_LP2:
400 		tegra_clear_cpu_in_lp2();
401 		break;
402 	default:
403 		break;
404 	}
405 	restore_cpu_complex();
406 
407 	local_fiq_enable();
408 
409 	return 0;
410 }
411 
412 static const struct platform_suspend_ops tegra_suspend_ops = {
413 	.valid		= suspend_valid_only_mem,
414 	.enter		= tegra_suspend_enter,
415 };
416 
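/*
 * tegra_init_suspend
 *
 * Boot-time setup: picks the per-SoC CPU tear-down routine, prepares the
 * LP1 IRAM hooks (falling back to LP2 if that fails), selects the
 * cpu_suspend() finisher matching the chosen mode and registers
 * tegra_suspend_ops with the suspend core.
 */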
417 void __init tegra_init_suspend(void)
418 {
419 	enum tegra_suspend_mode mode = tegra_pmc_get_suspend_mode();
420 
421 	if (mode == TEGRA_SUSPEND_NONE)
422 		return;
423 
424 	tegra_tear_down_cpu_init();
425 
426 	if (mode >= TEGRA_SUSPEND_LP1) {
427 		if (!tegra_lp1_iram_hook() || !tegra_sleep_core_init()) {
428 			pr_err("%s: unable to allocate memory for SDRAM "
429 			       "self-refresh -- LP0/LP1 unavailable\n",
430 			       __func__);
431 			tegra_pmc_set_suspend_mode(TEGRA_SUSPEND_LP2);
432 			mode = TEGRA_SUSPEND_LP2;
433 		}
434 	}
435 
436 	/* set up sleep function for cpu_suspend */
437 	switch (mode) {
438 	case TEGRA_SUSPEND_LP1:
439 		tegra_sleep_func = tegra_sleep_core;
440 		break;
441 	case TEGRA_SUSPEND_LP2:
442 		tegra_sleep_func = tegra_sleep_cpu;
443 		break;
444 	default:
445 		break;
446 	}
447 
448 	suspend_set_ops(&tegra_suspend_ops);
449 }
450 #endif
451