// SPDX-License-Identifier: GPL-2.0-only
/*
 * OMAP3 Power Management Routines
 *
 * Copyright (C) 2006-2008 Nokia Corporation
 * Tony Lindgren <tony@atomide.com>
 * Jouni Hogander
 *
 * Copyright (C) 2007 Texas Instruments, Inc.
 * Rajendra Nayak <rnayak@ti.com>
 *
 * Copyright (C) 2005 Texas Instruments, Inc.
 * Richard Woodruff <r-woodruff2@ti.com>
 *
 * Based on pm.c for omap1
 */

#include <linux/cpu_pm.h>
#include <linux/pm.h>
#include <linux/suspend.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/cpuidle.h>

#include <trace/events/power.h>

#include <asm/fncpy.h>
#include <asm/suspend.h>
#include <asm/system_misc.h>

#include "clockdomain.h"
#include "powerdomain.h"
#include "soc.h"
#include "common.h"
#include "cm3xxx.h"
#include "cm-regbits-34xx.h"
#include "prm-regbits-34xx.h"
#include "prm3xxx.h"
#include "pm.h"
#include "sdrc.h"
#include "omap-secure.h"
#include "sram.h"
#include "control.h"
#include "vc.h"

/* pm34xx errata defined in pm.h */
u16 pm34xx_errata;

struct power_state {
	struct powerdomain *pwrdm;
	u32 next_state;
#ifdef CONFIG_SUSPEND
	u32 saved_state;
#endif
	struct list_head node;
};

static LIST_HEAD(pwrst_list);

void (*omap3_do_wfi_sram)(void);

static struct powerdomain *mpu_pwrdm, *neon_pwrdm;
static struct powerdomain *core_pwrdm, *per_pwrdm;

static void omap3_core_save_context(void)
{
	omap3_ctrl_save_padconf();

	/*
	 * Force write last pad into memory, as this can fail in some
	 * cases according to errata 1.157, 1.185
	 */
	omap_ctrl_writel(omap_ctrl_readl(OMAP343X_PADCONF_ETK_D14),
		OMAP343X_CONTROL_MEM_WKUP + 0x2a0);

	/* Save the Interrupt controller context */
	omap_intc_save_context();
	/* Save the system control module context; padconf already saved above */
	omap3_control_save_context();
}

static void omap3_core_restore_context(void)
{
	/* Restore the control module context, padconf restored by h/w */
	omap3_control_restore_context();
	/* Restore the interrupt controller context */
	omap_intc_restore_context();
}

/*
 * FIXME: This function should be called before entering off-mode after
 * OMAP3 secure services have been accessed. Currently it is only called
 * once during the boot sequence, but this works because we are not using
 * secure services.
 */
static void omap3_save_secure_ram_context(void)
{
	u32 ret;
	int mpu_next_state = pwrdm_read_next_pwrst(mpu_pwrdm);

	if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
		/*
		 * MPU next state must be set to POWER_ON temporarily,
		 * otherwise the WFI executed inside the ROM code
		 * will hang the system.
		 */
		pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_ON);
		ret = omap3_save_secure_ram(omap3_secure_ram_storage,
					    OMAP3_SAVE_SECURE_RAM_SZ);
		pwrdm_set_next_pwrst(mpu_pwrdm, mpu_next_state);
		/* The following is for error tracking; it should not happen */
		if (ret) {
			pr_err("save_secure_sram() returns %08x\n", ret);
			while (1)
				;
		}
	}
}

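/*
 * PRCM IO-pad/IO-chain wakeup interrupt handler. Only acks the IO
 * events; the interrupt line is shared with the pinmux code.
 */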
static irqreturn_t _prcm_int_handle_io(int irq, void *unused)
{
	int c;

	c = omap_prm_clear_mod_irqs(WKUP_MOD, 1, OMAP3430_ST_IO_MASK |
				    OMAP3430_ST_IO_CHAIN_MASK);

	return c ? IRQ_HANDLED : IRQ_NONE;
}

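/*
 * PRCM wakeup interrupt handler for all wakeup events except the IO
 * pad/chain events handled by _prcm_int_handle_io().
 */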
static irqreturn_t _prcm_int_handle_wakeup(int irq, void *unused)
{
	int c;

	/*
	 * Clear all except ST_IO and ST_IO_CHAIN for the wkup module;
	 * these are handled in a separate handler to avoid acking
	 * IO events before they are parsed in the mux code.
	 */
	c = omap_prm_clear_mod_irqs(WKUP_MOD, 1, ~(OMAP3430_ST_IO_MASK |
						   OMAP3430_ST_IO_CHAIN_MASK));
	c += omap_prm_clear_mod_irqs(CORE_MOD, 1, ~0);
	c += omap_prm_clear_mod_irqs(OMAP3430_PER_MOD, 1, ~0);
	if (omap_rev() > OMAP3430_REV_ES1_0) {
		c += omap_prm_clear_mod_irqs(CORE_MOD, 3, ~0);
		c += omap_prm_clear_mod_irqs(OMAP3430ES2_USBHOST_MOD, 1, ~0);
	}

	return c ? IRQ_HANDLED : IRQ_NONE;
}

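/*
 * Save the ARM auxiliary control and L2 auxiliary control registers,
 * each preceded by a "valid" flag, into the context save area so they
 * can be restored after MPU off-mode.
 */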
static void omap34xx_save_context(u32 *save)
{
	u32 val;

	/* Read Auxiliary Control Register */
	asm("mrc p15, 0, %0, c1, c0, 1" : "=r" (val));
	*save++ = 1;
	*save++ = val;

	/* Read L2 AUX ctrl register */
	asm("mrc p15, 1, %0, c9, c0, 2" : "=r" (val));
	*save++ = 1;
	*save++ = val;
}

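/* Finisher for cpu_suspend(): hand control to the low-level OMAP3 suspend code */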
static int omap34xx_do_sram_idle(unsigned long save_state)
{
	omap34xx_cpu_suspend(save_state);
	return 0;
}

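/*
 * omap_sram_idle - program the next power states and execute the idle sequence
 * @rcuidle: when true, notify the context tracking/RCU code of idle entry
 *	     via ct_cpuidle_enter()/ct_cpuidle_exit()
 *
 * Programs NEON to follow the MPU next state, saves ARM and CORE context
 * as required by the programmed next power states, then enters low power
 * through cpu_suspend() or a plain WFI.
 */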
__cpuidle void omap_sram_idle(bool rcuidle)
{
	/*
	 * save_state selects what needs to be saved and restored in
	 * omap_sram_idle:
	 * save_state = 0 => Nothing to save and restore
	 * save_state = 1 => Only L1 and logic lost
	 * save_state = 2 => Only L2 lost
	 * save_state = 3 => L1, L2 and logic lost
	 */
	int save_state = 0;
	int mpu_next_state = PWRDM_POWER_ON;
	int per_next_state = PWRDM_POWER_ON;
	int core_next_state = PWRDM_POWER_ON;
	u32 sdrc_pwr = 0;
	int error;

	mpu_next_state = pwrdm_read_next_pwrst(mpu_pwrdm);
	switch (mpu_next_state) {
	case PWRDM_POWER_ON:
	case PWRDM_POWER_RET:
		/* No need to save context */
		save_state = 0;
		break;
	case PWRDM_POWER_OFF:
		save_state = 3;
		break;
	default:
		/* Invalid state */
		pr_err("Invalid mpu state in sram_idle\n");
		return;
	}

	/* NEON control */
	if (pwrdm_read_pwrst(neon_pwrdm) == PWRDM_POWER_ON)
		pwrdm_set_next_pwrst(neon_pwrdm, mpu_next_state);

	/* Read the programmed next power states for PER and CORE */
	per_next_state = pwrdm_read_next_pwrst(per_pwrdm);
	core_next_state = pwrdm_read_next_pwrst(core_pwrdm);

	pwrdm_pre_transition(NULL);

	/* PER */
	if (per_next_state == PWRDM_POWER_OFF) {
		error = cpu_cluster_pm_enter();
		if (error)
			return;
	}

	/* CORE */
	if (core_next_state < PWRDM_POWER_ON) {
		if (core_next_state == PWRDM_POWER_OFF) {
			omap3_core_save_context();
			omap3_cm_save_context();
		}
	}

	/* Configure PMIC signaling for I2C4 or sys_off_mode */
	omap3_vc_set_pmic_signaling(core_next_state);

	omap3_intc_prepare_idle();

	/*
	 * On EMU/HS devices ROM code restores a SDRC value
	 * from scratchpad which has automatic self refresh on timeout
	 * of AUTO_CNT = 1 enabled. This takes care of erratum ID i443.
	 * Hence store/restore the SDRC_POWER register here.
	 */
	if (cpu_is_omap3430() && omap_rev() >= OMAP3430_REV_ES3_0 &&
	    (omap_type() == OMAP2_DEVICE_TYPE_EMU ||
	     omap_type() == OMAP2_DEVICE_TYPE_SEC) &&
	    core_next_state == PWRDM_POWER_OFF)
		sdrc_pwr = sdrc_read_reg(SDRC_POWER);

	/*
	 * omap3_arm_context is the location where some ARM context
	 * gets saved. The rest is placed on the stack, and restored
	 * from there before resuming.
	 */
	if (save_state)
		omap34xx_save_context(omap3_arm_context);

	if (rcuidle)
		ct_cpuidle_enter();

	if (save_state == 1 || save_state == 3)
		cpu_suspend(save_state, omap34xx_do_sram_idle);
	else
		omap34xx_do_sram_idle(save_state);

	if (rcuidle)
		ct_cpuidle_exit();

	/* Restore normal SDRC POWER settings */
	if (cpu_is_omap3430() && omap_rev() >= OMAP3430_REV_ES3_0 &&
	    (omap_type() == OMAP2_DEVICE_TYPE_EMU ||
	     omap_type() == OMAP2_DEVICE_TYPE_SEC) &&
	    core_next_state == PWRDM_POWER_OFF)
		sdrc_write_reg(sdrc_pwr, SDRC_POWER);

	/* CORE */
	if (core_next_state < PWRDM_POWER_ON &&
	    pwrdm_read_prev_pwrst(core_pwrdm) == PWRDM_POWER_OFF) {
		omap3_core_restore_context();
		omap3_cm_restore_context();
		omap3_sram_restore_context();
		omap2_sms_restore_context();
	} else {
		/*
		 * In the off-mode resume path above, omap3_core_restore_context
		 * also handles the INTC autoidle restore done here, so limit
		 * this to non-off-mode resume paths so we don't do it twice.
		 */
		omap3_intc_resume_idle();
	}

	pwrdm_post_transition(NULL);

	/* PER */
	if (per_next_state == PWRDM_POWER_OFF)
		cpu_cluster_pm_exit();
}

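/* Idle hook installed as arm_pm_idle: execute WFI unless an interrupt is pending */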
static void omap3_pm_idle(void)
{
	if (omap_irq_pending())
		return;

	omap3_do_wfi();
}

#ifdef CONFIG_SUSPEND
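/*
 * System suspend: program each registered powerdomain to its target
 * state, idle the system, then restore the saved next states and report
 * any domain that did not reach its target.
 */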
static int omap3_pm_suspend(void)
{
	struct power_state *pwrst;
	int state, ret = 0;

	/* Read current next_pwrsts */
	list_for_each_entry(pwrst, &pwrst_list, node)
		pwrst->saved_state = pwrdm_read_next_pwrst(pwrst->pwrdm);
	/* Set ones wanted by suspend */
	list_for_each_entry(pwrst, &pwrst_list, node) {
		if (omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state))
			goto restore;
		if (pwrdm_clear_all_prev_pwrst(pwrst->pwrdm))
			goto restore;
	}

	omap3_intc_suspend();

	omap_sram_idle(false);

restore:
	/* Restore next_pwrsts */
	list_for_each_entry(pwrst, &pwrst_list, node) {
		state = pwrdm_read_prev_pwrst(pwrst->pwrdm);
		if (state > pwrst->next_state) {
			pr_info("Powerdomain (%s) didn't enter target state %d\n",
				pwrst->pwrdm->name, pwrst->next_state);
			ret = -1;
		}
		omap_set_pwrdm_state(pwrst->pwrdm, pwrst->saved_state);
	}
	if (ret)
		pr_err("Could not enter target state in pm_suspend\n");
	else
		pr_info("Successfully put all powerdomains to target state\n");

	return ret;
}
#else
#define omap3_pm_suspend NULL
#endif /* CONFIG_SUSPEND */

static void __init prcm_setup_regs(void)
{
	omap3_ctrl_init();

	omap3_prm_init_pm(cpu_is_omap3630(), omap3_has_iva());
}

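/*
 * Select OFF (enable != 0) or RET as the next power state for all
 * registered powerdomains. CORE is kept in RET when erratum i583 applies.
 */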
void omap3_pm_off_mode_enable(int enable)
{
	struct power_state *pwrst;
	u32 state;

	if (enable)
		state = PWRDM_POWER_OFF;
	else
		state = PWRDM_POWER_RET;

	list_for_each_entry(pwrst, &pwrst_list, node) {
		if (IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583) &&
				pwrst->pwrdm == core_pwrdm &&
				state == PWRDM_POWER_OFF) {
			pwrst->next_state = PWRDM_POWER_RET;
			pr_warn("%s: Core OFF disabled due to errata i583\n",
				__func__);
		} else {
			pwrst->next_state = state;
		}
		omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state);
	}
}

int omap3_pm_get_suspend_state(struct powerdomain *pwrdm)
{
	struct power_state *pwrst;

	list_for_each_entry(pwrst, &pwrst_list, node) {
		if (pwrst->pwrdm == pwrdm)
			return pwrst->next_state;
	}
	return -EINVAL;
}

int omap3_pm_set_suspend_state(struct powerdomain *pwrdm, int state)
{
	struct power_state *pwrst;

	list_for_each_entry(pwrst, &pwrst_list, node) {
		if (pwrst->pwrdm == pwrdm) {
			pwrst->next_state = state;
			return 0;
		}
	}
	return -EINVAL;
}

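/*
 * pwrdm_for_each() callback: track the powerdomain in pwrst_list and
 * program its default next state (OFF when off-mode is enabled, else RET).
 */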
static int __init pwrdms_setup(struct powerdomain *pwrdm, void *unused)
{
	struct power_state *pwrst;

	if (!pwrdm->pwrsts)
		return 0;

	pwrst = kmalloc(sizeof(struct power_state), GFP_ATOMIC);
	if (!pwrst)
		return -ENOMEM;
	pwrst->pwrdm = pwrdm;

	if (enable_off_mode)
		pwrst->next_state = PWRDM_POWER_OFF;
	else
		pwrst->next_state = PWRDM_POWER_RET;

	list_add(&pwrst->node, &pwrst_list);

	if (pwrdm_has_hdwr_sar(pwrdm))
		pwrdm_enable_hdwr_sar(pwrdm);

	return omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state);
}

/*
 * Push functions to SRAM
 *
 * The minimum set of functions is pushed to SRAM for execution:
 * - omap3_do_wfi for the erratum i581 workaround
 */
void omap_push_sram_idle(void)
{
	omap3_do_wfi_sram = omap_sram_push(omap3_do_wfi, omap3_do_wfi_sz);
}

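/* Set the pm34xx_errata flags that apply to the running SoC revision */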
static void __init pm_errata_configure(void)
{
	if (cpu_is_omap3630()) {
		pm34xx_errata |= PM_RTA_ERRATUM_i608;
		/* Enable the l2 cache toggling in sleep logic */
		enable_omap3630_toggle_l2_on_restore();
		if (omap_rev() < OMAP3630_REV_ES1_2)
			pm34xx_errata |= (PM_SDRC_WAKEUP_ERRATUM_i583 |
					  PM_PER_MEMORIES_ERRATUM_i582);
	} else if (cpu_is_omap34xx()) {
		pm34xx_errata |= PM_PER_MEMORIES_ERRATUM_i582;
	}
}

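/*
 * Enable off-mode only if the device tree describes a TWL4030 PMIC
 * configured with idle (or idle-osc-off) power scripts.
 */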
static void __init omap3_pm_check_pmic(void)
{
	struct device_node *np;

	np = of_find_compatible_node(NULL, NULL, "ti,twl4030-power-idle");
	if (!np)
		np = of_find_compatible_node(NULL, NULL, "ti,twl4030-power-idle-osc-off");

	if (np) {
		of_node_put(np);
		enable_off_mode = 1;
	} else {
		enable_off_mode = 0;
	}
}

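/*
 * Late PM init: request the PRCM wakeup and IO interrupts, set up the
 * powerdomain next states, install the suspend and idle hooks, and apply
 * the errata workarounds.
 */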
int __init omap3_pm_init(void)
{
	struct power_state *pwrst, *tmp;
	struct clockdomain *neon_clkdm, *mpu_clkdm, *per_clkdm, *wkup_clkdm;
	int ret;

	if (!omap3_has_io_chain_ctrl())
		pr_warn("PM: no software I/O chain control; some wakeups may be lost\n");

	pm_errata_configure();

	/* XXX prcm_setup_regs needs to be before enabling hw
	 * supervised mode for powerdomains */
	prcm_setup_regs();

	ret = request_irq(omap_prcm_event_to_irq("wkup"),
		_prcm_int_handle_wakeup, IRQF_NO_SUSPEND, "pm_wkup", NULL);

	if (ret) {
		pr_err("pm: Failed to request pm_wkup irq\n");
		goto err1;
	}

	/* IO interrupt is shared with mux code */
	ret = request_irq(omap_prcm_event_to_irq("io"),
		_prcm_int_handle_io, IRQF_SHARED | IRQF_NO_SUSPEND, "pm_io",
		omap3_pm_init);

	if (ret) {
		pr_err("pm: Failed to request pm_io irq\n");
		goto err2;
	}

	omap3_pm_check_pmic();

	ret = pwrdm_for_each(pwrdms_setup, NULL);
	if (ret) {
		pr_err("Failed to setup powerdomains\n");
		goto err3;
	}

	(void) clkdm_for_each(omap_pm_clkdms_setup, NULL);

	mpu_pwrdm = pwrdm_lookup("mpu_pwrdm");
	if (mpu_pwrdm == NULL) {
		pr_err("Failed to get mpu_pwrdm\n");
		ret = -EINVAL;
		goto err3;
	}

	neon_pwrdm = pwrdm_lookup("neon_pwrdm");
	per_pwrdm = pwrdm_lookup("per_pwrdm");
	core_pwrdm = pwrdm_lookup("core_pwrdm");

	neon_clkdm = clkdm_lookup("neon_clkdm");
	mpu_clkdm = clkdm_lookup("mpu_clkdm");
	per_clkdm = clkdm_lookup("per_clkdm");
	wkup_clkdm = clkdm_lookup("wkup_clkdm");

	omap_common_suspend_init(omap3_pm_suspend);

	arm_pm_idle = omap3_pm_idle;
	omap3_idle_init();

	/*
	 * RTA is disabled during initialization as per erratum i608.
	 * It is safer to disable RTA in the bootloader, but we would like
	 * to be doubly sure here and prevent any mishaps.
	 */
	if (IS_PM34XX_ERRATUM(PM_RTA_ERRATUM_i608))
		omap3630_ctrl_disable_rta();

	/*
	 * The UART3/4 FIFO and the sidetone memory in McBSP2/3 are
	 * not correctly reset when the PER powerdomain comes back
	 * from OFF or OSWR when the CORE powerdomain is kept active.
	 * See OMAP36xx Erratum i582 "PER Domain reset issue after
	 * Domain-OFF/OSWR Wakeup".  This wakeup dependency is not a
	 * complete workaround.  The kernel must also prevent the PER
	 * powerdomain from going to OSWR/OFF while the CORE
	 * powerdomain is not going to OSWR/OFF.  And if PER's last
	 * power state was OFF while CORE's last power state was ON, the
	 * UART3/4 and McBSP2/3 SIDETONE devices need to run a
	 * self-test using their loopback tests; if that fails, those
	 * devices are unusable until the PER/CORE can complete a transition
	 * from ON to OSWR/OFF and then back to ON.
	 *
	 * XXX Technically this workaround is only needed if off-mode
	 * or OSWR is enabled.
	 */
	if (IS_PM34XX_ERRATUM(PM_PER_MEMORIES_ERRATUM_i582))
		clkdm_add_wkdep(per_clkdm, wkup_clkdm);

	clkdm_add_wkdep(neon_clkdm, mpu_clkdm);
	if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
		omap3_secure_ram_storage =
			kmalloc(OMAP3_SAVE_SECURE_RAM_SZ, GFP_KERNEL);
		if (!omap3_secure_ram_storage)
			pr_err("Memory allocation failed when allocating for secure sram context\n");

		local_irq_disable();

		omap3_save_secure_ram_context();

		local_irq_enable();
	}

	omap3_save_scratchpad_contents();
	return ret;

err3:
	list_for_each_entry_safe(pwrst, tmp, &pwrst_list, node) {
		list_del(&pwrst->node);
		kfree(pwrst);
	}
	free_irq(omap_prcm_event_to_irq("io"), omap3_pm_init);
err2:
	free_irq(omap_prcm_event_to_irq("wkup"), NULL);
err1:
	return ret;
}