// SPDX-License-Identifier: GPL-2.0
/*
 * AM33XX Arch Power Management Routines
 *
 * Copyright (C) 2016-2018 Texas Instruments Incorporated - https://www.ti.com/
 *	Dave Gerlach
 */

#include <linux/cpuidle.h>
#include <linux/platform_data/pm33xx.h>
#include <linux/suspend.h>
#include <asm/cpuidle.h>
#include <asm/smp_scu.h>
#include <asm/suspend.h>
#include <linux/errno.h>
#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/platform_data/gpio-omap.h>
#include <linux/pinctrl/pinmux.h>
#include <linux/wkup_m3_ipc.h>
#include <linux/of.h>
#include <linux/rtc.h>

#include "cm33xx.h"
#include "common.h"
#include "control.h"
#include "clockdomain.h"
#include "iomap.h"
#include "pm.h"
#include "powerdomain.h"
#include "prm33xx.h"
#include "soc.h"
#include "sram.h"
#include "omap-secure.h"

static struct powerdomain *cefuse_pwrdm, *gfx_pwrdm, *per_pwrdm, *mpu_pwrdm;
static struct clockdomain *gfx_l4ls_clkdm;
static void __iomem *scu_base;

static int (*idle_fn)(u32 wfi_flags);

struct amx3_idle_state {
	int wfi_flags;
};

static struct amx3_idle_state *idle_states;

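/*
 * On AM437x the Cortex-A9 SCU power mode has to be programmed around WFI
 * (see scu_power_mode() calls below), so map the SCU register space once
 * at suspend init time.
 */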
static int am43xx_map_scu(void)
{
	scu_base = ioremap(scu_a9_get_base(), SZ_256);

	if (!scu_base)
		return -ENOMEM;

	return 0;
}

static int am33xx_check_off_mode_enable(void)
{
	if (enable_off_mode)
		pr_warn("WARNING: This platform does not support off-mode, entering DeepSleep suspend.\n");

	/* off mode not supported on am335x so return 0 always */
	return 0;
}

static int am43xx_check_off_mode_enable(void)
{
	/*
	 * Check for am437x-gp-evm, which has the right hardware design to
	 * support this mode reliably.
	 */
	if (of_machine_is_compatible("ti,am437x-gp-evm") && enable_off_mode)
		return enable_off_mode;
	else if (enable_off_mode)
		pr_warn("WARNING: This platform does not support off-mode, entering DeepSleep suspend.\n");

	return 0;
}

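/*
 * Common init for AM335x and AM437x: look up the power domains handled
 * here, program the clock domains for idle, and record the idle callback
 * used by the cpuidle enter path below.
 */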
static int amx3_common_init(int (*idle)(u32 wfi_flags))
{
	gfx_pwrdm = pwrdm_lookup("gfx_pwrdm");
	per_pwrdm = pwrdm_lookup("per_pwrdm");
	mpu_pwrdm = pwrdm_lookup("mpu_pwrdm");

	if ((!gfx_pwrdm) || (!per_pwrdm) || (!mpu_pwrdm))
		return -ENODEV;

	(void)clkdm_for_each(omap_pm_clkdms_setup, NULL);

	/* CEFUSE domain can be turned off post bootup */
	cefuse_pwrdm = pwrdm_lookup("cefuse_pwrdm");
	if (!cefuse_pwrdm)
		pr_err("PM: Failed to get cefuse_pwrdm\n");
	else if (omap_type() != OMAP2_DEVICE_TYPE_GP)
		pr_info("PM: Leaving EFUSE power domain active\n");
	else
		omap_set_pwrdm_state(cefuse_pwrdm, PWRDM_POWER_OFF);

	idle_fn = idle;

	return 0;
}

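/*
 * AM335x additionally needs the GFX_L4LS clock domain, which
 * am33xx_suspend() pokes after resume to keep the L4LS clock domain from
 * getting stuck in transition (see the BUG comment there).
 */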
static int am33xx_suspend_init(int (*idle)(u32 wfi_flags))
{
	int ret;

	gfx_l4ls_clkdm = clkdm_lookup("gfx_l4ls_gfx_clkdm");

	if (!gfx_l4ls_clkdm) {
		pr_err("PM: Cannot lookup gfx_l4ls_clkdm clockdomains\n");
		return -ENODEV;
	}

	ret = amx3_common_init(idle);

	return ret;
}

static int am43xx_suspend_init(int (*idle)(u32 wfi_flags))
{
	int ret = 0;

	ret = am43xx_map_scu();
	if (ret) {
		pr_err("PM: Could not ioremap SCU\n");
		return ret;
	}

	ret = amx3_common_init(idle);

	return ret;
}

static int amx3_suspend_deinit(void)
{
	idle_fn = NULL;
	return 0;
}

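/*
 * GFX is the only power domain this code programs directly around
 * suspend: request it off before WFI and verify the transition after.
 */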
static void amx3_pre_suspend_common(void)
{
	omap_set_pwrdm_state(gfx_pwrdm, PWRDM_POWER_OFF);
}

static void amx3_post_suspend_common(void)
{
	int status;
	/*
	 * Because gfx_pwrdm is the only power domain under MPU control,
	 * report on its transition status here.
	 */
	status = pwrdm_read_pwrst(gfx_pwrdm);
	if (status != PWRDM_POWER_OFF)
		pr_err("PM: GFX domain did not transition: %x\n", status);
}

static int am33xx_suspend(unsigned int state, int (*fn)(unsigned long),
			  unsigned long args)
{
	int ret = 0;

	amx3_pre_suspend_common();
	ret = cpu_suspend(args, fn);
	amx3_post_suspend_common();

	/*
	 * BUG: GFX_L4LS clock domain needs to be woken up to
	 * ensure that the L4LS clock domain does not get stuck in
	 * transition. If that happens, the L3 module does not get
	 * disabled, thereby leading to the PER power domain
	 * transition failing.
	 */

	clkdm_wakeup(gfx_l4ls_clkdm);
	clkdm_sleep(gfx_l4ls_clkdm);

	return ret;
}

static int am43xx_suspend(unsigned int state, int (*fn)(unsigned long),
			  unsigned long args)
{
	int ret = 0;

	/* Suspend secure side on HS devices */
	if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
		if (optee_available)
			omap_smccc_smc(AM43xx_PPA_SVC_PM_SUSPEND, 0);
		else
			omap_secure_dispatcher(AM43xx_PPA_SVC_PM_SUSPEND,
					       FLAG_START_CRITICAL,
					       0, 0, 0, 0, 0);
	}

	amx3_pre_suspend_common();
	scu_power_mode(scu_base, SCU_PM_POWEROFF);
	ret = cpu_suspend(args, fn);
	scu_power_mode(scu_base, SCU_PM_NORMAL);

	if (!am43xx_check_off_mode_enable())
		amx3_post_suspend_common();

	/*
	 * Resume secure side on HS devices.
	 *
	 * Note that even on systems with OP-TEE available this resume call is
	 * issued to the ROM. This is because upon waking from suspend the ROM
	 * is restored as the secure monitor. On systems with OP-TEE, the ROM
	 * will restore OP-TEE during this call.
	 */
	if (omap_type() != OMAP2_DEVICE_TYPE_GP)
		omap_secure_dispatcher(AM43xx_PPA_SVC_PM_RESUME,
				       FLAG_START_CRITICAL,
				       0, 0, 0, 0, 0);

	return ret;
}

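/*
 * cpuidle WFI path for AM335x: bail out if an interrupt is already
 * pending or the scheduler needs the CPU, otherwise run the low-level
 * suspend routine.
 */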
static int am33xx_cpu_suspend(int (*fn)(unsigned long), unsigned long args)
{
	int ret = 0;

	if (omap_irq_pending() || need_resched())
		return ret;

	ret = cpu_suspend(args, fn);

	return ret;
}

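/*
 * cpuidle WFI path for AM437x: put the SCU into dormant mode around the
 * low-level suspend routine and restore normal mode on the way out.
 */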
static int am43xx_cpu_suspend(int (*fn)(unsigned long), unsigned long args)
{
	int ret = 0;

	if (!scu_base)
		return 0;

	scu_power_mode(scu_base, SCU_PM_DORMANT);
	ret = cpu_suspend(args, fn);
	scu_power_mode(scu_base, SCU_PM_NORMAL);

	return ret;
}

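/*
 * Force the idle loop to poll for the duration of a system suspend so
 * the cpuidle states above are not entered while the transition is in
 * progress.
 */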
static void amx3_begin_suspend(void)
{
	cpu_idle_poll_ctrl(true);
}

static void amx3_finish_suspend(void)
{
	cpu_idle_poll_ctrl(false);
}

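/*
 * Hand the pm33xx driver the addresses of the SoC-specific low-level
 * suspend code so it can be copied to and run from SRAM.
 */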
static struct am33xx_pm_sram_addr *amx3_get_sram_addrs(void)
{
	if (soc_is_am33xx())
		return &am33xx_pm_sram;
	else if (soc_is_am437x())
		return &am43xx_pm_sram;
	else
		return NULL;
}

static void am43xx_save_context(void)
{
}

static void am33xx_save_context(void)
{
	omap_intc_save_context();
}

static void am33xx_restore_context(void)
{
	omap_intc_restore_context();
}

static void am43xx_restore_context(void)
{
	/*
	 * HACK: restore dpll_per_clkdcoldo register contents, to avoid
	 * breaking suspend-resume
	 */
	writel_relaxed(0x0, AM33XX_L4_WK_IO_ADDRESS(0x44df2e14));
}

static struct am33xx_pm_platform_data am33xx_ops = {
	.init = am33xx_suspend_init,
	.deinit = amx3_suspend_deinit,
	.soc_suspend = am33xx_suspend,
	.cpu_suspend = am33xx_cpu_suspend,
	.begin_suspend = amx3_begin_suspend,
	.finish_suspend = amx3_finish_suspend,
	.get_sram_addrs = amx3_get_sram_addrs,
	.save_context = am33xx_save_context,
	.restore_context = am33xx_restore_context,
	.check_off_mode_enable = am33xx_check_off_mode_enable,
};

static struct am33xx_pm_platform_data am43xx_ops = {
	.init = am43xx_suspend_init,
	.deinit = amx3_suspend_deinit,
	.soc_suspend = am43xx_suspend,
	.cpu_suspend = am43xx_cpu_suspend,
	.begin_suspend = amx3_begin_suspend,
	.finish_suspend = amx3_finish_suspend,
	.get_sram_addrs = amx3_get_sram_addrs,
	.save_context = am43xx_save_context,
	.restore_context = am43xx_restore_context,
	.check_off_mode_enable = am43xx_check_off_mode_enable,
};

static struct am33xx_pm_platform_data *am33xx_pm_get_pdata(void)
{
	if (soc_is_am33xx())
		return &am33xx_ops;
	else if (soc_is_am437x())
		return &am43xx_ops;
	else
		return NULL;
}

#ifdef CONFIG_SUSPEND
/*
 * Block system suspend initially. Later on, the pm33xx driver sets up its
 * own platform_suspend_ops after probe. That also depends on the
 * wkup_m3_ipc driver being loaded and am335x-pm-firmware.elf being booted.
 */
static int amx3_suspend_block(suspend_state_t state)
{
	pr_warn("PM not initialized for pm33xx, wkup_m3_ipc, or am335x-pm-firmware.elf\n");

	return -EINVAL;
}

static int amx3_pm_valid(suspend_state_t state)
{
	switch (state) {
	case PM_SUSPEND_STANDBY:
		return 1;
	default:
		return 0;
	}
}

static const struct platform_suspend_ops amx3_blocked_pm_ops = {
	.begin = amx3_suspend_block,
	.valid = amx3_pm_valid,
};

static void __init amx3_block_suspend(void)
{
	suspend_set_ops(&amx3_blocked_pm_ops);
}
#else
static inline void amx3_block_suspend(void)
{
}
#endif	/* CONFIG_SUSPEND */

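/*
 * Register the "pm33xx" platform device with the SoC-specific callbacks
 * above; the pm33xx driver binds to it and takes over suspend handling
 * once its dependencies have probed.
 */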
int __init amx3_common_pm_init(void)
{
	struct am33xx_pm_platform_data *pdata;
	struct platform_device_info devinfo;

	pdata = am33xx_pm_get_pdata();

	memset(&devinfo, 0, sizeof(devinfo));
	devinfo.name = "pm33xx";
	devinfo.data = pdata;
	devinfo.size_data = sizeof(*pdata);
	devinfo.id = -1;
	platform_device_register_full(&devinfo);
	amx3_block_suspend();

	return 0;
}

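/*
 * Parse the cpu-idle-states phandles from the device tree and record,
 * per enabled state, which WFI flags the idle entry path needs (e.g.
 * waking the wkup_m3 and flushing caches for the deeper state).
 */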
static int __init amx3_idle_init(struct device_node *cpu_node, int cpu)
{
	struct device_node *state_node;
	struct amx3_idle_state states[CPUIDLE_STATE_MAX];
	int i;
	int state_count = 1;

	for (i = 0; ; i++) {
		state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i);
		if (!state_node)
			break;

		if (!of_device_is_available(state_node))
			continue;

		if (i == CPUIDLE_STATE_MAX) {
			pr_warn("%s: cpuidle states reached max possible\n",
				__func__);
			break;
		}

		states[state_count].wfi_flags = 0;

		if (of_property_read_bool(state_node, "ti,idle-wkup-m3"))
			states[state_count].wfi_flags |= WFI_FLAG_WAKE_M3 |
							 WFI_FLAG_FLUSH_CACHE;

		state_count++;
	}

	idle_states = kcalloc(state_count, sizeof(*idle_states), GFP_KERNEL);
	if (!idle_states)
		return -ENOMEM;

	for (i = 1; i < state_count; i++)
		idle_states[i].wfi_flags = states[i].wfi_flags;

	return 0;
}

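/*
 * cpuidle ->suspend hook: hand the flags for the selected state to the
 * idle routine recorded by amx3_common_init(), if one has been
 * registered.
 */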
static int amx3_idle_enter(unsigned long index)
{
	struct amx3_idle_state *idle_state = &idle_states[index];

	if (!idle_state)
		return -EINVAL;

	if (idle_fn)
		idle_fn(idle_state->wfi_flags);

	return 0;
}

static struct cpuidle_ops amx3_cpuidle_ops __initdata = {
	.init = amx3_idle_init,
	.suspend = amx3_idle_enter,
};

CPUIDLE_METHOD_OF_DECLARE(pm33xx_idle, "ti,am3352", &amx3_cpuidle_ops);
CPUIDLE_METHOD_OF_DECLARE(pm43xx_idle, "ti,am4372", &amx3_cpuidle_ops);