xref: /openbmc/linux/arch/arm/mach-at91/pm.c (revision 4d75f5c664195b970e1cd2fd25b65b5eff257a0a)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * arch/arm/mach-at91/pm.c
4  * AT91 Power Management
5  *
6  * Copyright (C) 2005 David Brownell
7  */
8 
9 #include <linux/genalloc.h>
10 #include <linux/io.h>
11 #include <linux/of_address.h>
12 #include <linux/of.h>
13 #include <linux/of_fdt.h>
14 #include <linux/of_platform.h>
15 #include <linux/platform_device.h>
16 #include <linux/parser.h>
17 #include <linux/suspend.h>
18 
19 #include <linux/clk.h>
20 #include <linux/clk/at91_pmc.h>
21 #include <linux/platform_data/atmel.h>
22 
23 #include <asm/cacheflush.h>
24 #include <asm/fncpy.h>
25 #include <asm/system_misc.h>
26 #include <asm/suspend.h>
27 
28 #include "generic.h"
29 #include "pm.h"
30 #include "sam_secure.h"
31 
32 #define BACKUP_DDR_PHY_CALIBRATION	(9)
33 
34 /**
35  * struct at91_pm_bu - AT91 power management backup unit data structure
36  * @suspended: true if suspended to backup mode
37  * @reserved: reserved
38  * @canary: canary data for memory checking after exit from backup mode
39  * @resume: resume API
40  * @ddr_phy_calibration: DDR PHY calibration data: ZQ0CR0, first 8 words
41  * of the memory
42  */
43 struct at91_pm_bu {
44 	int suspended;
45 	unsigned long reserved;
46 	phys_addr_t canary;
47 	phys_addr_t resume;
48 	unsigned long ddr_phy_calibration[BACKUP_DDR_PHY_CALIBRATION];
49 };
50 
51 /**
52  * struct at91_pm_sfrbu_regs - registers mapping for SFRBU
53  * @pswbu: power switch BU control registers
54  */
55 struct at91_pm_sfrbu_regs {
56 	struct {
57 		u32 key;
58 		u32 ctrl;
59 		u32 state;
60 		u32 softsw;
61 	} pswbu;
62 };
63 
64 /**
65  * enum at91_pm_eth_clk - Ethernet clock indexes
66  * @AT91_PM_ETH_PCLK: pclk index
67  * @AT91_PM_ETH_HCLK: hclk index
68  * @AT91_PM_ETH_MAX_CLK: max index
69  */
70 enum at91_pm_eth_clk {
71 	AT91_PM_ETH_PCLK,
72 	AT91_PM_ETH_HCLK,
73 	AT91_PM_ETH_MAX_CLK,
74 };
75 
76 /**
77  * enum at91_pm_eth - Ethernet controller indexes
78  * @AT91_PM_G_ETH: gigabit Ethernet controller index
79  * @AT91_PM_E_ETH: megabit Ethernet controller index
80  * @AT91_PM_MAX_ETH: max index
81  */
82 enum at91_pm_eth {
83 	AT91_PM_G_ETH,
84 	AT91_PM_E_ETH,
85 	AT91_PM_MAX_ETH,
86 };
87 
88 /**
89  * struct at91_pm_quirk_eth - AT91 PM Ethernet quirks
90  * @dev: Ethernet device
91  * @np: Ethernet device node
92  * @clks: Ethernet clocks
93  * @modes: power management mode that this quirk applies to
94  * @dns_modes: do-not-suspend modes: abort the suspend if Ethernet is
95  *	       configured as a wakeup source but cannot wake the system in
96  *	       these modes and no other wakeup source is available
97  */
98 struct at91_pm_quirk_eth {
99 	struct device *dev;
100 	struct device_node *np;
101 	struct clk_bulk_data clks[AT91_PM_ETH_MAX_CLK];
102 	u32 modes;
103 	u32 dns_modes;
104 };
105 
106 /**
107  * struct at91_pm_quirks - AT91 PM quirks
108  * @eth: Ethernet quirks
109  */
110 struct at91_pm_quirks {
111 	struct at91_pm_quirk_eth eth[AT91_PM_MAX_ETH];
112 };
113 
114 /**
115  * struct at91_soc_pm - AT91 SoC power management data structure
116  * @config_shdwc_ws: wakeup sources configuration function for SHDWC
117  * @config_pmc_ws: wakeup sources configuration function for PMC
118  * @ws_ids: wakeup sources of_device_id array
119  * @bu: backup unit mapped data (for backup mode)
120  * @quirks: PM quirks
121  * @data: PM data to be used on last phase of suspend
122  * @sfrbu_regs: SFRBU registers mapping
123  * @memcs: memory chip select
124  */
125 struct at91_soc_pm {
126 	int (*config_shdwc_ws)(void __iomem *shdwc, u32 *mode, u32 *polarity);
127 	int (*config_pmc_ws)(void __iomem *pmc, u32 mode, u32 polarity);
128 	const struct of_device_id *ws_ids;
129 	struct at91_pm_bu *bu;
130 	struct at91_pm_quirks quirks;
131 	struct at91_pm_data data;
132 	struct at91_pm_sfrbu_regs sfrbu_regs;
133 	void *memcs;
134 };
135 
136 /**
137  * enum at91_pm_iomaps - IOs that need to be mapped for different PM modes
138  * @AT91_PM_IOMAP_SHDWC:	SHDWC controller
139  * @AT91_PM_IOMAP_SFRBU:	SFRBU controller
140  * @AT91_PM_IOMAP_ETHC:		Ethernet controller
141  */
142 enum at91_pm_iomaps {
143 	AT91_PM_IOMAP_SHDWC,
144 	AT91_PM_IOMAP_SFRBU,
145 	AT91_PM_IOMAP_ETHC,
146 };
147 
148 #define AT91_PM_IOMAP(name)	BIT(AT91_PM_IOMAP_##name)
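/*
 * These flags are OR'ed into per-mode u32 "iomaps" arrays, indexed by AT91 PM
 * mode and handed to at91_pm_modes_init(); for instance, sama5d2_pm_init()
 * below uses:
 *
 *	[AT91_PM_BACKUP] = AT91_PM_IOMAP(SHDWC) | AT91_PM_IOMAP(SFRBU)
 */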
149 
150 static struct at91_soc_pm soc_pm = {
151 	.data = {
152 		.standby_mode = AT91_PM_STANDBY,
153 		.suspend_mode = AT91_PM_ULP0,
154 	},
155 };
156 
157 static const match_table_t pm_modes __initconst = {
158 	{ AT91_PM_STANDBY,	"standby" },
159 	{ AT91_PM_ULP0,		"ulp0" },
160 	{ AT91_PM_ULP0_FAST,    "ulp0-fast" },
161 	{ AT91_PM_ULP1,		"ulp1" },
162 	{ AT91_PM_BACKUP,	"backup" },
163 	{ -1, NULL },
164 };
165 
166 #define at91_ramc_read(id, field) \
167 	__raw_readl(soc_pm.data.ramc[id] + field)
168 
169 #define at91_ramc_write(id, field, value) \
170 	__raw_writel(value, soc_pm.data.ramc[id] + field)
171 
172 static int at91_pm_valid_state(suspend_state_t state)
173 {
174 	switch (state) {
175 		case PM_SUSPEND_ON:
176 		case PM_SUSPEND_STANDBY:
177 		case PM_SUSPEND_MEM:
178 			return 1;
179 
180 		default:
181 			return 0;
182 	}
183 }
184 
185 static int canary = 0xA5A5A5A5;
186 
187 struct wakeup_source_info {
188 	unsigned int pmc_fsmr_bit;
189 	unsigned int shdwc_mr_bit;
190 	bool set_polarity;
191 };
192 
193 static const struct wakeup_source_info ws_info[] = {
194 	{ .pmc_fsmr_bit = AT91_PMC_FSTT(10),	.set_polarity = true },
195 	{ .pmc_fsmr_bit = AT91_PMC_RTCAL,	.shdwc_mr_bit = BIT(17) },
196 	{ .pmc_fsmr_bit = AT91_PMC_USBAL },
197 	{ .pmc_fsmr_bit = AT91_PMC_SDMMC_CD },
198 	{ .pmc_fsmr_bit = AT91_PMC_RTTAL },
199 	{ .pmc_fsmr_bit = AT91_PMC_RXLP_MCE },
200 };
201 
202 static const struct of_device_id sama5d2_ws_ids[] = {
203 	{ .compatible = "atmel,sama5d2-gem",		.data = &ws_info[0] },
204 	{ .compatible = "atmel,sama5d2-rtc",		.data = &ws_info[1] },
205 	{ .compatible = "atmel,sama5d3-udc",		.data = &ws_info[2] },
206 	{ .compatible = "atmel,at91rm9200-ohci",	.data = &ws_info[2] },
207 	{ .compatible = "usb-ohci",			.data = &ws_info[2] },
208 	{ .compatible = "atmel,at91sam9g45-ehci",	.data = &ws_info[2] },
209 	{ .compatible = "usb-ehci",			.data = &ws_info[2] },
210 	{ .compatible = "atmel,sama5d2-sdhci",		.data = &ws_info[3] },
211 	{ /* sentinel */ }
212 };
213 
214 static const struct of_device_id sam9x60_ws_ids[] = {
215 	{ .compatible = "microchip,sam9x60-rtc",	.data = &ws_info[1] },
216 	{ .compatible = "atmel,at91rm9200-ohci",	.data = &ws_info[2] },
217 	{ .compatible = "usb-ohci",			.data = &ws_info[2] },
218 	{ .compatible = "atmel,at91sam9g45-ehci",	.data = &ws_info[2] },
219 	{ .compatible = "usb-ehci",			.data = &ws_info[2] },
220 	{ .compatible = "microchip,sam9x60-rtt",	.data = &ws_info[4] },
221 	{ .compatible = "cdns,sam9x60-macb",		.data = &ws_info[5] },
222 	{ /* sentinel */ }
223 };
224 
225 static const struct of_device_id sama7g5_ws_ids[] = {
226 	{ .compatible = "microchip,sama7g5-rtc",	.data = &ws_info[1] },
227 	{ .compatible = "microchip,sama7g5-ohci",	.data = &ws_info[2] },
228 	{ .compatible = "usb-ohci",			.data = &ws_info[2] },
229 	{ .compatible = "atmel,at91sam9g45-ehci",	.data = &ws_info[2] },
230 	{ .compatible = "usb-ehci",			.data = &ws_info[2] },
231 	{ .compatible = "microchip,sama7g5-sdhci",	.data = &ws_info[3] },
232 	{ .compatible = "microchip,sama7g5-rtt",	.data = &ws_info[4] },
233 	{ /* sentinel */ }
234 };
235 
236 static int at91_pm_config_ws(unsigned int pm_mode, bool set)
237 {
238 	const struct wakeup_source_info *wsi;
239 	const struct of_device_id *match;
240 	struct platform_device *pdev;
241 	struct device_node *np;
242 	unsigned int mode = 0, polarity = 0, val = 0;
243 
244 	if (pm_mode != AT91_PM_ULP1)
245 		return 0;
246 
247 	if (!soc_pm.data.pmc || !soc_pm.data.shdwc || !soc_pm.ws_ids)
248 		return -EPERM;
249 
250 	if (!set) {
251 		writel(mode, soc_pm.data.pmc + AT91_PMC_FSMR);
252 		return 0;
253 	}
254 
255 	if (soc_pm.config_shdwc_ws)
256 		soc_pm.config_shdwc_ws(soc_pm.data.shdwc, &mode, &polarity);
257 
258 	/* SHDWC.MR */
259 	val = readl(soc_pm.data.shdwc + 0x04);
260 
261 	/* Loop through defined wakeup sources. */
262 	for_each_matching_node_and_match(np, soc_pm.ws_ids, &match) {
263 		pdev = of_find_device_by_node(np);
264 		if (!pdev)
265 			continue;
266 
267 		if (device_may_wakeup(&pdev->dev)) {
268 			wsi = match->data;
269 
270 			/* Check if enabled on SHDWC. */
271 			if (wsi->shdwc_mr_bit && !(val & wsi->shdwc_mr_bit))
272 				goto put_device;
273 
274 			mode |= wsi->pmc_fsmr_bit;
275 			if (wsi->set_polarity)
276 				polarity |= wsi->pmc_fsmr_bit;
277 		}
278 
279 put_device:
280 		put_device(&pdev->dev);
281 	}
282 
283 	if (mode) {
284 		if (soc_pm.config_pmc_ws)
285 			soc_pm.config_pmc_ws(soc_pm.data.pmc, mode, polarity);
286 	} else {
287 		pr_err("AT91: PM: no ULP1 wakeup sources found!");
288 	}
289 
290 	return mode ? 0 : -EPERM;
291 }
292 
293 static int at91_sama5d2_config_shdwc_ws(void __iomem *shdwc, u32 *mode,
294 					u32 *polarity)
295 {
296 	u32 val;
297 
298 	/* SHDWC.WUIR */
299 	val = readl(shdwc + 0x0c);
300 	*mode |= (val & 0x3ff);
301 	*polarity |= ((val >> 16) & 0x3ff);
302 
303 	return 0;
304 }
305 
306 static int at91_sama5d2_config_pmc_ws(void __iomem *pmc, u32 mode, u32 polarity)
307 {
308 	writel(mode, pmc + AT91_PMC_FSMR);
309 	writel(polarity, pmc + AT91_PMC_FSPR);
310 
311 	return 0;
312 }
313 
314 static int at91_sam9x60_config_pmc_ws(void __iomem *pmc, u32 mode, u32 polarity)
315 {
316 	writel(mode, pmc + AT91_PMC_FSMR);
317 
318 	return 0;
319 }
320 
321 static bool at91_pm_eth_quirk_is_valid(struct at91_pm_quirk_eth *eth)
322 {
323 	struct platform_device *pdev;
324 
325 	/* Interface NA in DT. */
326 	if (!eth->np)
327 		return false;
328 
329 	/* No quirks for this interface and current suspend mode. */
330 	if (!(eth->modes & BIT(soc_pm.data.mode)))
331 		return false;
332 
333 	if (!eth->dev) {
334 		/* Driver not probed. */
335 		pdev = of_find_device_by_node(eth->np);
336 		if (!pdev)
337 			return false;
338 		/* put_device(eth->dev) is called at the end of suspend. */
339 		eth->dev = &pdev->dev;
340 	}
341 
342 	/* No quirks if device isn't a wakeup source. */
343 	if (!device_may_wakeup(eth->dev))
344 		return false;
345 
346 	return true;
347 }
348 
349 static int at91_pm_config_quirks(bool suspend)
350 {
351 	struct at91_pm_quirk_eth *eth;
352 	int i, j, ret, tmp;
353 
354 	/*
355 	 * Ethernet IPs whose device_node pointers are stored in
356 	 * soc_pm.quirks.eth[].np cannot handle WoL packets while in ULP0,
357 	 * ULP1 or both, due to a hardware bug. If they receive WoL packets
358 	 * while in ULP0 or ULP1, the IPs or even the whole system could stop
359 	 * working. We cannot handle this scenario in the Ethernet driver
360 	 * itself, as the driver is shared by multiple vendors, and only here,
361 	 * in this file, do we know whether we suspend to ULP0 or ULP1 mode.
362 	 * Thus handle these scenarios here, as quirks.
363 	 */
364 	for (i = 0; i < AT91_PM_MAX_ETH; i++) {
365 		eth = &soc_pm.quirks.eth[i];
366 
367 		if (!at91_pm_eth_quirk_is_valid(eth))
368 			continue;
369 
370 		/*
371 		 * For modes in the dns_modes mask the system hangs if the quirk
372 		 * is not applied, but with the quirk applied the interface does
373 		 * not react to WoL events. Thus take care to avoid suspending
374 		 * if this interface is the only configured wakeup source.
375 		 */
376 		if (suspend && eth->dns_modes & BIT(soc_pm.data.mode)) {
377 			int ws_count = 0;
378 #ifdef CONFIG_PM_SLEEP
379 			struct wakeup_source *ws;
380 
381 			for_each_wakeup_source(ws) {
382 				if (ws->dev == eth->dev)
383 					continue;
384 
385 				ws_count++;
386 				break;
387 			}
388 #endif
389 
390 			/*
391 			 * Checking !ws_count is valid for all affected platforms,
392 			 * even when both G_ETH and E_ETH are available, as
393 			 * dns_modes is populated only for the G_ETH interface.
394 			 */
395 			if (!ws_count) {
396 				pr_err("AT91: PM: Ethernet cannot resume from WoL!");
397 				ret = -EPERM;
398 				put_device(eth->dev);
399 				eth->dev = NULL;
400 				/* No need to revert clock settings for this eth. */
401 				i--;
402 				goto clk_unconfigure;
403 			}
404 		}
405 
406 		if (suspend) {
407 			clk_bulk_disable_unprepare(AT91_PM_ETH_MAX_CLK, eth->clks);
408 		} else {
409 			ret = clk_bulk_prepare_enable(AT91_PM_ETH_MAX_CLK,
410 						      eth->clks);
411 			if (ret)
412 				goto clk_unconfigure;
413 			/*
414 			 * Release the reference to eth->dev taken in
415 			 * at91_pm_eth_quirk_is_valid().
416 			 */
417 			put_device(eth->dev);
418 			eth->dev = NULL;
419 		}
420 	}
421 
422 	return 0;
423 
424 clk_unconfigure:
425 	/*
426 	 * In case of resume we reach this point if clk_prepare_enable() failed;
427 	 * we don't want to revert the previous clk_prepare_enable() for the
428 	 * other IP.
429 	 */
430 	for (j = i; j >= 0; j--) {
431 		eth = &soc_pm.quirks.eth[j];
432 		if (suspend) {
433 			if (!at91_pm_eth_quirk_is_valid(eth))
434 				continue;
435 
436 			tmp = clk_bulk_prepare_enable(AT91_PM_ETH_MAX_CLK, eth->clks);
437 			if (tmp) {
438 				pr_err("AT91: PM: failed to enable %s clocks\n",
439 				       j == AT91_PM_G_ETH ? "geth" : "eth");
440 			}
441 		}
442 
443 		/*
444 		 * Release the reference to eth->dev taken in
445 		 * at91_pm_eth_quirk_is_valid().
446 		 */
447 		put_device(eth->dev);
448 		eth->dev = NULL;
449 	}
450 
451 	return ret;
452 }
453 
454 /*
455  * Called after processes are frozen, but before we shut down devices.
456  */
457 static int at91_pm_begin(suspend_state_t state)
458 {
459 	int ret;
460 
461 	switch (state) {
462 	case PM_SUSPEND_MEM:
463 		soc_pm.data.mode = soc_pm.data.suspend_mode;
464 		break;
465 
466 	case PM_SUSPEND_STANDBY:
467 		soc_pm.data.mode = soc_pm.data.standby_mode;
468 		break;
469 
470 	default:
471 		soc_pm.data.mode = -1;
472 	}
473 
474 	ret = at91_pm_config_ws(soc_pm.data.mode, true);
475 	if (ret)
476 		return ret;
477 
478 	if (soc_pm.data.mode == AT91_PM_BACKUP)
479 		soc_pm.bu->suspended = 1;
480 	else if (soc_pm.bu)
481 		soc_pm.bu->suspended = 0;
482 
483 	return 0;
484 }
485 
486 /*
487  * Verify that all the clocks are correct before entering
488  * slow-clock mode.
489  */
490 static int at91_pm_verify_clocks(void)
491 {
492 	unsigned long scsr;
493 	int i;
494 
495 	scsr = readl(soc_pm.data.pmc + AT91_PMC_SCSR);
496 
497 	/* USB must not be using PLLB */
498 	if ((scsr & soc_pm.data.uhp_udp_mask) != 0) {
499 		pr_err("AT91: PM - Suspend-to-RAM with USB still active\n");
500 		return 0;
501 	}
502 
503 	/* PCK0..PCK3 must be disabled, or configured to use clk32k */
504 	for (i = 0; i < 4; i++) {
505 		u32 css;
506 
507 		if ((scsr & (AT91_PMC_PCK0 << i)) == 0)
508 			continue;
509 		css = readl(soc_pm.data.pmc + AT91_PMC_PCKR(i)) & AT91_PMC_CSS;
510 		if (css != AT91_PMC_CSS_SLOW) {
511 			pr_err("AT91: PM - Suspend-to-RAM with PCK%d src %d\n", i, css);
512 			return 0;
513 		}
514 	}
515 
516 	return 1;
517 }
518 
519 /*
520  * Call this from platform driver suspend() to see how deeply to suspend.
521  * For example, some controllers (like OHCI) need one of the PLL clocks
522  * in order to act as a wakeup source, and those are not available when
523  * going into slow clock mode.
524  *
525  * REVISIT: generalize as clk_will_be_available(clk)?  Other platforms have
526  * the very same problem (but not using at91 main_clk), and it'd be better
527  * to add one generic API rather than lots of platform-specific ones.
528  */
529 int at91_suspend_entering_slow_clock(void)
530 {
531 	return (soc_pm.data.mode >= AT91_PM_ULP0);
532 }
533 EXPORT_SYMBOL(at91_suspend_entering_slow_clock);
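/*
 * Usage sketch (hypothetical driver, not taken from this file): a platform
 * driver's suspend() callback could check the return value before relying on
 * a PLL-derived clock as a wakeup source, e.g.:
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		if (at91_suspend_entering_slow_clock())
 *			foo_disable_pll_wakeup(dev);	// assumed helper
 *		return 0;
 *	}
 */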
534 
535 static void (*at91_suspend_sram_fn)(struct at91_pm_data *);
536 extern void at91_pm_suspend_in_sram(struct at91_pm_data *pm_data);
537 extern u32 at91_pm_suspend_in_sram_sz;
538 
539 static int at91_suspend_finish(unsigned long val)
540 {
541 	/* Synopsys workaround to fix a bug in the calibration logic */
542 	unsigned char modified_fix_code[] = {
543 		0x00, 0x01, 0x01, 0x06, 0x07, 0x0c, 0x06, 0x07, 0x0b, 0x18,
544 		0x0a, 0x0b, 0x0c, 0x0d, 0x0d, 0x0a, 0x13, 0x13, 0x12, 0x13,
545 		0x14, 0x15, 0x15, 0x12, 0x18, 0x19, 0x19, 0x1e, 0x1f, 0x14,
546 		0x1e, 0x1f,
547 	};
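	/*
	 * modified_fix_code[] maps a raw 5-bit impedance code read from
	 * ZQ0SR0 to the value that is safe to hand back to the bootloader
	 * for restore; see the comment below.
	 */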
548 	unsigned int tmp, index;
549 	int i;
550 
551 	if (soc_pm.data.mode == AT91_PM_BACKUP && soc_pm.data.ramc_phy) {
552 		/*
553 		 * Bootloader will perform DDR recalibration and will try to
554 		 * restore the ZQ0SR0 with the value saved here. But the
555 		 * calibration is buggy and restoring some values from ZQ0SR0
556 		 * is forbidden and risky, thus we need to provide processed
557 		 * values for these.
558 		 */
559 		tmp = readl(soc_pm.data.ramc_phy + DDR3PHY_ZQ0SR0);
560 
561 		/* Store pull-down output impedance select. */
562 		index = (tmp >> DDR3PHY_ZQ0SR0_PDO_OFF) & 0x1f;
563 		soc_pm.bu->ddr_phy_calibration[0] = modified_fix_code[index] << DDR3PHY_ZQ0SR0_PDO_OFF;
564 
565 		/* Store pull-up output impedance select. */
566 		index = (tmp >> DDR3PHY_ZQ0SR0_PUO_OFF) & 0x1f;
567 		soc_pm.bu->ddr_phy_calibration[0] |= modified_fix_code[index] << DDR3PHY_ZQ0SR0_PUO_OFF;
568 
569 		/* Store pull-down on-die termination impedance select. */
570 		index = (tmp >> DDR3PHY_ZQ0SR0_PDODT_OFF) & 0x1f;
571 		soc_pm.bu->ddr_phy_calibration[0] |= modified_fix_code[index] << DDR3PHY_ZQ0SR0_PDODT_OFF;
572 
573 		/* Store pull-up on-die termination impedance select. */
574 		index = (tmp >> DDR3PHY_ZQ0SRO_PUODT_OFF) & 0x1f;
575 		soc_pm.bu->ddr_phy_calibration[0] |= modified_fix_code[index] << DDR3PHY_ZQ0SRO_PUODT_OFF;
576 
577 		/*
578 		 * The first 8 words of memory might get corrupted in the process
579 		 * of DDR PHY recalibration; they are saved here in securam and
580 		 * will be restored later, after recalibration, by the bootloader.
581 		 */
582 		for (i = 1; i < BACKUP_DDR_PHY_CALIBRATION; i++)
583 			soc_pm.bu->ddr_phy_calibration[i] =
584 				*((unsigned int *)soc_pm.memcs + (i - 1));
585 	}
586 
587 	flush_cache_all();
588 	outer_disable();
589 
590 	at91_suspend_sram_fn(&soc_pm.data);
591 
592 	return 0;
593 }
594 
595 /**
596  * at91_pm_switch_ba_to_auto() - Configure Backup Unit Power Switch
597  * to automatic/hardware mode.
598  *
599  * The Backup Unit Power Switch can be managed either by software or hardware.
600  * Enabling hardware mode allows the automatic transition of power between
601  * VDDANA (or VDDIN33) and VDDBU (or VBAT, respectively), based on the
602  * availability of these power sources.
603  *
604  * If the Backup Unit Power Switch is already in automatic mode, no action is
605  * required. If it is in software-controlled mode, it is switched to automatic
606  * mode to enhance safety and eliminate the need for toggling between power
607  * sources.
608  */
609 static void at91_pm_switch_ba_to_auto(void)
610 {
611 	unsigned int offset = offsetof(struct at91_pm_sfrbu_regs, pswbu);
612 	unsigned int val;
613 
614 	/* Just for safety. */
615 	if (!soc_pm.data.sfrbu)
616 		return;
617 
618 	val = readl(soc_pm.data.sfrbu + offset);
619 
620 	/* Already on auto/hardware. */
621 	if (!(val & soc_pm.sfrbu_regs.pswbu.ctrl))
622 		return;
623 
624 	val &= ~soc_pm.sfrbu_regs.pswbu.ctrl;
625 	val |= soc_pm.sfrbu_regs.pswbu.key;
626 	writel(val, soc_pm.data.sfrbu + offset);
627 }
628 
629 static void at91_pm_suspend(suspend_state_t state)
630 {
631 	if (soc_pm.data.mode == AT91_PM_BACKUP) {
632 		at91_pm_switch_ba_to_auto();
633 
634 		cpu_suspend(0, at91_suspend_finish);
635 
636 		/* The SRAM is lost between suspend cycles */
637 		at91_suspend_sram_fn = fncpy(at91_suspend_sram_fn,
638 					     &at91_pm_suspend_in_sram,
639 					     at91_pm_suspend_in_sram_sz);
640 	} else {
641 		at91_suspend_finish(0);
642 	}
643 
644 	outer_resume();
645 }
646 
647 /*
648  * STANDBY mode has *all* drivers suspended; ignores irqs not marked as 'wakeup'
649  * event sources; and reduces DRAM power.  But otherwise it's identical to
650  * PM_SUSPEND_ON: cpu idle, and nothing fancy done with main or cpu clocks.
651  *
652  * AT91_PM_ULP0 is like STANDBY plus slow clock mode, so drivers must
653  * suspend more deeply: the master clock switches to clk32k and the main
654  * oscillator is turned off.
655  *
656  * AT91_PM_BACKUP turns off the whole SoC after placing the DDR in self refresh
657  */
658 static int at91_pm_enter(suspend_state_t state)
659 {
660 	int ret;
661 
662 	ret = at91_pm_config_quirks(true);
663 	if (ret)
664 		return ret;
665 
666 	switch (state) {
667 	case PM_SUSPEND_MEM:
668 	case PM_SUSPEND_STANDBY:
669 		/*
670 		 * Ensure that clocks are in a valid state.
671 		 */
672 		if (soc_pm.data.mode >= AT91_PM_ULP0 &&
673 		    !at91_pm_verify_clocks())
674 			goto error;
675 
676 		at91_pm_suspend(state);
677 
678 		break;
679 
680 	case PM_SUSPEND_ON:
681 		cpu_do_idle();
682 		break;
683 
684 	default:
685 		pr_debug("AT91: PM - bogus suspend state %d\n", state);
686 		goto error;
687 	}
688 
689 error:
690 	at91_pm_config_quirks(false);
691 	return 0;
692 }
693 
694 /*
695  * Called right prior to thawing processes.
696  */
697 static void at91_pm_end(void)
698 {
699 	at91_pm_config_ws(soc_pm.data.mode, false);
700 }
701 
702 
703 static const struct platform_suspend_ops at91_pm_ops = {
704 	.valid	= at91_pm_valid_state,
705 	.begin	= at91_pm_begin,
706 	.enter	= at91_pm_enter,
707 	.end	= at91_pm_end,
708 };
709 
710 static struct platform_device at91_cpuidle_device = {
711 	.name = "cpuidle-at91",
712 };
713 
714 /*
715  * The AT91RM9200 goes into self-refresh mode with this command, and will
716  * terminate self-refresh automatically on the next SDRAM access.
717  *
718  * Self-refresh mode is exited as soon as a memory access is made, but we don't
719  * know for sure when that happens. However, we need to restore the low-power
720  * mode if it was enabled before going idle. Restoring low-power mode while
721  * still in self-refresh is "not recommended", but seems to work.
722  */
723 static void at91rm9200_standby(void)
724 {
725 	asm volatile(
726 		"b    1f\n\t"
727 		".align    5\n\t"
728 		"1:  mcr    p15, 0, %0, c7, c10, 4\n\t"
729 		"    str    %2, [%1, %3]\n\t"
730 		"    mcr    p15, 0, %0, c7, c0, 4\n\t"
731 		:
732 		: "r" (0), "r" (soc_pm.data.ramc[0]),
733 		  "r" (1), "r" (AT91_MC_SDRAMC_SRR));
734 }
735 
736 /* We manage both DDRAM/SDRAM controllers, so we need to remember more
737  * than one value.
738  */
739 static void at91_ddr_standby(void)
740 {
741 	/* Those two values allow us to delay self-refresh activation
742 	 * to the maximum. */
743 	u32 lpr0, lpr1 = 0;
744 	u32 mdr, saved_mdr0, saved_mdr1 = 0;
745 	u32 saved_lpr0, saved_lpr1 = 0;
746 
747 	/* LPDDR1 --> force DDR2 mode during self-refresh */
748 	saved_mdr0 = at91_ramc_read(0, AT91_DDRSDRC_MDR);
749 	if ((saved_mdr0 & AT91_DDRSDRC_MD) == AT91_DDRSDRC_MD_LOW_POWER_DDR) {
750 		mdr = saved_mdr0 & ~AT91_DDRSDRC_MD;
751 		mdr |= AT91_DDRSDRC_MD_DDR2;
752 		at91_ramc_write(0, AT91_DDRSDRC_MDR, mdr);
753 	}
754 
755 	if (soc_pm.data.ramc[1]) {
756 		saved_lpr1 = at91_ramc_read(1, AT91_DDRSDRC_LPR);
757 		lpr1 = saved_lpr1 & ~AT91_DDRSDRC_LPCB;
758 		lpr1 |= AT91_DDRSDRC_LPCB_SELF_REFRESH;
759 		saved_mdr1 = at91_ramc_read(1, AT91_DDRSDRC_MDR);
760 		if ((saved_mdr1 & AT91_DDRSDRC_MD) == AT91_DDRSDRC_MD_LOW_POWER_DDR) {
761 			mdr = saved_mdr1 & ~AT91_DDRSDRC_MD;
762 			mdr |= AT91_DDRSDRC_MD_DDR2;
763 			at91_ramc_write(1, AT91_DDRSDRC_MDR, mdr);
764 		}
765 	}
766 
767 	saved_lpr0 = at91_ramc_read(0, AT91_DDRSDRC_LPR);
768 	lpr0 = saved_lpr0 & ~AT91_DDRSDRC_LPCB;
769 	lpr0 |= AT91_DDRSDRC_LPCB_SELF_REFRESH;
770 
771 	/* self-refresh mode now */
772 	at91_ramc_write(0, AT91_DDRSDRC_LPR, lpr0);
773 	if (soc_pm.data.ramc[1])
774 		at91_ramc_write(1, AT91_DDRSDRC_LPR, lpr1);
775 
776 	cpu_do_idle();
777 
778 	at91_ramc_write(0, AT91_DDRSDRC_MDR, saved_mdr0);
779 	at91_ramc_write(0, AT91_DDRSDRC_LPR, saved_lpr0);
780 	if (soc_pm.data.ramc[1]) {
781 		at91_ramc_write(1, AT91_DDRSDRC_MDR, saved_mdr1);
782 		at91_ramc_write(1, AT91_DDRSDRC_LPR, saved_lpr1);
783 	}
784 }
785 
786 static void sama5d3_ddr_standby(void)
787 {
788 	u32 lpr0;
789 	u32 saved_lpr0;
790 
791 	saved_lpr0 = at91_ramc_read(0, AT91_DDRSDRC_LPR);
792 	lpr0 = saved_lpr0 & ~AT91_DDRSDRC_LPCB;
793 	lpr0 |= AT91_DDRSDRC_LPCB_POWER_DOWN;
794 
795 	at91_ramc_write(0, AT91_DDRSDRC_LPR, lpr0);
796 
797 	cpu_do_idle();
798 
799 	at91_ramc_write(0, AT91_DDRSDRC_LPR, saved_lpr0);
800 }
801 
802 /* We manage both DDRAM/SDRAM controllers, so we need to remember more
803  * than one value.
804  */
805 static void at91sam9_sdram_standby(void)
806 {
807 	u32 lpr0, lpr1 = 0;
808 	u32 saved_lpr0, saved_lpr1 = 0;
809 
810 	if (soc_pm.data.ramc[1]) {
811 		saved_lpr1 = at91_ramc_read(1, AT91_SDRAMC_LPR);
812 		lpr1 = saved_lpr1 & ~AT91_SDRAMC_LPCB;
813 		lpr1 |= AT91_SDRAMC_LPCB_SELF_REFRESH;
814 	}
815 
816 	saved_lpr0 = at91_ramc_read(0, AT91_SDRAMC_LPR);
817 	lpr0 = saved_lpr0 & ~AT91_SDRAMC_LPCB;
818 	lpr0 |= AT91_SDRAMC_LPCB_SELF_REFRESH;
819 
820 	/* self-refresh mode now */
821 	at91_ramc_write(0, AT91_SDRAMC_LPR, lpr0);
822 	if (soc_pm.data.ramc[1])
823 		at91_ramc_write(1, AT91_SDRAMC_LPR, lpr1);
824 
825 	cpu_do_idle();
826 
827 	at91_ramc_write(0, AT91_SDRAMC_LPR, saved_lpr0);
828 	if (soc_pm.data.ramc[1])
829 		at91_ramc_write(1, AT91_SDRAMC_LPR, saved_lpr1);
830 }
831 
832 static void sama7g5_standby(void)
833 {
834 	int pwrtmg, ratio;
835 
836 	pwrtmg = readl(soc_pm.data.ramc[0] + UDDRC_PWRCTL);
837 	ratio = readl(soc_pm.data.pmc + AT91_PMC_RATIO);
838 
839 	/*
840 	 * Place the RAM into self-refresh after a maximum number of idle
841 	 * clock cycles, as configured by the bootloader in
842 	 * UDDRC_PWRMGT.SELFREF_TO_X32.
843 	 */
844 	writel(pwrtmg | UDDRC_PWRCTL_SELFREF_EN,
845 	       soc_pm.data.ramc[0] + UDDRC_PWRCTL);
846 	/* Divide CPU clock by 16. */
847 	writel(ratio & ~AT91_PMC_RATIO_RATIO, soc_pm.data.pmc + AT91_PMC_RATIO);
848 
849 	cpu_do_idle();
850 
851 	/* Restore previous configuration. */
852 	writel(ratio, soc_pm.data.pmc + AT91_PMC_RATIO);
853 	writel(pwrtmg, soc_pm.data.ramc[0] + UDDRC_PWRCTL);
854 }
855 
856 struct ramc_info {
857 	void (*idle)(void);
858 	unsigned int memctrl;
859 };
860 
861 static const struct ramc_info ramc_infos[] __initconst = {
862 	{ .idle = at91rm9200_standby, .memctrl = AT91_MEMCTRL_MC},
863 	{ .idle = at91sam9_sdram_standby, .memctrl = AT91_MEMCTRL_SDRAMC},
864 	{ .idle = at91_ddr_standby, .memctrl = AT91_MEMCTRL_DDRSDR},
865 	{ .idle = sama5d3_ddr_standby, .memctrl = AT91_MEMCTRL_DDRSDR},
866 	{ .idle = sama7g5_standby, },
867 };
868 
869 static const struct of_device_id ramc_ids[] __initconst = {
870 	{ .compatible = "atmel,at91rm9200-sdramc", .data = &ramc_infos[0] },
871 	{ .compatible = "atmel,at91sam9260-sdramc", .data = &ramc_infos[1] },
872 	{ .compatible = "atmel,at91sam9g45-ddramc", .data = &ramc_infos[2] },
873 	{ .compatible = "atmel,sama5d3-ddramc", .data = &ramc_infos[3] },
874 	{ .compatible = "microchip,sama7g5-uddrc", .data = &ramc_infos[4], },
875 	{ /*sentinel*/ }
876 };
877 
878 static const struct of_device_id ramc_phy_ids[] __initconst = {
879 	{ .compatible = "microchip,sama7g5-ddr3phy", },
880 	{ /* Sentinel. */ },
881 };
882 
883 static __init int at91_dt_ramc(bool phy_mandatory)
884 {
885 	struct device_node *np;
886 	const struct of_device_id *of_id;
887 	int idx = 0;
888 	void *standby = NULL;
889 	const struct ramc_info *ramc;
890 	int ret;
891 
892 	for_each_matching_node_and_match(np, ramc_ids, &of_id) {
893 		soc_pm.data.ramc[idx] = of_iomap(np, 0);
894 		if (!soc_pm.data.ramc[idx]) {
895 			pr_err("unable to map ramc[%d] cpu registers\n", idx);
896 			ret = -ENOMEM;
897 			of_node_put(np);
898 			goto unmap_ramc;
899 		}
900 
901 		ramc = of_id->data;
902 		if (ramc) {
903 			if (!standby)
904 				standby = ramc->idle;
905 			soc_pm.data.memctrl = ramc->memctrl;
906 		}
907 
908 		idx++;
909 	}
910 
911 	if (!idx) {
912 		pr_err("unable to find compatible ram controller node in dtb\n");
913 		ret = -ENODEV;
914 		goto unmap_ramc;
915 	}
916 
917 	/* Look up the DDR PHY node, if any. */
918 	for_each_matching_node_and_match(np, ramc_phy_ids, &of_id) {
919 		soc_pm.data.ramc_phy = of_iomap(np, 0);
920 		if (!soc_pm.data.ramc_phy) {
921 			pr_err("unable to map ramc phy cpu registers\n");
922 			ret = -ENOMEM;
923 			of_node_put(np);
924 			goto unmap_ramc;
925 		}
926 	}
927 
928 	if (phy_mandatory && !soc_pm.data.ramc_phy) {
929 		pr_err("DDR PHY is mandatory!\n");
930 		ret = -ENODEV;
931 		goto unmap_ramc;
932 	}
933 
934 	if (!standby) {
935 		pr_warn("ramc no standby function available\n");
936 		return 0;
937 	}
938 
939 	at91_cpuidle_device.dev.platform_data = standby;
940 
941 	return 0;
942 
943 unmap_ramc:
944 	while (idx)
945 		iounmap(soc_pm.data.ramc[--idx]);
946 
947 	return ret;
948 }
949 
950 static void at91rm9200_idle(void)
951 {
952 	/*
953 	 * Disable the processor clock.  The processor clock will be
954 	 * automatically re-enabled by an interrupt or by a reset.
955 	 */
956 	writel(AT91_PMC_PCK, soc_pm.data.pmc + AT91_PMC_SCDR);
957 }
958 
959 static void at91sam9_idle(void)
960 {
961 	writel(AT91_PMC_PCK, soc_pm.data.pmc + AT91_PMC_SCDR);
962 	cpu_do_idle();
963 }
964 
965 static void __init at91_pm_sram_init(void)
966 {
967 	struct gen_pool *sram_pool;
968 	phys_addr_t sram_pbase;
969 	unsigned long sram_base;
970 	struct device_node *node;
971 	struct platform_device *pdev = NULL;
972 
973 	for_each_compatible_node(node, NULL, "mmio-sram") {
974 		pdev = of_find_device_by_node(node);
975 		if (pdev) {
976 			of_node_put(node);
977 			break;
978 		}
979 	}
980 
981 	if (!pdev) {
982 		pr_warn("%s: failed to find sram device!\n", __func__);
983 		return;
984 	}
985 
986 	sram_pool = gen_pool_get(&pdev->dev, NULL);
987 	if (!sram_pool) {
988 		pr_warn("%s: sram pool unavailable!\n", __func__);
989 		goto out_put_device;
990 	}
991 
992 	sram_base = gen_pool_alloc(sram_pool, at91_pm_suspend_in_sram_sz);
993 	if (!sram_base) {
994 		pr_warn("%s: unable to alloc sram!\n", __func__);
995 		goto out_put_device;
996 	}
997 
998 	sram_pbase = gen_pool_virt_to_phys(sram_pool, sram_base);
999 	at91_suspend_sram_fn = __arm_ioremap_exec(sram_pbase,
1000 					at91_pm_suspend_in_sram_sz, false);
1001 	if (!at91_suspend_sram_fn) {
1002 		pr_warn("SRAM: Could not map\n");
1003 		goto out_put_device;
1004 	}
1005 
1006 	/* Copy the pm suspend handler to SRAM */
1007 	at91_suspend_sram_fn = fncpy(at91_suspend_sram_fn,
1008 			&at91_pm_suspend_in_sram, at91_pm_suspend_in_sram_sz);
1009 	return;
1010 
1011 out_put_device:
1012 	put_device(&pdev->dev);
1013 	return;
1014 }
1015 
1016 static bool __init at91_is_pm_mode_active(int pm_mode)
1017 {
1018 	return (soc_pm.data.standby_mode == pm_mode ||
1019 		soc_pm.data.suspend_mode == pm_mode);
1020 }
1021 
1022 static int __init at91_pm_backup_scan_memcs(unsigned long node,
1023 					    const char *uname, int depth,
1024 					    void *data)
1025 {
1026 	const char *type;
1027 	const __be32 *reg;
1028 	int *located = data;
1029 	int size;
1030 
1031 	/* Memory node already located. */
1032 	if (*located)
1033 		return 0;
1034 
1035 	type = of_get_flat_dt_prop(node, "device_type", NULL);
1036 
1037 	/* We are scanning "memory" nodes only. */
1038 	if (!type || strcmp(type, "memory"))
1039 		return 0;
1040 
1041 	reg = of_get_flat_dt_prop(node, "reg", &size);
1042 	if (reg) {
1043 		soc_pm.memcs = __va((phys_addr_t)be32_to_cpu(*reg));
1044 		*located = 1;
1045 	}
1046 
1047 	return 0;
1048 }
1049 
1050 static int __init at91_pm_backup_init(void)
1051 {
1052 	struct gen_pool *sram_pool;
1053 	struct device_node *np;
1054 	struct platform_device *pdev;
1055 	int ret = -ENODEV, located = 0;
1056 
1057 	if (!IS_ENABLED(CONFIG_SOC_SAMA5D2) &&
1058 	    !IS_ENABLED(CONFIG_SOC_SAMA7G5))
1059 		return -EPERM;
1060 
1061 	if (!at91_is_pm_mode_active(AT91_PM_BACKUP))
1062 		return 0;
1063 
1064 	np = of_find_compatible_node(NULL, NULL, "atmel,sama5d2-securam");
1065 	if (!np)
1066 		return ret;
1067 
1068 	pdev = of_find_device_by_node(np);
1069 	of_node_put(np);
1070 	if (!pdev) {
1071 		pr_warn("%s: failed to find securam device!\n", __func__);
1072 		return ret;
1073 	}
1074 
1075 	sram_pool = gen_pool_get(&pdev->dev, NULL);
1076 	if (!sram_pool) {
1077 		pr_warn("%s: securam pool unavailable!\n", __func__);
1078 		goto securam_fail;
1079 	}
1080 
1081 	soc_pm.bu = (void *)gen_pool_alloc(sram_pool, sizeof(struct at91_pm_bu));
1082 	if (!soc_pm.bu) {
1083 		pr_warn("%s: unable to alloc securam!\n", __func__);
1084 		ret = -ENOMEM;
1085 		goto securam_fail;
1086 	}
1087 
1088 	soc_pm.bu->suspended = 0;
1089 	soc_pm.bu->canary = __pa_symbol(&canary);
1090 	soc_pm.bu->resume = __pa_symbol(cpu_resume);
1091 	if (soc_pm.data.ramc_phy) {
1092 		of_scan_flat_dt(at91_pm_backup_scan_memcs, &located);
1093 		if (!located)
1094 			goto securam_fail;
1095 	}
1096 
1097 	return 0;
1098 
1099 securam_fail:
1100 	put_device(&pdev->dev);
1101 	return ret;
1102 }
1103 
1104 static void __init at91_pm_secure_init(void)
1105 {
1106 	int suspend_mode;
1107 	struct arm_smccc_res res;
1108 
1109 	suspend_mode = soc_pm.data.suspend_mode;
1110 
1111 	res = sam_smccc_call(SAMA5_SMC_SIP_SET_SUSPEND_MODE,
1112 			     suspend_mode, 0);
1113 	if (res.a0 == 0) {
1114 		pr_info("AT91: Secure PM: suspend mode set to %s\n",
1115 			pm_modes[suspend_mode].pattern);
1116 		return;
1117 	}
1118 
1119 	pr_warn("AT91: Secure PM: %s mode not supported !\n",
1120 		pm_modes[suspend_mode].pattern);
1121 
1122 	res = sam_smccc_call(SAMA5_SMC_SIP_GET_SUSPEND_MODE, 0, 0);
1123 	if (res.a0 == 0) {
1124 		pr_warn("AT91: Secure PM: failed to get default mode\n");
1125 		return;
1126 	}
1127 
1128 	pr_info("AT91: Secure PM: using default suspend mode %s\n",
1129 		pm_modes[suspend_mode].pattern);
1130 
1131 	soc_pm.data.suspend_mode = res.a1;
1132 }
1133 static const struct of_device_id atmel_shdwc_ids[] = {
1134 	{ .compatible = "atmel,sama5d2-shdwc" },
1135 	{ .compatible = "microchip,sam9x60-shdwc" },
1136 	{ .compatible = "microchip,sama7g5-shdwc" },
1137 	{ /* sentinel. */ }
1138 };
1139 
1140 static const struct of_device_id gmac_ids[] __initconst = {
1141 	{ .compatible = "atmel,sama5d3-gem" },
1142 	{ .compatible = "atmel,sama5d2-gem" },
1143 	{ .compatible = "atmel,sama5d29-gem" },
1144 	{ .compatible = "microchip,sama7g5-gem" },
1145 	{ },
1146 };
1147 
1148 static const struct of_device_id emac_ids[] __initconst = {
1149 	{ .compatible = "atmel,sama5d3-macb" },
1150 	{ .compatible = "microchip,sama7g5-emac" },
1151 	{ },
1152 };
1153 
1154 /*
1155  * Replaces _mode_to_replace with a supported mode that doesn't depend
1156  * on the controller pointed to by _map_bitmask
1157  * @_maps: u32 array containing AT91_PM_IOMAP() flags, indexed by AT91
1158  * PM mode
1159  * @_map_bitmask: AT91_PM_IOMAP() bitmask; if _mode_to_replace depends on
1160  * the controller represented by _map_bitmask, _mode_to_replace needs to
1161  * be updated
1162  * @_mode_to_replace: standby_mode or suspend_mode that needs to be
1163  * updated
1164  * @_mode_to_check: standby_mode or suspend_mode; this is needed here
1165  * to avoid having standby_mode and suspend_mode set with the same AT91
1166  * PM mode
1167  */
1168 #define AT91_PM_REPLACE_MODE(_maps, _map_bitmask, _mode_to_replace,	\
1169 			     _mode_to_check)				\
1170 	do {								\
1171 		if (((_maps)[(_mode_to_replace)]) & (_map_bitmask)) {	\
1172 			int _mode_to_use, _mode_complementary;		\
1173 			/* Use ULP0 if it doesn't need _map_bitmask. */	\
1174 			if (!((_maps)[AT91_PM_ULP0] & (_map_bitmask))) {\
1175 				_mode_to_use = AT91_PM_ULP0;		\
1176 				_mode_complementary = AT91_PM_STANDBY;	\
1177 			} else {					\
1178 				_mode_to_use = AT91_PM_STANDBY;		\
1179 				_mode_complementary = AT91_PM_STANDBY;	\
1180 			}						\
1181 									\
1182 			if ((_mode_to_check) != _mode_to_use)		\
1183 				(_mode_to_replace) = _mode_to_use;	\
1184 			else						\
1185 				(_mode_to_replace) = _mode_complementary;\
1186 		}							\
1187 	} while (0)
1188 
1189 /*
1190  * Replaces standby and suspend modes with default supported modes:
1191  * ULP0 and STANDBY.
1192  * @_maps: u32 array indexed by AT91 PM mode containing AT91_PM_IOMAP()
1193  * flags
1194  * @_map: controller-specific name; standby and suspend modes need to be
1195  * replaced so that they do not depend on this controller
1196  */
1197 #define AT91_PM_REPLACE_MODES(_maps, _map)				\
1198 	do {								\
1199 		AT91_PM_REPLACE_MODE((_maps), BIT(AT91_PM_IOMAP_##_map),\
1200 				     (soc_pm.data.standby_mode),	\
1201 				     (soc_pm.data.suspend_mode));	\
1202 		AT91_PM_REPLACE_MODE((_maps), BIT(AT91_PM_IOMAP_##_map),\
1203 				     (soc_pm.data.suspend_mode),	\
1204 				     (soc_pm.data.standby_mode));	\
1205 	} while (0)
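/*
 * Illustrative example (values assumed for the sake of the sketch): if
 * maps[AT91_PM_BACKUP] contains AT91_PM_IOMAP(SFRBU) and the SFRBU cannot be
 * mapped, AT91_PM_REPLACE_MODES(maps, SFRBU) downgrades a BACKUP standby or
 * suspend mode to ULP0 or STANDBY, picking whichever does not collide with
 * the other configured mode.
 */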
1206 
1207 static int __init at91_pm_get_eth_clks(struct device_node *np,
1208 				       struct clk_bulk_data *clks)
1209 {
1210 	clks[AT91_PM_ETH_PCLK].clk = of_clk_get_by_name(np, "pclk");
1211 	if (IS_ERR(clks[AT91_PM_ETH_PCLK].clk))
1212 		return PTR_ERR(clks[AT91_PM_ETH_PCLK].clk);
1213 
1214 	clks[AT91_PM_ETH_HCLK].clk = of_clk_get_by_name(np, "hclk");
1215 	if (IS_ERR(clks[AT91_PM_ETH_HCLK].clk))
1216 		return PTR_ERR(clks[AT91_PM_ETH_HCLK].clk);
1217 
1218 	return 0;
1219 }
1220 
1221 static int __init at91_pm_eth_clks_empty(struct clk_bulk_data *clks)
1222 {
1223 	return IS_ERR(clks[AT91_PM_ETH_PCLK].clk) ||
1224 	       IS_ERR(clks[AT91_PM_ETH_HCLK].clk);
1225 }
1226 
1227 static void __init at91_pm_modes_init(const u32 *maps, int len)
1228 {
1229 	struct at91_pm_quirk_eth *gmac = &soc_pm.quirks.eth[AT91_PM_G_ETH];
1230 	struct at91_pm_quirk_eth *emac = &soc_pm.quirks.eth[AT91_PM_E_ETH];
1231 	struct device_node *np;
1232 	int ret;
1233 
1234 	ret = at91_pm_backup_init();
1235 	if (ret) {
1236 		if (soc_pm.data.standby_mode == AT91_PM_BACKUP)
1237 			soc_pm.data.standby_mode = AT91_PM_ULP0;
1238 		if (soc_pm.data.suspend_mode == AT91_PM_BACKUP)
1239 			soc_pm.data.suspend_mode = AT91_PM_ULP0;
1240 	}
1241 
1242 	if (maps[soc_pm.data.standby_mode] & AT91_PM_IOMAP(SHDWC) ||
1243 	    maps[soc_pm.data.suspend_mode] & AT91_PM_IOMAP(SHDWC)) {
1244 		np = of_find_matching_node(NULL, atmel_shdwc_ids);
1245 		if (!np) {
1246 			pr_warn("%s: failed to find shdwc!\n", __func__);
1247 			AT91_PM_REPLACE_MODES(maps, SHDWC);
1248 		} else {
1249 			soc_pm.data.shdwc = of_iomap(np, 0);
1250 			of_node_put(np);
1251 		}
1252 	}
1253 
1254 	if (maps[soc_pm.data.standby_mode] & AT91_PM_IOMAP(SFRBU) ||
1255 	    maps[soc_pm.data.suspend_mode] & AT91_PM_IOMAP(SFRBU)) {
1256 		np = of_find_compatible_node(NULL, NULL, "atmel,sama5d2-sfrbu");
1257 		if (!np) {
1258 			pr_warn("%s: failed to find sfrbu!\n", __func__);
1259 			AT91_PM_REPLACE_MODES(maps, SFRBU);
1260 		} else {
1261 			soc_pm.data.sfrbu = of_iomap(np, 0);
1262 			of_node_put(np);
1263 		}
1264 	}
1265 
1266 	if ((at91_is_pm_mode_active(AT91_PM_ULP1) ||
1267 	     at91_is_pm_mode_active(AT91_PM_ULP0) ||
1268 	     at91_is_pm_mode_active(AT91_PM_ULP0_FAST)) &&
1269 	    (maps[soc_pm.data.standby_mode] & AT91_PM_IOMAP(ETHC) ||
1270 	     maps[soc_pm.data.suspend_mode] & AT91_PM_IOMAP(ETHC))) {
1271 		np = of_find_matching_node(NULL, gmac_ids);
1272 		if (!np) {
1273 			np = of_find_matching_node(NULL, emac_ids);
1274 			if (np)
1275 				goto get_emac_clks;
1276 			AT91_PM_REPLACE_MODES(maps, ETHC);
1277 			goto unmap_unused_nodes;
1278 		} else {
1279 			gmac->np = np;
1280 			at91_pm_get_eth_clks(np, gmac->clks);
1281 		}
1282 
1283 		np = of_find_matching_node(NULL, emac_ids);
1284 		if (!np) {
1285 			if (at91_pm_eth_clks_empty(gmac->clks))
1286 				AT91_PM_REPLACE_MODES(maps, ETHC);
1287 		} else {
1288 get_emac_clks:
1289 			emac->np = np;
1290 			ret = at91_pm_get_eth_clks(np, emac->clks);
1291 			if (ret && at91_pm_eth_clks_empty(gmac->clks)) {
1292 				of_node_put(gmac->np);
1293 				of_node_put(emac->np);
1294 				gmac->np = NULL;
1295 				emac->np = NULL;
1296 			}
1297 		}
1298 	}
1299 
1300 unmap_unused_nodes:
1301 	/* Unmap all the regions that are no longer needed. */
1302 	if (soc_pm.data.shdwc &&
1303 	    !(maps[soc_pm.data.standby_mode] & AT91_PM_IOMAP(SHDWC) ||
1304 	      maps[soc_pm.data.suspend_mode] & AT91_PM_IOMAP(SHDWC))) {
1305 		iounmap(soc_pm.data.shdwc);
1306 		soc_pm.data.shdwc = NULL;
1307 	}
1308 
1309 	if (soc_pm.data.sfrbu &&
1310 	    !(maps[soc_pm.data.standby_mode] & AT91_PM_IOMAP(SFRBU) ||
1311 	      maps[soc_pm.data.suspend_mode] & AT91_PM_IOMAP(SFRBU))) {
1312 		iounmap(soc_pm.data.sfrbu);
1313 		soc_pm.data.sfrbu = NULL;
1314 	}
1315 
1316 	return;
1317 }
1318 
1319 struct pmc_info {
1320 	unsigned long uhp_udp_mask;
1321 	unsigned long mckr;
1322 	unsigned long version;
1323 };
1324 
1325 static const struct pmc_info pmc_infos[] __initconst = {
1326 	{
1327 		.uhp_udp_mask = AT91RM9200_PMC_UHP | AT91RM9200_PMC_UDP,
1328 		.mckr = 0x30,
1329 		.version = AT91_PMC_V1,
1330 	},
1331 
1332 	{
1333 		.uhp_udp_mask = AT91SAM926x_PMC_UHP | AT91SAM926x_PMC_UDP,
1334 		.mckr = 0x30,
1335 		.version = AT91_PMC_V1,
1336 	},
1337 	{
1338 		.uhp_udp_mask = AT91SAM926x_PMC_UHP,
1339 		.mckr = 0x30,
1340 		.version = AT91_PMC_V1,
1341 	},
1342 	{	.uhp_udp_mask = 0,
1343 		.mckr = 0x30,
1344 		.version = AT91_PMC_V1,
1345 	},
1346 	{
1347 		.uhp_udp_mask = AT91SAM926x_PMC_UHP | AT91SAM926x_PMC_UDP,
1348 		.mckr = 0x28,
1349 		.version = AT91_PMC_V2,
1350 	},
1351 	{
1352 		.mckr = 0x28,
1353 		.version = AT91_PMC_V2,
1354 	},
1355 
1356 };
1357 
1358 static const struct of_device_id atmel_pmc_ids[] __initconst = {
1359 	{ .compatible = "atmel,at91rm9200-pmc", .data = &pmc_infos[0] },
1360 	{ .compatible = "atmel,at91sam9260-pmc", .data = &pmc_infos[1] },
1361 	{ .compatible = "atmel,at91sam9261-pmc", .data = &pmc_infos[1] },
1362 	{ .compatible = "atmel,at91sam9263-pmc", .data = &pmc_infos[1] },
1363 	{ .compatible = "atmel,at91sam9g45-pmc", .data = &pmc_infos[2] },
1364 	{ .compatible = "atmel,at91sam9n12-pmc", .data = &pmc_infos[1] },
1365 	{ .compatible = "atmel,at91sam9rl-pmc", .data = &pmc_infos[3] },
1366 	{ .compatible = "atmel,at91sam9x5-pmc", .data = &pmc_infos[1] },
1367 	{ .compatible = "atmel,sama5d3-pmc", .data = &pmc_infos[1] },
1368 	{ .compatible = "atmel,sama5d4-pmc", .data = &pmc_infos[1] },
1369 	{ .compatible = "atmel,sama5d2-pmc", .data = &pmc_infos[1] },
1370 	{ .compatible = "microchip,sam9x60-pmc", .data = &pmc_infos[4] },
1371 	{ .compatible = "microchip,sama7g5-pmc", .data = &pmc_infos[5] },
1372 	{ /* sentinel */ },
1373 };
1374 
1375 static void __init at91_pm_modes_validate(const int *modes, int len)
1376 {
1377 	u8 i, standby = 0, suspend = 0;
1378 	int mode;
1379 
1380 	for (i = 0; i < len; i++) {
1381 		if (standby && suspend)
1382 			break;
1383 
1384 		if (modes[i] == soc_pm.data.standby_mode && !standby) {
1385 			standby = 1;
1386 			continue;
1387 		}
1388 
1389 		if (modes[i] == soc_pm.data.suspend_mode && !suspend) {
1390 			suspend = 1;
1391 			continue;
1392 		}
1393 	}
1394 
1395 	if (!standby) {
1396 		if (soc_pm.data.suspend_mode == AT91_PM_STANDBY)
1397 			mode = AT91_PM_ULP0;
1398 		else
1399 			mode = AT91_PM_STANDBY;
1400 
1401 		pr_warn("AT91: PM: %s mode not supported! Using %s.\n",
1402 			pm_modes[soc_pm.data.standby_mode].pattern,
1403 			pm_modes[mode].pattern);
1404 		soc_pm.data.standby_mode = mode;
1405 	}
1406 
1407 	if (!suspend) {
1408 		if (soc_pm.data.standby_mode == AT91_PM_ULP0)
1409 			mode = AT91_PM_STANDBY;
1410 		else
1411 			mode = AT91_PM_ULP0;
1412 
1413 		pr_warn("AT91: PM: %s mode not supported! Using %s.\n",
1414 			pm_modes[soc_pm.data.suspend_mode].pattern,
1415 			pm_modes[mode].pattern);
1416 		soc_pm.data.suspend_mode = mode;
1417 	}
1418 }
1419 
1420 static void __init at91_pm_init(void (*pm_idle)(void))
1421 {
1422 	struct device_node *pmc_np;
1423 	const struct of_device_id *of_id;
1424 	const struct pmc_info *pmc;
1425 
1426 	if (at91_cpuidle_device.dev.platform_data)
1427 		platform_device_register(&at91_cpuidle_device);
1428 
1429 	pmc_np = of_find_matching_node_and_match(NULL, atmel_pmc_ids, &of_id);
1430 	soc_pm.data.pmc = of_iomap(pmc_np, 0);
1431 	of_node_put(pmc_np);
1432 	if (!soc_pm.data.pmc) {
1433 		pr_err("AT91: PM not supported, PMC not found\n");
1434 		return;
1435 	}
1436 
1437 	pmc = of_id->data;
1438 	soc_pm.data.uhp_udp_mask = pmc->uhp_udp_mask;
1439 	soc_pm.data.pmc_mckr_offset = pmc->mckr;
1440 	soc_pm.data.pmc_version = pmc->version;
1441 
1442 	if (pm_idle)
1443 		arm_pm_idle = pm_idle;
1444 
1445 	at91_pm_sram_init();
1446 
1447 	if (at91_suspend_sram_fn) {
1448 		suspend_set_ops(&at91_pm_ops);
1449 		pr_info("AT91: PM: standby: %s, suspend: %s\n",
1450 			pm_modes[soc_pm.data.standby_mode].pattern,
1451 			pm_modes[soc_pm.data.suspend_mode].pattern);
1452 	} else {
1453 		pr_info("AT91: PM not supported, due to no SRAM allocated\n");
1454 	}
1455 }
1456 
1457 void __init at91rm9200_pm_init(void)
1458 {
1459 	int ret;
1460 
1461 	if (!IS_ENABLED(CONFIG_SOC_AT91RM9200))
1462 		return;
1463 
1464 	/*
1465 	 * Force STANDBY and ULP0 modes to avoid calling
1466 	 * at91_pm_modes_validate(), which may increase boot time.
1467 	 * The platform supports only STANDBY and ULP0 modes anyway.
1468 	 */
1469 	soc_pm.data.standby_mode = AT91_PM_STANDBY;
1470 	soc_pm.data.suspend_mode = AT91_PM_ULP0;
1471 
1472 	ret = at91_dt_ramc(false);
1473 	if (ret)
1474 		return;
1475 
1476 	/*
1477 	 * AT91RM9200 SDRAM low-power mode cannot be used with self-refresh.
1478 	 */
1479 	at91_ramc_write(0, AT91_MC_SDRAMC_LPR, 0);
1480 
1481 	at91_pm_init(at91rm9200_idle);
1482 }
1483 
1484 void __init sam9x60_pm_init(void)
1485 {
1486 	static const int modes[] __initconst = {
1487 		AT91_PM_STANDBY, AT91_PM_ULP0, AT91_PM_ULP0_FAST, AT91_PM_ULP1,
1488 	};
1489 	static const int iomaps[] __initconst = {
1490 		[AT91_PM_ULP1]		= AT91_PM_IOMAP(SHDWC),
1491 	};
1492 	int ret;
1493 
1494 	if (!IS_ENABLED(CONFIG_SOC_SAM9X60))
1495 		return;
1496 
1497 	at91_pm_modes_validate(modes, ARRAY_SIZE(modes));
1498 	at91_pm_modes_init(iomaps, ARRAY_SIZE(iomaps));
1499 	ret = at91_dt_ramc(false);
1500 	if (ret)
1501 		return;
1502 
1503 	at91_pm_init(NULL);
1504 
1505 	soc_pm.ws_ids = sam9x60_ws_ids;
1506 	soc_pm.config_pmc_ws = at91_sam9x60_config_pmc_ws;
1507 }
1508 
1509 void __init at91sam9_pm_init(void)
1510 {
1511 	int ret;
1512 
1513 	if (!IS_ENABLED(CONFIG_SOC_AT91SAM9))
1514 		return;
1515 
1516 	/*
1517 	 * Force STANDBY and ULP0 mode to avoid calling
1518 	 * at91_pm_modes_validate() which may increase booting time.
1519 	 * Platform supports anyway only STANDBY and ULP0 modes.
1520 	 */
1521 	soc_pm.data.standby_mode = AT91_PM_STANDBY;
1522 	soc_pm.data.suspend_mode = AT91_PM_ULP0;
1523 
1524 	ret = at91_dt_ramc(false);
1525 	if (ret)
1526 		return;
1527 
1528 	at91_pm_init(at91sam9_idle);
1529 }
1530 
1531 void __init sama5_pm_init(void)
1532 {
1533 	static const int modes[] __initconst = {
1534 		AT91_PM_STANDBY, AT91_PM_ULP0, AT91_PM_ULP0_FAST,
1535 	};
1536 	static const u32 iomaps[] __initconst = {
1537 		[AT91_PM_ULP0]		= AT91_PM_IOMAP(ETHC),
1538 		[AT91_PM_ULP0_FAST]	= AT91_PM_IOMAP(ETHC),
1539 	};
1540 	int ret;
1541 
1542 	if (!IS_ENABLED(CONFIG_SOC_SAMA5))
1543 		return;
1544 
1545 	at91_pm_modes_validate(modes, ARRAY_SIZE(modes));
1546 	at91_pm_modes_init(iomaps, ARRAY_SIZE(iomaps));
1547 	ret = at91_dt_ramc(false);
1548 	if (ret)
1549 		return;
1550 
1551 	at91_pm_init(NULL);
1552 
1553 	/* Quirks apply to ULP0, ULP0 fast and ULP1 modes. */
1554 	soc_pm.quirks.eth[AT91_PM_G_ETH].modes = BIT(AT91_PM_ULP0) |
1555 						 BIT(AT91_PM_ULP0_FAST) |
1556 						 BIT(AT91_PM_ULP1);
1557 	/* Do not suspend in ULP0, ULP0 fast if GETH is the only wakeup source. */
1558 	soc_pm.quirks.eth[AT91_PM_G_ETH].dns_modes = BIT(AT91_PM_ULP0) |
1559 						     BIT(AT91_PM_ULP0_FAST);
1560 }
1561 
1562 void __init sama5d2_pm_init(void)
1563 {
1564 	static const int modes[] __initconst = {
1565 		AT91_PM_STANDBY, AT91_PM_ULP0, AT91_PM_ULP0_FAST, AT91_PM_ULP1,
1566 		AT91_PM_BACKUP,
1567 	};
1568 	static const u32 iomaps[] __initconst = {
1569 		[AT91_PM_ULP0]		= AT91_PM_IOMAP(ETHC),
1570 		[AT91_PM_ULP0_FAST]	= AT91_PM_IOMAP(ETHC),
1571 		[AT91_PM_ULP1]		= AT91_PM_IOMAP(SHDWC) |
1572 					  AT91_PM_IOMAP(ETHC),
1573 		[AT91_PM_BACKUP]	= AT91_PM_IOMAP(SHDWC) |
1574 					  AT91_PM_IOMAP(SFRBU),
1575 	};
1576 	int ret;
1577 
1578 	if (!IS_ENABLED(CONFIG_SOC_SAMA5D2))
1579 		return;
1580 
1581 	if (IS_ENABLED(CONFIG_ATMEL_SECURE_PM)) {
1582 		pr_warn("AT91: Secure PM: ignoring standby mode\n");
1583 		at91_pm_secure_init();
1584 		return;
1585 	}
1586 
1587 	at91_pm_modes_validate(modes, ARRAY_SIZE(modes));
1588 	at91_pm_modes_init(iomaps, ARRAY_SIZE(iomaps));
1589 	ret = at91_dt_ramc(false);
1590 	if (ret)
1591 		return;
1592 
1593 	at91_pm_init(NULL);
1594 
1595 	soc_pm.ws_ids = sama5d2_ws_ids;
1596 	soc_pm.config_shdwc_ws = at91_sama5d2_config_shdwc_ws;
1597 	soc_pm.config_pmc_ws = at91_sama5d2_config_pmc_ws;
1598 
1599 	soc_pm.sfrbu_regs.pswbu.key = (0x4BD20C << 8);
1600 	soc_pm.sfrbu_regs.pswbu.ctrl = BIT(0);
1601 	soc_pm.sfrbu_regs.pswbu.softsw = BIT(1);
1602 	soc_pm.sfrbu_regs.pswbu.state = BIT(3);
1603 
1604 	/* Quirk applies to ULP0, ULP0 fast and ULP1 modes. */
1605 	soc_pm.quirks.eth[AT91_PM_G_ETH].modes = BIT(AT91_PM_ULP0) |
1606 						 BIT(AT91_PM_ULP0_FAST) |
1607 						 BIT(AT91_PM_ULP1);
1608 	/*
1609 	 * Do not suspend in ULP0, ULP0 fast if GETH is the only wakeup
1610 	 * source.
1611 	 */
1612 	soc_pm.quirks.eth[AT91_PM_G_ETH].dns_modes = BIT(AT91_PM_ULP0) |
1613 						     BIT(AT91_PM_ULP0_FAST);
1614 }
1615 
1616 void __init sama7_pm_init(void)
1617 {
1618 	static const int modes[] __initconst = {
1619 		AT91_PM_STANDBY, AT91_PM_ULP0, AT91_PM_ULP1, AT91_PM_BACKUP,
1620 	};
1621 	static const u32 iomaps[] __initconst = {
1622 		[AT91_PM_ULP0]		= AT91_PM_IOMAP(SFRBU),
1623 		[AT91_PM_ULP1]		= AT91_PM_IOMAP(SFRBU) |
1624 					  AT91_PM_IOMAP(SHDWC) |
1625 					  AT91_PM_IOMAP(ETHC),
1626 		[AT91_PM_BACKUP]	= AT91_PM_IOMAP(SFRBU) |
1627 					  AT91_PM_IOMAP(SHDWC),
1628 	};
1629 	int ret;
1630 
1631 	if (!IS_ENABLED(CONFIG_SOC_SAMA7))
1632 		return;
1633 
1634 	at91_pm_modes_validate(modes, ARRAY_SIZE(modes));
1635 
1636 	ret = at91_dt_ramc(true);
1637 	if (ret)
1638 		return;
1639 
1640 	at91_pm_modes_init(iomaps, ARRAY_SIZE(iomaps));
1641 	at91_pm_init(NULL);
1642 
1643 	soc_pm.ws_ids = sama7g5_ws_ids;
1644 	soc_pm.config_pmc_ws = at91_sam9x60_config_pmc_ws;
1645 
1646 	soc_pm.sfrbu_regs.pswbu.key = (0x4BD20C << 8);
1647 	soc_pm.sfrbu_regs.pswbu.ctrl = BIT(0);
1648 	soc_pm.sfrbu_regs.pswbu.softsw = BIT(1);
1649 	soc_pm.sfrbu_regs.pswbu.state = BIT(2);
1650 
1651 	/* Quirks apply to ULP1 for both Ethernet interfaces. */
1652 	soc_pm.quirks.eth[AT91_PM_E_ETH].modes = BIT(AT91_PM_ULP1);
1653 	soc_pm.quirks.eth[AT91_PM_G_ETH].modes = BIT(AT91_PM_ULP1);
1654 }
1655 
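/*
 * Example (assumed kernel command line): "atmel.pm_modes=standby,ulp1"
 * selects STANDBY as the standby mode and ULP1 as the suspend mode; the
 * accepted keywords are those listed in pm_modes[] above.
 */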
1656 static int __init at91_pm_modes_select(char *str)
1657 {
1658 	char *s;
1659 	substring_t args[MAX_OPT_ARGS];
1660 	int standby, suspend;
1661 
1662 	if (!str)
1663 		return 0;
1664 
1665 	s = strsep(&str, ",");
1666 	standby = match_token(s, pm_modes, args);
1667 	if (standby < 0)
1668 		return 0;
1669 
1670 	suspend = match_token(str, pm_modes, args);
1671 	if (suspend < 0)
1672 		return 0;
1673 
1674 	soc_pm.data.standby_mode = standby;
1675 	soc_pm.data.suspend_mode = suspend;
1676 
1677 	return 0;
1678 }
1679 early_param("atmel.pm_modes", at91_pm_modes_select);
1680