xref: /openbmc/linux/arch/arm/mach-at91/pm.c (revision d4b564a5)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * arch/arm/mach-at91/pm.c
4  * AT91 Power Management
5  *
6  * Copyright (C) 2005 David Brownell
7  */
8 
9 #include <linux/genalloc.h>
10 #include <linux/io.h>
11 #include <linux/of_address.h>
12 #include <linux/of.h>
13 #include <linux/of_fdt.h>
14 #include <linux/of_platform.h>
15 #include <linux/platform_device.h>
16 #include <linux/parser.h>
17 #include <linux/suspend.h>
18 
19 #include <linux/clk.h>
20 #include <linux/clk/at91_pmc.h>
21 #include <linux/platform_data/atmel.h>
22 
23 #include <asm/cacheflush.h>
24 #include <asm/fncpy.h>
25 #include <asm/system_misc.h>
26 #include <asm/suspend.h>
27 
28 #include "generic.h"
29 #include "pm.h"
30 #include "sam_secure.h"
31 
32 #define BACKUP_DDR_PHY_CALIBRATION	(9)
33 
34 /**
35  * struct at91_pm_bu - AT91 power management backup unit data structure
36  * @suspended: true if suspended to backup mode
37  * @reserved: reserved
38  * @canary: canary data for memory checking after exit from backup mode
39  * @resume: resume API
40  * @ddr_phy_calibration: DDR PHY calibration data: ZQ0CR0, first 8 words
41  * of the memory
42  */
43 struct at91_pm_bu {
44 	int suspended;
45 	unsigned long reserved;
46 	phys_addr_t canary;
47 	phys_addr_t resume;
48 	unsigned long ddr_phy_calibration[BACKUP_DDR_PHY_CALIBRATION];
49 };
50 
51 /**
52  * struct at91_pm_sfrbu_regs - registers mapping for SFRBU
53  * @pswbu: power switch BU control registers
54  */
55 struct at91_pm_sfrbu_regs {
56 	struct {
57 		u32 key;
58 		u32 ctrl;
59 		u32 state;
60 		u32 softsw;
61 	} pswbu;
62 };
63 
64 /**
65  * enum at91_pm_eth_clk - Ethernet clock indexes
66  * @AT91_PM_ETH_PCLK: pclk index
67  * @AT91_PM_ETH_HCLK: hclk index
68  * @AT91_PM_ETH_MAX_CLK: max index
69  */
70 enum at91_pm_eth_clk {
71 	AT91_PM_ETH_PCLK,
72 	AT91_PM_ETH_HCLK,
73 	AT91_PM_ETH_MAX_CLK,
74 };
75 
76 /**
77  * enum at91_pm_eth - Ethernet controller indexes
78  * @AT91_PM_G_ETH: gigabit Ethernet controller index
79  * @AT91_PM_E_ETH: megabit Ethernet controller index
80  * @AT91_PM_MAX_ETH: max index
81  */
82 enum at91_pm_eth {
83 	AT91_PM_G_ETH,
84 	AT91_PM_E_ETH,
85 	AT91_PM_MAX_ETH,
86 };
87 
88 /**
89  * struct at91_pm_quirk_eth - AT91 PM Ethernet quirks
90  * @dev: Ethernet device
91  * @np: Ethernet device node
92  * @clks: Ethernet clocks
93  * @modes: power management modes that this quirk applies to
94  * @dns_modes: do-not-suspend modes: abort the suspend if Ethernet is
95  *	       configured as a wakeup source but is buggy in these modes and
96  *	       no other wakeup source is available
97  */
98 struct at91_pm_quirk_eth {
99 	struct device *dev;
100 	struct device_node *np;
101 	struct clk_bulk_data clks[AT91_PM_ETH_MAX_CLK];
102 	u32 modes;
103 	u32 dns_modes;
104 };
105 
106 /**
107  * struct at91_pm_quirks - AT91 PM quirks
108  * @eth: Ethernet quirks
109  */
110 struct at91_pm_quirks {
111 	struct at91_pm_quirk_eth eth[AT91_PM_MAX_ETH];
112 };
113 
114 /**
115  * struct at91_soc_pm - AT91 SoC power management data structure
116  * @config_shdwc_ws: wakeup sources configuration function for SHDWC
117  * @config_pmc_ws: wakeup sources configuration function for PMC
118  * @ws_ids: wakeup sources of_device_id array
119  * @bu: backup unit mapped data (for backup mode)
120  * @quirks: PM quirks
121  * @data: PM data to be used on last phase of suspend
122  * @sfrbu_regs: SFRBU registers mapping
123  * @memcs: memory chip select
124  */
125 struct at91_soc_pm {
126 	int (*config_shdwc_ws)(void __iomem *shdwc, u32 *mode, u32 *polarity);
127 	int (*config_pmc_ws)(void __iomem *pmc, u32 mode, u32 polarity);
128 	const struct of_device_id *ws_ids;
129 	struct at91_pm_bu *bu;
130 	struct at91_pm_quirks quirks;
131 	struct at91_pm_data data;
132 	struct at91_pm_sfrbu_regs sfrbu_regs;
133 	void *memcs;
134 };
135 
136 /**
136  * enum at91_pm_iomaps - IOs that need to be mapped for different PM modes
138  * @AT91_PM_IOMAP_SHDWC:	SHDWC controller
139  * @AT91_PM_IOMAP_SFRBU:	SFRBU controller
140  * @AT91_PM_IOMAP_ETHC:		Ethernet controller
141  */
142 enum at91_pm_iomaps {
143 	AT91_PM_IOMAP_SHDWC,
144 	AT91_PM_IOMAP_SFRBU,
145 	AT91_PM_IOMAP_ETHC,
146 };
147 
148 #define AT91_PM_IOMAP(name)	BIT(AT91_PM_IOMAP_##name)
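/*
 * Example: AT91_PM_IOMAP(SHDWC) expands to BIT(AT91_PM_IOMAP_SHDWC), i.e.
 * BIT(0); the per-mode iomaps[] arrays passed to at91_pm_modes_init() are
 * bitmasks of the controllers that each mode needs mapped.
 */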
149 
150 static struct at91_soc_pm soc_pm = {
151 	.data = {
152 		.standby_mode = AT91_PM_STANDBY,
153 		.suspend_mode = AT91_PM_ULP0,
154 	},
155 };
156 
157 static const match_table_t pm_modes __initconst = {
158 	{ AT91_PM_STANDBY,	"standby" },
159 	{ AT91_PM_ULP0,		"ulp0" },
160 	{ AT91_PM_ULP0_FAST,    "ulp0-fast" },
161 	{ AT91_PM_ULP1,		"ulp1" },
162 	{ AT91_PM_BACKUP,	"backup" },
163 	{ -1, NULL },
164 };
165 
166 #define at91_ramc_read(id, field) \
167 	__raw_readl(soc_pm.data.ramc[id] + field)
168 
169 #define at91_ramc_write(id, field, value) \
170 	__raw_writel(value, soc_pm.data.ramc[id] + field)
171 
172 static int at91_pm_valid_state(suspend_state_t state)
173 {
174 	switch (state) {
175 	case PM_SUSPEND_ON:
176 	case PM_SUSPEND_STANDBY:
177 	case PM_SUSPEND_MEM:
178 		return 1;
179 
180 	default:
181 		return 0;
182 	}
183 }
184 
185 static int canary = 0xA5A5A5A5;
186 
187 struct wakeup_source_info {
188 	unsigned int pmc_fsmr_bit;
189 	unsigned int shdwc_mr_bit;
190 	bool set_polarity;
191 };
192 
193 static const struct wakeup_source_info ws_info[] = {
194 	{ .pmc_fsmr_bit = AT91_PMC_FSTT(10),	.set_polarity = true },
195 	{ .pmc_fsmr_bit = AT91_PMC_RTCAL,	.shdwc_mr_bit = BIT(17) },
196 	{ .pmc_fsmr_bit = AT91_PMC_USBAL },
197 	{ .pmc_fsmr_bit = AT91_PMC_SDMMC_CD },
198 	{ .pmc_fsmr_bit = AT91_PMC_RTTAL },
199 	{ .pmc_fsmr_bit = AT91_PMC_RXLP_MCE },
200 };
201 
202 static const struct of_device_id sama5d2_ws_ids[] = {
203 	{ .compatible = "atmel,sama5d2-gem",		.data = &ws_info[0] },
204 	{ .compatible = "atmel,sama5d2-rtc",		.data = &ws_info[1] },
205 	{ .compatible = "atmel,sama5d3-udc",		.data = &ws_info[2] },
206 	{ .compatible = "atmel,at91rm9200-ohci",	.data = &ws_info[2] },
207 	{ .compatible = "usb-ohci",			.data = &ws_info[2] },
208 	{ .compatible = "atmel,at91sam9g45-ehci",	.data = &ws_info[2] },
209 	{ .compatible = "usb-ehci",			.data = &ws_info[2] },
210 	{ .compatible = "atmel,sama5d2-sdhci",		.data = &ws_info[3] },
211 	{ /* sentinel */ }
212 };
213 
214 static const struct of_device_id sam9x60_ws_ids[] = {
215 	{ .compatible = "microchip,sam9x60-rtc",	.data = &ws_info[1] },
216 	{ .compatible = "atmel,at91rm9200-ohci",	.data = &ws_info[2] },
217 	{ .compatible = "usb-ohci",			.data = &ws_info[2] },
218 	{ .compatible = "atmel,at91sam9g45-ehci",	.data = &ws_info[2] },
219 	{ .compatible = "usb-ehci",			.data = &ws_info[2] },
220 	{ .compatible = "microchip,sam9x60-rtt",	.data = &ws_info[4] },
221 	{ .compatible = "cdns,sam9x60-macb",		.data = &ws_info[5] },
222 	{ /* sentinel */ }
223 };
224 
225 static const struct of_device_id sama7g5_ws_ids[] = {
226 	{ .compatible = "microchip,sama7g5-rtc",	.data = &ws_info[1] },
227 	{ .compatible = "microchip,sama7g5-ohci",	.data = &ws_info[2] },
228 	{ .compatible = "usb-ohci",			.data = &ws_info[2] },
229 	{ .compatible = "atmel,at91sam9g45-ehci",	.data = &ws_info[2] },
230 	{ .compatible = "usb-ehci",			.data = &ws_info[2] },
231 	{ .compatible = "microchip,sama7g5-sdhci",	.data = &ws_info[3] },
232 	{ .compatible = "microchip,sama7g5-rtt",	.data = &ws_info[4] },
233 	{ /* sentinel */ }
234 };
235 
236 static int at91_pm_config_ws(unsigned int pm_mode, bool set)
237 {
238 	const struct wakeup_source_info *wsi;
239 	const struct of_device_id *match;
240 	struct platform_device *pdev;
241 	struct device_node *np;
242 	unsigned int mode = 0, polarity = 0, val = 0;
243 
244 	if (pm_mode != AT91_PM_ULP1)
245 		return 0;
246 
247 	if (!soc_pm.data.pmc || !soc_pm.data.shdwc || !soc_pm.ws_ids)
248 		return -EPERM;
249 
250 	if (!set) {
251 		writel(mode, soc_pm.data.pmc + AT91_PMC_FSMR);
252 		return 0;
253 	}
254 
255 	if (soc_pm.config_shdwc_ws)
256 		soc_pm.config_shdwc_ws(soc_pm.data.shdwc, &mode, &polarity);
257 
258 	/* SHDWC.MR */
259 	val = readl(soc_pm.data.shdwc + 0x04);
260 
261 	/* Loop through defined wakeup sources. */
262 	for_each_matching_node_and_match(np, soc_pm.ws_ids, &match) {
263 		pdev = of_find_device_by_node(np);
264 		if (!pdev)
265 			continue;
266 
267 		if (device_may_wakeup(&pdev->dev)) {
268 			wsi = match->data;
269 
270 			/* Check if enabled on SHDWC. */
271 			if (wsi->shdwc_mr_bit && !(val & wsi->shdwc_mr_bit))
272 				goto put_device;
273 
274 			mode |= wsi->pmc_fsmr_bit;
275 			if (wsi->set_polarity)
276 				polarity |= wsi->pmc_fsmr_bit;
277 		}
278 
279 put_device:
280 		put_device(&pdev->dev);
281 	}
282 
283 	if (mode) {
284 		if (soc_pm.config_pmc_ws)
285 			soc_pm.config_pmc_ws(soc_pm.data.pmc, mode, polarity);
286 	} else {
287 		pr_err("AT91: PM: no ULP1 wakeup sources found!\n");
288 	}
289 
290 	return mode ? 0 : -EPERM;
291 }
292 
293 static int at91_sama5d2_config_shdwc_ws(void __iomem *shdwc, u32 *mode,
294 					u32 *polarity)
295 {
296 	u32 val;
297 
298 	/* SHDWC.WUIR */
299 	val = readl(shdwc + 0x0c);
300 	*mode |= (val & 0x3ff);
301 	*polarity |= ((val >> 16) & 0x3ff);
302 
303 	return 0;
304 }
305 
306 static int at91_sama5d2_config_pmc_ws(void __iomem *pmc, u32 mode, u32 polarity)
307 {
308 	writel(mode, pmc + AT91_PMC_FSMR);
309 	writel(polarity, pmc + AT91_PMC_FSPR);
310 
311 	return 0;
312 }
313 
314 static int at91_sam9x60_config_pmc_ws(void __iomem *pmc, u32 mode, u32 polarity)
315 {
316 	writel(mode, pmc + AT91_PMC_FSMR);
317 
318 	return 0;
319 }
320 
321 static bool at91_pm_eth_quirk_is_valid(struct at91_pm_quirk_eth *eth)
322 {
323 	struct platform_device *pdev;
324 
325 	/* Interface NA in DT. */
326 	if (!eth->np)
327 		return false;
328 
329 	/* No quirks for this interface and current suspend mode. */
330 	if (!(eth->modes & BIT(soc_pm.data.mode)))
331 		return false;
332 
333 	if (!eth->dev) {
334 		/* Driver not probed. */
335 		pdev = of_find_device_by_node(eth->np);
336 		if (!pdev)
337 			return false;
338 		/* put_device(eth->dev) is called at the end of suspend. */
339 		eth->dev = &pdev->dev;
340 	}
341 
342 	/* No quirks if device isn't a wakeup source. */
343 	if (!device_may_wakeup(eth->dev))
344 		return false;
345 
346 	return true;
347 }
348 
349 static int at91_pm_config_quirks(bool suspend)
350 {
351 	struct at91_pm_quirk_eth *eth;
352 	int i, j, ret, tmp;
353 
354 	/*
355 	 * Ethernet IPs whose device_node pointers are stored in
356 	 * soc_pm.quirks.eth[].np cannot handle WoL packets while in ULP0,
357 	 * ULP1 or both, due to a hardware bug. If they receive WoL packets
358 	 * while in ULP0 or ULP1, the IPs (or even the whole system) could
359 	 * stop working. We cannot handle this scenario in the Ethernet driver
360 	 * itself, as the driver is shared between multiple vendors, and only
361 	 * here, in this file, do we know whether we suspend to ULP0 or ULP1.
362 	 * Thus handle these scenarios here, as quirks.
363 	 */
364 	for (i = 0; i < AT91_PM_MAX_ETH; i++) {
365 		eth = &soc_pm.quirks.eth[i];
366 
367 		if (!at91_pm_eth_quirk_is_valid(eth))
368 			continue;
369 
370 		/*
371 		 * For modes in the dns_modes mask the system hangs if the
372 		 * quirk is not applied, but if it is applied the interface
373 		 * does not react to WoL events. Thus take care to avoid
374 		 * suspending if this interface is the only configured wakeup source.
375 		 */
376 		if (suspend && eth->dns_modes & BIT(soc_pm.data.mode)) {
377 			int ws_count = 0;
378 #ifdef CONFIG_PM_SLEEP
379 			struct wakeup_source *ws;
380 
381 			for_each_wakeup_source(ws) {
382 				if (ws->dev == eth->dev)
383 					continue;
384 
385 				ws_count++;
386 				break;
387 			}
388 #endif
389 
390 			/*
391 			 * Checking !ws_count is enough for all affected
392 			 * platforms, even when both G_ETH and E_ETH are
393 			 * available, as dns_modes is populated only for G_ETH.
394 			 */
395 			if (!ws_count) {
396 				pr_err("AT91: PM: Ethernet cannot resume from WoL!\n");
397 				ret = -EPERM;
398 				put_device(eth->dev);
399 				eth->dev = NULL;
400 				/* No need to revert clock settings for this eth. */
401 				i--;
402 				goto clk_unconfigure;
403 			}
404 		}
405 
406 		if (suspend) {
407 			clk_bulk_disable_unprepare(AT91_PM_ETH_MAX_CLK, eth->clks);
408 		} else {
409 			ret = clk_bulk_prepare_enable(AT91_PM_ETH_MAX_CLK,
410 						      eth->clks);
411 			if (ret)
412 				goto clk_unconfigure;
413 			/*
414 			 * Release the reference to eth->dev taken in
415 			 * at91_pm_eth_quirk_is_valid().
416 			 */
417 			put_device(eth->dev);
418 			eth->dev = NULL;
419 		}
420 	}
421 
422 	return 0;
423 
424 clk_unconfigure:
425 	/*
426 	 * In case of resume we reach this point if clk_bulk_prepare_enable()
427 	 * failed; we don't want to revert the previous clk_bulk_prepare_enable()
428 	 * calls done for the other IPs.
429 	 */
430 	for (j = i; j >= 0; j--) {
431 		eth = &soc_pm.quirks.eth[j];
432 		if (suspend) {
433 			if (!at91_pm_eth_quirk_is_valid(eth))
434 				continue;
435 
436 			tmp = clk_bulk_prepare_enable(AT91_PM_ETH_MAX_CLK, eth->clks);
437 			if (tmp) {
438 				pr_err("AT91: PM: failed to enable %s clocks\n",
439 				       j == AT91_PM_G_ETH ? "geth" : "eth");
440 			}
441 		}
442 
443 		/*
444 		 * Release the reference to eth->dev taken in
445 		 * at91_pm_eth_quirk_is_valid().
446 		 */
447 		put_device(eth->dev);
448 		eth->dev = NULL;
449 	}
450 
451 	return ret;
452 }
453 
454 /*
455  * Called after processes are frozen, but before we shutdown devices.
456  */
457 static int at91_pm_begin(suspend_state_t state)
458 {
459 	int ret;
460 
461 	switch (state) {
462 	case PM_SUSPEND_MEM:
463 		soc_pm.data.mode = soc_pm.data.suspend_mode;
464 		break;
465 
466 	case PM_SUSPEND_STANDBY:
467 		soc_pm.data.mode = soc_pm.data.standby_mode;
468 		break;
469 
470 	default:
471 		soc_pm.data.mode = -1;
472 	}
473 
474 	ret = at91_pm_config_ws(soc_pm.data.mode, true);
475 	if (ret)
476 		return ret;
477 
478 	if (soc_pm.data.mode == AT91_PM_BACKUP)
479 		soc_pm.bu->suspended = 1;
480 	else if (soc_pm.bu)
481 		soc_pm.bu->suspended = 0;
482 
483 	return 0;
484 }
485 
486 /*
487  * Verify that all the clocks are correct before entering
488  * slow-clock mode.
489  */
490 static int at91_pm_verify_clocks(void)
491 {
492 	unsigned long scsr;
493 	int i;
494 
495 	scsr = readl(soc_pm.data.pmc + AT91_PMC_SCSR);
496 
497 	/* USB must not be using PLLB */
498 	if ((scsr & soc_pm.data.uhp_udp_mask) != 0) {
499 		pr_err("AT91: PM - Suspend-to-RAM with USB still active\n");
500 		return 0;
501 	}
502 
503 	/* PCK0..PCK3 must be disabled, or configured to use clk32k */
504 	for (i = 0; i < 4; i++) {
505 		u32 css;
506 
507 		if ((scsr & (AT91_PMC_PCK0 << i)) == 0)
508 			continue;
509 		css = readl(soc_pm.data.pmc + AT91_PMC_PCKR(i)) & AT91_PMC_CSS;
510 		if (css != AT91_PMC_CSS_SLOW) {
511 			pr_err("AT91: PM - Suspend-to-RAM with PCK%d src %d\n", i, css);
512 			return 0;
513 		}
514 	}
515 
516 	return 1;
517 }
518 
519 /*
520  * Call this from platform driver suspend() to see how deeply to suspend.
521  * For example, some controllers (like OHCI) need one of the PLL clocks
522  * in order to act as a wakeup source, and those are not available when
523  * going into slow clock mode.
524  *
525  * REVISIT: generalize as clk_will_be_available(clk)?  Other platforms have
526  * the very same problem (but not using at91 main_clk), and it'd be better
527  * to add one generic API rather than lots of platform-specific ones.
528  */
529 int at91_suspend_entering_slow_clock(void)
530 {
531 	return (soc_pm.data.mode >= AT91_PM_ULP0);
532 }
533 EXPORT_SYMBOL(at91_suspend_entering_slow_clock);
534 
535 static void (*at91_suspend_sram_fn)(struct at91_pm_data *);
536 extern void at91_pm_suspend_in_sram(struct at91_pm_data *pm_data);
537 extern u32 at91_pm_suspend_in_sram_sz;
538 
539 static int at91_suspend_finish(unsigned long val)
540 {
541 	unsigned char modified_gray_code[] = {
542 		0x00, 0x01, 0x02, 0x03, 0x06, 0x07, 0x04, 0x05, 0x0c, 0x0d,
543 		0x0e, 0x0f, 0x0a, 0x0b, 0x08, 0x09, 0x18, 0x19, 0x1a, 0x1b,
544 		0x1e, 0x1f, 0x1c, 0x1d, 0x14, 0x15, 0x16, 0x17, 0x12, 0x13,
545 		0x10, 0x11,
546 	};
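	/*
	 * Lookup table indexed by a 5-bit impedance code read from ZQ0SR0;
	 * the translated values are what get saved for the bootloader to
	 * program back (see the backup-mode branch below).
	 */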
547 	unsigned int tmp, index;
548 	int i;
549 
550 	if (soc_pm.data.mode == AT91_PM_BACKUP && soc_pm.data.ramc_phy) {
551 		/*
552 		 * The bootloader will perform DDR recalibration and will try
553 		 * to restore ZQ0SR0 with the value saved here. But the
554 		 * calibration is buggy and restoring some values from ZQ0SR0
555 		 * is forbidden and risky, thus we need to provide processed
556 		 * values for these (modified gray code values).
557 		 */
558 		tmp = readl(soc_pm.data.ramc_phy + DDR3PHY_ZQ0SR0);
559 
560 		/* Store pull-down output impedance select. */
561 		index = (tmp >> DDR3PHY_ZQ0SR0_PDO_OFF) & 0x1f;
562 		soc_pm.bu->ddr_phy_calibration[0] = modified_gray_code[index];
563 
564 		/* Store pull-up output impedance select. */
565 		index = (tmp >> DDR3PHY_ZQ0SR0_PUO_OFF) & 0x1f;
566 		soc_pm.bu->ddr_phy_calibration[0] |= modified_gray_code[index];
567 
568 		/* Store pull-down on-die termination impedance select. */
569 		index = (tmp >> DDR3PHY_ZQ0SR0_PDODT_OFF) & 0x1f;
570 		soc_pm.bu->ddr_phy_calibration[0] |= modified_gray_code[index];
571 
572 		/* Store pull-up on-die termination impedance select. */
573 		index = (tmp >> DDR3PHY_ZQ0SRO_PUODT_OFF) & 0x1f;
574 		soc_pm.bu->ddr_phy_calibration[0] |= modified_gray_code[index];
575 
576 		/*
577 		 * The first 8 words of memory might get corrupted during DDR
578 		 * PHY recalibration; they are saved here in securam and will
579 		 * be restored later, after recalibration, by the bootloader.
580 		 */
581 		for (i = 1; i < BACKUP_DDR_PHY_CALIBRATION; i++)
582 			soc_pm.bu->ddr_phy_calibration[i] =
583 				*((unsigned int *)soc_pm.memcs + (i - 1));
584 	}
585 
586 	flush_cache_all();
587 	outer_disable();
588 
589 	at91_suspend_sram_fn(&soc_pm.data);
590 
591 	return 0;
592 }
593 
594 static void at91_pm_switch_ba_to_vbat(void)
595 {
596 	unsigned int offset = offsetof(struct at91_pm_sfrbu_regs, pswbu);
597 	unsigned int val;
598 
599 	/* Just for safety. */
600 	if (!soc_pm.data.sfrbu)
601 		return;
602 
603 	val = readl(soc_pm.data.sfrbu + offset);
604 
605 	/* Already on VBAT. */
606 	if (!(val & soc_pm.sfrbu_regs.pswbu.state))
607 		return;
608 
609 	val &= ~soc_pm.sfrbu_regs.pswbu.softsw;
610 	val |= soc_pm.sfrbu_regs.pswbu.key | soc_pm.sfrbu_regs.pswbu.ctrl;
611 	writel(val, soc_pm.data.sfrbu + offset);
612 
613 	/* Wait for update. */
614 	val = readl(soc_pm.data.sfrbu + offset);
615 	while (val & soc_pm.sfrbu_regs.pswbu.state)
616 		val = readl(soc_pm.data.sfrbu + offset);
617 }
618 
619 static void at91_pm_suspend(suspend_state_t state)
620 {
621 	if (soc_pm.data.mode == AT91_PM_BACKUP) {
622 		at91_pm_switch_ba_to_vbat();
623 
624 		cpu_suspend(0, at91_suspend_finish);
625 
626 		/* The SRAM is lost between suspend cycles */
627 		at91_suspend_sram_fn = fncpy(at91_suspend_sram_fn,
628 					     &at91_pm_suspend_in_sram,
629 					     at91_pm_suspend_in_sram_sz);
630 	} else {
631 		at91_suspend_finish(0);
632 	}
633 
634 	outer_resume();
635 }
636 
637 /*
638  * STANDBY mode has *all* drivers suspended; ignores irqs not marked as 'wakeup'
639  * event sources; and reduces DRAM power.  But otherwise it's identical to
640  * PM_SUSPEND_ON: cpu idle, and nothing fancy done with main or cpu clocks.
641  *
642  * AT91_PM_ULP0 is like STANDBY plus slow clock mode: drivers must suspend
643  * more deeply, the master clock switches to clk32k and the main oscillator
644  * is turned off.
645  *
646  * AT91_PM_BACKUP turns off the whole SoC after placing the DDR in self-refresh.
647  */
648 static int at91_pm_enter(suspend_state_t state)
649 {
650 	int ret;
651 
652 	ret = at91_pm_config_quirks(true);
653 	if (ret)
654 		return ret;
655 
656 	switch (state) {
657 	case PM_SUSPEND_MEM:
658 	case PM_SUSPEND_STANDBY:
659 		/*
660 		 * Ensure that clocks are in a valid state.
661 		 */
662 		if (soc_pm.data.mode >= AT91_PM_ULP0 &&
663 		    !at91_pm_verify_clocks())
664 			goto error;
665 
666 		at91_pm_suspend(state);
667 
668 		break;
669 
670 	case PM_SUSPEND_ON:
671 		cpu_do_idle();
672 		break;
673 
674 	default:
675 		pr_debug("AT91: PM - bogus suspend state %d\n", state);
676 		goto error;
677 	}
678 
679 error:
680 	at91_pm_config_quirks(false);
681 	return 0;
682 }
683 
684 /*
685  * Called right prior to thawing processes.
686  */
687 static void at91_pm_end(void)
688 {
689 	at91_pm_config_ws(soc_pm.data.mode, false);
690 }
691 
692 
693 static const struct platform_suspend_ops at91_pm_ops = {
694 	.valid	= at91_pm_valid_state,
695 	.begin	= at91_pm_begin,
696 	.enter	= at91_pm_enter,
697 	.end	= at91_pm_end,
698 };
699 
700 static struct platform_device at91_cpuidle_device = {
701 	.name = "cpuidle-at91",
702 };
703 
704 /*
705  * The AT91RM9200 goes into self-refresh mode with this command, and will
706  * terminate self-refresh automatically on the next SDRAM access.
707  *
708  * Self-refresh mode is exited as soon as a memory access is made, but we don't
709  * know for sure when that happens. However, we need to restore the low-power
710  * mode if it was enabled before going idle. Restoring low-power mode while
711  * still in self-refresh is "not recommended", but seems to work.
712  */
713 static void at91rm9200_standby(void)
714 {
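	/*
	 * The branch over ".align 5" keeps the sequence below within a single
	 * 32-byte-aligned block so that no SDRAM fetch occurs while the
	 * self-refresh request is issued: cp15 c7, c10, 4 drains the write
	 * buffer, the store writes 1 to AT91_MC_SDRAMC_SRR to request
	 * self-refresh, and cp15 c7, c0, 4 is wait-for-interrupt.
	 */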
715 	asm volatile(
716 		"b    1f\n\t"
717 		".align    5\n\t"
718 		"1:  mcr    p15, 0, %0, c7, c10, 4\n\t"
719 		"    str    %2, [%1, %3]\n\t"
720 		"    mcr    p15, 0, %0, c7, c0, 4\n\t"
721 		:
722 		: "r" (0), "r" (soc_pm.data.ramc[0]),
723 		  "r" (1), "r" (AT91_MC_SDRAMC_SRR));
724 }
725 
726 /* We manage both DDRAM/SDRAM controllers, so we need more than one value to
727  * remember.
728  */
729 static void at91_ddr_standby(void)
730 {
731 	/* Those two values allow us to delay self-refresh activation
732 	 * to the maximum. */
733 	u32 lpr0, lpr1 = 0;
734 	u32 mdr, saved_mdr0, saved_mdr1 = 0;
735 	u32 saved_lpr0, saved_lpr1 = 0;
736 
737 	/* LPDDR1 --> force DDR2 mode during self-refresh */
738 	saved_mdr0 = at91_ramc_read(0, AT91_DDRSDRC_MDR);
739 	if ((saved_mdr0 & AT91_DDRSDRC_MD) == AT91_DDRSDRC_MD_LOW_POWER_DDR) {
740 		mdr = saved_mdr0 & ~AT91_DDRSDRC_MD;
741 		mdr |= AT91_DDRSDRC_MD_DDR2;
742 		at91_ramc_write(0, AT91_DDRSDRC_MDR, mdr);
743 	}
744 
745 	if (soc_pm.data.ramc[1]) {
746 		saved_lpr1 = at91_ramc_read(1, AT91_DDRSDRC_LPR);
747 		lpr1 = saved_lpr1 & ~AT91_DDRSDRC_LPCB;
748 		lpr1 |= AT91_DDRSDRC_LPCB_SELF_REFRESH;
749 		saved_mdr1 = at91_ramc_read(1, AT91_DDRSDRC_MDR);
750 		if ((saved_mdr1 & AT91_DDRSDRC_MD) == AT91_DDRSDRC_MD_LOW_POWER_DDR) {
751 			mdr = saved_mdr1 & ~AT91_DDRSDRC_MD;
752 			mdr |= AT91_DDRSDRC_MD_DDR2;
753 			at91_ramc_write(1, AT91_DDRSDRC_MDR, mdr);
754 		}
755 	}
756 
757 	saved_lpr0 = at91_ramc_read(0, AT91_DDRSDRC_LPR);
758 	lpr0 = saved_lpr0 & ~AT91_DDRSDRC_LPCB;
759 	lpr0 |= AT91_DDRSDRC_LPCB_SELF_REFRESH;
760 
761 	/* self-refresh mode now */
762 	at91_ramc_write(0, AT91_DDRSDRC_LPR, lpr0);
763 	if (soc_pm.data.ramc[1])
764 		at91_ramc_write(1, AT91_DDRSDRC_LPR, lpr1);
765 
766 	cpu_do_idle();
767 
768 	at91_ramc_write(0, AT91_DDRSDRC_MDR, saved_mdr0);
769 	at91_ramc_write(0, AT91_DDRSDRC_LPR, saved_lpr0);
770 	if (soc_pm.data.ramc[1]) {
771 		at91_ramc_write(1, AT91_DDRSDRC_MDR, saved_mdr1);
772 		at91_ramc_write(1, AT91_DDRSDRC_LPR, saved_lpr1);
773 	}
774 }
775 
776 static void sama5d3_ddr_standby(void)
777 {
778 	u32 lpr0;
779 	u32 saved_lpr0;
780 
781 	saved_lpr0 = at91_ramc_read(0, AT91_DDRSDRC_LPR);
782 	lpr0 = saved_lpr0 & ~AT91_DDRSDRC_LPCB;
783 	lpr0 |= AT91_DDRSDRC_LPCB_POWER_DOWN;
784 
785 	at91_ramc_write(0, AT91_DDRSDRC_LPR, lpr0);
786 
787 	cpu_do_idle();
788 
789 	at91_ramc_write(0, AT91_DDRSDRC_LPR, saved_lpr0);
790 }
791 
792 /* We manage both DDRAM/SDRAM controllers, so we need more than one value to
793  * remember.
794  */
795 static void at91sam9_sdram_standby(void)
796 {
797 	u32 lpr0, lpr1 = 0;
798 	u32 saved_lpr0, saved_lpr1 = 0;
799 
800 	if (soc_pm.data.ramc[1]) {
801 		saved_lpr1 = at91_ramc_read(1, AT91_SDRAMC_LPR);
802 		lpr1 = saved_lpr1 & ~AT91_SDRAMC_LPCB;
803 		lpr1 |= AT91_SDRAMC_LPCB_SELF_REFRESH;
804 	}
805 
806 	saved_lpr0 = at91_ramc_read(0, AT91_SDRAMC_LPR);
807 	lpr0 = saved_lpr0 & ~AT91_SDRAMC_LPCB;
808 	lpr0 |= AT91_SDRAMC_LPCB_SELF_REFRESH;
809 
810 	/* self-refresh mode now */
811 	at91_ramc_write(0, AT91_SDRAMC_LPR, lpr0);
812 	if (soc_pm.data.ramc[1])
813 		at91_ramc_write(1, AT91_SDRAMC_LPR, lpr1);
814 
815 	cpu_do_idle();
816 
817 	at91_ramc_write(0, AT91_SDRAMC_LPR, saved_lpr0);
818 	if (soc_pm.data.ramc[1])
819 		at91_ramc_write(1, AT91_SDRAMC_LPR, saved_lpr1);
820 }
821 
822 static void sama7g5_standby(void)
823 {
824 	int pwrtmg, ratio;
825 
826 	pwrtmg = readl(soc_pm.data.ramc[0] + UDDRC_PWRCTL);
827 	ratio = readl(soc_pm.data.pmc + AT91_PMC_RATIO);
828 
829 	/*
830 	 * Place the RAM into self-refresh after a maximum number of idle
831 	 * clocks. The maximum number of idle clocks is configured by the
832 	 * bootloader in UDDRC_PWRMGT.SELFREF_TO_X32.
833 	 */
834 	writel(pwrtmg | UDDRC_PWRCTL_SELFREF_EN,
835 	       soc_pm.data.ramc[0] + UDDRC_PWRCTL);
836 	/* Divide CPU clock by 16. */
837 	writel(ratio & ~AT91_PMC_RATIO_RATIO, soc_pm.data.pmc + AT91_PMC_RATIO);
838 
839 	cpu_do_idle();
840 
841 	/* Restore previous configuration. */
842 	writel(ratio, soc_pm.data.pmc + AT91_PMC_RATIO);
843 	writel(pwrtmg, soc_pm.data.ramc[0] + UDDRC_PWRCTL);
844 }
845 
846 struct ramc_info {
847 	void (*idle)(void);
848 	unsigned int memctrl;
849 };
850 
851 static const struct ramc_info ramc_infos[] __initconst = {
852 	{ .idle = at91rm9200_standby, .memctrl = AT91_MEMCTRL_MC},
853 	{ .idle = at91sam9_sdram_standby, .memctrl = AT91_MEMCTRL_SDRAMC},
854 	{ .idle = at91_ddr_standby, .memctrl = AT91_MEMCTRL_DDRSDR},
855 	{ .idle = sama5d3_ddr_standby, .memctrl = AT91_MEMCTRL_DDRSDR},
856 	{ .idle = sama7g5_standby, },
857 };
858 
859 static const struct of_device_id ramc_ids[] __initconst = {
860 	{ .compatible = "atmel,at91rm9200-sdramc", .data = &ramc_infos[0] },
861 	{ .compatible = "atmel,at91sam9260-sdramc", .data = &ramc_infos[1] },
862 	{ .compatible = "atmel,at91sam9g45-ddramc", .data = &ramc_infos[2] },
863 	{ .compatible = "atmel,sama5d3-ddramc", .data = &ramc_infos[3] },
864 	{ .compatible = "microchip,sama7g5-uddrc", .data = &ramc_infos[4], },
865 	{ /*sentinel*/ }
866 };
867 
868 static const struct of_device_id ramc_phy_ids[] __initconst = {
869 	{ .compatible = "microchip,sama7g5-ddr3phy", },
870 	{ /* Sentinel. */ },
871 };
872 
873 static __init int at91_dt_ramc(bool phy_mandatory)
874 {
875 	struct device_node *np;
876 	const struct of_device_id *of_id;
877 	int idx = 0;
878 	void *standby = NULL;
879 	const struct ramc_info *ramc;
880 	int ret;
881 
882 	for_each_matching_node_and_match(np, ramc_ids, &of_id) {
883 		soc_pm.data.ramc[idx] = of_iomap(np, 0);
884 		if (!soc_pm.data.ramc[idx]) {
885 			pr_err("unable to map ramc[%d] cpu registers\n", idx);
886 			ret = -ENOMEM;
887 			of_node_put(np);
888 			goto unmap_ramc;
889 		}
890 
891 		ramc = of_id->data;
892 		if (ramc) {
893 			if (!standby)
894 				standby = ramc->idle;
895 			soc_pm.data.memctrl = ramc->memctrl;
896 		}
897 
898 		idx++;
899 	}
900 
901 	if (!idx) {
902 		pr_err("unable to find compatible ram controller node in dtb\n");
903 		ret = -ENODEV;
904 		goto unmap_ramc;
905 	}
906 
907 	/* Look up the DDR PHY node, if any. */
908 	for_each_matching_node_and_match(np, ramc_phy_ids, &of_id) {
909 		soc_pm.data.ramc_phy = of_iomap(np, 0);
910 		if (!soc_pm.data.ramc_phy) {
911 			pr_err("unable to map ramc phy cpu registers\n");
912 			ret = -ENOMEM;
913 			of_node_put(np);
914 			goto unmap_ramc;
915 		}
916 	}
917 
918 	if (phy_mandatory && !soc_pm.data.ramc_phy) {
919 		pr_err("DDR PHY is mandatory!\n");
920 		ret = -ENODEV;
921 		goto unmap_ramc;
922 	}
923 
924 	if (!standby) {
925 		pr_warn("ramc no standby function available\n");
926 		return 0;
927 	}
928 
929 	at91_cpuidle_device.dev.platform_data = standby;
930 
931 	return 0;
932 
933 unmap_ramc:
934 	while (idx)
935 		iounmap(soc_pm.data.ramc[--idx]);
936 
937 	return ret;
938 }
939 
940 static void at91rm9200_idle(void)
941 {
942 	/*
943 	 * Disable the processor clock.  The processor will be automatically
944 	 * re-enabled by an interrupt or by a reset.
945 	 */
946 	writel(AT91_PMC_PCK, soc_pm.data.pmc + AT91_PMC_SCDR);
947 }
948 
949 static void at91sam9_idle(void)
950 {
951 	writel(AT91_PMC_PCK, soc_pm.data.pmc + AT91_PMC_SCDR);
952 	cpu_do_idle();
953 }
954 
955 static void __init at91_pm_sram_init(void)
956 {
957 	struct gen_pool *sram_pool;
958 	phys_addr_t sram_pbase;
959 	unsigned long sram_base;
960 	struct device_node *node;
961 	struct platform_device *pdev = NULL;
962 
963 	for_each_compatible_node(node, NULL, "mmio-sram") {
964 		pdev = of_find_device_by_node(node);
965 		if (pdev) {
966 			of_node_put(node);
967 			break;
968 		}
969 	}
970 
971 	if (!pdev) {
972 		pr_warn("%s: failed to find sram device!\n", __func__);
973 		return;
974 	}
975 
976 	sram_pool = gen_pool_get(&pdev->dev, NULL);
977 	if (!sram_pool) {
978 		pr_warn("%s: sram pool unavailable!\n", __func__);
979 		goto out_put_device;
980 	}
981 
982 	sram_base = gen_pool_alloc(sram_pool, at91_pm_suspend_in_sram_sz);
983 	if (!sram_base) {
984 		pr_warn("%s: unable to alloc sram!\n", __func__);
985 		goto out_put_device;
986 	}
987 
988 	sram_pbase = gen_pool_virt_to_phys(sram_pool, sram_base);
989 	at91_suspend_sram_fn = __arm_ioremap_exec(sram_pbase,
990 					at91_pm_suspend_in_sram_sz, false);
991 	if (!at91_suspend_sram_fn) {
992 		pr_warn("SRAM: Could not map\n");
993 		goto out_put_device;
994 	}
995 
996 	/* Copy the pm suspend handler to SRAM */
997 	at91_suspend_sram_fn = fncpy(at91_suspend_sram_fn,
998 			&at91_pm_suspend_in_sram, at91_pm_suspend_in_sram_sz);
999 	return;
1000 
1001 out_put_device:
1002 	put_device(&pdev->dev);
1003 	return;
1004 }
1005 
1006 static bool __init at91_is_pm_mode_active(int pm_mode)
1007 {
1008 	return (soc_pm.data.standby_mode == pm_mode ||
1009 		soc_pm.data.suspend_mode == pm_mode);
1010 }
1011 
1012 static int __init at91_pm_backup_scan_memcs(unsigned long node,
1013 					    const char *uname, int depth,
1014 					    void *data)
1015 {
1016 	const char *type;
1017 	const __be32 *reg;
1018 	int *located = data;
1019 	int size;
1020 
1021 	/* Memory node already located. */
1022 	if (*located)
1023 		return 0;
1024 
1025 	type = of_get_flat_dt_prop(node, "device_type", NULL);
1026 
1027 	/* We are scanning "memory" nodes only. */
1028 	if (!type || strcmp(type, "memory"))
1029 		return 0;
1030 
1031 	reg = of_get_flat_dt_prop(node, "reg", &size);
1032 	if (reg) {
1033 		soc_pm.memcs = __va((phys_addr_t)be32_to_cpu(*reg));
1034 		*located = 1;
1035 	}
1036 
1037 	return 0;
1038 }
1039 
1040 static int __init at91_pm_backup_init(void)
1041 {
1042 	struct gen_pool *sram_pool;
1043 	struct device_node *np;
1044 	struct platform_device *pdev;
1045 	int ret = -ENODEV, located = 0;
1046 
1047 	if (!IS_ENABLED(CONFIG_SOC_SAMA5D2) &&
1048 	    !IS_ENABLED(CONFIG_SOC_SAMA7G5))
1049 		return -EPERM;
1050 
1051 	if (!at91_is_pm_mode_active(AT91_PM_BACKUP))
1052 		return 0;
1053 
1054 	np = of_find_compatible_node(NULL, NULL, "atmel,sama5d2-securam");
1055 	if (!np)
1056 		return ret;
1057 
1058 	pdev = of_find_device_by_node(np);
1059 	of_node_put(np);
1060 	if (!pdev) {
1061 		pr_warn("%s: failed to find securam device!\n", __func__);
1062 		return ret;
1063 	}
1064 
1065 	sram_pool = gen_pool_get(&pdev->dev, NULL);
1066 	if (!sram_pool) {
1067 		pr_warn("%s: securam pool unavailable!\n", __func__);
1068 		goto securam_fail;
1069 	}
1070 
1071 	soc_pm.bu = (void *)gen_pool_alloc(sram_pool, sizeof(struct at91_pm_bu));
1072 	if (!soc_pm.bu) {
1073 		pr_warn("%s: unable to alloc securam!\n", __func__);
1074 		ret = -ENOMEM;
1075 		goto securam_fail;
1076 	}
1077 
1078 	soc_pm.bu->suspended = 0;
1079 	soc_pm.bu->canary = __pa_symbol(&canary);
1080 	soc_pm.bu->resume = __pa_symbol(cpu_resume);
1081 	if (soc_pm.data.ramc_phy) {
1082 		of_scan_flat_dt(at91_pm_backup_scan_memcs, &located);
1083 		if (!located)
1084 			goto securam_fail;
1085 	}
1086 
1087 	return 0;
1088 
1089 securam_fail:
1090 	put_device(&pdev->dev);
1091 	return ret;
1092 }
1093 
1094 static void __init at91_pm_secure_init(void)
1095 {
1096 	int suspend_mode;
1097 	struct arm_smccc_res res;
1098 
1099 	suspend_mode = soc_pm.data.suspend_mode;
1100 
1101 	res = sam_smccc_call(SAMA5_SMC_SIP_SET_SUSPEND_MODE,
1102 			     suspend_mode, 0);
1103 	if (res.a0 == 0) {
1104 		pr_info("AT91: Secure PM: suspend mode set to %s\n",
1105 			pm_modes[suspend_mode].pattern);
1106 		return;
1107 	}
1108 
1109 	pr_warn("AT91: Secure PM: %s mode not supported!\n",
1110 		pm_modes[suspend_mode].pattern);
1111 
1112 	res = sam_smccc_call(SAMA5_SMC_SIP_GET_SUSPEND_MODE, 0, 0);
1113 	if (res.a0 == 0) {
1114 		pr_warn("AT91: Secure PM: failed to get default mode\n");
1115 		return;
1116 	}
1117 
1118 	pr_info("AT91: Secure PM: using default suspend mode %s\n",
1119 		pm_modes[suspend_mode].pattern);
1120 
1121 	soc_pm.data.suspend_mode = res.a1;
1122 }
1123 static const struct of_device_id atmel_shdwc_ids[] = {
1124 	{ .compatible = "atmel,sama5d2-shdwc" },
1125 	{ .compatible = "microchip,sam9x60-shdwc" },
1126 	{ .compatible = "microchip,sama7g5-shdwc" },
1127 	{ /* sentinel. */ }
1128 };
1129 
1130 static const struct of_device_id gmac_ids[] __initconst = {
1131 	{ .compatible = "atmel,sama5d3-gem" },
1132 	{ .compatible = "atmel,sama5d2-gem" },
1133 	{ .compatible = "atmel,sama5d29-gem" },
1134 	{ .compatible = "microchip,sama7g5-gem" },
1135 	{ },
1136 };
1137 
1138 static const struct of_device_id emac_ids[] __initconst = {
1139 	{ .compatible = "atmel,sama5d3-macb" },
1140 	{ .compatible = "microchip,sama7g5-emac" },
1141 	{ },
1142 };
1143 
1144 /*
1145  * Replaces _mode_to_replace with a supported mode that doesn't depend
1146  * on the controller pointed to by _map_bitmask
1147  * @_maps: u32 array containing AT91_PM_IOMAP() flags and indexed by AT91
1148  * PM mode
1149  * @_map_bitmask: AT91_PM_IOMAP() bitmask; if _mode_to_replace depends on
1150  * the controller represented by _map_bitmask, _mode_to_replace needs to
1151  * be updated
1152  * @_mode_to_replace: standby_mode or suspend_mode that needs to be
1153  * updated
1154  * @_mode_to_check: standby_mode or suspend_mode; this is needed here
1155  * to avoid setting standby_mode and suspend_mode to the same AT91
1156  * PM mode
1157  */
1158 #define AT91_PM_REPLACE_MODE(_maps, _map_bitmask, _mode_to_replace,	\
1159 			     _mode_to_check)				\
1160 	do {								\
1161 		if (((_maps)[(_mode_to_replace)]) & (_map_bitmask)) {	\
1162 			int _mode_to_use, _mode_complementary;		\
1163 			/* Use ULP0 if it doesn't need _map_bitmask. */	\
1164 			if (!((_maps)[AT91_PM_ULP0] & (_map_bitmask))) {\
1165 				_mode_to_use = AT91_PM_ULP0;		\
1166 				_mode_complementary = AT91_PM_STANDBY;	\
1167 			} else {					\
1168 				_mode_to_use = AT91_PM_STANDBY;		\
1169 				_mode_complementary = AT91_PM_STANDBY;	\
1170 			}						\
1171 									\
1172 			if ((_mode_to_check) != _mode_to_use)		\
1173 				(_mode_to_replace) = _mode_to_use;	\
1174 			else						\
1175 				(_mode_to_replace) = _mode_complementary;\
1176 		}							\
1177 	} while (0)
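/*
 * Example (using the sama5d2 iomaps[] below): if suspend_mode is ULP1 but the
 * SHDWC node cannot be mapped, ULP1 depends on AT91_PM_IOMAP(SHDWC) while ULP0
 * does not, so suspend_mode falls back to ULP0, or to STANDBY if standby_mode
 * already uses ULP0.
 */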
1178 
1179 /*
1180  * Replaces standby and suspend modes with default supported modes:
1181  * ULP0 and STANDBY.
1182  * @_maps: u32 array indexed by AT91 PM mode containing AT91_PM_IOMAP()
1183  * flags
1184  * @_map: controller specific name; standby and suspend mode need to be
1185  * replaced in order to not depend on this controller
1186  */
1187 #define AT91_PM_REPLACE_MODES(_maps, _map)				\
1188 	do {								\
1189 		AT91_PM_REPLACE_MODE((_maps), BIT(AT91_PM_IOMAP_##_map),\
1190 				     (soc_pm.data.standby_mode),	\
1191 				     (soc_pm.data.suspend_mode));	\
1192 		AT91_PM_REPLACE_MODE((_maps), BIT(AT91_PM_IOMAP_##_map),\
1193 				     (soc_pm.data.suspend_mode),	\
1194 				     (soc_pm.data.standby_mode));	\
1195 	} while (0)
1196 
1197 static int __init at91_pm_get_eth_clks(struct device_node *np,
1198 				       struct clk_bulk_data *clks)
1199 {
1200 	clks[AT91_PM_ETH_PCLK].clk = of_clk_get_by_name(np, "pclk");
1201 	if (IS_ERR(clks[AT91_PM_ETH_PCLK].clk))
1202 		return PTR_ERR(clks[AT91_PM_ETH_PCLK].clk);
1203 
1204 	clks[AT91_PM_ETH_HCLK].clk = of_clk_get_by_name(np, "hclk");
1205 	if (IS_ERR(clks[AT91_PM_ETH_HCLK].clk))
1206 		return PTR_ERR(clks[AT91_PM_ETH_HCLK].clk);
1207 
1208 	return 0;
1209 }
1210 
1211 static int __init at91_pm_eth_clks_empty(struct clk_bulk_data *clks)
1212 {
1213 	return IS_ERR(clks[AT91_PM_ETH_PCLK].clk) ||
1214 	       IS_ERR(clks[AT91_PM_ETH_HCLK].clk);
1215 }
1216 
1217 static void __init at91_pm_modes_init(const u32 *maps, int len)
1218 {
1219 	struct at91_pm_quirk_eth *gmac = &soc_pm.quirks.eth[AT91_PM_G_ETH];
1220 	struct at91_pm_quirk_eth *emac = &soc_pm.quirks.eth[AT91_PM_E_ETH];
1221 	struct device_node *np;
1222 	int ret;
1223 
1224 	ret = at91_pm_backup_init();
1225 	if (ret) {
1226 		if (soc_pm.data.standby_mode == AT91_PM_BACKUP)
1227 			soc_pm.data.standby_mode = AT91_PM_ULP0;
1228 		if (soc_pm.data.suspend_mode == AT91_PM_BACKUP)
1229 			soc_pm.data.suspend_mode = AT91_PM_ULP0;
1230 	}
1231 
1232 	if (maps[soc_pm.data.standby_mode] & AT91_PM_IOMAP(SHDWC) ||
1233 	    maps[soc_pm.data.suspend_mode] & AT91_PM_IOMAP(SHDWC)) {
1234 		np = of_find_matching_node(NULL, atmel_shdwc_ids);
1235 		if (!np) {
1236 			pr_warn("%s: failed to find shdwc!\n", __func__);
1237 			AT91_PM_REPLACE_MODES(maps, SHDWC);
1238 		} else {
1239 			soc_pm.data.shdwc = of_iomap(np, 0);
1240 			of_node_put(np);
1241 		}
1242 	}
1243 
1244 	if (maps[soc_pm.data.standby_mode] & AT91_PM_IOMAP(SFRBU) ||
1245 	    maps[soc_pm.data.suspend_mode] & AT91_PM_IOMAP(SFRBU)) {
1246 		np = of_find_compatible_node(NULL, NULL, "atmel,sama5d2-sfrbu");
1247 		if (!np) {
1248 			pr_warn("%s: failed to find sfrbu!\n", __func__);
1249 			AT91_PM_REPLACE_MODES(maps, SFRBU);
1250 		} else {
1251 			soc_pm.data.sfrbu = of_iomap(np, 0);
1252 			of_node_put(np);
1253 		}
1254 	}
1255 
1256 	if ((at91_is_pm_mode_active(AT91_PM_ULP1) ||
1257 	     at91_is_pm_mode_active(AT91_PM_ULP0) ||
1258 	     at91_is_pm_mode_active(AT91_PM_ULP0_FAST)) &&
1259 	    (maps[soc_pm.data.standby_mode] & AT91_PM_IOMAP(ETHC) ||
1260 	     maps[soc_pm.data.suspend_mode] & AT91_PM_IOMAP(ETHC))) {
1261 		np = of_find_matching_node(NULL, gmac_ids);
1262 		if (!np) {
1263 			np = of_find_matching_node(NULL, emac_ids);
1264 			if (np)
1265 				goto get_emac_clks;
1266 			AT91_PM_REPLACE_MODES(maps, ETHC);
1267 			goto unmap_unused_nodes;
1268 		} else {
1269 			gmac->np = np;
1270 			at91_pm_get_eth_clks(np, gmac->clks);
1271 		}
1272 
1273 		np = of_find_matching_node(NULL, emac_ids);
1274 		if (!np) {
1275 			if (at91_pm_eth_clks_empty(gmac->clks))
1276 				AT91_PM_REPLACE_MODES(maps, ETHC);
1277 		} else {
1278 get_emac_clks:
1279 			emac->np = np;
1280 			ret = at91_pm_get_eth_clks(np, emac->clks);
1281 			if (ret && at91_pm_eth_clks_empty(gmac->clks)) {
1282 				of_node_put(gmac->np);
1283 				of_node_put(emac->np);
1284 				gmac->np = NULL;
1285 				emac->np = NULL;
1286 			}
1287 		}
1288 	}
1289 
1290 unmap_unused_nodes:
1291 	/* Unmap everything that is no longer needed. */
1292 	if (soc_pm.data.shdwc &&
1293 	    !(maps[soc_pm.data.standby_mode] & AT91_PM_IOMAP(SHDWC) ||
1294 	      maps[soc_pm.data.suspend_mode] & AT91_PM_IOMAP(SHDWC))) {
1295 		iounmap(soc_pm.data.shdwc);
1296 		soc_pm.data.shdwc = NULL;
1297 	}
1298 
1299 	if (soc_pm.data.sfrbu &&
1300 	    !(maps[soc_pm.data.standby_mode] & AT91_PM_IOMAP(SFRBU) ||
1301 	      maps[soc_pm.data.suspend_mode] & AT91_PM_IOMAP(SFRBU))) {
1302 		iounmap(soc_pm.data.sfrbu);
1303 		soc_pm.data.sfrbu = NULL;
1304 	}
1305 
1306 	return;
1307 }
1308 
1309 struct pmc_info {
1310 	unsigned long uhp_udp_mask;
1311 	unsigned long mckr;
1312 	unsigned long version;
1313 };
1314 
1315 static const struct pmc_info pmc_infos[] __initconst = {
1316 	{
1317 		.uhp_udp_mask = AT91RM9200_PMC_UHP | AT91RM9200_PMC_UDP,
1318 		.mckr = 0x30,
1319 		.version = AT91_PMC_V1,
1320 	},
1321 
1322 	{
1323 		.uhp_udp_mask = AT91SAM926x_PMC_UHP | AT91SAM926x_PMC_UDP,
1324 		.mckr = 0x30,
1325 		.version = AT91_PMC_V1,
1326 	},
1327 	{
1328 		.uhp_udp_mask = AT91SAM926x_PMC_UHP,
1329 		.mckr = 0x30,
1330 		.version = AT91_PMC_V1,
1331 	},
1332 	{	.uhp_udp_mask = 0,
1333 		.mckr = 0x30,
1334 		.version = AT91_PMC_V1,
1335 	},
1336 	{
1337 		.uhp_udp_mask = AT91SAM926x_PMC_UHP | AT91SAM926x_PMC_UDP,
1338 		.mckr = 0x28,
1339 		.version = AT91_PMC_V2,
1340 	},
1341 	{
1342 		.mckr = 0x28,
1343 		.version = AT91_PMC_V2,
1344 	},
1345 
1346 };
1347 
1348 static const struct of_device_id atmel_pmc_ids[] __initconst = {
1349 	{ .compatible = "atmel,at91rm9200-pmc", .data = &pmc_infos[0] },
1350 	{ .compatible = "atmel,at91sam9260-pmc", .data = &pmc_infos[1] },
1351 	{ .compatible = "atmel,at91sam9261-pmc", .data = &pmc_infos[1] },
1352 	{ .compatible = "atmel,at91sam9263-pmc", .data = &pmc_infos[1] },
1353 	{ .compatible = "atmel,at91sam9g45-pmc", .data = &pmc_infos[2] },
1354 	{ .compatible = "atmel,at91sam9n12-pmc", .data = &pmc_infos[1] },
1355 	{ .compatible = "atmel,at91sam9rl-pmc", .data = &pmc_infos[3] },
1356 	{ .compatible = "atmel,at91sam9x5-pmc", .data = &pmc_infos[1] },
1357 	{ .compatible = "atmel,sama5d3-pmc", .data = &pmc_infos[1] },
1358 	{ .compatible = "atmel,sama5d4-pmc", .data = &pmc_infos[1] },
1359 	{ .compatible = "atmel,sama5d2-pmc", .data = &pmc_infos[1] },
1360 	{ .compatible = "microchip,sam9x60-pmc", .data = &pmc_infos[4] },
1361 	{ .compatible = "microchip,sama7g5-pmc", .data = &pmc_infos[5] },
1362 	{ /* sentinel */ },
1363 };
1364 
1365 static void __init at91_pm_modes_validate(const int *modes, int len)
1366 {
1367 	u8 i, standby = 0, suspend = 0;
1368 	int mode;
1369 
1370 	for (i = 0; i < len; i++) {
1371 		if (standby && suspend)
1372 			break;
1373 
1374 		if (modes[i] == soc_pm.data.standby_mode && !standby) {
1375 			standby = 1;
1376 			continue;
1377 		}
1378 
1379 		if (modes[i] == soc_pm.data.suspend_mode && !suspend) {
1380 			suspend = 1;
1381 			continue;
1382 		}
1383 	}
1384 
1385 	if (!standby) {
1386 		if (soc_pm.data.suspend_mode == AT91_PM_STANDBY)
1387 			mode = AT91_PM_ULP0;
1388 		else
1389 			mode = AT91_PM_STANDBY;
1390 
1391 		pr_warn("AT91: PM: %s mode not supported! Using %s.\n",
1392 			pm_modes[soc_pm.data.standby_mode].pattern,
1393 			pm_modes[mode].pattern);
1394 		soc_pm.data.standby_mode = mode;
1395 	}
1396 
1397 	if (!suspend) {
1398 		if (soc_pm.data.standby_mode == AT91_PM_ULP0)
1399 			mode = AT91_PM_STANDBY;
1400 		else
1401 			mode = AT91_PM_ULP0;
1402 
1403 		pr_warn("AT91: PM: %s mode not supported! Using %s.\n",
1404 			pm_modes[soc_pm.data.suspend_mode].pattern,
1405 			pm_modes[mode].pattern);
1406 		soc_pm.data.suspend_mode = mode;
1407 	}
1408 }
1409 
1410 static void __init at91_pm_init(void (*pm_idle)(void))
1411 {
1412 	struct device_node *pmc_np;
1413 	const struct of_device_id *of_id;
1414 	const struct pmc_info *pmc;
1415 
1416 	if (at91_cpuidle_device.dev.platform_data)
1417 		platform_device_register(&at91_cpuidle_device);
1418 
1419 	pmc_np = of_find_matching_node_and_match(NULL, atmel_pmc_ids, &of_id);
1420 	soc_pm.data.pmc = of_iomap(pmc_np, 0);
1421 	of_node_put(pmc_np);
1422 	if (!soc_pm.data.pmc) {
1423 		pr_err("AT91: PM not supported, PMC not found\n");
1424 		return;
1425 	}
1426 
1427 	pmc = of_id->data;
1428 	soc_pm.data.uhp_udp_mask = pmc->uhp_udp_mask;
1429 	soc_pm.data.pmc_mckr_offset = pmc->mckr;
1430 	soc_pm.data.pmc_version = pmc->version;
1431 
1432 	if (pm_idle)
1433 		arm_pm_idle = pm_idle;
1434 
1435 	at91_pm_sram_init();
1436 
1437 	if (at91_suspend_sram_fn) {
1438 		suspend_set_ops(&at91_pm_ops);
1439 		pr_info("AT91: PM: standby: %s, suspend: %s\n",
1440 			pm_modes[soc_pm.data.standby_mode].pattern,
1441 			pm_modes[soc_pm.data.suspend_mode].pattern);
1442 	} else {
1443 		pr_info("AT91: PM not supported, due to no SRAM allocated\n");
1444 	}
1445 }
1446 
1447 void __init at91rm9200_pm_init(void)
1448 {
1449 	int ret;
1450 
1451 	if (!IS_ENABLED(CONFIG_SOC_AT91RM9200))
1452 		return;
1453 
1454 	/*
1455 	 * Force STANDBY and ULP0 modes to avoid calling
1456 	 * at91_pm_modes_validate(), which may increase boot time.
1457 	 * The platform only supports STANDBY and ULP0 modes anyway.
1458 	 */
1459 	soc_pm.data.standby_mode = AT91_PM_STANDBY;
1460 	soc_pm.data.suspend_mode = AT91_PM_ULP0;
1461 
1462 	ret = at91_dt_ramc(false);
1463 	if (ret)
1464 		return;
1465 
1466 	/*
1467 	 * AT91RM9200 SDRAM low-power mode cannot be used with self-refresh.
1468 	 */
1469 	at91_ramc_write(0, AT91_MC_SDRAMC_LPR, 0);
1470 
1471 	at91_pm_init(at91rm9200_idle);
1472 }
1473 
1474 void __init sam9x60_pm_init(void)
1475 {
1476 	static const int modes[] __initconst = {
1477 		AT91_PM_STANDBY, AT91_PM_ULP0, AT91_PM_ULP0_FAST, AT91_PM_ULP1,
1478 	};
1479 	static const int iomaps[] __initconst = {
1480 		[AT91_PM_ULP1]		= AT91_PM_IOMAP(SHDWC),
1481 	};
1482 	int ret;
1483 
1484 	if (!IS_ENABLED(CONFIG_SOC_SAM9X60))
1485 		return;
1486 
1487 	at91_pm_modes_validate(modes, ARRAY_SIZE(modes));
1488 	at91_pm_modes_init(iomaps, ARRAY_SIZE(iomaps));
1489 	ret = at91_dt_ramc(false);
1490 	if (ret)
1491 		return;
1492 
1493 	at91_pm_init(NULL);
1494 
1495 	soc_pm.ws_ids = sam9x60_ws_ids;
1496 	soc_pm.config_pmc_ws = at91_sam9x60_config_pmc_ws;
1497 }
1498 
1499 void __init at91sam9_pm_init(void)
1500 {
1501 	int ret;
1502 
1503 	if (!IS_ENABLED(CONFIG_SOC_AT91SAM9))
1504 		return;
1505 
1506 	/*
1507 	 * Force STANDBY and ULP0 modes to avoid calling
1508 	 * at91_pm_modes_validate(), which may increase boot time.
1509 	 * The platform only supports STANDBY and ULP0 modes anyway.
1510 	 */
1511 	soc_pm.data.standby_mode = AT91_PM_STANDBY;
1512 	soc_pm.data.suspend_mode = AT91_PM_ULP0;
1513 
1514 	ret = at91_dt_ramc(false);
1515 	if (ret)
1516 		return;
1517 
1518 	at91_pm_init(at91sam9_idle);
1519 }
1520 
1521 void __init sama5_pm_init(void)
1522 {
1523 	static const int modes[] __initconst = {
1524 		AT91_PM_STANDBY, AT91_PM_ULP0, AT91_PM_ULP0_FAST,
1525 	};
1526 	static const u32 iomaps[] __initconst = {
1527 		[AT91_PM_ULP0]		= AT91_PM_IOMAP(ETHC),
1528 		[AT91_PM_ULP0_FAST]	= AT91_PM_IOMAP(ETHC),
1529 	};
1530 	int ret;
1531 
1532 	if (!IS_ENABLED(CONFIG_SOC_SAMA5))
1533 		return;
1534 
1535 	at91_pm_modes_validate(modes, ARRAY_SIZE(modes));
1536 	at91_pm_modes_init(iomaps, ARRAY_SIZE(iomaps));
1537 	ret = at91_dt_ramc(false);
1538 	if (ret)
1539 		return;
1540 
1541 	at91_pm_init(NULL);
1542 
1543 	/* Quirk applies to ULP0, ULP0 fast and ULP1 modes. */
1544 	soc_pm.quirks.eth[AT91_PM_G_ETH].modes = BIT(AT91_PM_ULP0) |
1545 						 BIT(AT91_PM_ULP0_FAST) |
1546 						 BIT(AT91_PM_ULP1);
1547 	/* Do not suspend in ULP0, ULP0 fast if GETH is the only wakeup source. */
1548 	soc_pm.quirks.eth[AT91_PM_G_ETH].dns_modes = BIT(AT91_PM_ULP0) |
1549 						     BIT(AT91_PM_ULP0_FAST);
1550 }
1551 
1552 void __init sama5d2_pm_init(void)
1553 {
1554 	static const int modes[] __initconst = {
1555 		AT91_PM_STANDBY, AT91_PM_ULP0, AT91_PM_ULP0_FAST, AT91_PM_ULP1,
1556 		AT91_PM_BACKUP,
1557 	};
1558 	static const u32 iomaps[] __initconst = {
1559 		[AT91_PM_ULP0]		= AT91_PM_IOMAP(ETHC),
1560 		[AT91_PM_ULP0_FAST]	= AT91_PM_IOMAP(ETHC),
1561 		[AT91_PM_ULP1]		= AT91_PM_IOMAP(SHDWC) |
1562 					  AT91_PM_IOMAP(ETHC),
1563 		[AT91_PM_BACKUP]	= AT91_PM_IOMAP(SHDWC) |
1564 					  AT91_PM_IOMAP(SFRBU),
1565 	};
1566 	int ret;
1567 
1568 	if (!IS_ENABLED(CONFIG_SOC_SAMA5D2))
1569 		return;
1570 
1571 	if (IS_ENABLED(CONFIG_ATMEL_SECURE_PM)) {
1572 		pr_warn("AT91: Secure PM: ignoring standby mode\n");
1573 		at91_pm_secure_init();
1574 		return;
1575 	}
1576 
1577 	at91_pm_modes_validate(modes, ARRAY_SIZE(modes));
1578 	at91_pm_modes_init(iomaps, ARRAY_SIZE(iomaps));
1579 	ret = at91_dt_ramc(false);
1580 	if (ret)
1581 		return;
1582 
1583 	at91_pm_init(NULL);
1584 
1585 	soc_pm.ws_ids = sama5d2_ws_ids;
1586 	soc_pm.config_shdwc_ws = at91_sama5d2_config_shdwc_ws;
1587 	soc_pm.config_pmc_ws = at91_sama5d2_config_pmc_ws;
1588 
1589 	soc_pm.sfrbu_regs.pswbu.key = (0x4BD20C << 8);
1590 	soc_pm.sfrbu_regs.pswbu.ctrl = BIT(0);
1591 	soc_pm.sfrbu_regs.pswbu.softsw = BIT(1);
1592 	soc_pm.sfrbu_regs.pswbu.state = BIT(3);
1593 
1594 	/* Quirk applies to ULP0, ULP0 fast and ULP1 modes. */
1595 	soc_pm.quirks.eth[AT91_PM_G_ETH].modes = BIT(AT91_PM_ULP0) |
1596 						 BIT(AT91_PM_ULP0_FAST) |
1597 						 BIT(AT91_PM_ULP1);
1598 	/*
1599 	 * Do not suspend in ULP0, ULP0 fast if GETH is the only wakeup
1600 	 * source.
1601 	 */
1602 	soc_pm.quirks.eth[AT91_PM_G_ETH].dns_modes = BIT(AT91_PM_ULP0) |
1603 						     BIT(AT91_PM_ULP0_FAST);
1604 }
1605 
1606 void __init sama7_pm_init(void)
1607 {
1608 	static const int modes[] __initconst = {
1609 		AT91_PM_STANDBY, AT91_PM_ULP0, AT91_PM_ULP1, AT91_PM_BACKUP,
1610 	};
1611 	static const u32 iomaps[] __initconst = {
1612 		[AT91_PM_ULP0]		= AT91_PM_IOMAP(SFRBU),
1613 		[AT91_PM_ULP1]		= AT91_PM_IOMAP(SFRBU) |
1614 					  AT91_PM_IOMAP(SHDWC) |
1615 					  AT91_PM_IOMAP(ETHC),
1616 		[AT91_PM_BACKUP]	= AT91_PM_IOMAP(SFRBU) |
1617 					  AT91_PM_IOMAP(SHDWC),
1618 	};
1619 	int ret;
1620 
1621 	if (!IS_ENABLED(CONFIG_SOC_SAMA7))
1622 		return;
1623 
1624 	at91_pm_modes_validate(modes, ARRAY_SIZE(modes));
1625 
1626 	ret = at91_dt_ramc(true);
1627 	if (ret)
1628 		return;
1629 
1630 	at91_pm_modes_init(iomaps, ARRAY_SIZE(iomaps));
1631 	at91_pm_init(NULL);
1632 
1633 	soc_pm.ws_ids = sama7g5_ws_ids;
1634 	soc_pm.config_pmc_ws = at91_sam9x60_config_pmc_ws;
1635 
1636 	soc_pm.sfrbu_regs.pswbu.key = (0x4BD20C << 8);
1637 	soc_pm.sfrbu_regs.pswbu.ctrl = BIT(0);
1638 	soc_pm.sfrbu_regs.pswbu.softsw = BIT(1);
1639 	soc_pm.sfrbu_regs.pswbu.state = BIT(2);
1640 
1641 	/* Quirk applies to ULP1 for both Ethernet interfaces. */
1642 	soc_pm.quirks.eth[AT91_PM_E_ETH].modes = BIT(AT91_PM_ULP1);
1643 	soc_pm.quirks.eth[AT91_PM_G_ETH].modes = BIT(AT91_PM_ULP1);
1644 }
1645 
1646 static int __init at91_pm_modes_select(char *str)
1647 {
1648 	char *s;
1649 	substring_t args[MAX_OPT_ARGS];
1650 	int standby, suspend;
1651 
1652 	if (!str)
1653 		return 0;
1654 
1655 	s = strsep(&str, ",");
1656 	standby = match_token(s, pm_modes, args);
1657 	if (standby < 0)
1658 		return 0;
1659 
1660 	suspend = match_token(str, pm_modes, args);
1661 	if (suspend < 0)
1662 		return 0;
1663 
1664 	soc_pm.data.standby_mode = standby;
1665 	soc_pm.data.suspend_mode = suspend;
1666 
1667 	return 0;
1668 }
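/*
 * Command line usage: "atmel.pm_modes=<standby>,<suspend>", e.g.
 * "atmel.pm_modes=standby,ulp1" selects STANDBY as the standby mode and ULP1
 * as the suspend-to-RAM mode (tokens as listed in pm_modes[] above).
 */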
1669 early_param("atmel.pm_modes", at91_pm_modes_select);
1670