xref: /openbmc/linux/arch/arm/mach-pxa/pxa3xx.c (revision 5c6603e7)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/arm/mach-pxa/pxa3xx.c
 *
 * code specific to pxa3xx aka Monahans
 *
 * Copyright (C) 2006 Marvell International Ltd.
 *
 * 2007-09-02: eric miao <eric.miao@marvell.com>
 *             initial version
 */
#include <linux/dmaengine.h>
#include <linux/dma/pxa-dma.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/gpio-pxa.h>
#include <linux/pm.h>
#include <linux/platform_device.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/syscore_ops.h>
#include <linux/platform_data/i2c-pxa.h>
#include <linux/platform_data/mmp_dma.h>
#include <linux/soc/pxa/cpu.h>
#include <linux/clk/pxa.h>

#include <asm/mach/map.h>
#include <asm/suspend.h>
#include <mach/pxa3xx-regs.h>
#include <mach/reset.h>
#include <linux/platform_data/usb-ohci-pxa27x.h>
#include "pm.h"
#include "addr-map.h"
#include <mach/smemc.h>
#include <mach/irqs.h>

#include "generic.h"
#include "devices.h"

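/*
 * Bit helpers for PECR: for external wakeup pin n (0 or 1), PECR_IE(n) is
 * its interrupt-enable bit and PECR_IS(n) its interrupt-status bit, as used
 * by the WAKEUP irq_chip below.
 */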
#define PECR_IE(n)	((1 << ((n) * 2)) << 28)
#define PECR_IS(n)	((1 << ((n) * 2)) << 29)

extern void __init pxa_dt_irq_init(int (*fn)(struct irq_data *, unsigned int));

/*
 * NAND NFC: DFI bus arbitration subset
 */
#define NDCR			(*(volatile u32 __iomem*)(NAND_VIRT + 0))
#define NDCR_ND_ARB_EN		(1 << 12)
#define NDCR_ND_ARB_CNTL	(1 << 19)

#ifdef CONFIG_PM

#define ISRAM_START	0x5c000000
#define ISRAM_SIZE	SZ_256K

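/*
 * "sram" maps the on-chip ISRAM; the standby entry code is copied there so
 * that it can run while SDRAM is unavailable.  "wakeup_src" accumulates the
 * wakeup-source bits selected through pxa3xx_set_wake() and is programmed
 * into the AD*ER wakeup-enable registers on entry to a low-power mode.
 */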
static void __iomem *sram;
static unsigned long wakeup_src;

/*
 * Enter a standby mode (S0D1C2 or S0D2C2).  Upon wakeup, the dynamic
 * memory controller has to be reinitialised, so we place some code
 * in the SRAM to perform this function.
 *
 * We disable FIQs across the standby - otherwise, we might receive a
 * FIQ while the SDRAM is unavailable.
 */
static void pxa3xx_cpu_standby(unsigned int pwrmode)
{
	void (*fn)(unsigned int) = (void __force *)(sram + 0x8000);

	memcpy_toio(sram + 0x8000, pm_enter_standby_start,
		    pm_enter_standby_end - pm_enter_standby_start);

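	/*
	 * Clear any stale D2D0/D2D1 wakeup status, enable the selected
	 * wakeup sources for D2D0 only, and clear the sticky status bits
	 * in ASCR/ARSR by writing their current values back to them.
	 */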
	AD2D0SR = ~0;
	AD2D1SR = ~0;
	AD2D0ER = wakeup_src;
	AD2D1ER = 0;
	ASCR = ASCR;
	ARSR = ARSR;

	local_fiq_disable();
	fn(pwrmode);
	local_fiq_enable();

	AD2D0ER = 0;
	AD2D1ER = 0;
}

/*
 * NOTE: currently, the OBM (OEM Boot Module) binary shipped with the
 * PXA3xx development kits assumes that the resume process continues from
 * the address stored in the first 4 bytes of SDRAM.  The PSPR register is
 * used privately by the BootROM and the OBM, and _must_ be set to
 * 0x5c014000 for the moment.
 */
static void pxa3xx_cpu_pm_suspend(void)
{
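	/*
	 * The first word of SDRAM (0xc0000000 here) is where the OBM expects
	 * to find the resume address, so save its original contents to put
	 * back after resume.  Without CONFIG_IWMMXT the XScale accumulator
	 * acc0 is not preserved for us elsewhere, so save and restore it by
	 * hand across the suspend.
	 */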
	volatile unsigned long *p = (volatile void *)0xc0000000;
	unsigned long saved_data = *p;
#ifndef CONFIG_IWMMXT
	u64 acc0;

	asm volatile(".arch_extension xscale\n\t"
		     "mra %Q0, %R0, acc0" : "=r" (acc0));
#endif

	/* resuming from D2 requires the HSIO2/BOOT/TPM clocks enabled */
	CKENA |= (1 << CKEN_BOOT) | (1 << CKEN_TPM);
	CKENB |= 1 << (CKEN_HSIO2 & 0x1f);

	/* clear and setup wakeup source */
	AD3SR = ~0;
	AD3ER = wakeup_src;
	ASCR = ASCR;
	ARSR = ARSR;

	PCFR |= (1u << 13);			/* L1_DIS */
	PCFR &= ~((1u << 12) | (1u << 1));	/* L0_EN | SL_ROD */

	PSPR = 0x5c014000;

	/* overwrite with the resume address */
	*p = __pa_symbol(cpu_resume);

	cpu_suspend(0, pxa3xx_finish_suspend);

	*p = saved_data;

	AD3ER = 0;

#ifndef CONFIG_IWMMXT
	asm volatile(".arch_extension xscale\n\t"
		     "mar acc0, %Q0, %R0" : : "r" (acc0));
#endif
}

static void pxa3xx_cpu_pm_enter(suspend_state_t state)
{
	/*
	 * Don't sleep if no wakeup sources are defined
	 */
	if (wakeup_src == 0) {
		printk(KERN_ERR "Not suspending: no wakeup sources\n");
		return;
	}

	switch (state) {
	case PM_SUSPEND_STANDBY:
		pxa3xx_cpu_standby(PXA3xx_PM_S0D2C2);
		break;

	case PM_SUSPEND_MEM:
		pxa3xx_cpu_pm_suspend();
		break;
	}
}

static int pxa3xx_cpu_pm_valid(suspend_state_t state)
{
	return state == PM_SUSPEND_MEM || state == PM_SUSPEND_STANDBY;
}

static struct pxa_cpu_pm_fns pxa3xx_cpu_pm_fns = {
	.valid		= pxa3xx_cpu_pm_valid,
	.enter		= pxa3xx_cpu_pm_enter,
};

static void __init pxa3xx_init_pm(void)
{
	sram = ioremap(ISRAM_START, ISRAM_SIZE);
	if (!sram) {
		printk(KERN_ERR "Unable to map ISRAM: disabling standby/suspend\n");
		return;
	}

	/*
	 * Since we copy wakeup code into the SRAM, we need to ensure
	 * that it is preserved over the low power modes.  Note: bit 8
	 * is undocumented in the developer manual, but must be set.
	 */
	AD1R |= ADXR_L2 | ADXR_R0;
	AD2R |= ADXR_L2 | ADXR_R0;
	AD3R |= ADXR_L2 | ADXR_R0;

	/*
	 * Clear the resume enable registers.
	 */
	AD1D0ER = 0;
	AD2D0ER = 0;
	AD2D1ER = 0;
	AD3ER = 0;

	pxa_cpu_pm_fns = &pxa3xx_cpu_pm_fns;
}

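/*
 * Map a peripheral IRQ onto the corresponding bit in the application
 * subsystem wakeup mask.  Drivers arm a source through the generic
 * enable_irq_wake()/irq_set_irq_wake() path, and the accumulated mask is
 * later programmed into the AD*ER registers on suspend/standby entry.
 */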
static int pxa3xx_set_wake(struct irq_data *d, unsigned int on)
{
	unsigned long flags, mask = 0;

	switch (d->irq) {
	case IRQ_SSP3:
		mask = ADXER_MFP_WSSP3;
		break;
	case IRQ_MSL:
		mask = ADXER_WMSL0;
		break;
	case IRQ_USBH2:
	case IRQ_USBH1:
		mask = ADXER_WUSBH;
		break;
	case IRQ_KEYPAD:
		mask = ADXER_WKP;
		break;
	case IRQ_AC97:
		mask = ADXER_MFP_WAC97;
		break;
	case IRQ_USIM:
		mask = ADXER_WUSIM0;
		break;
	case IRQ_SSP2:
		mask = ADXER_MFP_WSSP2;
		break;
	case IRQ_I2C:
		mask = ADXER_MFP_WI2C;
		break;
	case IRQ_STUART:
		mask = ADXER_MFP_WUART3;
		break;
	case IRQ_BTUART:
		mask = ADXER_MFP_WUART2;
		break;
	case IRQ_FFUART:
		mask = ADXER_MFP_WUART1;
		break;
	case IRQ_MMC:
		mask = ADXER_MFP_WMMC1;
		break;
	case IRQ_SSP:
		mask = ADXER_MFP_WSSP1;
		break;
	case IRQ_RTCAlrm:
		mask = ADXER_WRTC;
		break;
	case IRQ_SSP4:
		mask = ADXER_MFP_WSSP4;
		break;
	case IRQ_TSI:
		mask = ADXER_WTSI;
		break;
	case IRQ_USIM2:
		mask = ADXER_WUSIM1;
		break;
	case IRQ_MMC2:
		mask = ADXER_MFP_WMMC2;
		break;
	case IRQ_NAND:
		mask = ADXER_MFP_WFLASH;
		break;
	case IRQ_USB2:
		mask = ADXER_WUSB2;
		break;
	case IRQ_WAKEUP0:
		mask = ADXER_WEXTWAKE0;
		break;
	case IRQ_WAKEUP1:
		mask = ADXER_WEXTWAKE1;
		break;
	case IRQ_MMC3:
		mask = ADXER_MFP_GEN12;
		break;
	default:
		return -EINVAL;
	}

	local_irq_save(flags);
	if (on)
		wakeup_src |= mask;
	else
		wakeup_src &= ~mask;
	local_irq_restore(flags);

	return 0;
}
#else
static inline void pxa3xx_init_pm(void) {}
#define pxa3xx_set_wake	NULL
#endif

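/*
 * irq_chip for the two external wakeup pins (IRQ_WAKEUP0/1): their per-pin
 * enable and status bits live in PECR, and the wake-up edge sensitivity is
 * selected through PWER.
 */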
static void pxa_ack_ext_wakeup(struct irq_data *d)
{
	PECR |= PECR_IS(d->irq - IRQ_WAKEUP0);
}

static void pxa_mask_ext_wakeup(struct irq_data *d)
{
	pxa_mask_irq(d);
	PECR &= ~PECR_IE(d->irq - IRQ_WAKEUP0);
}

static void pxa_unmask_ext_wakeup(struct irq_data *d)
{
	pxa_unmask_irq(d);
	PECR |= PECR_IE(d->irq - IRQ_WAKEUP0);
}

static int pxa_set_ext_wakeup_type(struct irq_data *d, unsigned int flow_type)
{
	if (flow_type & IRQ_TYPE_EDGE_RISING)
		PWER |= 1 << (d->irq - IRQ_WAKEUP0);

	if (flow_type & IRQ_TYPE_EDGE_FALLING)
		PWER |= 1 << (d->irq - IRQ_WAKEUP0 + 2);

	return 0;
}

static struct irq_chip pxa_ext_wakeup_chip = {
	.name		= "WAKEUP",
	.irq_ack	= pxa_ack_ext_wakeup,
	.irq_mask	= pxa_mask_ext_wakeup,
	.irq_unmask	= pxa_unmask_ext_wakeup,
	.irq_set_type	= pxa_set_ext_wakeup_type,
};

static void __init pxa_init_ext_wakeup_irq(int (*fn)(struct irq_data *,
					   unsigned int))
{
	int irq;

	for (irq = IRQ_WAKEUP0; irq <= IRQ_WAKEUP1; irq++) {
		irq_set_chip_and_handler(irq, &pxa_ext_wakeup_chip,
					 handle_edge_irq);
		irq_clear_status_flags(irq, IRQ_NOREQUEST);
	}

	pxa_ext_wakeup_chip.irq_set_wake = fn;
}

static void __init __pxa3xx_init_irq(void)
{
	/*
	 * Enable CP6 access: the interrupt controller registers are also
	 * visible through coprocessor 6, so set bit 6 of the XScale
	 * coprocessor access register (CP15 c15, c1).
	 */
	u32 value;
	__asm__ __volatile__("mrc p15, 0, %0, c15, c1, 0\n": "=r"(value));
	value |= (1 << 6);
	__asm__ __volatile__("mcr p15, 0, %0, c15, c1, 0\n": :"r"(value));

	pxa_init_ext_wakeup_irq(pxa3xx_set_wake);
}

void __init pxa3xx_init_irq(void)
{
	__pxa3xx_init_irq();
	pxa_init_irq(56, pxa3xx_set_wake);
}

#ifdef CONFIG_OF
static int __init
pxa3xx_dt_init_irq(struct device_node *node, struct device_node *parent)
{
	__pxa3xx_init_irq();
	pxa_dt_irq_init(pxa3xx_set_wake);
	set_handle_irq(ichp_handle_irq);

	return 0;
}
IRQCHIP_DECLARE(pxa3xx_intc, "marvell,pxa-intc", pxa3xx_dt_init_irq);
#endif	/* CONFIG_OF */

static struct map_desc pxa3xx_io_desc[] __initdata = {
	{	/* Mem Ctl */
		.virtual	= (unsigned long)SMEMC_VIRT,
		.pfn		= __phys_to_pfn(PXA3XX_SMEMC_BASE),
		.length		= SMEMC_SIZE,
		.type		= MT_DEVICE
	}, {
		.virtual	= (unsigned long)NAND_VIRT,
		.pfn		= __phys_to_pfn(NAND_PHYS),
		.length		= NAND_SIZE,
		.type		= MT_DEVICE
	},
};

void __init pxa3xx_map_io(void)
{
	pxa_map_io();
	iotable_init(ARRAY_AND_SIZE(pxa3xx_io_desc));
	pxa3xx_get_clk_frequency_khz(1);
}

/*
 * device registration specific to PXA3xx.
 */

void __init pxa3xx_set_i2c_power_info(struct i2c_pxa_platform_data *info)
{
	pxa_register_device(&pxa3xx_device_i2c_power, info);
}

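/*
 * Board files call this during machine init to describe the power I2C
 * bus.  A minimal, hypothetical example:
 *
 *	static struct i2c_pxa_platform_data board_pwr_i2c_info = {
 *		.fast_mode = 1,
 *	};
 *
 *	pxa3xx_set_i2c_power_info(&board_pwr_i2c_info);
 */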
static struct pxa_gpio_platform_data pxa3xx_gpio_pdata = {
	.irq_base	= PXA_GPIO_TO_IRQ(0),
};

static struct platform_device *devices[] __initdata = {
	&pxa27x_device_udc,
	&pxa_device_pmu,
	&pxa_device_i2s,
	&pxa_device_asoc_ssp1,
	&pxa_device_asoc_ssp2,
	&pxa_device_asoc_ssp3,
	&pxa_device_asoc_ssp4,
	&pxa_device_asoc_platform,
	&pxa_device_rtc,
	&pxa3xx_device_ssp1,
	&pxa3xx_device_ssp2,
	&pxa3xx_device_ssp3,
	&pxa3xx_device_ssp4,
	&pxa27x_device_pwm0,
	&pxa27x_device_pwm1,
};

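/*
 * Each entry binds a (device name, channel name) pair to a DMA request
 * line; PDMA_FILTER_PARAM(LOWEST, n) selects request line n on a
 * lowest-priority channel.
 */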
static const struct dma_slave_map pxa3xx_slave_map[] = {
	/* PXA25x, PXA27x and PXA3xx common entries */
	{ "pxa2xx-ac97", "pcm_pcm_mic_mono", PDMA_FILTER_PARAM(LOWEST, 8) },
	{ "pxa2xx-ac97", "pcm_pcm_aux_mono_in", PDMA_FILTER_PARAM(LOWEST, 9) },
	{ "pxa2xx-ac97", "pcm_pcm_aux_mono_out",
	  PDMA_FILTER_PARAM(LOWEST, 10) },
	{ "pxa2xx-ac97", "pcm_pcm_stereo_in", PDMA_FILTER_PARAM(LOWEST, 11) },
	{ "pxa2xx-ac97", "pcm_pcm_stereo_out", PDMA_FILTER_PARAM(LOWEST, 12) },
	{ "pxa-ssp-dai.0", "rx", PDMA_FILTER_PARAM(LOWEST, 13) },
	{ "pxa-ssp-dai.0", "tx", PDMA_FILTER_PARAM(LOWEST, 14) },
	{ "pxa-ssp-dai.1", "rx", PDMA_FILTER_PARAM(LOWEST, 15) },
	{ "pxa-ssp-dai.1", "tx", PDMA_FILTER_PARAM(LOWEST, 16) },
	{ "pxa2xx-ir", "rx", PDMA_FILTER_PARAM(LOWEST, 17) },
	{ "pxa2xx-ir", "tx", PDMA_FILTER_PARAM(LOWEST, 18) },
	{ "pxa2xx-mci.0", "rx", PDMA_FILTER_PARAM(LOWEST, 21) },
	{ "pxa2xx-mci.0", "tx", PDMA_FILTER_PARAM(LOWEST, 22) },
	{ "pxa-ssp-dai.2", "rx", PDMA_FILTER_PARAM(LOWEST, 66) },
	{ "pxa-ssp-dai.2", "tx", PDMA_FILTER_PARAM(LOWEST, 67) },

	/* PXA3xx specific map */
	{ "pxa-ssp-dai.3", "rx", PDMA_FILTER_PARAM(LOWEST, 2) },
	{ "pxa-ssp-dai.3", "tx", PDMA_FILTER_PARAM(LOWEST, 3) },
	{ "pxa2xx-mci.1", "rx", PDMA_FILTER_PARAM(LOWEST, 93) },
	{ "pxa2xx-mci.1", "tx", PDMA_FILTER_PARAM(LOWEST, 94) },
	{ "pxa3xx-nand", "data", PDMA_FILTER_PARAM(LOWEST, 97) },
	{ "pxa2xx-mci.2", "rx", PDMA_FILTER_PARAM(LOWEST, 100) },
	{ "pxa2xx-mci.2", "tx", PDMA_FILTER_PARAM(LOWEST, 101) },
};

static struct mmp_dma_platdata pxa3xx_dma_pdata = {
	.dma_channels	= 32,
	.nb_requestors	= 100,
	.slave_map	= pxa3xx_slave_map,
	.slave_map_cnt	= ARRAY_SIZE(pxa3xx_slave_map),
};

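/*
 * Core PXA3xx setup, run as a postcore initcall on every machine: it only
 * does anything on PXA3xx parts, and on DT systems it stops after the PM
 * and syscore setup, leaving device registration to the device tree.
 */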
static int __init pxa3xx_init(void)
{
	int ret = 0;

	if (cpu_is_pxa3xx()) {

		pxa_register_wdt(ARSR);

		/*
		 * Clear the RDH bit every time after reset.
		 *
		 * Note: the last 3 bits DxS are write-1-to-clear, so take
		 * care to preserve them here in case they are referenced
		 * later.
		 */
		ASCR &= ~(ASCR_RDH | ASCR_D1S | ASCR_D2S | ASCR_D3S);

		/*
		 * Disable DFI bus arbitration, to prevent a system bus lock if
		 * somebody disables the NAND clock (unused clock) while this
		 * bit remains set.
		 */
		NDCR = (NDCR & ~NDCR_ND_ARB_EN) | NDCR_ND_ARB_CNTL;

		pxa3xx_init_pm();

		enable_irq_wake(IRQ_WAKEUP0);
		if (cpu_is_pxa320())
			enable_irq_wake(IRQ_WAKEUP1);

		register_syscore_ops(&pxa_irq_syscore_ops);
		register_syscore_ops(&pxa3xx_mfp_syscore_ops);

		if (of_have_populated_dt())
			return 0;

		pxa2xx_set_dmac_info(&pxa3xx_dma_pdata);
		ret = platform_add_devices(devices, ARRAY_SIZE(devices));
		if (ret)
			return ret;
		if (cpu_is_pxa300() || cpu_is_pxa310() || cpu_is_pxa320()) {
			platform_device_add_data(&pxa3xx_device_gpio,
						 &pxa3xx_gpio_pdata,
						 sizeof(pxa3xx_gpio_pdata));
			ret = platform_device_register(&pxa3xx_device_gpio);
		}
	}

	return ret;
}

postcore_initcall(pxa3xx_init);