xref: /openbmc/linux/arch/arm/mach-pxa/pxa3xx.c (revision ce79f3a1)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * linux/arch/arm/mach-pxa/pxa3xx.c
4  *
5  * code specific to pxa3xx aka Monahans
6  *
7  * Copyright (C) 2006 Marvell International Ltd.
8  *
9  * 2007-09-02: eric miao <eric.miao@marvell.com>
10  *             initial version
11  */
12 #include <linux/dmaengine.h>
13 #include <linux/dma/pxa-dma.h>
14 #include <linux/module.h>
15 #include <linux/kernel.h>
16 #include <linux/init.h>
17 #include <linux/gpio-pxa.h>
18 #include <linux/pm.h>
19 #include <linux/platform_device.h>
20 #include <linux/irq.h>
21 #include <linux/irqchip.h>
22 #include <linux/io.h>
23 #include <linux/of.h>
24 #include <linux/syscore_ops.h>
25 #include <linux/platform_data/i2c-pxa.h>
26 #include <linux/platform_data/mmp_dma.h>
27 #include <linux/soc/pxa/cpu.h>
28 #include <linux/clk/pxa.h>
29 
30 #include <asm/mach/map.h>
31 #include <asm/suspend.h>
32 #include "pxa3xx-regs.h"
33 #include "reset.h"
34 #include <linux/platform_data/usb-ohci-pxa27x.h>
35 #include "pm.h"
36 #include "addr-map.h"
37 #include "smemc.h"
38 #include "irqs.h"
39 
40 #include "generic.h"
41 #include "devices.h"
42 
/*
 * PECR bits for external wakeup pin n: interrupt enable (IE) and
 * interrupt status (IS).  Used by the WAKEUP irq_chip below.
 */
#define PECR_IE(n)	((1 << ((n) * 2)) << 28)
#define PECR_IS(n)	((1 << ((n) * 2)) << 29)

extern void __init pxa_dt_irq_init(int (*fn)(struct irq_data *, unsigned int));

/*
 * NAND NFC: DFI bus arbitration subset
 */
#define NDCR			(*(volatile u32 __iomem*)(NAND_VIRT + 0))
#define NDCR_ND_ARB_EN		(1 << 12)
#define NDCR_ND_ARB_CNTL	(1 << 19)

/* CKENA/CKENB clock-enable bit positions needed around D2 resume. */
#define CKEN_BOOT  		11      /* < Boot rom clock enable */
#define CKEN_TPM   		19      /* < TPM clock enable */
#define CKEN_HSIO2 		41      /* < HSIO2 clock enable */

#ifdef CONFIG_PM

/* Internal SRAM: holds the relocated standby routine (see below). */
#define ISRAM_START	0x5c000000
#define ISRAM_SIZE	SZ_256K

/* ioremap()ed ISRAM; NULL if pxa3xx_init_pm() failed to map it. */
static void __iomem *sram;
/* Accumulated wakeup-source mask, built by pxa3xx_set_wake(). */
static unsigned long wakeup_src;
66 
/*
 * Enter a standby mode (S0D1C2 or S0D2C2).  Upon wakeup, the dynamic
 * memory controller has to be reinitialised, so we place some code
 * in the SRAM to perform this function.
 *
 * We disable FIQs across the standby - otherwise, we might receive a
 * FIQ while the SDRAM is unavailable.
 */
static void pxa3xx_cpu_standby(unsigned int pwrmode)
{
	/* Standby routine runs from ISRAM at a fixed 0x8000 offset. */
	void (*fn)(unsigned int) = (void __force *)(sram + 0x8000);

	/* Copy the pre-built standby code into ISRAM before jumping to it. */
	memcpy_toio(sram + 0x8000, pm_enter_standby_start,
		    pm_enter_standby_end - pm_enter_standby_start);

	/*
	 * Clear stale D2 wakeup status (writing all-ones; presumably
	 * write-1-to-clear — matches the ASCR DxS convention noted in
	 * pxa3xx_init()), then enable only the configured sources.
	 */
	AD2D0SR = ~0;
	AD2D1SR = ~0;
	AD2D0ER = wakeup_src;
	AD2D1ER = 0;
	/* Read-back writes clear any set write-1-to-clear status bits. */
	ASCR = ASCR;
	ARSR = ARSR;

	local_fiq_disable();
	fn(pwrmode);
	local_fiq_enable();

	/* Disable all D2 wakeup sources again now that we have resumed. */
	AD2D0ER = 0;
	AD2D1ER = 0;
}
96 
97 /*
98  * NOTE:  currently, the OBM (OEM Boot Module) binary comes along with
99  * PXA3xx development kits assumes that the resuming process continues
100  * with the address stored within the first 4 bytes of SDRAM. The PSPR
101  * register is used privately by BootROM and OBM, and _must_ be set to
102  * 0x5c014000 for the moment.
103  */
pxa3xx_cpu_pm_suspend(void)104 static void pxa3xx_cpu_pm_suspend(void)
105 {
106 	volatile unsigned long *p = (volatile void *)0xc0000000;
107 	unsigned long saved_data = *p;
108 #ifndef CONFIG_IWMMXT
109 	u64 acc0;
110 
111 #ifdef CONFIG_CC_IS_GCC
112 	asm volatile(".arch_extension xscale\n\t"
113 		     "mra %Q0, %R0, acc0" : "=r" (acc0));
114 #else
115 	asm volatile("mrrc p0, 0, %Q0, %R0, c0" : "=r" (acc0));
116 #endif
117 #endif
118 
119 	/* resuming from D2 requires the HSIO2/BOOT/TPM clocks enabled */
120 	CKENA |= (1 << CKEN_BOOT) | (1 << CKEN_TPM);
121 	CKENB |= 1 << (CKEN_HSIO2 & 0x1f);
122 
123 	/* clear and setup wakeup source */
124 	AD3SR = ~0;
125 	AD3ER = wakeup_src;
126 	ASCR = ASCR;
127 	ARSR = ARSR;
128 
129 	PCFR |= (1u << 13);			/* L1_DIS */
130 	PCFR &= ~((1u << 12) | (1u << 1));	/* L0_EN | SL_ROD */
131 
132 	PSPR = 0x5c014000;
133 
134 	/* overwrite with the resume address */
135 	*p = __pa_symbol(cpu_resume);
136 
137 	cpu_suspend(0, pxa3xx_finish_suspend);
138 
139 	*p = saved_data;
140 
141 	AD3ER = 0;
142 
143 #ifndef CONFIG_IWMMXT
144 #ifndef CONFIG_AS_IS_LLVM
145 	asm volatile(".arch_extension xscale\n\t"
146 		     "mar acc0, %Q0, %R0" : "=r" (acc0));
147 #else
148 	asm volatile("mcrr p0, 0, %Q0, %R0, c0" :: "r" (acc0));
149 #endif
150 #endif
151 }
152 
/*
 * Dispatch a suspend request: standby (S0D2C2) for PM_SUSPEND_STANDBY,
 * full suspend for PM_SUSPEND_MEM.  Refuses to sleep when no wakeup
 * sources have been configured, since the SoC could never wake again.
 */
static void pxa3xx_cpu_pm_enter(suspend_state_t state)
{
	/*
	 * Don't sleep if no wakeup sources are defined
	 */
	if (wakeup_src == 0) {
		pr_err("Not suspending: no wakeup sources\n");
		return;
	}

	switch (state) {
	case PM_SUSPEND_STANDBY:
		pxa3xx_cpu_standby(PXA3xx_PM_S0D2C2);
		break;

	case PM_SUSPEND_MEM:
		pxa3xx_cpu_pm_suspend();
		break;

	default:
		/* Other states are filtered out by pxa3xx_cpu_pm_valid(). */
		break;
	}
}
173 
/*
 * Report which suspend states this SoC supports: only "mem" and
 * "standby".  Returns non-zero for a supported state.
 */
static int pxa3xx_cpu_pm_valid(suspend_state_t state)
{
	switch (state) {
	case PM_SUSPEND_MEM:
	case PM_SUSPEND_STANDBY:
		return 1;
	default:
		return 0;
	}
}
178 
/* PM callbacks registered with the platform PM core in pxa3xx_init_pm(). */
static struct pxa_cpu_pm_fns pxa3xx_cpu_pm_fns = {
	.valid		= pxa3xx_cpu_pm_valid,
	.enter		= pxa3xx_cpu_pm_enter,
};
183 
/*
 * Map the internal SRAM, configure it to be retained across low-power
 * modes, and hook up the pxa3xx PM callbacks.  If the SRAM cannot be
 * mapped, standby/suspend are left disabled (pxa_cpu_pm_fns unset).
 */
static void __init pxa3xx_init_pm(void)
{
	sram = ioremap(ISRAM_START, ISRAM_SIZE);
	if (!sram) {
		printk(KERN_ERR "Unable to map ISRAM: disabling standby/suspend\n");
		return;
	}

	/*
	 * Since we copy wakeup code into the SRAM, we need to ensure
	 * that it is preserved over the low power modes.  Note: bit 8
	 * is undocumented in the developer manual, but must be set.
	 */
	AD1R |= ADXR_L2 | ADXR_R0;
	AD2R |= ADXR_L2 | ADXR_R0;
	AD3R |= ADXR_L2 | ADXR_R0;

	/*
	 * Clear the resume enable registers.
	 */
	AD1D0ER = 0;
	AD2D0ER = 0;
	AD2D1ER = 0;
	AD3ER = 0;

	pxa_cpu_pm_fns = &pxa3xx_cpu_pm_fns;
}
211 
pxa3xx_set_wake(struct irq_data * d,unsigned int on)212 static int pxa3xx_set_wake(struct irq_data *d, unsigned int on)
213 {
214 	unsigned long flags, mask = 0;
215 
216 	switch (d->irq) {
217 	case IRQ_SSP3:
218 		mask = ADXER_MFP_WSSP3;
219 		break;
220 	case IRQ_MSL:
221 		mask = ADXER_WMSL0;
222 		break;
223 	case IRQ_USBH2:
224 	case IRQ_USBH1:
225 		mask = ADXER_WUSBH;
226 		break;
227 	case IRQ_KEYPAD:
228 		mask = ADXER_WKP;
229 		break;
230 	case IRQ_AC97:
231 		mask = ADXER_MFP_WAC97;
232 		break;
233 	case IRQ_USIM:
234 		mask = ADXER_WUSIM0;
235 		break;
236 	case IRQ_SSP2:
237 		mask = ADXER_MFP_WSSP2;
238 		break;
239 	case IRQ_I2C:
240 		mask = ADXER_MFP_WI2C;
241 		break;
242 	case IRQ_STUART:
243 		mask = ADXER_MFP_WUART3;
244 		break;
245 	case IRQ_BTUART:
246 		mask = ADXER_MFP_WUART2;
247 		break;
248 	case IRQ_FFUART:
249 		mask = ADXER_MFP_WUART1;
250 		break;
251 	case IRQ_MMC:
252 		mask = ADXER_MFP_WMMC1;
253 		break;
254 	case IRQ_SSP:
255 		mask = ADXER_MFP_WSSP1;
256 		break;
257 	case IRQ_RTCAlrm:
258 		mask = ADXER_WRTC;
259 		break;
260 	case IRQ_SSP4:
261 		mask = ADXER_MFP_WSSP4;
262 		break;
263 	case IRQ_TSI:
264 		mask = ADXER_WTSI;
265 		break;
266 	case IRQ_USIM2:
267 		mask = ADXER_WUSIM1;
268 		break;
269 	case IRQ_MMC2:
270 		mask = ADXER_MFP_WMMC2;
271 		break;
272 	case IRQ_NAND:
273 		mask = ADXER_MFP_WFLASH;
274 		break;
275 	case IRQ_USB2:
276 		mask = ADXER_WUSB2;
277 		break;
278 	case IRQ_WAKEUP0:
279 		mask = ADXER_WEXTWAKE0;
280 		break;
281 	case IRQ_WAKEUP1:
282 		mask = ADXER_WEXTWAKE1;
283 		break;
284 	case IRQ_MMC3:
285 		mask = ADXER_MFP_GEN12;
286 		break;
287 	default:
288 		return -EINVAL;
289 	}
290 
291 	local_irq_save(flags);
292 	if (on)
293 		wakeup_src |= mask;
294 	else
295 		wakeup_src &= ~mask;
296 	local_irq_restore(flags);
297 
298 	return 0;
299 }
#else
/* !CONFIG_PM: PM init is a no-op and there is no irq_set_wake hook. */
static inline void pxa3xx_init_pm(void) {}
#define pxa3xx_set_wake	NULL
#endif
304 
pxa_ack_ext_wakeup(struct irq_data * d)305 static void pxa_ack_ext_wakeup(struct irq_data *d)
306 {
307 	PECR |= PECR_IS(d->irq - IRQ_WAKEUP0);
308 }
309 
pxa_mask_ext_wakeup(struct irq_data * d)310 static void pxa_mask_ext_wakeup(struct irq_data *d)
311 {
312 	pxa_mask_irq(d);
313 	PECR &= ~PECR_IE(d->irq - IRQ_WAKEUP0);
314 }
315 
pxa_unmask_ext_wakeup(struct irq_data * d)316 static void pxa_unmask_ext_wakeup(struct irq_data *d)
317 {
318 	pxa_unmask_irq(d);
319 	PECR |= PECR_IE(d->irq - IRQ_WAKEUP0);
320 }
321 
pxa_set_ext_wakeup_type(struct irq_data * d,unsigned int flow_type)322 static int pxa_set_ext_wakeup_type(struct irq_data *d, unsigned int flow_type)
323 {
324 	if (flow_type & IRQ_TYPE_EDGE_RISING)
325 		PWER |= 1 << (d->irq - IRQ_WAKEUP0);
326 
327 	if (flow_type & IRQ_TYPE_EDGE_FALLING)
328 		PWER |= 1 << (d->irq - IRQ_WAKEUP0 + 2);
329 
330 	return 0;
331 }
332 
/* irq_chip for the two dedicated external wakeup pins (IRQ_WAKEUP0/1). */
static struct irq_chip pxa_ext_wakeup_chip = {
	.name		= "WAKEUP",
	.irq_ack	= pxa_ack_ext_wakeup,
	.irq_mask	= pxa_mask_ext_wakeup,
	.irq_unmask	= pxa_unmask_ext_wakeup,
	.irq_set_type	= pxa_set_ext_wakeup_type,
};
340 
pxa_init_ext_wakeup_irq(int (* fn)(struct irq_data *,unsigned int))341 static void __init pxa_init_ext_wakeup_irq(int (*fn)(struct irq_data *,
342 					   unsigned int))
343 {
344 	int irq;
345 
346 	for (irq = IRQ_WAKEUP0; irq <= IRQ_WAKEUP1; irq++) {
347 		irq_set_chip_and_handler(irq, &pxa_ext_wakeup_chip,
348 					 handle_edge_irq);
349 		irq_clear_status_flags(irq, IRQ_NOREQUEST);
350 	}
351 
352 	pxa_ext_wakeup_chip.irq_set_wake = fn;
353 }
354 
/*
 * Common IRQ setup: grant access to coprocessor 6 (bit 6 of the CP15
 * coprocessor access register), then wire up the external wakeup IRQs.
 */
static void __init __pxa3xx_init_irq(void)
{
	/* enable CP6 access */
	u32 value;
	__asm__ __volatile__("mrc p15, 0, %0, c15, c1, 0\n": "=r"(value));
	value |= (1 << 6);
	__asm__ __volatile__("mcr p15, 0, %0, c15, c1, 0\n": :"r"(value));

	pxa_init_ext_wakeup_irq(pxa3xx_set_wake);
}
365 
366 static int __init __init
pxa3xx_dt_init_irq(struct device_node * node,struct device_node * parent)367 pxa3xx_dt_init_irq(struct device_node *node, struct device_node *parent)
368 {
369 	__pxa3xx_init_irq();
370 	pxa_dt_irq_init(pxa3xx_set_wake);
371 	set_handle_irq(ichp_handle_irq);
372 
373 	return 0;
374 }
375 IRQCHIP_DECLARE(pxa3xx_intc, "marvell,pxa-intc", pxa3xx_dt_init_irq);
376 
/* Static I/O mappings: memory controller and NAND controller registers. */
static struct map_desc pxa3xx_io_desc[] __initdata = {
	{	/* Mem Ctl */
		.virtual	= (unsigned long)SMEMC_VIRT,
		.pfn		= __phys_to_pfn(PXA3XX_SMEMC_BASE),
		.length		= SMEMC_SIZE,
		.type		= MT_DEVICE
	}, {	/* NAND (NDCR lives here; see pxa3xx_init()) */
		.virtual	= (unsigned long)NAND_VIRT,
		.pfn		= __phys_to_pfn(NAND_PHYS),
		.length		= NAND_SIZE,
		.type		= MT_DEVICE
	},
};
390 
/*
 * Establish the common PXA mappings plus the pxa3xx-specific ones
 * above, then read back the clock frequency (the '1' argument —
 * presumably "print info"; confirm against the generic helper).
 */
void __init pxa3xx_map_io(void)
{
	pxa_map_io();
	iotable_init(ARRAY_AND_SIZE(pxa3xx_io_desc));
	pxa3xx_get_clk_frequency_khz(1);
}
397 
pxa3xx_init(void)398 static int __init pxa3xx_init(void)
399 {
400 	int ret = 0;
401 
402 	if (cpu_is_pxa3xx()) {
403 
404 		pxa_register_wdt(ARSR);
405 
406 		/*
407 		 * clear RDH bit every time after reset
408 		 *
409 		 * Note: the last 3 bits DxS are write-1-to-clear so carefully
410 		 * preserve them here in case they will be referenced later
411 		 */
412 		ASCR &= ~(ASCR_RDH | ASCR_D1S | ASCR_D2S | ASCR_D3S);
413 
414 		/*
415 		 * Disable DFI bus arbitration, to prevent a system bus lock if
416 		 * somebody disables the NAND clock (unused clock) while this
417 		 * bit remains set.
418 		 */
419 		NDCR = (NDCR & ~NDCR_ND_ARB_EN) | NDCR_ND_ARB_CNTL;
420 
421 		pxa3xx_init_pm();
422 
423 		enable_irq_wake(IRQ_WAKEUP0);
424 		if (cpu_is_pxa320())
425 			enable_irq_wake(IRQ_WAKEUP1);
426 
427 		register_syscore_ops(&pxa_irq_syscore_ops);
428 		register_syscore_ops(&pxa3xx_mfp_syscore_ops);
429 	}
430 
431 	return ret;
432 }
433 
434 postcore_initcall(pxa3xx_init);
435