/*
 * linux/arch/arm/mach-pxa/pxa3xx.c
 *
 * code specific to pxa3xx aka Monahans
 *
 * Copyright (C) 2006 Marvell International Ltd.
 *
 * 2007-09-02: eric miao <eric.miao@marvell.com>
 *             initial version
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/gpio-pxa.h>
#include <linux/pm.h>
#include <linux/platform_device.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/syscore_ops.h>
#include <linux/i2c/pxa-i2c.h>

#include <asm/mach/map.h>
#include <asm/suspend.h>
#include <mach/hardware.h>
#include <mach/pxa3xx-regs.h>
#include <mach/reset.h>
#include <linux/platform_data/usb-ohci-pxa27x.h>
#include "pm.h"
#include <mach/dma.h>
#include <mach/smemc.h>
#include <mach/irqs.h>

#include "generic.h"
#include "devices.h"

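/*
 * PECR bits for the two external wakeup pins: PECR_IE(n) is the
 * interrupt enable bit and PECR_IS(n) the interrupt status bit for
 * wakeup pin n (see the WAKEUP0/1 irq_chip further down).
 */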
#define PECR_IE(n)	((1 << ((n) * 2)) << 28)
#define PECR_IS(n)	((1 << ((n) * 2)) << 29)

extern void __init pxa_dt_irq_init(int (*fn)(struct irq_data *, unsigned int));

/*
 * NAND NFC: DFI bus arbitration subset
 */
#define NDCR			(*(volatile u32 __iomem*)(NAND_VIRT + 0))
#define NDCR_ND_ARB_EN		(1 << 12)
#define NDCR_ND_ARB_CNTL	(1 << 19)

#ifdef CONFIG_PM

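/* Physical base and size of the on-chip SRAM holding the standby entry code */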
#define ISRAM_START	0x5c000000
#define ISRAM_SIZE	SZ_256K

static void __iomem *sram;
static unsigned long wakeup_src;

/*
 * Enter a standby mode (S0D1C2 or S0D2C2).  Upon wakeup, the dynamic
 * memory controller has to be reinitialised, so we place some code
 * in the SRAM to perform this function.
 *
 * We disable FIQs across the standby - otherwise, we might receive a
 * FIQ while the SDRAM is unavailable.
 */
static void pxa3xx_cpu_standby(unsigned int pwrmode)
{
	void (*fn)(unsigned int) = (void __force *)(sram + 0x8000);

	memcpy_toio(sram + 0x8000, pm_enter_standby_start,
		    pm_enter_standby_end - pm_enter_standby_start);

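	/*
	 * Clear any stale D2 wakeup status, enable the requested wakeup
	 * sources, and write ASCR/ARSR back to themselves to clear their
	 * write-one-to-clear status bits.
	 */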
	AD2D0SR = ~0;
	AD2D1SR = ~0;
	AD2D0ER = wakeup_src;
	AD2D1ER = 0;
	ASCR = ASCR;
	ARSR = ARSR;

	local_fiq_disable();
	fn(pwrmode);
	local_fiq_enable();

	AD2D0ER = 0;
	AD2D1ER = 0;
}

/*
 * NOTE: currently, the OBM (OEM Boot Module) binary that comes along
 * with the PXA3xx development kits assumes that the resume process
 * continues from the address stored in the first 4 bytes of SDRAM.
 * The PSPR register is used privately by the BootROM and the OBM, and
 * _must_ be set to 0x5c014000 for the moment.
 */
static void pxa3xx_cpu_pm_suspend(void)
{
	volatile unsigned long *p = (volatile void *)0xc0000000;
	unsigned long saved_data = *p;
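	/*
	 * acc0 is lost across the deep sleep state.  With CONFIG_IWMMXT it
	 * is expected to be handled as part of the iWMMXt context;
	 * otherwise save it here and restore it after cpu_suspend() returns.
	 */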
#ifndef CONFIG_IWMMXT
	u64 acc0;

	asm volatile(".arch_extension xscale\n\t"
		     "mra %Q0, %R0, acc0" : "=r" (acc0));
#endif

	/* resuming from D2 requires the HSIO2/BOOT/TPM clocks enabled */
	CKENA |= (1 << CKEN_BOOT) | (1 << CKEN_TPM);
	CKENB |= 1 << (CKEN_HSIO2 & 0x1f);

	/* clear and setup wakeup source */
	AD3SR = ~0;
	AD3ER = wakeup_src;
	ASCR = ASCR;
	ARSR = ARSR;

	PCFR |= (1u << 13);			/* L1_DIS */
	PCFR &= ~((1u << 12) | (1u << 1));	/* L0_EN | SL_ROD */

	PSPR = 0x5c014000;

	/* overwrite with the resume address */
	*p = virt_to_phys(cpu_resume);

	cpu_suspend(0, pxa3xx_finish_suspend);

	*p = saved_data;

	AD3ER = 0;

#ifndef CONFIG_IWMMXT
	/* restore the accumulator saved above */
	asm volatile(".arch_extension xscale\n\t"
		     "mar acc0, %Q0, %R0" : : "r" (acc0));
#endif
}

static void pxa3xx_cpu_pm_enter(suspend_state_t state)
{
	/*
	 * Don't sleep if no wakeup sources are defined
	 */
	if (wakeup_src == 0) {
		printk(KERN_ERR "Not suspending: no wakeup sources\n");
		return;
	}

	switch (state) {
	case PM_SUSPEND_STANDBY:
		pxa3xx_cpu_standby(PXA3xx_PM_S0D2C2);
		break;

	case PM_SUSPEND_MEM:
		pxa3xx_cpu_pm_suspend();
		break;
	}
}

static int pxa3xx_cpu_pm_valid(suspend_state_t state)
{
	return state == PM_SUSPEND_MEM || state == PM_SUSPEND_STANDBY;
}

static struct pxa_cpu_pm_fns pxa3xx_cpu_pm_fns = {
	.valid		= pxa3xx_cpu_pm_valid,
	.enter		= pxa3xx_cpu_pm_enter,
};

static void __init pxa3xx_init_pm(void)
{
	sram = ioremap(ISRAM_START, ISRAM_SIZE);
	if (!sram) {
		printk(KERN_ERR "Unable to map ISRAM: disabling standby/suspend\n");
		return;
	}

	/*
	 * Since we copy wakeup code into the SRAM, we need to ensure
	 * that it is preserved over the low power modes.  Note: bit 8
	 * is undocumented in the developer manual, but must be set.
	 */
	AD1R |= ADXR_L2 | ADXR_R0;
	AD2R |= ADXR_L2 | ADXR_R0;
	AD3R |= ADXR_L2 | ADXR_R0;

	/*
	 * Clear the resume enable registers.
	 */
	AD1D0ER = 0;
	AD2D0ER = 0;
	AD2D1ER = 0;
	AD3ER = 0;

	pxa_cpu_pm_fns = &pxa3xx_cpu_pm_fns;
}

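/*
 * Translate a peripheral IRQ into the corresponding bit of the wakeup
 * source mask.  The accumulated mask is programmed into the AD2D0ER/AD3ER
 * wakeup enable registers on standby/suspend entry.
 */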
static int pxa3xx_set_wake(struct irq_data *d, unsigned int on)
{
	unsigned long flags, mask = 0;

	switch (d->irq) {
	case IRQ_SSP3:
		mask = ADXER_MFP_WSSP3;
		break;
	case IRQ_MSL:
		mask = ADXER_WMSL0;
		break;
	case IRQ_USBH2:
	case IRQ_USBH1:
		mask = ADXER_WUSBH;
		break;
	case IRQ_KEYPAD:
		mask = ADXER_WKP;
		break;
	case IRQ_AC97:
		mask = ADXER_MFP_WAC97;
		break;
	case IRQ_USIM:
		mask = ADXER_WUSIM0;
		break;
	case IRQ_SSP2:
		mask = ADXER_MFP_WSSP2;
		break;
	case IRQ_I2C:
		mask = ADXER_MFP_WI2C;
		break;
	case IRQ_STUART:
		mask = ADXER_MFP_WUART3;
		break;
	case IRQ_BTUART:
		mask = ADXER_MFP_WUART2;
		break;
	case IRQ_FFUART:
		mask = ADXER_MFP_WUART1;
		break;
	case IRQ_MMC:
		mask = ADXER_MFP_WMMC1;
		break;
	case IRQ_SSP:
		mask = ADXER_MFP_WSSP1;
		break;
	case IRQ_RTCAlrm:
		mask = ADXER_WRTC;
		break;
	case IRQ_SSP4:
		mask = ADXER_MFP_WSSP4;
		break;
	case IRQ_TSI:
		mask = ADXER_WTSI;
		break;
	case IRQ_USIM2:
		mask = ADXER_WUSIM1;
		break;
	case IRQ_MMC2:
		mask = ADXER_MFP_WMMC2;
		break;
	case IRQ_NAND:
		mask = ADXER_MFP_WFLASH;
		break;
	case IRQ_USB2:
		mask = ADXER_WUSB2;
		break;
	case IRQ_WAKEUP0:
		mask = ADXER_WEXTWAKE0;
		break;
	case IRQ_WAKEUP1:
		mask = ADXER_WEXTWAKE1;
		break;
	case IRQ_MMC3:
		mask = ADXER_MFP_GEN12;
		break;
	default:
		return -EINVAL;
	}

	local_irq_save(flags);
	if (on)
		wakeup_src |= mask;
	else
		wakeup_src &= ~mask;
	local_irq_restore(flags);

	return 0;
}
#else
static inline void pxa3xx_init_pm(void) {}
#define pxa3xx_set_wake	NULL
#endif

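/*
 * The external wakeup pins WAKEUP0/1 have their own enable and status
 * bits in PECR on top of the normal interrupt controller mask, so they
 * get a dedicated irq_chip.
 */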
static void pxa_ack_ext_wakeup(struct irq_data *d)
{
	PECR |= PECR_IS(d->irq - IRQ_WAKEUP0);
}

static void pxa_mask_ext_wakeup(struct irq_data *d)
{
	pxa_mask_irq(d);
	PECR &= ~PECR_IE(d->irq - IRQ_WAKEUP0);
}

static void pxa_unmask_ext_wakeup(struct irq_data *d)
{
	pxa_unmask_irq(d);
	PECR |= PECR_IE(d->irq - IRQ_WAKEUP0);
}

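/*
 * PWER carries separate rising- and falling-edge detect enables for the
 * external wakeup pins; the falling-edge bits sit two positions above
 * the rising-edge ones.
 */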
static int pxa_set_ext_wakeup_type(struct irq_data *d, unsigned int flow_type)
{
	if (flow_type & IRQ_TYPE_EDGE_RISING)
		PWER |= 1 << (d->irq - IRQ_WAKEUP0);

	if (flow_type & IRQ_TYPE_EDGE_FALLING)
		PWER |= 1 << (d->irq - IRQ_WAKEUP0 + 2);

	return 0;
}

static struct irq_chip pxa_ext_wakeup_chip = {
	.name		= "WAKEUP",
	.irq_ack	= pxa_ack_ext_wakeup,
	.irq_mask	= pxa_mask_ext_wakeup,
	.irq_unmask	= pxa_unmask_ext_wakeup,
	.irq_set_type	= pxa_set_ext_wakeup_type,
};

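/*
 * Hook WAKEUP0/1 up to the dedicated irq_chip, make them requestable,
 * and route irq_set_wake() through the platform callback.
 */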
static void __init pxa_init_ext_wakeup_irq(int (*fn)(struct irq_data *,
					   unsigned int))
{
	int irq;

	for (irq = IRQ_WAKEUP0; irq <= IRQ_WAKEUP1; irq++) {
		irq_set_chip_and_handler(irq, &pxa_ext_wakeup_chip,
					 handle_edge_irq);
		irq_clear_status_flags(irq, IRQ_NOREQUEST);
	}

	pxa_ext_wakeup_chip.irq_set_wake = fn;
}

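/*
 * The PXA3xx interrupt controller registers (ICHP etc.) are accessed
 * through coprocessor 6, so CP6 access must be granted in the CPU's
 * coprocessor access register before the IRQ code touches them.
 */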
static void __init __pxa3xx_init_irq(void)
{
	/* enable CP6 access */
	u32 value;
	__asm__ __volatile__("mrc p15, 0, %0, c15, c1, 0\n": "=r"(value));
	value |= (1 << 6);
	__asm__ __volatile__("mcr p15, 0, %0, c15, c1, 0\n": :"r"(value));

	pxa_init_ext_wakeup_irq(pxa3xx_set_wake);
}

void __init pxa3xx_init_irq(void)
{
	__pxa3xx_init_irq();
	pxa_init_irq(56, pxa3xx_set_wake);
}

#ifdef CONFIG_OF
static int __init
pxa3xx_dt_init_irq(struct device_node *node, struct device_node *parent)
{
	__pxa3xx_init_irq();
	pxa_dt_irq_init(pxa3xx_set_wake);
	set_handle_irq(ichp_handle_irq);

	return 0;
}
IRQCHIP_DECLARE(pxa3xx_intc, "marvell,pxa-intc", pxa3xx_dt_init_irq);
#endif	/* CONFIG_OF */

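/*
 * Static mappings for the static memory controller and the NAND
 * controller; the latter lets pxa3xx_init() reach NDCR before any NAND
 * driver is loaded.
 */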
static struct map_desc pxa3xx_io_desc[] __initdata = {
	{	/* Mem Ctl */
		.virtual	= (unsigned long)SMEMC_VIRT,
		.pfn		= __phys_to_pfn(PXA3XX_SMEMC_BASE),
		.length		= SMEMC_SIZE,
		.type		= MT_DEVICE
	}, {
		.virtual	= (unsigned long)NAND_VIRT,
		.pfn		= __phys_to_pfn(NAND_PHYS),
		.length		= NAND_SIZE,
		.type		= MT_DEVICE
	},
};

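/* Map the PXA3xx static I/O regions and sample (and print) the boot-time clock frequencies */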
void __init pxa3xx_map_io(void)
{
	pxa_map_io();
	iotable_init(ARRAY_AND_SIZE(pxa3xx_io_desc));
	pxa3xx_get_clk_frequency_khz(1);
}

/*
 * device registration specific to PXA3xx.
 */

void __init pxa3xx_set_i2c_power_info(struct i2c_pxa_platform_data *info)
{
	pxa_register_device(&pxa3xx_device_i2c_power, info);
}

static struct pxa_gpio_platform_data pxa3xx_gpio_pdata = {
	.irq_base	= PXA_GPIO_TO_IRQ(0),
};

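/* On-chip devices registered only when not booting from a populated device tree */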
static struct platform_device *devices[] __initdata = {
	&pxa27x_device_udc,
	&pxa_device_pmu,
	&pxa_device_i2s,
	&pxa_device_asoc_ssp1,
	&pxa_device_asoc_ssp2,
	&pxa_device_asoc_ssp3,
	&pxa_device_asoc_ssp4,
	&pxa_device_asoc_platform,
	&pxa_device_rtc,
	&pxa3xx_device_ssp1,
	&pxa3xx_device_ssp2,
	&pxa3xx_device_ssp3,
	&pxa3xx_device_ssp4,
	&pxa27x_device_pwm0,
	&pxa27x_device_pwm1,
};

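/*
 * SoC-level initialisation: latch the reset status, clear the RDH bit,
 * disable DFI bus arbitration, set up power management and the syscore
 * hooks, and register the legacy platform devices on non-DT boots.
 */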
static int __init pxa3xx_init(void)
{
	int ret = 0;

	if (cpu_is_pxa3xx()) {

		reset_status = ARSR;

		/*
		 * clear RDH bit every time after reset
		 *
		 * Note: the last 3 bits (DxS) are write-1-to-clear, so mask
		 * them off here to avoid clearing their status in case it is
		 * referenced later
		 */
		ASCR &= ~(ASCR_RDH | ASCR_D1S | ASCR_D2S | ASCR_D3S);

		/*
		 * Disable DFI bus arbitration, to prevent a system bus lock if
		 * somebody disables the NAND clock (unused clock) while this
		 * bit remains set.
		 */
		NDCR = (NDCR & ~NDCR_ND_ARB_EN) | NDCR_ND_ARB_CNTL;

		pxa3xx_init_pm();

		register_syscore_ops(&pxa_irq_syscore_ops);
		register_syscore_ops(&pxa3xx_mfp_syscore_ops);

		if (of_have_populated_dt())
			return 0;

		pxa2xx_set_dmac_info(32, 100);
		ret = platform_add_devices(devices, ARRAY_SIZE(devices));
		if (ret)
			return ret;
		if (cpu_is_pxa300() || cpu_is_pxa310() || cpu_is_pxa320()) {
			platform_device_add_data(&pxa3xx_device_gpio,
						 &pxa3xx_gpio_pdata,
						 sizeof(pxa3xx_gpio_pdata));
			ret = platform_device_register(&pxa3xx_device_gpio);
		}
	}

	return ret;
}

postcore_initcall(pxa3xx_init);