/*
 * OMAP WakeupGen Source file
 *
 * OMAP WakeupGen is the interrupt controller extension used along
 * with the ARM GIC to wake the CPU out of low power states on
 * external interrupts. It is responsible for generating wakeup
 * events from the incoming interrupts and enable bits. It is
 * implemented in the MPU always-ON power domain. During normal
 * operation, WakeupGen delivers external interrupts directly to
 * the GIC.
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 *	Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/cpu_pm.h>

#include "omap-wakeupgen.h"
#include "omap-secure.h"

#include "soc.h"
#include "omap4-sar-layout.h"
#include "common.h"
#include "pm.h"

#define AM43XX_NR_REG_BANKS	7
#define AM43XX_IRQS		224
#define MAX_NR_REG_BANKS	AM43XX_NR_REG_BANKS
#define MAX_IRQS		AM43XX_IRQS
#define DEFAULT_NR_REG_BANKS	5
#define DEFAULT_IRQS		160
#define WKG_MASK_ALL		0x00000000
#define WKG_UNMASK_ALL		0xffffffff
#define CPU_ENA_OFFSET		0x400
#define CPU0_ID			0x0
#define CPU1_ID			0x1
#define OMAP4_NR_BANKS		4
#define OMAP4_NR_IRQS		128
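
/*
 * Each WakeupGen register bank covers 32 interrupts, so the bank/IRQ
 * pairs above are consistent: 4 banks -> 128 IRQs (OMAP4), 5 banks ->
 * 160 IRQs (default), 7 banks -> 224 IRQs (AM43xx).
 */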

static void __iomem *wakeupgen_base;
static void __iomem *sar_base;
static DEFINE_RAW_SPINLOCK(wakeupgen_lock);
static unsigned int irq_target_cpu[MAX_IRQS];
static unsigned int irq_banks = DEFAULT_NR_REG_BANKS;
static unsigned int max_irqs = DEFAULT_IRQS;
static unsigned int omap_secure_apis;

#ifdef CONFIG_CPU_PM
static unsigned int wakeupgen_context[MAX_NR_REG_BANKS];
#endif

struct omap_wakeupgen_ops {
	void (*save_context)(void);
	void (*restore_context)(void);
};

static struct omap_wakeupgen_ops *wakeupgen_ops;

/*
 * Static helper functions.
 */
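/*
 * Each CPU has its own copy of the WakeupGen enable registers at a
 * CPU_ENA_OFFSET stride from OMAP_WKG_ENB_A_0; within a CPU's block,
 * bank 'idx' (one 32-bit register per 32 interrupts) sits at idx * 4.
 */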
static inline u32 wakeupgen_readl(u8 idx, u32 cpu)
{
	return readl_relaxed(wakeupgen_base + OMAP_WKG_ENB_A_0 +
				(cpu * CPU_ENA_OFFSET) + (idx * 4));
}

static inline void wakeupgen_writel(u32 val, u8 idx, u32 cpu)
{
	writel_relaxed(val, wakeupgen_base + OMAP_WKG_ENB_A_0 +
				(cpu * CPU_ENA_OFFSET) + (idx * 4));
}

static inline void sar_writel(u32 val, u32 offset, u8 idx)
{
	writel_relaxed(val, sar_base + offset + (idx * 4));
}

static inline int _wakeupgen_get_irq_info(u32 irq, u32 *bit_posn, u8 *reg_index)
{
	/*
	 * Each WakeupGen register controls 32 interrupts,
	 * i.e. 1 bit per SPI IRQ.
	 */
	*reg_index = irq >> 5;
	*bit_posn = irq % 32;

	return 0;
}
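
/* For example, hwirq 38 maps to reg_index 1, bit_posn 6 (38 >> 5, 38 % 32). */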

static void _wakeupgen_clear(unsigned int irq, unsigned int cpu)
{
	u32 val, bit_number;
	u8 i;

	if (_wakeupgen_get_irq_info(irq, &bit_number, &i))
		return;

	val = wakeupgen_readl(i, cpu);
	val &= ~BIT(bit_number);
	wakeupgen_writel(val, i, cpu);
}

static void _wakeupgen_set(unsigned int irq, unsigned int cpu)
{
	u32 val, bit_number;
	u8 i;

	if (_wakeupgen_get_irq_info(irq, &bit_number, &i))
		return;

	val = wakeupgen_readl(i, cpu);
	val |= BIT(bit_number);
	wakeupgen_writel(val, i, cpu);
}

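/*
 * WakeupGen sits in front of the GIC, so the irq_chip callbacks below
 * first update the per-CPU WakeupGen enable bit under wakeupgen_lock
 * and then forward the operation to the parent GIC through
 * irq_chip_mask_parent()/irq_chip_unmask_parent().
 */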
/*
 * Architecture specific Mask extension
 */
static void wakeupgen_mask(struct irq_data *d)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&wakeupgen_lock, flags);
	_wakeupgen_clear(d->hwirq, irq_target_cpu[d->hwirq]);
	raw_spin_unlock_irqrestore(&wakeupgen_lock, flags);
	irq_chip_mask_parent(d);
}

/*
 * Architecture specific Unmask extension
 */
static void wakeupgen_unmask(struct irq_data *d)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&wakeupgen_lock, flags);
	_wakeupgen_set(d->hwirq, irq_target_cpu[d->hwirq]);
	raw_spin_unlock_irqrestore(&wakeupgen_lock, flags);
	irq_chip_unmask_parent(d);
}

#ifdef CONFIG_HOTPLUG_CPU
static DEFINE_PER_CPU(u32 [MAX_NR_REG_BANKS], irqmasks);

static void _wakeupgen_save_masks(unsigned int cpu)
{
	u8 i;

	for (i = 0; i < irq_banks; i++)
		per_cpu(irqmasks, cpu)[i] = wakeupgen_readl(i, cpu);
}

static void _wakeupgen_restore_masks(unsigned int cpu)
{
	u8 i;

	for (i = 0; i < irq_banks; i++)
		wakeupgen_writel(per_cpu(irqmasks, cpu)[i], i, cpu);
}

static void _wakeupgen_set_all(unsigned int cpu, unsigned int reg)
{
	u8 i;

	for (i = 0; i < irq_banks; i++)
		wakeupgen_writel(reg, i, cpu);
}

/*
 * Mask or unmask all interrupts on a given CPU.
 *	1 = Mask all interrupts on the 'cpu' (the current masks are saved first)
 *	0 = Unmask all interrupts and restore the saved masks on the 'cpu'
 * Saving and restoring the WakeupGen masks here is faster than
 * rebuilding them by iterating through the GIC registers.
 */
static void wakeupgen_irqmask_all(unsigned int cpu, unsigned int set)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&wakeupgen_lock, flags);
	if (set) {
		_wakeupgen_save_masks(cpu);
		_wakeupgen_set_all(cpu, WKG_MASK_ALL);
	} else {
		_wakeupgen_set_all(cpu, WKG_UNMASK_ALL);
		_wakeupgen_restore_masks(cpu);
	}
	raw_spin_unlock_irqrestore(&wakeupgen_lock, flags);
}
#endif

#ifdef CONFIG_CPU_PM
static inline void omap4_irq_save_context(void)
{
	u32 i, val;

	if (omap_rev() == OMAP4430_REV_ES1_0)
		return;

	for (i = 0; i < irq_banks; i++) {
		/* Save the CPUx interrupt mask for IRQ 0 to 127 */
		val = wakeupgen_readl(i, 0);
		sar_writel(val, WAKEUPGENENB_OFFSET_CPU0, i);
		val = wakeupgen_readl(i, 1);
		sar_writel(val, WAKEUPGENENB_OFFSET_CPU1, i);

		/*
		 * Disable the secure interrupts for CPUx. The restore
		 * code blindly restores secure and non-secure interrupt
		 * masks from SAR RAM. Secure interrupts are not supposed
		 * to be enabled from HLOS, so overwrite the SAR location
		 * so that the secure interrupts remain disabled.
		 */
		sar_writel(0x0, WAKEUPGENENB_SECURE_OFFSET_CPU0, i);
		sar_writel(0x0, WAKEUPGENENB_SECURE_OFFSET_CPU1, i);
	}

	/* Save AuxBoot* registers */
	val = readl_relaxed(wakeupgen_base + OMAP_AUX_CORE_BOOT_0);
	writel_relaxed(val, sar_base + AUXCOREBOOT0_OFFSET);
	val = readl_relaxed(wakeupgen_base + OMAP_AUX_CORE_BOOT_1);
	writel_relaxed(val, sar_base + AUXCOREBOOT1_OFFSET);

	/* Save SyncReq generation logic */
	val = readl_relaxed(wakeupgen_base + OMAP_PTMSYNCREQ_MASK);
	writel_relaxed(val, sar_base + PTMSYNCREQ_MASK_OFFSET);
	val = readl_relaxed(wakeupgen_base + OMAP_PTMSYNCREQ_EN);
	writel_relaxed(val, sar_base + PTMSYNCREQ_EN_OFFSET);

	/* Set the Backup Bit Mask status */
	val = readl_relaxed(sar_base + SAR_BACKUP_STATUS_OFFSET);
	val |= SAR_BACKUP_STATUS_WAKEUPGEN;
	writel_relaxed(val, sar_base + SAR_BACKUP_STATUS_OFFSET);
}

static inline void omap5_irq_save_context(void)
{
	u32 i, val;

	for (i = 0; i < irq_banks; i++) {
		/* Save the CPUx interrupt mask for IRQ 0 to 159 */
		val = wakeupgen_readl(i, 0);
		sar_writel(val, OMAP5_WAKEUPGENENB_OFFSET_CPU0, i);
		val = wakeupgen_readl(i, 1);
		sar_writel(val, OMAP5_WAKEUPGENENB_OFFSET_CPU1, i);
		sar_writel(0x0, OMAP5_WAKEUPGENENB_SECURE_OFFSET_CPU0, i);
		sar_writel(0x0, OMAP5_WAKEUPGENENB_SECURE_OFFSET_CPU1, i);
	}

	/* Save AuxBoot* registers */
	val = readl_relaxed(wakeupgen_base + OMAP_AUX_CORE_BOOT_0);
	writel_relaxed(val, sar_base + OMAP5_AUXCOREBOOT0_OFFSET);
	val = readl_relaxed(wakeupgen_base + OMAP_AUX_CORE_BOOT_1);
	writel_relaxed(val, sar_base + OMAP5_AUXCOREBOOT1_OFFSET);

	/* Set the Backup Bit Mask status */
	val = readl_relaxed(sar_base + OMAP5_SAR_BACKUP_STATUS_OFFSET);
	val |= SAR_BACKUP_STATUS_WAKEUPGEN;
	writel_relaxed(val, sar_base + OMAP5_SAR_BACKUP_STATUS_OFFSET);
}

static inline void am43xx_irq_save_context(void)
{
	u32 i;

	for (i = 0; i < irq_banks; i++) {
		wakeupgen_context[i] = wakeupgen_readl(i, 0);
		wakeupgen_writel(0, i, CPU0_ID);
	}
}

/*
 * Save WakeupGen interrupt context in SAR BANK3. Restore is done by
 * ROM code. The WakeupGen IP is integrated along with the GIC to manage
 * interrupt wakeups from CPU low power states. It manages
 * masking/unmasking of shared peripheral interrupts (SPI), so the
 * interrupt enable/disable control must be kept in sync and consistent
 * between WakeupGen and the GIC so that interrupts are not lost.
 */
static void irq_save_context(void)
{
	/* DRA7 has no SAR to save */
	if (soc_is_dra7xx())
		return;

	if (!sar_base)
		sar_base = omap4_get_sar_ram_base();
	if (wakeupgen_ops && wakeupgen_ops->save_context)
		wakeupgen_ops->save_context();
}

/*
 * Clear WakeupGen SAR backup status.
 */
static void irq_sar_clear(void)
{
	u32 val;
	u32 offset = SAR_BACKUP_STATUS_OFFSET;

	/* DRA7 has no SAR to save */
	if (soc_is_dra7xx())
		return;

	if (soc_is_omap54xx())
		offset = OMAP5_SAR_BACKUP_STATUS_OFFSET;

	val = readl_relaxed(sar_base + offset);
	val &= ~SAR_BACKUP_STATUS_WAKEUPGEN;
	writel_relaxed(val, sar_base + offset);
}
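
/*
 * Note that irq_sar_clear() also serves as the OMAP4/5 restore hook
 * below: the ROM code restores the WakeupGen registers from SAR RAM,
 * so on wakeup HLOS only has to clear the backup status bit.
 */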

static void am43xx_irq_restore_context(void)
{
	u32 i;

	for (i = 0; i < irq_banks; i++)
		wakeupgen_writel(wakeupgen_context[i], i, CPU0_ID);
}

static void irq_restore_context(void)
{
	if (wakeupgen_ops && wakeupgen_ops->restore_context)
		wakeupgen_ops->restore_context();
}

/*
 * Save GIC and Wakeupgen interrupt context using secure API
 * for HS/EMU devices.
 */
static void irq_save_secure_context(void)
{
	u32 ret;

	ret = omap_secure_dispatcher(OMAP4_HAL_SAVEGIC_INDEX,
				FLAG_START_CRITICAL,
				0, 0, 0, 0, 0);
	if (ret != API_HAL_RET_VALUE_OK)
		pr_err("GIC and Wakeupgen context save failed\n");
}

/* Define ops for context save and restore for each SoC */
static struct omap_wakeupgen_ops omap4_wakeupgen_ops = {
	.save_context = omap4_irq_save_context,
	.restore_context = irq_sar_clear,
};

static struct omap_wakeupgen_ops omap5_wakeupgen_ops = {
	.save_context = omap5_irq_save_context,
	.restore_context = irq_sar_clear,
};

static struct omap_wakeupgen_ops am43xx_wakeupgen_ops = {
	.save_context = am43xx_irq_save_context,
	.restore_context = am43xx_irq_restore_context,
};
#else
static struct omap_wakeupgen_ops omap4_wakeupgen_ops = {};
static struct omap_wakeupgen_ops omap5_wakeupgen_ops = {};
static struct omap_wakeupgen_ops am43xx_wakeupgen_ops = {};
#endif

#ifdef CONFIG_HOTPLUG_CPU
static int omap_wakeupgen_cpu_online(unsigned int cpu)
{
	wakeupgen_irqmask_all(cpu, 0);
	return 0;
}

static int omap_wakeupgen_cpu_dead(unsigned int cpu)
{
	wakeupgen_irqmask_all(cpu, 1);
	return 0;
}

static void __init irq_hotplug_init(void)
{
	cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "arm/omap-wake:online",
				  omap_wakeupgen_cpu_online, NULL);
	cpuhp_setup_state_nocalls(CPUHP_ARM_OMAP_WAKE_DEAD,
				  "arm/omap-wake:dead", NULL,
				  omap_wakeupgen_cpu_dead);
}
#else
static void __init irq_hotplug_init(void)
{}
#endif

#ifdef CONFIG_CPU_PM
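/*
 * CPU PM notifier: save the WakeupGen context when the whole cluster is
 * about to lose context and restore it on exit. GP devices save and
 * restore the context directly; HS/EMU devices save it through the
 * secure dispatcher instead.
 */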
static int irq_notifier(struct notifier_block *self, unsigned long cmd, void *v)
{
	switch (cmd) {
	case CPU_CLUSTER_PM_ENTER:
		if (omap_type() == OMAP2_DEVICE_TYPE_GP)
			irq_save_context();
		else
			irq_save_secure_context();
		break;
	case CPU_CLUSTER_PM_EXIT:
		if (omap_type() == OMAP2_DEVICE_TYPE_GP)
			irq_restore_context();
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block irq_notifier_block = {
	.notifier_call = irq_notifier,
};

static void __init irq_pm_init(void)
{
	/* FIXME: Remove this when MPU OSWR support is added */
	if (!IS_PM44XX_ERRATUM(PM_OMAP4_CPU_OSWR_DISABLE))
		cpu_pm_register_notifier(&irq_notifier_block);
}
#else
static void __init irq_pm_init(void)
{}
#endif

void __iomem *omap_get_wakeupgen_base(void)
{
	return wakeupgen_base;
}

int omap_secure_apis_support(void)
{
	return omap_secure_apis;
}

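/*
 * WakeupGen acts as a hierarchical irq_chip stacked on the GIC: EOI,
 * retrigger, trigger type and (on SMP) affinity are simply forwarded
 * to the parent GIC, while mask/unmask also update the WakeupGen enable
 * bits. IRQCHIP_MASK_ON_SUSPEND lets the core mask non-wakeup
 * interrupts here during suspend, so only enabled wakeup sources can
 * wake the CPUs.
 */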
static struct irq_chip wakeupgen_chip = {
	.name			= "WUGEN",
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_mask		= wakeupgen_mask,
	.irq_unmask		= wakeupgen_unmask,
	.irq_retrigger		= irq_chip_retrigger_hierarchy,
	.irq_set_type		= irq_chip_set_type_parent,
	.flags			= IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND,
#ifdef CONFIG_SMP
	.irq_set_affinity	= irq_chip_set_affinity_parent,
#endif
};

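/*
 * The device tree interrupt specifier uses the same three cells as the
 * GIC binding: cell 0 must be 0 (an SPI), cell 1 is the SPI number used
 * as the hwirq, and cell 2 carries the trigger type flags.
 */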
static int wakeupgen_domain_translate(struct irq_domain *d,
				      struct irq_fwspec *fwspec,
				      unsigned long *hwirq,
				      unsigned int *type)
{
	if (is_of_node(fwspec->fwnode)) {
		if (fwspec->param_count != 3)
			return -EINVAL;

		/* No PPI should point to this domain */
		if (fwspec->param[0] != 0)
			return -EINVAL;

		*hwirq = fwspec->param[1];
		*type = fwspec->param[2];
		return 0;
	}

	return -EINVAL;
}

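/*
 * Allocation installs wakeupgen_chip for the hwirq at this level and
 * then allocates the same range in the parent GIC domain, which builds
 * the two-level WUGEN -> GIC hierarchy used by the mask/unmask
 * callbacks above.
 */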
static int wakeupgen_domain_alloc(struct irq_domain *domain,
				  unsigned int virq,
				  unsigned int nr_irqs, void *data)
{
	struct irq_fwspec *fwspec = data;
	struct irq_fwspec parent_fwspec;
	irq_hw_number_t hwirq;
	int i;

	if (fwspec->param_count != 3)
		return -EINVAL;	/* Not GIC compliant */
	if (fwspec->param[0] != 0)
		return -EINVAL;	/* No PPI should point to this domain */

	hwirq = fwspec->param[1];
	if (hwirq >= MAX_IRQS)
		return -EINVAL;	/* Can't deal with this */

	for (i = 0; i < nr_irqs; i++)
		irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
					      &wakeupgen_chip, NULL);

	parent_fwspec = *fwspec;
	parent_fwspec.fwnode = domain->parent->fwnode;
	return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
					    &parent_fwspec);
}

static const struct irq_domain_ops wakeupgen_domain_ops = {
	.translate	= wakeupgen_domain_translate,
	.alloc		= wakeupgen_domain_alloc,
	.free		= irq_domain_free_irqs_common,
};

/*
 * Initialise the wakeupgen module.
 */
static int __init wakeupgen_init(struct device_node *node,
				 struct device_node *parent)
{
	struct irq_domain *parent_domain, *domain;
	int i;
	unsigned int boot_cpu = smp_processor_id();
	u32 val;

	if (!parent) {
		pr_err("%s: no parent, giving up\n", node->full_name);
		return -ENODEV;
	}

	parent_domain = irq_find_host(parent);
	if (!parent_domain) {
		pr_err("%s: unable to obtain parent domain\n", node->full_name);
		return -ENXIO;
	}

	/* Not supported on OMAP4 ES1.0 silicon */
	if (omap_rev() == OMAP4430_REV_ES1_0) {
		WARN(1, "WakeupGen: Not supported on OMAP4430 ES1.0\n");
		return -EPERM;
	}

	/* Static mapping, never released */
	wakeupgen_base = of_iomap(node, 0);
	if (WARN_ON(!wakeupgen_base))
		return -ENOMEM;

	if (cpu_is_omap44xx()) {
		irq_banks = OMAP4_NR_BANKS;
		max_irqs = OMAP4_NR_IRQS;
		omap_secure_apis = 1;
		wakeupgen_ops = &omap4_wakeupgen_ops;
	} else if (soc_is_omap54xx()) {
		wakeupgen_ops = &omap5_wakeupgen_ops;
	} else if (soc_is_am43xx()) {
		irq_banks = AM43XX_NR_REG_BANKS;
		max_irqs = AM43XX_IRQS;
		wakeupgen_ops = &am43xx_wakeupgen_ops;
	}
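
	/*
	 * OMAP5 and DRA7 fall through with the defaults above
	 * (DEFAULT_NR_REG_BANKS/DEFAULT_IRQS, i.e. 5 banks and 160 IRQs);
	 * DRA7 additionally gets no save/restore ops since it has no SAR.
	 */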

	domain = irq_domain_add_hierarchy(parent_domain, 0, max_irqs,
					  node, &wakeupgen_domain_ops,
					  NULL);
	if (!domain) {
		iounmap(wakeupgen_base);
		return -ENOMEM;
	}

	/* Clear all IRQ bitmasks at WakeupGen level */
	for (i = 0; i < irq_banks; i++) {
		wakeupgen_writel(0, i, CPU0_ID);
		if (!soc_is_am43xx())
			wakeupgen_writel(0, i, CPU1_ID);
	}
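	/* AM43xx has a single Cortex-A9, hence no CPU1 bank to clear. */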

	/*
	 * FIXME: Add support to set_smp_affinity() once the core
	 * GIC code has necessary hooks in place.
	 */

	/* Associate all the IRQs to boot CPU like GIC init does. */
	for (i = 0; i < max_irqs; i++)
		irq_target_cpu[i] = boot_cpu;

	/*
	 * Enable the OMAP5 ES2 PM mode using ES2_PM_MODE in AMBA_IF_MODE:
	 * 0x0: ES1 behavior, CPU cores enter and exit OFF mode together.
	 * 0x1: ES2 behavior, CPU cores are allowed to enter/exit OFF mode
	 *	independently.
	 * This only needs to be set once, since the register lives in the
	 * always-ON domain.
	 *
	 * ES1 behavior is no longer supported. OMAP5 is assumed to be
	 * ES2.0, and the same applies to DRA7.
	 */
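	/*
	 * BIT(5) below corresponds to the ES2_PM_MODE bit described above;
	 * the register is updated through the secure monitor call
	 * OMAP5_MON_AMBA_IF_INDEX rather than by a direct MMIO write.
	 */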
	if (soc_is_omap54xx() || soc_is_dra7xx()) {
		val = __raw_readl(wakeupgen_base + OMAP_AMBA_IF_MODE);
		val |= BIT(5);
		omap_smc1(OMAP5_MON_AMBA_IF_INDEX, val);
	}

	irq_hotplug_init();
	irq_pm_init();

	return 0;
}
IRQCHIP_DECLARE(ti_wakeupgen, "ti,omap4-wugen-mpu", wakeupgen_init);