/*
 * Atmel AT91 AIC5 (Advanced Interrupt Controller) driver
 *
 *  Copyright (C) 2004 SAN People
 *  Copyright (C) 2004 ATMEL
 *  Copyright (C) Rick Bronson
 *  Copyright (C) 2014 Free Electrons
 *
 *  Author: Boris BREZILLON <boris.brezillon@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2.  This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/bitmap.h>
#include <linux/types.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/irqdomain.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/io.h>

#include <asm/exception.h>
#include <asm/mach/irq.h>

#include "irq-atmel-aic-common.h"

/* Number of irq lines managed by AIC */
#define NR_AIC5_IRQS	128

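/*
 * Register map. Unlike the original AIC, the AIC5 exposes its per-source
 * registers (SMR, SVR, IECR, IDCR, ICCR, ISCR, ...) through a banked
 * interface: software first writes an interrupt line number to the Source
 * Select Register (SSR), and subsequent accesses to those registers then
 * target the selected source. This is why every per-interrupt operation in
 * this driver starts with a write to AT91_AIC5_SSR.
 */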
#define AT91_AIC5_SSR		0x0
#define AT91_AIC5_INTSEL_MSK	(0x7f << 0)

#define AT91_AIC5_SMR			0x4

#define AT91_AIC5_SVR			0x8
#define AT91_AIC5_IVR			0x10
#define AT91_AIC5_FVR			0x14
#define AT91_AIC5_ISR			0x18

#define AT91_AIC5_IPR0			0x20
#define AT91_AIC5_IPR1			0x24
#define AT91_AIC5_IPR2			0x28
#define AT91_AIC5_IPR3			0x2c
#define AT91_AIC5_IMR			0x30
#define AT91_AIC5_CISR			0x34

#define AT91_AIC5_IECR			0x40
#define AT91_AIC5_IDCR			0x44
#define AT91_AIC5_ICCR			0x48
#define AT91_AIC5_ISCR			0x4c
#define AT91_AIC5_EOICR			0x38
#define AT91_AIC5_SPU			0x3c
#define AT91_AIC5_DCR			0x6c

#define AT91_AIC5_FFER			0x50
#define AT91_AIC5_FFDR			0x54
#define AT91_AIC5_FFSR			0x58

static struct irq_domain *aic5_domain;

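/*
 * Top-level IRQ entry point. Reading IVR returns the number of the
 * highest-priority pending interrupt and acknowledges it at the controller
 * level; ISR is then read to filter out spurious entries. For a spurious
 * interrupt (ISR == 0) the handler writes EOICR straight away so the
 * priority controller is not left in an inconsistent state; otherwise the
 * interrupt is dispatched through the irq domain.
 */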
static asmlinkage void __exception_irq_entry
aic5_handle(struct pt_regs *regs)
{
	struct irq_chip_generic *bgc = irq_get_domain_generic_chip(aic5_domain, 0);
	u32 irqnr;
	u32 irqstat;

	irqnr = irq_reg_readl(bgc, AT91_AIC5_IVR);
	irqstat = irq_reg_readl(bgc, AT91_AIC5_ISR);

	if (!irqstat)
		irq_reg_writel(bgc, 0, AT91_AIC5_EOICR);
	else
		handle_domain_irq(aic5_domain, irqnr, regs);
}

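/*
 * mask/unmask are open-coded instead of using the generic chip helpers
 * because every access has to go through the SSR indirection described
 * above. mask_cache is still kept up to date by hand, since the
 * suspend/resume callbacks below rely on it to know which sources were
 * enabled.
 */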
static void aic5_mask(struct irq_data *d)
{
	struct irq_domain *domain = d->domain;
	struct irq_chip_generic *bgc = irq_get_domain_generic_chip(domain, 0);
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);

	/*
	 * Disable interrupt on AIC5. We always take the lock of the
	 * first irq chip as all chips share the same registers.
	 */
	irq_gc_lock(bgc);
	irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR);
	irq_reg_writel(gc, 1, AT91_AIC5_IDCR);
	gc->mask_cache &= ~d->mask;
	irq_gc_unlock(bgc);
}

static void aic5_unmask(struct irq_data *d)
{
	struct irq_domain *domain = d->domain;
	struct irq_chip_generic *bgc = irq_get_domain_generic_chip(domain, 0);
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);

	/*
	 * Enable interrupt on AIC5. We always take the lock of the
	 * first irq chip as all chips share the same registers.
	 */
	irq_gc_lock(bgc);
	irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR);
	irq_reg_writel(gc, 1, AT91_AIC5_IECR);
	gc->mask_cache |= d->mask;
	irq_gc_unlock(bgc);
}

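/*
 * Retrigger by setting the interrupt from software: select the source via
 * SSR and write the Interrupt Set Command Register (ISCR).
 */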
static int aic5_retrigger(struct irq_data *d)
{
	struct irq_domain *domain = d->domain;
	struct irq_chip_generic *bgc = irq_get_domain_generic_chip(domain, 0);

	/* Set the interrupt from software on AIC5 */
	irq_gc_lock(bgc);
	irq_reg_writel(bgc, d->hwirq, AT91_AIC5_SSR);
	irq_reg_writel(bgc, 1, AT91_AIC5_ISCR);
	irq_gc_unlock(bgc);

	return 0;
}

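/*
 * Changing the trigger type is a read-modify-write of the selected source's
 * SMR: aic_common_set_type() (in irq-atmel-aic-common.c) validates the
 * requested type and updates the source-type bits in the SMR value, which
 * is only written back on success.
 */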
static int aic5_set_type(struct irq_data *d, unsigned type)
{
	struct irq_domain *domain = d->domain;
	struct irq_chip_generic *bgc = irq_get_domain_generic_chip(domain, 0);
	unsigned int smr;
	int ret;

	irq_gc_lock(bgc);
	irq_reg_writel(bgc, d->hwirq, AT91_AIC5_SSR);
	smr = irq_reg_readl(bgc, AT91_AIC5_SMR);
	ret = aic_common_set_type(d, type, &smr);
	if (!ret)
		irq_reg_writel(bgc, smr, AT91_AIC5_SMR);
	irq_gc_unlock(bgc);

	return ret;
}

#ifdef CONFIG_PM
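/*
 * Optional cache of the per-source SMR values. It is only allocated for the
 * sama5d2 (see sama5d2_aic5_of_init() below), presumably because that SoC's
 * deepest suspend states can power the AIC off, in which case the source
 * mode and vector registers have to be reprogrammed on resume.
 */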
static u32 *smr_cache;

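/*
 * On suspend, save the SMR values if a cache is in use, then walk this
 * chip's sources and reconfigure the enable state for wakeup: sources
 * flagged in wake_active are enabled, everything else is disabled, and
 * sources whose state already matches are skipped.
 */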
static void aic5_suspend(struct irq_data *d)
{
	struct irq_domain *domain = d->domain;
	struct irq_domain_chip_generic *dgc = domain->gc;
	struct irq_chip_generic *bgc = irq_get_domain_generic_chip(domain, 0);
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	int i;
	u32 mask;

	if (smr_cache)
		for (i = 0; i < domain->revmap_size; i++) {
			irq_reg_writel(bgc, i, AT91_AIC5_SSR);
			smr_cache[i] = irq_reg_readl(bgc, AT91_AIC5_SMR);
		}

	irq_gc_lock(bgc);
	for (i = 0; i < dgc->irqs_per_chip; i++) {
		mask = 1 << i;
		if ((mask & gc->mask_cache) == (mask & gc->wake_active))
			continue;

		irq_reg_writel(bgc, i + gc->irq_base, AT91_AIC5_SSR);
		if (mask & gc->wake_active)
			irq_reg_writel(bgc, 1, AT91_AIC5_IECR);
		else
			irq_reg_writel(bgc, 1, AT91_AIC5_IDCR);
	}
	irq_gc_unlock(bgc);
}

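/*
 * On resume, first reprogram SPU, SVR and SMR from the cache when one is in
 * use (the registers may have been lost while the controller was powered
 * off), then restore each source's enable state from mask_cache. Without a
 * cache, sources whose wakeup configuration already matches the mask are
 * skipped, mirroring the suspend path above.
 */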
static void aic5_resume(struct irq_data *d)
{
	struct irq_domain *domain = d->domain;
	struct irq_domain_chip_generic *dgc = domain->gc;
	struct irq_chip_generic *bgc = irq_get_domain_generic_chip(domain, 0);
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	int i;
	u32 mask;

	irq_gc_lock(bgc);

	if (smr_cache) {
		irq_reg_writel(bgc, 0xffffffff, AT91_AIC5_SPU);
		for (i = 0; i < domain->revmap_size; i++) {
			irq_reg_writel(bgc, i, AT91_AIC5_SSR);
			irq_reg_writel(bgc, i, AT91_AIC5_SVR);
			irq_reg_writel(bgc, smr_cache[i], AT91_AIC5_SMR);
		}
	}

	for (i = 0; i < dgc->irqs_per_chip; i++) {
		mask = 1 << i;

		if (!smr_cache &&
		    ((mask & gc->mask_cache) == (mask & gc->wake_active)))
			continue;

		irq_reg_writel(bgc, i + gc->irq_base, AT91_AIC5_SSR);
		if (mask & gc->mask_cache)
			irq_reg_writel(bgc, 1, AT91_AIC5_IECR);
		else
			irq_reg_writel(bgc, 1, AT91_AIC5_IDCR);
	}
	irq_gc_unlock(bgc);
}

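/*
 * Power-management shutdown: disable and clear every source handled by this
 * chip so nothing fires across the shutdown path.
 */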
static void aic5_pm_shutdown(struct irq_data *d)
{
	struct irq_domain *domain = d->domain;
	struct irq_domain_chip_generic *dgc = domain->gc;
	struct irq_chip_generic *bgc = irq_get_domain_generic_chip(domain, 0);
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	int i;

	irq_gc_lock(bgc);
	for (i = 0; i < dgc->irqs_per_chip; i++) {
		irq_reg_writel(bgc, i + gc->irq_base, AT91_AIC5_SSR);
		irq_reg_writel(bgc, 1, AT91_AIC5_IDCR);
		irq_reg_writel(bgc, 1, AT91_AIC5_ICCR);
	}
	irq_gc_unlock(bgc);
}
#else
#define aic5_suspend		NULL
#define aic5_resume		NULL
#define aic5_pm_shutdown	NULL
#endif /* CONFIG_PM */

static void __init aic5_hw_init(struct irq_domain *domain)
{
	struct irq_chip_generic *gc = irq_get_domain_generic_chip(domain, 0);
	int i;

	/*
	 * Perform 8 End Of Interrupt commands to make sure the AIC
	 * does not lock out nIRQ
	 */
	for (i = 0; i < 8; i++)
		irq_reg_writel(gc, 0, AT91_AIC5_EOICR);

	/*
	 * Set the Spurious Interrupt ID in the Spurious Vector Register.
	 * When there is no pending interrupt, the Interrupt Vector Register
	 * returns the value stored in AIC_SPU.
	 */
	irq_reg_writel(gc, 0xffffffff, AT91_AIC5_SPU);

	/* No debugging in AIC: Debug (Protect) Control Register */
	irq_reg_writel(gc, 0, AT91_AIC5_DCR);

	/* Disable and clear all interrupts initially */
	for (i = 0; i < domain->revmap_size; i++) {
		irq_reg_writel(gc, i, AT91_AIC5_SSR);
		irq_reg_writel(gc, i, AT91_AIC5_SVR);
		irq_reg_writel(gc, 1, AT91_AIC5_IDCR);
		irq_reg_writel(gc, 1, AT91_AIC5_ICCR);
	}
}

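/*
 * Translate a three-cell DT interrupt specifier. The first two cells
 * (interrupt number and trigger flags) are handled by
 * aic_common_irq_domain_xlate(); the third cell carries the interrupt
 * priority, which is folded into the selected source's SMR here while the
 * chip lock is held.
 */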
static int aic5_irq_domain_xlate(struct irq_domain *d,
				 struct device_node *ctrlr,
				 const u32 *intspec, unsigned int intsize,
				 irq_hw_number_t *out_hwirq,
				 unsigned int *out_type)
{
	struct irq_chip_generic *bgc = irq_get_domain_generic_chip(d, 0);
	unsigned long flags;
	unsigned smr;
	int ret;

	if (!bgc)
		return -EINVAL;

	ret = aic_common_irq_domain_xlate(d, ctrlr, intspec, intsize,
					  out_hwirq, out_type);
	if (ret)
		return ret;

	irq_gc_lock_irqsave(bgc, flags);
	irq_reg_writel(bgc, *out_hwirq, AT91_AIC5_SSR);
	smr = irq_reg_readl(bgc, AT91_AIC5_SMR);
	aic_common_set_priority(intspec[2], &smr);
	irq_reg_writel(bgc, smr, AT91_AIC5_SMR);
	irq_gc_unlock_irqrestore(bgc, flags);

	return ret;
}

static const struct irq_domain_ops aic5_irq_ops = {
	.map	= irq_map_generic_chip,
	.xlate	= aic5_irq_domain_xlate,
};

static void __init sama5d3_aic_irq_fixup(void)
{
	aic_common_rtc_irq_fixup();
}

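/*
 * Per-SoC fixups, matched against the machine's root compatible by the
 * common init code (see irq-atmel-aic-common.c). The RTC fixup lives in
 * that common code as well; it presumably quiesces RTC interrupt sources
 * that firmware may have left enabled before the AIC is initialized.
 */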
static const struct of_device_id aic5_irq_fixups[] __initconst = {
	{ .compatible = "atmel,sama5d3", .data = sama5d3_aic_irq_fixup },
	{ .compatible = "atmel,sama5d4", .data = sama5d3_aic_irq_fixup },
	{ .compatible = "microchip,sam9x60", .data = sama5d3_aic_irq_fixup },
	{ /* sentinel */ },
};

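/*
 * Common init path for all AIC5 variants: build the irq domain and its
 * generic chips through aic_common_of_init() (one generic chip per 32
 * interrupt lines), hook up the AIC5-specific callbacks on each chip,
 * reset the controller to a known state and install the top-level handler.
 */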
static int __init aic5_of_init(struct device_node *node,
			       struct device_node *parent,
			       int nirqs)
{
	struct irq_chip_generic *gc;
	struct irq_domain *domain;
	int nchips;
	int i;

	if (nirqs > NR_AIC5_IRQS)
		return -EINVAL;

	if (aic5_domain)
		return -EEXIST;

	domain = aic_common_of_init(node, &aic5_irq_ops, "atmel-aic5",
				    nirqs, aic5_irq_fixups);
	if (IS_ERR(domain))
		return PTR_ERR(domain);

	aic5_domain = domain;
	nchips = aic5_domain->revmap_size / 32;
	for (i = 0; i < nchips; i++) {
		gc = irq_get_domain_generic_chip(domain, i * 32);

		gc->chip_types[0].regs.eoi = AT91_AIC5_EOICR;
		gc->chip_types[0].chip.irq_mask = aic5_mask;
		gc->chip_types[0].chip.irq_unmask = aic5_unmask;
		gc->chip_types[0].chip.irq_retrigger = aic5_retrigger;
		gc->chip_types[0].chip.irq_set_type = aic5_set_type;
		gc->chip_types[0].chip.irq_suspend = aic5_suspend;
		gc->chip_types[0].chip.irq_resume = aic5_resume;
		gc->chip_types[0].chip.irq_pm_shutdown = aic5_pm_shutdown;
	}

	aic5_hw_init(domain);
	set_handle_irq(aic5_handle);

	return 0;
}

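/*
 * SoC-specific entry points. Each one only differs by the number of
 * interrupt lines it passes to aic5_of_init(). The sama5d2 variant
 * additionally allocates smr_cache, rounded up to a whole number of
 * 32-interrupt chips, to support the deeper suspend states described above.
 */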
#define NR_SAMA5D2_IRQS		77

static int __init sama5d2_aic5_of_init(struct device_node *node,
				       struct device_node *parent)
{
#ifdef CONFIG_PM
	smr_cache = kcalloc(DIV_ROUND_UP(NR_SAMA5D2_IRQS, 32) * 32,
			    sizeof(*smr_cache), GFP_KERNEL);
	if (!smr_cache)
		return -ENOMEM;
#endif

	return aic5_of_init(node, parent, NR_SAMA5D2_IRQS);
}
IRQCHIP_DECLARE(sama5d2_aic5, "atmel,sama5d2-aic", sama5d2_aic5_of_init);

#define NR_SAMA5D3_IRQS		48

static int __init sama5d3_aic5_of_init(struct device_node *node,
				       struct device_node *parent)
{
	return aic5_of_init(node, parent, NR_SAMA5D3_IRQS);
}
IRQCHIP_DECLARE(sama5d3_aic5, "atmel,sama5d3-aic", sama5d3_aic5_of_init);

#define NR_SAMA5D4_IRQS		68

static int __init sama5d4_aic5_of_init(struct device_node *node,
				       struct device_node *parent)
{
	return aic5_of_init(node, parent, NR_SAMA5D4_IRQS);
}
IRQCHIP_DECLARE(sama5d4_aic5, "atmel,sama5d4-aic", sama5d4_aic5_of_init);

#define NR_SAM9X60_IRQS		50

static int __init sam9x60_aic5_of_init(struct device_node *node,
				       struct device_node *parent)
{
	return aic5_of_init(node, parent, NR_SAM9X60_IRQS);
}
IRQCHIP_DECLARE(sam9x60_aic5, "microchip,sam9x60-aic", sam9x60_aic5_of_init);