xref: /openbmc/linux/drivers/irqchip/spear-shirq.c (revision f37ecbce)
/*
 * SPEAr platform shared irq layer source file
 *
 * Copyright (C) 2009-2012 ST Microelectronics
 * Viresh Kumar <viresh.linux@gmail.com>
 *
 * Copyright (C) 2012 ST Microelectronics
 * Shiraz Hashim <shiraz.linux.kernel@gmail.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/err.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/spinlock.h>

#include "irqchip.h"

/*
 * struct shirq_regs: shared irq register configuration
 *
 * enb_reg: enable register offset, or -1 if the block has no enable register
 * reset_to_enb: when 1, the enable bit must be cleared to enable the interrupt
 * status_reg: status register offset
 * clear_reg: clear register offset, or -1 if the block has no clear register
 * reset_to_clear: when 1, the bit must be cleared (not set) to clear the interrupt
 */
struct shirq_regs {
	u32 enb_reg;
	u32 reset_to_enb;
	u32 status_reg;
	u32 clear_reg;
	u32 reset_to_clear;
};

/*
 * struct spear_shirq: shared irq structure
 *
 * irq_base: base irq number in the linux domain
 * irq_nr: number of shared interrupts in this block
 * irq_bit_off: starting bit offset in the status register
 * invalid_irq: when set, this irq group is disabled and is not registered
 * base: base address of the shared irq registers
 * regs: register configuration for this shared irq block
 */
struct spear_shirq {
	u32 irq_base;
	u32 irq_nr;
	u32 irq_bit_off;
	int invalid_irq;
	void __iomem *base;
	struct shirq_regs regs;
};

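/* Serializes read-modify-write updates of the shared interrupt enable register */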
static DEFINE_SPINLOCK(lock);

/* spear300 shared irq registers offsets and masks */
#define SPEAR300_INT_ENB_MASK_REG	0x54
#define SPEAR300_INT_STS_MASK_REG	0x58

static struct spear_shirq spear300_shirq_ras1 = {
	.irq_nr = 9,
	.irq_bit_off = 0,
	.regs = {
		.enb_reg = SPEAR300_INT_ENB_MASK_REG,
		.status_reg = SPEAR300_INT_STS_MASK_REG,
		.clear_reg = -1,
	},
};

static struct spear_shirq *spear300_shirq_blocks[] = {
	&spear300_shirq_ras1,
};

/* spear310 shared irq registers offsets and masks */
#define SPEAR310_INT_STS_MASK_REG	0x04

static struct spear_shirq spear310_shirq_ras1 = {
	.irq_nr = 8,
	.irq_bit_off = 0,
	.regs = {
		.enb_reg = -1,
		.status_reg = SPEAR310_INT_STS_MASK_REG,
		.clear_reg = -1,
	},
};

static struct spear_shirq spear310_shirq_ras2 = {
	.irq_nr = 5,
	.irq_bit_off = 8,
	.regs = {
		.enb_reg = -1,
		.status_reg = SPEAR310_INT_STS_MASK_REG,
		.clear_reg = -1,
	},
};

static struct spear_shirq spear310_shirq_ras3 = {
	.irq_nr = 1,
	.irq_bit_off = 13,
	.regs = {
		.enb_reg = -1,
		.status_reg = SPEAR310_INT_STS_MASK_REG,
		.clear_reg = -1,
	},
};

static struct spear_shirq spear310_shirq_intrcomm_ras = {
	.irq_nr = 3,
	.irq_bit_off = 14,
	.regs = {
		.enb_reg = -1,
		.status_reg = SPEAR310_INT_STS_MASK_REG,
		.clear_reg = -1,
	},
};

static struct spear_shirq *spear310_shirq_blocks[] = {
	&spear310_shirq_ras1,
	&spear310_shirq_ras2,
	&spear310_shirq_ras3,
	&spear310_shirq_intrcomm_ras,
};

/* spear320 shared irq registers offsets and masks */
#define SPEAR320_INT_STS_MASK_REG		0x04
#define SPEAR320_INT_CLR_MASK_REG		0x04
#define SPEAR320_INT_ENB_MASK_REG		0x08

static struct spear_shirq spear320_shirq_ras1 = {
	.irq_nr = 3,
	.irq_bit_off = 7,
	.regs = {
		.enb_reg = -1,
		.status_reg = SPEAR320_INT_STS_MASK_REG,
		.clear_reg = SPEAR320_INT_CLR_MASK_REG,
		.reset_to_clear = 1,
	},
};

static struct spear_shirq spear320_shirq_ras2 = {
	.irq_nr = 1,
	.irq_bit_off = 10,
	.regs = {
		.enb_reg = -1,
		.status_reg = SPEAR320_INT_STS_MASK_REG,
		.clear_reg = SPEAR320_INT_CLR_MASK_REG,
		.reset_to_clear = 1,
	},
};

static struct spear_shirq spear320_shirq_ras3 = {
	.irq_nr = 7,
	.irq_bit_off = 0,
	.invalid_irq = 1,
	.regs = {
		.enb_reg = SPEAR320_INT_ENB_MASK_REG,
		.reset_to_enb = 1,
		.status_reg = SPEAR320_INT_STS_MASK_REG,
		.clear_reg = SPEAR320_INT_CLR_MASK_REG,
		.reset_to_clear = 1,
	},
};

static struct spear_shirq spear320_shirq_intrcomm_ras = {
	.irq_nr = 11,
	.irq_bit_off = 11,
	.regs = {
		.enb_reg = -1,
		.status_reg = SPEAR320_INT_STS_MASK_REG,
		.clear_reg = SPEAR320_INT_CLR_MASK_REG,
		.reset_to_clear = 1,
	},
};

static struct spear_shirq *spear320_shirq_blocks[] = {
	&spear320_shirq_ras3,
	&spear320_shirq_ras1,
	&spear320_shirq_ras2,
	&spear320_shirq_intrcomm_ras,
};

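/*
 * Each child interrupt of a block occupies one bit in the shared enable
 * register, starting at irq_bit_off. The reset_to_enb flag inverts the
 * polarity: when it is set, masking sets the bit and unmasking clears it.
 */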
static void shirq_irq_mask_unmask(struct irq_data *d, bool mask)
{
	struct spear_shirq *shirq = irq_data_get_irq_chip_data(d);
	u32 val, offset = d->irq - shirq->irq_base;
	unsigned long flags;

	if (shirq->regs.enb_reg == -1)
		return;

	spin_lock_irqsave(&lock, flags);
	val = readl(shirq->base + shirq->regs.enb_reg);

	if (mask ^ shirq->regs.reset_to_enb)
		val &= ~(0x1 << shirq->irq_bit_off << offset);
	else
		val |= 0x1 << shirq->irq_bit_off << offset;

	writel(val, shirq->base + shirq->regs.enb_reg);
	spin_unlock_irqrestore(&lock, flags);
}

static void shirq_irq_mask(struct irq_data *d)
{
	shirq_irq_mask_unmask(d, true);
}

static void shirq_irq_unmask(struct irq_data *d)
{
	shirq_irq_mask_unmask(d, false);
}

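/*
 * The ack callback only masks the child interrupt; the status bit itself is
 * cleared in the chained handler via the block's clear register, if present.
 */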
static struct irq_chip shirq_chip = {
	.name		= "spear-shirq",
	.irq_ack	= shirq_irq_mask,
	.irq_mask	= shirq_irq_mask,
	.irq_unmask	= shirq_irq_unmask,
};

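/*
 * Chained handler for the parent interrupt: ack the parent at its chip, scan
 * the pending bits of this block in the status register, dispatch each child
 * irq via generic_handle_irq() and, where the block has a clear register,
 * clear the corresponding status bit. Finally unmask the parent again.
 */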
static void shirq_handler(unsigned irq, struct irq_desc *desc)
{
	u32 i, j, val, mask, tmp;
	struct irq_chip *chip;
	struct spear_shirq *shirq = irq_get_handler_data(irq);

	chip = irq_get_chip(irq);
	chip->irq_ack(&desc->irq_data);

	mask = ((0x1 << shirq->irq_nr) - 1) << shirq->irq_bit_off;
	while ((val = readl(shirq->base + shirq->regs.status_reg) &
				mask)) {

		val >>= shirq->irq_bit_off;
		for (i = 0, j = 1; i < shirq->irq_nr; i++, j <<= 1) {

			if (!(j & val))
				continue;

			generic_handle_irq(shirq->irq_base + i);

			/* clear interrupt */
			if (shirq->regs.clear_reg == -1)
				continue;

			tmp = readl(shirq->base + shirq->regs.clear_reg);
			if (shirq->regs.reset_to_clear)
				tmp &= ~(j << shirq->irq_bit_off);
			else
				tmp |= (j << shirq->irq_bit_off);
			writel(tmp, shirq->base + shirq->regs.clear_reg);
		}
	}
	chip->irq_unmask(&desc->irq_data);
}

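/*
 * Wire up one shared irq block: install the chained handler on its parent
 * interrupt and register every child irq with the shirq chip and a
 * simple-irq flow. Blocks flagged invalid_irq are skipped.
 */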
static void __init spear_shirq_register(struct spear_shirq *shirq,
					int parent_irq)
{
	int i;

	if (shirq->invalid_irq)
		return;

	irq_set_chained_handler(parent_irq, shirq_handler);
	irq_set_handler_data(parent_irq, shirq);

	for (i = 0; i < shirq->irq_nr; i++) {
		irq_set_chip_and_handler(shirq->irq_base + i,
					 &shirq_chip, handle_simple_irq);
		set_irq_flags(shirq->irq_base + i, IRQF_VALID);
		irq_set_chip_data(shirq->irq_base + i, shirq);
	}
}

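/*
 * Common init: map the shared irq registers, allocate linux irq descriptors
 * for the child interrupts of all blocks, create a legacy irq domain on top
 * of them, then hook each block to its parent interrupt, which is taken from
 * the i-th interrupts entry of the device tree node.
 */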
static int __init shirq_init(struct spear_shirq **shirq_blocks, int block_nr,
		struct device_node *np)
{
	int i, parent_irq, irq_base, hwirq = 0, irq_nr = 0;
	struct irq_domain *shirq_domain;
	void __iomem *base;

	base = of_iomap(np, 0);
	if (!base) {
		pr_err("%s: failed to map shirq registers\n", __func__);
		return -ENXIO;
	}

	for (i = 0; i < block_nr; i++)
		irq_nr += shirq_blocks[i]->irq_nr;

	irq_base = irq_alloc_descs(-1, 0, irq_nr, 0);
	if (IS_ERR_VALUE(irq_base)) {
		pr_err("%s: irq desc alloc failed\n", __func__);
		goto err_unmap;
	}

	shirq_domain = irq_domain_add_legacy(np, irq_nr, irq_base, 0,
			&irq_domain_simple_ops, NULL);
	if (WARN_ON(!shirq_domain)) {
		pr_warn("%s: irq domain init failed\n", __func__);
		goto err_free_desc;
	}

	for (i = 0; i < block_nr; i++) {
		shirq_blocks[i]->base = base;
		shirq_blocks[i]->irq_base = irq_find_mapping(shirq_domain,
				hwirq);

		parent_irq = irq_of_parse_and_map(np, i);
		spear_shirq_register(shirq_blocks[i], parent_irq);
		hwirq += shirq_blocks[i]->irq_nr;
	}

	return 0;

err_free_desc:
	irq_free_descs(irq_base, irq_nr);
err_unmap:
	iounmap(base);
	return -ENXIO;
}

static int __init spear300_shirq_of_init(struct device_node *np,
					 struct device_node *parent)
{
	return shirq_init(spear300_shirq_blocks,
			ARRAY_SIZE(spear300_shirq_blocks), np);
}
IRQCHIP_DECLARE(spear300_shirq, "st,spear300-shirq", spear300_shirq_of_init);

static int __init spear310_shirq_of_init(struct device_node *np,
					 struct device_node *parent)
{
	return shirq_init(spear310_shirq_blocks,
			ARRAY_SIZE(spear310_shirq_blocks), np);
}
IRQCHIP_DECLARE(spear310_shirq, "st,spear310-shirq", spear310_shirq_of_init);

static int __init spear320_shirq_of_init(struct device_node *np,
					 struct device_node *parent)
{
	return shirq_init(spear320_shirq_blocks,
			ARRAY_SIZE(spear320_shirq_blocks), np);
}
IRQCHIP_DECLARE(spear320_shirq, "st,spear320-shirq", spear320_shirq_of_init);