xref: /openbmc/linux/drivers/irqchip/spear-shirq.c (revision df1590d9)
/*
 * SPEAr platform shared irq layer source file
 *
 * Copyright (C) 2009-2012 ST Microelectronics
 * Viresh Kumar <viresh.linux@gmail.com>
 *
 * Copyright (C) 2012 ST Microelectronics
 * Shiraz Hashim <shiraz.hashim@st.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/err.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/irqchip/spear-shirq.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(lock);

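/*
 * Each SPEAr SoC routes a group of "shared" peripheral interrupts through a
 * single parent interrupt line. A struct spear_shirq describes one such
 * block: how many interrupts it multiplexes (irq_nr), where those bits start
 * in the shared registers (irq_bit_off), and the register layout used to
 * mask, read and clear them (regs). The per-SoC tables below encode the RAS
 * and intrcomm blocks for spear300, spear310 and spear320.
 */
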
/* spear300 shared irq registers offsets and masks */
#define SPEAR300_INT_ENB_MASK_REG	0x54
#define SPEAR300_INT_STS_MASK_REG	0x58

static struct spear_shirq spear300_shirq_ras1 = {
	.irq_nr = 9,
	.irq_bit_off = 0,
	.regs = {
		.enb_reg = SPEAR300_INT_ENB_MASK_REG,
		.status_reg = SPEAR300_INT_STS_MASK_REG,
		.clear_reg = -1,
	},
};

static struct spear_shirq *spear300_shirq_blocks[] = {
	&spear300_shirq_ras1,
};

/* spear310 shared irq registers offsets and masks */
#define SPEAR310_INT_STS_MASK_REG	0x04

static struct spear_shirq spear310_shirq_ras1 = {
	.irq_nr = 8,
	.irq_bit_off = 0,
	.regs = {
		.enb_reg = -1,
		.status_reg = SPEAR310_INT_STS_MASK_REG,
		.clear_reg = -1,
	},
};

static struct spear_shirq spear310_shirq_ras2 = {
	.irq_nr = 5,
	.irq_bit_off = 8,
	.regs = {
		.enb_reg = -1,
		.status_reg = SPEAR310_INT_STS_MASK_REG,
		.clear_reg = -1,
	},
};

static struct spear_shirq spear310_shirq_ras3 = {
	.irq_nr = 1,
	.irq_bit_off = 13,
	.regs = {
		.enb_reg = -1,
		.status_reg = SPEAR310_INT_STS_MASK_REG,
		.clear_reg = -1,
	},
};

static struct spear_shirq spear310_shirq_intrcomm_ras = {
	.irq_nr = 3,
	.irq_bit_off = 14,
	.regs = {
		.enb_reg = -1,
		.status_reg = SPEAR310_INT_STS_MASK_REG,
		.clear_reg = -1,
	},
};

static struct spear_shirq *spear310_shirq_blocks[] = {
	&spear310_shirq_ras1,
	&spear310_shirq_ras2,
	&spear310_shirq_ras3,
	&spear310_shirq_intrcomm_ras,
};

/* spear320 shared irq registers offsets and masks */
#define SPEAR320_INT_STS_MASK_REG		0x04
#define SPEAR320_INT_CLR_MASK_REG		0x04
#define SPEAR320_INT_ENB_MASK_REG		0x08

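/*
 * On spear320 the status and clear masks alias the same register (offset
 * 0x04): a pending bit is cleared by writing it back as 0, which is what
 * reset_to_clear = 1 in the blocks below selects.
 */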
static struct spear_shirq spear320_shirq_ras1 = {
	.irq_nr = 3,
	.irq_bit_off = 7,
	.regs = {
		.enb_reg = -1,
		.status_reg = SPEAR320_INT_STS_MASK_REG,
		.clear_reg = SPEAR320_INT_CLR_MASK_REG,
		.reset_to_clear = 1,
	},
};

static struct spear_shirq spear320_shirq_ras2 = {
	.irq_nr = 1,
	.irq_bit_off = 10,
	.regs = {
		.enb_reg = -1,
		.status_reg = SPEAR320_INT_STS_MASK_REG,
		.clear_reg = SPEAR320_INT_CLR_MASK_REG,
		.reset_to_clear = 1,
	},
};

static struct spear_shirq spear320_shirq_ras3 = {
	.irq_nr = 3,
	.irq_bit_off = 0,
	.invalid_irq = 1,
	.regs = {
		.enb_reg = SPEAR320_INT_ENB_MASK_REG,
		.reset_to_enb = 1,
		.status_reg = SPEAR320_INT_STS_MASK_REG,
		.clear_reg = SPEAR320_INT_CLR_MASK_REG,
		.reset_to_clear = 1,
	},
};

static struct spear_shirq spear320_shirq_intrcomm_ras = {
	.irq_nr = 11,
	.irq_bit_off = 11,
	.regs = {
		.enb_reg = -1,
		.status_reg = SPEAR320_INT_STS_MASK_REG,
		.clear_reg = SPEAR320_INT_CLR_MASK_REG,
		.reset_to_clear = 1,
	},
};

static struct spear_shirq *spear320_shirq_blocks[] = {
	&spear320_shirq_ras3,
	&spear320_shirq_ras1,
	&spear320_shirq_ras2,
	&spear320_shirq_intrcomm_ras,
};

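/*
 * Mask or unmask one multiplexed interrupt by updating its bit in the
 * block's enable register under the shared spinlock. The bit for a child
 * is (irq_bit_off + (d->irq - irq_base)), e.g. the third interrupt
 * (offset 2) of a block with irq_bit_off = 8 is bit 10. Blocks without an
 * enable register (enb_reg == -1) cannot be masked individually. When
 * reset_to_enb is set the enable register has inverted polarity (a clear
 * bit enables the interrupt), so the mask/unmask sense is flipped.
 */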
static void shirq_irq_mask_unmask(struct irq_data *d, bool mask)
{
	struct spear_shirq *shirq = irq_data_get_irq_chip_data(d);
	u32 val, offset = d->irq - shirq->irq_base;
	unsigned long flags;

	if (shirq->regs.enb_reg == -1)
		return;

	spin_lock_irqsave(&lock, flags);
	val = readl(shirq->base + shirq->regs.enb_reg);

	if (mask ^ shirq->regs.reset_to_enb)
		val &= ~(0x1 << shirq->irq_bit_off << offset);
	else
		val |= 0x1 << shirq->irq_bit_off << offset;

	writel(val, shirq->base + shirq->regs.enb_reg);
	spin_unlock_irqrestore(&lock, flags);
}

static void shirq_irq_mask(struct irq_data *d)
{
	shirq_irq_mask_unmask(d, 1);
}

static void shirq_irq_unmask(struct irq_data *d)
{
	shirq_irq_mask_unmask(d, 0);
}

static struct irq_chip shirq_chip = {
	.name		= "spear-shirq",
	.irq_ack	= shirq_irq_mask,
	.irq_mask	= shirq_irq_mask,
	.irq_unmask	= shirq_irq_unmask,
};

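/*
 * Chained handler for the parent interrupt. Acknowledge the parent, then
 * keep reading the block's status register and dispatch every pending
 * child via generic_handle_irq() until no bits within the block's window
 * (irq_nr bits starting at irq_bit_off) remain set. Blocks with a clear
 * register get the serviced bit cleared explicitly, honouring
 * reset_to_clear; finally the parent line is unmasked again.
 */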
static void shirq_handler(unsigned irq, struct irq_desc *desc)
{
	u32 i, j, val, mask, tmp;
	struct irq_chip *chip;
	struct spear_shirq *shirq = irq_get_handler_data(irq);

	chip = irq_get_chip(irq);
	chip->irq_ack(&desc->irq_data);

	mask = ((0x1 << shirq->irq_nr) - 1) << shirq->irq_bit_off;
	while ((val = readl(shirq->base + shirq->regs.status_reg) &
				mask)) {

		val >>= shirq->irq_bit_off;
		for (i = 0, j = 1; i < shirq->irq_nr; i++, j <<= 1) {

			if (!(j & val))
				continue;

			generic_handle_irq(shirq->irq_base + i);

			/* clear interrupt */
			if (shirq->regs.clear_reg == -1)
				continue;

			tmp = readl(shirq->base + shirq->regs.clear_reg);
			if (shirq->regs.reset_to_clear)
				tmp &= ~(j << shirq->irq_bit_off);
			else
				tmp |= (j << shirq->irq_bit_off);
			writel(tmp, shirq->base + shirq->regs.clear_reg);
		}
	}
	chip->irq_unmask(&desc->irq_data);
}

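/*
 * Wire up one block: install shirq_handler as the chained handler on the
 * parent interrupt and give every child descriptor the shirq irq_chip, a
 * simple flow handler and a pointer back to its block.
 */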
static void __init spear_shirq_register(struct spear_shirq *shirq)
{
	int i;

	if (shirq->invalid_irq)
		return;

	irq_set_chained_handler(shirq->irq, shirq_handler);
	for (i = 0; i < shirq->irq_nr; i++) {
		irq_set_chip_and_handler(shirq->irq_base + i,
					 &shirq_chip, handle_simple_irq);
		set_irq_flags(shirq->irq_base + i, IRQF_VALID);
		irq_set_chip_data(shirq->irq_base + i, shirq);
	}

	irq_set_handler_data(shirq->irq, shirq);
}

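/*
 * Common init: map the shared-irq register block from the device tree
 * node, allocate one linux irq descriptor per multiplexed interrupt,
 * cover them with a legacy irq domain, then register each block against
 * its parent interrupt (one "interrupts" entry per block in the node).
 */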
static int __init shirq_init(struct spear_shirq **shirq_blocks, int block_nr,
		struct device_node *np)
{
	int i, irq_base, hwirq = 0, irq_nr = 0;
	static struct irq_domain *shirq_domain;
	void __iomem *base;

	base = of_iomap(np, 0);
	if (!base) {
		pr_err("%s: failed to map shirq registers\n", __func__);
		return -ENXIO;
	}

	for (i = 0; i < block_nr; i++)
		irq_nr += shirq_blocks[i]->irq_nr;

	irq_base = irq_alloc_descs(-1, 0, irq_nr, 0);
	if (IS_ERR_VALUE(irq_base)) {
		pr_err("%s: irq desc alloc failed\n", __func__);
		goto err_unmap;
	}

	shirq_domain = irq_domain_add_legacy(np, irq_nr, irq_base, 0,
			&irq_domain_simple_ops, NULL);
	if (WARN_ON(!shirq_domain)) {
		pr_warn("%s: irq domain init failed\n", __func__);
		goto err_free_desc;
	}

	for (i = 0; i < block_nr; i++) {
		shirq_blocks[i]->base = base;
		shirq_blocks[i]->irq_base = irq_find_mapping(shirq_domain,
				hwirq);
		shirq_blocks[i]->irq = irq_of_parse_and_map(np, i);

		spear_shirq_register(shirq_blocks[i]);
		hwirq += shirq_blocks[i]->irq_nr;
	}

	return 0;

err_free_desc:
	irq_free_descs(irq_base, irq_nr);
err_unmap:
	iounmap(base);
	return -ENXIO;
}

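/*
 * Per-SoC entry points, each called with its matching device tree node;
 * they simply hand the SoC's block table to the common init above.
 */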
int __init spear300_shirq_of_init(struct device_node *np,
		struct device_node *parent)
{
	return shirq_init(spear300_shirq_blocks,
			ARRAY_SIZE(spear300_shirq_blocks), np);
}

int __init spear310_shirq_of_init(struct device_node *np,
		struct device_node *parent)
{
	return shirq_init(spear310_shirq_blocks,
			ARRAY_SIZE(spear310_shirq_blocks), np);
}

int __init spear320_shirq_of_init(struct device_node *np,
		struct device_node *parent)
{
	return shirq_init(spear320_shirq_blocks,
			ARRAY_SIZE(spear320_shirq_blocks), np);
}