xref: /openbmc/linux/arch/powerpc/platforms/4xx/uic.c (revision 2bdd5238)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * arch/powerpc/platforms/4xx/uic.c
 *
 * IBM PowerPC 4xx Universal Interrupt Controller
 *
 * Copyright 2007 David Gibson <dwg@au1.ibm.com>, IBM Corporation.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/device.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/dcr.h>

#define NR_UIC_INTS	32

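/*
 * UIC registers are DCRs; the offsets below are added to each
 * controller's DCR base (from its "dcr-reg" property) in the
 * mtdcr()/mfdcr() accesses throughout this file.
 */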
#define UIC_SR		0x0
#define UIC_ER		0x2
#define UIC_CR		0x3
#define UIC_PR		0x4
#define UIC_TR		0x5
#define UIC_MSR		0x6
#define UIC_VR		0x7
#define UIC_VCR		0x8

struct uic *primary_uic;

struct uic {
	int index;		/* cell-index from the device tree */
	int dcrbase;		/* DCR base from the dcr-reg property */

	raw_spinlock_t lock;	/* protects RMW updates of the UIC DCRs */

	/* The remapper for this UIC */
	struct irq_domain	*irqhost;
};

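/*
 * Enable a source in UIC_ER.  Level interrupts are also acked here
 * rather than in uic_mask_ack_irq(); see the comment there.
 */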
static void uic_unmask_irq(struct irq_data *d)
{
	struct uic *uic = irq_data_get_irq_chip_data(d);
	unsigned int src = irqd_to_hwirq(d);
	unsigned long flags;
	u32 er, sr;

	sr = 1 << (31-src);
	raw_spin_lock_irqsave(&uic->lock, flags);
	/* ack level-triggered interrupts here */
	if (irqd_is_level_type(d))
		mtdcr(uic->dcrbase + UIC_SR, sr);
	er = mfdcr(uic->dcrbase + UIC_ER);
	er |= sr;
	mtdcr(uic->dcrbase + UIC_ER, er);
	raw_spin_unlock_irqrestore(&uic->lock, flags);
}

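/* Disable a source by clearing its bit in UIC_ER. */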
static void uic_mask_irq(struct irq_data *d)
{
	struct uic *uic = irq_data_get_irq_chip_data(d);
	unsigned int src = irqd_to_hwirq(d);
	unsigned long flags;
	u32 er;

	raw_spin_lock_irqsave(&uic->lock, flags);
	er = mfdcr(uic->dcrbase + UIC_ER);
	er &= ~(1 << (31 - src));
	mtdcr(uic->dcrbase + UIC_ER, er);
	raw_spin_unlock_irqrestore(&uic->lock, flags);
}

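/* Ack a source by writing its bit to UIC_SR (write-one-to-clear). */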
static void uic_ack_irq(struct irq_data *d)
{
	struct uic *uic = irq_data_get_irq_chip_data(d);
	unsigned int src = irqd_to_hwirq(d);
	unsigned long flags;

	raw_spin_lock_irqsave(&uic->lock, flags);
	mtdcr(uic->dcrbase + UIC_SR, 1 << (31-src));
	raw_spin_unlock_irqrestore(&uic->lock, flags);
}

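/* Disable a source and, for edge interrupts only, ack it (see below). */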
static void uic_mask_ack_irq(struct irq_data *d)
{
	struct uic *uic = irq_data_get_irq_chip_data(d);
	unsigned int src = irqd_to_hwirq(d);
	unsigned long flags;
	u32 er, sr;

	sr = 1 << (31-src);
	raw_spin_lock_irqsave(&uic->lock, flags);
	er = mfdcr(uic->dcrbase + UIC_ER);
	er &= ~sr;
	mtdcr(uic->dcrbase + UIC_ER, er);
	/* On the UIC, acking (i.e. clearing the SR bit) a level irq
	 * will have no effect if the interrupt is still asserted by
	 * the device, even if the interrupt is already masked.
	 * Therefore we only ack edge interrupts here, while level
	 * interrupts are acked after the actual ISR call, in
	 * uic_unmask_irq().
	 */
	if (!irqd_is_level_type(d))
		mtdcr(uic->dcrbase + UIC_SR, sr);
	raw_spin_unlock_irqrestore(&uic->lock, flags);
}

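/*
 * Program the trigger (UIC_TR) and polarity (UIC_PR) bits for a source
 * and clear any stale status it may have latched.  IRQ_TYPE_NONE just
 * masks the source.
 */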
static int uic_set_irq_type(struct irq_data *d, unsigned int flow_type)
{
	struct uic *uic = irq_data_get_irq_chip_data(d);
	unsigned int src = irqd_to_hwirq(d);
	unsigned long flags;
	int trigger, polarity;
	u32 tr, pr, mask;

	switch (flow_type & IRQ_TYPE_SENSE_MASK) {
	case IRQ_TYPE_NONE:
		uic_mask_irq(d);
		return 0;

	case IRQ_TYPE_EDGE_RISING:
		trigger = 1; polarity = 1;
		break;
	case IRQ_TYPE_EDGE_FALLING:
		trigger = 1; polarity = 0;
		break;
	case IRQ_TYPE_LEVEL_HIGH:
		trigger = 0; polarity = 1;
		break;
	case IRQ_TYPE_LEVEL_LOW:
		trigger = 0; polarity = 0;
		break;
	default:
		return -EINVAL;
	}

	mask = ~(1 << (31 - src));

	raw_spin_lock_irqsave(&uic->lock, flags);
	tr = mfdcr(uic->dcrbase + UIC_TR);
	pr = mfdcr(uic->dcrbase + UIC_PR);
	tr = (tr & mask) | (trigger << (31-src));
	pr = (pr & mask) | (polarity << (31-src));

	mtdcr(uic->dcrbase + UIC_PR, pr);
	mtdcr(uic->dcrbase + UIC_TR, tr);
	mtdcr(uic->dcrbase + UIC_SR, ~mask);

	raw_spin_unlock_irqrestore(&uic->lock, flags);

	return 0;
}

static struct irq_chip uic_irq_chip = {
	.name		= "UIC",
	.irq_unmask	= uic_unmask_irq,
	.irq_mask	= uic_mask_irq,
	.irq_mask_ack	= uic_mask_ack_irq,
	.irq_ack	= uic_ack_irq,
	.irq_set_type	= uic_set_irq_type,
};

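/* irq_domain .map callback: bind a virq to this UIC and its irq_chip. */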
static int uic_host_map(struct irq_domain *h, unsigned int virq,
			irq_hw_number_t hw)
{
	struct uic *uic = h->host_data;

	irq_set_chip_data(virq, uic);
	/* Despite the name, handle_level_irq() works for both level
	 * and edge irqs on UIC.  FIXME: check this is correct */
	irq_set_chip_and_handler(virq, &uic_irq_chip, handle_level_irq);

	/* Set default irq type */
	irq_set_irq_type(virq, IRQ_TYPE_NONE);

	return 0;
}

static const struct irq_domain_ops uic_host_ops = {
	.map	= uic_host_map,
	.xlate	= irq_domain_xlate_twocell,
};

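/*
 * Chained handler for cascaded (secondary) UICs: mask (and for edge,
 * ack) the cascade input on the parent, dispatch one pending source
 * from the child's masked status register, then re-ack/unmask the
 * parent as appropriate.
 */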
static void uic_irq_cascade(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irq_data *idata = irq_desc_get_irq_data(desc);
	struct uic *uic = irq_desc_get_handler_data(desc);
	u32 msr;
	int src;

	raw_spin_lock(&desc->lock);
	if (irqd_is_level_type(idata))
		chip->irq_mask(idata);
	else
		chip->irq_mask_ack(idata);
	raw_spin_unlock(&desc->lock);

	msr = mfdcr(uic->dcrbase + UIC_MSR);
	if (!msr) /* spurious interrupt */
		goto uic_irq_ret;

	src = 32 - ffs(msr);

	generic_handle_domain_irq(uic->irqhost, src);

uic_irq_ret:
	raw_spin_lock(&desc->lock);
	if (irqd_is_level_type(idata))
		chip->irq_ack(idata);
	if (!irqd_irq_disabled(idata) && chip->irq_unmask)
		chip->irq_unmask(idata);
	raw_spin_unlock(&desc->lock);
}

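/*
 * Allocate and set up one UIC from its device-tree node: read cell-index
 * and dcr-reg, register a linear irq_domain for its 32 sources, then
 * disable and clear all of them so we start from a known state.
 */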
static struct uic * __init uic_init_one(struct device_node *node)
{
	struct uic *uic;
	const u32 *indexp, *dcrreg;
	int len;

	BUG_ON(! of_device_is_compatible(node, "ibm,uic"));

	uic = kzalloc(sizeof(*uic), GFP_KERNEL);
	if (! uic)
		return NULL; /* FIXME: panic? */

	raw_spin_lock_init(&uic->lock);
	indexp = of_get_property(node, "cell-index", &len);
	if (!indexp || (len != sizeof(u32))) {
		printk(KERN_ERR "uic: Device node %pOF has missing or invalid "
		       "cell-index property\n", node);
		return NULL;
	}
	uic->index = *indexp;

	dcrreg = of_get_property(node, "dcr-reg", &len);
	if (!dcrreg || (len != 2*sizeof(u32))) {
		printk(KERN_ERR "uic: Device node %pOF has missing or invalid "
		       "dcr-reg property\n", node);
		return NULL;
	}
	uic->dcrbase = *dcrreg;

	uic->irqhost = irq_domain_add_linear(node, NR_UIC_INTS, &uic_host_ops,
					     uic);
	if (! uic->irqhost)
		return NULL; /* FIXME: panic? */

	/* Start with all interrupts disabled, level and non-critical */
	mtdcr(uic->dcrbase + UIC_ER, 0);
	mtdcr(uic->dcrbase + UIC_CR, 0);
	mtdcr(uic->dcrbase + UIC_TR, 0);
	/* Clear any pending interrupts, in case the firmware left some */
	mtdcr(uic->dcrbase + UIC_SR, 0xffffffff);

	printk ("UIC%d (%d IRQ sources) at DCR 0x%x\n", uic->index,
		NR_UIC_INTS, uic->dcrbase);

	return uic;
}

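/*
 * Find the top-level UIC (the one without an "interrupts" property),
 * make it the default irq host, then hook each remaining UIC up as a
 * cascade on its parent input.
 *
 * Illustrative device-tree fragment for a cascaded UIC (values are
 * hypothetical and board-specific, shown only to document the
 * properties this code consumes):
 *
 *	UIC1: interrupt-controller1 {
 *		compatible = "ibm,uic";
 *		interrupt-controller;
 *		cell-index = <1>;
 *		dcr-reg = <0x0d0 0x009>;
 *		#interrupt-cells = <2>;
 *		interrupts = <0x1e 0x4 0x1f 0x4>;	// cascade into the parent
 *		interrupt-parent = <&UIC0>;
 *	};
 */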
void __init uic_init_tree(void)
{
	struct device_node *np;
	struct uic *uic;
	const u32 *interrupts;

	/* First locate and initialize the top-level UIC */
	for_each_compatible_node(np, NULL, "ibm,uic") {
		interrupts = of_get_property(np, "interrupts", NULL);
		if (!interrupts)
			break;
	}

	BUG_ON(!np); /* uic_init_tree() assumes there's a UIC as the
		      * top-level interrupt controller */
	primary_uic = uic_init_one(np);
	if (!primary_uic)
		panic("Unable to initialize primary UIC %pOF\n", np);

	irq_set_default_host(primary_uic->irqhost);
	of_node_put(np);

	/* Then scan again for cascaded UICs */
	for_each_compatible_node(np, NULL, "ibm,uic") {
		interrupts = of_get_property(np, "interrupts", NULL);
		if (interrupts) {
			/* Secondary UIC */
			int cascade_virq;

			uic = uic_init_one(np);
			if (! uic)
				panic("Unable to initialize a secondary UIC %pOF\n",
				      np);

			cascade_virq = irq_of_parse_and_map(np, 0);

			irq_set_handler_data(cascade_virq, uic);
			irq_set_chained_handler(cascade_virq, uic_irq_cascade);

			/* FIXME: setup critical cascade?? */
		}
	}
}

/* Return an interrupt vector or 0 if no interrupt is pending. */
unsigned int uic_get_irq(void)
{
	u32 msr;
	int src;

	BUG_ON(! primary_uic);

	msr = mfdcr(primary_uic->dcrbase + UIC_MSR);
	src = 32 - ffs(msr);

	return irq_linear_revmap(primary_uic->irqhost, src);
}
330