/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004-2008 Cavium Networks
 */
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/hardirq.h>

#include <asm/octeon/octeon.h>

DEFINE_RWLOCK(octeon_irq_ciu0_rwlock);
DEFINE_RWLOCK(octeon_irq_ciu1_rwlock);
DEFINE_SPINLOCK(octeon_irq_msi_lock);
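
/*
 * Locking summary (a sketch inferred from how the locks are used
 * below): the two rwlocks protect the per-core CIU enable registers.
 * Enables take them for reading, since each core only touches its own
 * register; disables and affinity changes take them for writing so
 * they can walk every core's register without racing an enable.  The
 * spinlock serializes access to the single shared PCIe MSI enable
 * register.
 */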

static void octeon_irq_core_ack(unsigned int irq)
{
	unsigned int bit = irq - OCTEON_IRQ_SW0;
	/*
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	clear_c0_status(0x100 << bit);
	/* The two user interrupts must be cleared manually. */
	if (bit < 2)
		clear_c0_cause(0x100 << bit);
}

static void octeon_irq_core_eoi(unsigned int irq)
{
	struct irq_desc *desc = irq_desc + irq;
	unsigned int bit = irq - OCTEON_IRQ_SW0;
	/*
	 * If an IRQ is being processed while we are disabling it, the
	 * handler will attempt to unmask the interrupt after it has
	 * been disabled.
	 */
	if (desc->status & IRQ_DISABLED)
		return;

	/*
	 * There is a race here: the IRQ could be disabled (e.g. on
	 * another CPU) between the check above and the unmask below.
	 * It should be fixed.
	 */

	/*
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	set_c0_status(0x100 << bit);
}

static void octeon_irq_core_enable(unsigned int irq)
{
	unsigned long flags;
	unsigned int bit = irq - OCTEON_IRQ_SW0;

	/*
	 * We need to disable interrupts to make sure our updates are
	 * atomic.
	 */
	local_irq_save(flags);
	set_c0_status(0x100 << bit);
	local_irq_restore(flags);
}

static void octeon_irq_core_disable_local(unsigned int irq)
{
	unsigned long flags;
	unsigned int bit = irq - OCTEON_IRQ_SW0;
	/*
	 * We need to disable interrupts to make sure our updates are
	 * atomic.
	 */
	local_irq_save(flags);
	clear_c0_status(0x100 << bit);
	local_irq_restore(flags);
}

static void octeon_irq_core_disable(unsigned int irq)
{
#ifdef CONFIG_SMP
	on_each_cpu((void (*)(void *)) octeon_irq_core_disable_local,
		    (void *) (long) irq, 1);
#else
	octeon_irq_core_disable_local(irq);
#endif
}

static struct irq_chip octeon_irq_chip_core = {
	.name = "Core",
	.enable = octeon_irq_core_enable,
	.disable = octeon_irq_core_disable,
	.ack = octeon_irq_core_ack,
	.eoi = octeon_irq_core_eoi,
};
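
/*
 * Flow note (an assumption based on the handle_percpu_irq wiring in
 * arch_init_irq() below): the flow handler calls ->ack before running
 * the handler and ->eoi afterwards, so the ack/eoi pairs in this file
 * bracket every handler invocation.
 */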


static void octeon_irq_ciu0_ack(unsigned int irq)
{
	/*
	 * To avoid any locking when accessing the CIU, we acknowledge
	 * CIU interrupts by disabling all of them.  This way we can
	 * use a per-core register and avoid any out-of-core locking
	 * requirements.  This has the side effect that CIU interrupts
	 * can't be processed recursively.
	 *
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	clear_c0_status(0x100 << 2);
}

static void octeon_irq_ciu0_eoi(unsigned int irq)
{
	/*
	 * Enable all CIU interrupts again.  We don't need to disable
	 * IRQs to make these atomic since they are already disabled
	 * earlier in the low level interrupt code.
	 */
	set_c0_status(0x100 << 2);
}
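
/*
 * Register indexing note (inferred from the accesses below): each core
 * has two CIU interrupt outputs, IP2 and IP3.  The CIU0 handlers use
 * the IP2 copy of the enable register, CVMX_CIU_INTX_EN0(coreid * 2).
 */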

static void octeon_irq_ciu0_enable(unsigned int irq)
{
	int coreid = cvmx_get_core_num();
	unsigned long flags;
	uint64_t en0;
	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */

	/*
	 * A read lock is used here: each core only updates its own
	 * enable register, so concurrent enables on different cores
	 * don't interfere with each other.  During a disable the
	 * write lock excludes any enables that might race with it.
	 */
	read_lock_irqsave(&octeon_irq_ciu0_rwlock, flags);
	en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
	en0 |= 1ull << bit;
	cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
	cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
	read_unlock_irqrestore(&octeon_irq_ciu0_rwlock, flags);
}

static void octeon_irq_ciu0_disable(unsigned int irq)
{
	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */
	unsigned long flags;
	uint64_t en0;
#ifdef CONFIG_SMP
	int cpu;
	write_lock_irqsave(&octeon_irq_ciu0_rwlock, flags);
	for_each_online_cpu(cpu) {
		int coreid = cpu_logical_map(cpu);
		en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
		en0 &= ~(1ull << bit);
		cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
	write_unlock_irqrestore(&octeon_irq_ciu0_rwlock, flags);
#else
	int coreid = cvmx_get_core_num();
	local_irq_save(flags);
	en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
	en0 &= ~(1ull << bit);
	cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
	cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
	local_irq_restore(flags);
#endif
}

#ifdef CONFIG_SMP
static void octeon_irq_ciu0_set_affinity(unsigned int irq, const struct cpumask *dest)
{
	int cpu;
	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */

	write_lock(&octeon_irq_ciu0_rwlock);
	for_each_online_cpu(cpu) {
		int coreid = cpu_logical_map(cpu);
		uint64_t en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
		if (cpumask_test_cpu(cpu, dest))
			en0 |= 1ull << bit;
		else
			en0 &= ~(1ull << bit);
		cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
	write_unlock(&octeon_irq_ciu0_rwlock);
}
#endif

static struct irq_chip octeon_irq_chip_ciu0 = {
	.name = "CIU0",
	.enable = octeon_irq_ciu0_enable,
	.disable = octeon_irq_ciu0_disable,
	.ack = octeon_irq_ciu0_ack,
	.eoi = octeon_irq_ciu0_eoi,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu0_set_affinity,
#endif
};


static void octeon_irq_ciu1_ack(unsigned int irq)
{
	/*
	 * To avoid any locking when accessing the CIU, we acknowledge
	 * CIU interrupts by disabling all of them.  This way we can
	 * use a per-core register and avoid any out-of-core locking
	 * requirements.  This has the side effect that CIU interrupts
	 * can't be processed recursively.  We don't need to disable
	 * IRQs to make these atomic since they are already disabled
	 * earlier in the low level interrupt code.
	 */
	clear_c0_status(0x100 << 3);
}

static void octeon_irq_ciu1_eoi(unsigned int irq)
{
	/*
	 * Enable all CIU interrupts again.  We don't need to disable
	 * IRQs to make these atomic since they are already disabled
	 * earlier in the low level interrupt code.
	 */
	set_c0_status(0x100 << 3);
}
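
/*
 * Register indexing note (inferred from the accesses below): the CIU1
 * handlers use the IP3 copy of the enable register,
 * CVMX_CIU_INTX_EN1(coreid * 2 + 1).
 */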

static void octeon_irq_ciu1_enable(unsigned int irq)
{
	int coreid = cvmx_get_core_num();
	unsigned long flags;
	uint64_t en1;
	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */

	/*
	 * A read lock is used here: each core only updates its own
	 * enable register, so concurrent enables on different cores
	 * don't interfere with each other.  During a disable the
	 * write lock excludes any enables that might race with it.
	 */
	read_lock_irqsave(&octeon_irq_ciu1_rwlock, flags);
	en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
	en1 |= 1ull << bit;
	cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
	cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
	read_unlock_irqrestore(&octeon_irq_ciu1_rwlock, flags);
}

static void octeon_irq_ciu1_disable(unsigned int irq)
{
	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */
	unsigned long flags;
	uint64_t en1;
#ifdef CONFIG_SMP
	int cpu;
	write_lock_irqsave(&octeon_irq_ciu1_rwlock, flags);
	for_each_online_cpu(cpu) {
		int coreid = cpu_logical_map(cpu);
		en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
		en1 &= ~(1ull << bit);
		cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
	write_unlock_irqrestore(&octeon_irq_ciu1_rwlock, flags);
#else
	int coreid = cvmx_get_core_num();
	local_irq_save(flags);
	en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
	en1 &= ~(1ull << bit);
	cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
	cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
	local_irq_restore(flags);
#endif
}

#ifdef CONFIG_SMP
static void octeon_irq_ciu1_set_affinity(unsigned int irq, const struct cpumask *dest)
{
	int cpu;
	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */

	write_lock(&octeon_irq_ciu1_rwlock);
	for_each_online_cpu(cpu) {
		int coreid = cpu_logical_map(cpu);
		uint64_t en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
		if (cpumask_test_cpu(cpu, dest))
			en1 |= 1ull << bit;
		else
			en1 &= ~(1ull << bit);
		cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
	write_unlock(&octeon_irq_ciu1_rwlock);
}
#endif

static struct irq_chip octeon_irq_chip_ciu1 = {
	.name = "CIU1",
	.enable = octeon_irq_ciu1_enable,
	.disable = octeon_irq_ciu1_disable,
	.ack = octeon_irq_ciu1_ack,
	.eoi = octeon_irq_ciu1_eoi,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu1_set_affinity,
#endif
};

#ifdef CONFIG_PCI_MSI

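/*
 * MSI overview (a summary of the feature tests below): chips without
 * OCTEON_FEATURE_PCIE have PCI and use the NPI MSI registers, while
 * PCIe chips use the NPEI registers.  Only the first 64 MSI interrupts
 * are supported; they share one enable register, serialized by
 * octeon_irq_msi_lock.
 */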
static void octeon_irq_msi_ack(unsigned int irq)
{
	if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
		/* These chips have PCI */
		cvmx_write_csr(CVMX_NPI_NPI_MSI_RCV,
			       1ull << (irq - OCTEON_IRQ_MSI_BIT0));
	} else {
		/*
		 * These chips have PCIe.  Thankfully the ACK doesn't
		 * need any locking.
		 */
		cvmx_write_csr(CVMX_PEXP_NPEI_MSI_RCV0,
			       1ull << (irq - OCTEON_IRQ_MSI_BIT0));
	}
}

static void octeon_irq_msi_eoi(unsigned int irq)
{
	/* Nothing needed */
}

static void octeon_irq_msi_enable(unsigned int irq)
{
	if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
		/*
		 * Octeon PCI doesn't have the ability to mask/unmask
		 * MSI interrupts individually.  Instead of
		 * masking/unmasking them in groups of 16, we simply
		 * assume MSI devices are well behaved.  MSI
		 * interrupts are always enabled and the ACK is
		 * assumed to be enough.
		 */
	} else {
		/*
		 * These chips have PCIe.  Note that we only support
		 * the first 64 MSI interrupts.  Unfortunately all the
		 * MSI enables are in the same register.  We use
		 * MSI0's lock to control access to them all.
		 */
		uint64_t en;
		unsigned long flags;
		spin_lock_irqsave(&octeon_irq_msi_lock, flags);
		en = cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
		en |= 1ull << (irq - OCTEON_IRQ_MSI_BIT0);
		cvmx_write_csr(CVMX_PEXP_NPEI_MSI_ENB0, en);
		cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
		spin_unlock_irqrestore(&octeon_irq_msi_lock, flags);
	}
}

static void octeon_irq_msi_disable(unsigned int irq)
{
	if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
		/* See the comment in octeon_irq_msi_enable(). */
	} else {
		/*
		 * These chips have PCIe.  Note that we only support
		 * the first 64 MSI interrupts.  Unfortunately all the
		 * MSI enables are in the same register.  We use
		 * MSI0's lock to control access to them all.
		 */
		uint64_t en;
		unsigned long flags;
		spin_lock_irqsave(&octeon_irq_msi_lock, flags);
		en = cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
		en &= ~(1ull << (irq - OCTEON_IRQ_MSI_BIT0));
		cvmx_write_csr(CVMX_PEXP_NPEI_MSI_ENB0, en);
		cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
		spin_unlock_irqrestore(&octeon_irq_msi_lock, flags);
	}
}

static struct irq_chip octeon_irq_chip_msi = {
	.name = "MSI",
	.enable = octeon_irq_msi_enable,
	.disable = octeon_irq_msi_disable,
	.ack = octeon_irq_msi_ack,
	.eoi = octeon_irq_msi_eoi,
};
#endif

void __init arch_init_irq(void)
{
	int irq;

#ifdef CONFIG_SMP
	/* Set the default affinity to the boot CPU. */
	cpumask_clear(irq_default_affinity);
	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
#endif

	if (NR_IRQS < OCTEON_IRQ_LAST)
		pr_err("octeon_irq_init: NR_IRQS is set too low\n");

	/* 0 - 15 reserved for i8259 master and slave controller. */

	/* 17 - 23 MIPS internal */
	for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++) {
		set_irq_chip_and_handler(irq, &octeon_irq_chip_core,
					 handle_percpu_irq);
	}

	/* 24 - 87 CIU_INT_SUM0 */
	for (irq = OCTEON_IRQ_WORKQ0; irq <= OCTEON_IRQ_BOOTDMA; irq++) {
		set_irq_chip_and_handler(irq, &octeon_irq_chip_ciu0,
					 handle_percpu_irq);
	}

	/* 88 - 151 CIU_INT_SUM1 */
	for (irq = OCTEON_IRQ_WDOG0; irq <= OCTEON_IRQ_RESERVED151; irq++) {
		set_irq_chip_and_handler(irq, &octeon_irq_chip_ciu1,
					 handle_percpu_irq);
	}

#ifdef CONFIG_PCI_MSI
	/* 152 - 215 PCI/PCIe MSI interrupts */
	for (irq = OCTEON_IRQ_MSI_BIT0; irq <= OCTEON_IRQ_MSI_BIT63; irq++) {
		set_irq_chip_and_handler(irq, &octeon_irq_chip_msi,
					 handle_percpu_irq);
	}
#endif
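	/*
	 * Enable the two CIU lines in the CP0 Status interrupt mask
	 * on the boot CPU (0x300 << 2 covers the IP2 and IP3 bits).
	 */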
	set_c0_status(0x300 << 2);
}

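/*
 * Top-level dispatch (a summary of the loop below): Cause is ANDed
 * with Status to find pending, unmasked interrupt lines.  IP2 maps to
 * a CIU INT_SUM0 source and IP3 to a CIU INT_SUM1 source; in each case
 * the highest pending enabled bit is dispatched.  Any other bit is a
 * core interrupt dispatched relative to MIPS_CPU_IRQ_BASE.  The loop
 * repeats until nothing is pending.
 */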
asmlinkage void plat_irq_dispatch(void)
{
	const unsigned long core_id = cvmx_get_core_num();
	const uint64_t ciu_sum0_address = CVMX_CIU_INTX_SUM0(core_id * 2);
	const uint64_t ciu_en0_address = CVMX_CIU_INTX_EN0(core_id * 2);
	const uint64_t ciu_sum1_address = CVMX_CIU_INT_SUM1;
	const uint64_t ciu_en1_address = CVMX_CIU_INTX_EN1(core_id * 2 + 1);
	unsigned long cop0_cause;
	unsigned long cop0_status;
	uint64_t ciu_en;
	uint64_t ciu_sum;

	while (1) {
		cop0_cause = read_c0_cause();
		cop0_status = read_c0_status();
		cop0_cause &= cop0_status;
		cop0_cause &= ST0_IM;

		if (unlikely(cop0_cause & STATUSF_IP2)) {
			ciu_sum = cvmx_read_csr(ciu_sum0_address);
			ciu_en = cvmx_read_csr(ciu_en0_address);
			ciu_sum &= ciu_en;
			if (likely(ciu_sum))
				do_IRQ(fls64(ciu_sum) + OCTEON_IRQ_WORKQ0 - 1);
			else
				spurious_interrupt();
		} else if (unlikely(cop0_cause & STATUSF_IP3)) {
			ciu_sum = cvmx_read_csr(ciu_sum1_address);
			ciu_en = cvmx_read_csr(ciu_en1_address);
			ciu_sum &= ciu_en;
			if (likely(ciu_sum))
				do_IRQ(fls64(ciu_sum) + OCTEON_IRQ_WDOG0 - 1);
			else
				spurious_interrupt();
		} else if (likely(cop0_cause)) {
			do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE);
		} else {
			break;
		}
	}
}