/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004-2008 Cavium Networks
 */
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/smp.h>

#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-pexp-defs.h>
#include <asm/octeon/cvmx-npi-defs.h>

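/*
 * These locks serialize the read-modify-write updates of the per-core
 * CIU enable registers.  Chips with the W1S/W1C register variants
 * handled below can set or clear single bits atomically and need no
 * locking.
 */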
static DEFINE_RAW_SPINLOCK(octeon_irq_ciu0_lock);
static DEFINE_RAW_SPINLOCK(octeon_irq_ciu1_lock);

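/*
 * Map a Linux CPU number to its hardware core id.  In the non-SMP
 * case only the running core exists, so the current core number is
 * used.
 */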
static int octeon_coreid_for_cpu(int cpu)
{
#ifdef CONFIG_SMP
	return cpu_logical_map(cpu);
#else
	return cvmx_get_core_num();
#endif
}

static void octeon_irq_core_ack(unsigned int irq)
{
	unsigned int bit = irq - OCTEON_IRQ_SW0;
	/*
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	clear_c0_status(0x100 << bit);
	/* The two user interrupts must be cleared manually. */
	if (bit < 2)
		clear_c0_cause(0x100 << bit);
}

static void octeon_irq_core_eoi(unsigned int irq)
{
	struct irq_desc *desc = irq_desc + irq;
	unsigned int bit = irq - OCTEON_IRQ_SW0;
	/*
	 * If an IRQ is being processed while we are disabling it, the
	 * handler will attempt to unmask the interrupt after it has
	 * been disabled; bail out in that case so it stays masked.
	 */
	if (desc->status & IRQ_DISABLED)
		return;
	/*
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	set_c0_status(0x100 << bit);
}

static void octeon_irq_core_enable(unsigned int irq)
{
	unsigned long flags;
	unsigned int bit = irq - OCTEON_IRQ_SW0;

	/*
	 * We need to disable interrupts to make sure our updates are
	 * atomic.
	 */
	local_irq_save(flags);
	set_c0_status(0x100 << bit);
	local_irq_restore(flags);
}

static void octeon_irq_core_disable_local(unsigned int irq)
{
	unsigned long flags;
	unsigned int bit = irq - OCTEON_IRQ_SW0;
	/*
	 * We need to disable interrupts to make sure our updates are
	 * atomic.
	 */
	local_irq_save(flags);
	clear_c0_status(0x100 << bit);
	local_irq_restore(flags);
}

#ifdef CONFIG_SMP
/* Wrapper with the signature on_each_cpu() expects; calling through a
 * cast function pointer with a mismatched signature is undefined
 * behavior. */
static void octeon_irq_core_disable_local_wrapper(void *arg)
{
	octeon_irq_core_disable_local((unsigned long)arg);
}
#endif

static void octeon_irq_core_disable(unsigned int irq)
{
#ifdef CONFIG_SMP
	on_each_cpu(octeon_irq_core_disable_local_wrapper,
		    (void *)(unsigned long)irq, 1);
#else
	octeon_irq_core_disable_local(irq);
#endif
}

static struct irq_chip octeon_irq_chip_core = {
	.name = "Core",
	.enable = octeon_irq_core_enable,
	.disable = octeon_irq_core_disable,
	.ack = octeon_irq_core_ack,
	.eoi = octeon_irq_core_eoi,
};

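/*
 * CIU sources are delivered through a core's IP2 (CIU_INT_SUM0
 * sources) and IP3 (CIU_INT_SUM1 sources) interrupt lines.  The CIU
 * enable CSR index selects both the core and the line: index
 * coreid * 2 is a core's IP2 view and coreid * 2 + 1 its IP3 view,
 * hence the indexing used throughout this file.
 */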
static void octeon_irq_ciu0_ack(unsigned int irq)
{
	/*
	 * In order to avoid any locking accessing the CIU, we
	 * acknowledge CIU interrupts by disabling all of them.  This
	 * way we can use a per core register and avoid any out of
	 * core locking requirements.  This has the side effect that
	 * CIU interrupts can't be processed recursively.
	 *
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	clear_c0_status(0x100 << 2);
}

static void octeon_irq_ciu0_eoi(unsigned int irq)
{
	/*
	 * Enable all CIU interrupts again.  We don't need to disable
	 * IRQs to make these atomic since they are already disabled
	 * earlier in the low level interrupt code.
	 */
	set_c0_status(0x100 << 2);
}

static void octeon_irq_ciu0_enable(unsigned int irq)
{
	int coreid = cvmx_get_core_num();
	unsigned long flags;
	uint64_t en0;
	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */

	raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
	en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
	en0 |= 1ull << bit;
	cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
	cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
	raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
}

static void octeon_irq_ciu0_disable(unsigned int irq)
{
	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */
	unsigned long flags;
	uint64_t en0;
	int cpu;
	raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
		en0 &= ~(1ull << bit);
		cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
	raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
}

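/*
 * Newer chips provide write-one-to-set (W1S) and write-one-to-clear
 * (W1C) aliases of the enable registers: writing a mask sets or clears
 * exactly the bits in that mask.  A single enable bit can therefore be
 * changed atomically, without a read-modify-write cycle or a lock.
 */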
/*
 * Enable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu0_enable_v2(unsigned int irq)
{
	int index = cvmx_get_core_num() * 2;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

	cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
}

/*
 * Disable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu0_ack_v2(unsigned int irq)
{
	int index = cvmx_get_core_num() * 2;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

	cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
}

/*
 * CIU timer type interrupts must be acknowledged by writing a '1' to
 * their SUM0 bit.
 */
static void octeon_irq_ciu0_timer_ack(unsigned int irq)
{
	int index = cvmx_get_core_num() * 2;
	uint64_t mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
	cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask);
}

static void octeon_irq_ciu0_timer_ack_v1(unsigned int irq)
{
	octeon_irq_ciu0_timer_ack(irq);
	octeon_irq_ciu0_ack(irq);
}

static void octeon_irq_ciu0_timer_ack_v2(unsigned int irq)
{
	octeon_irq_ciu0_timer_ack(irq);
	octeon_irq_ciu0_ack_v2(irq);
}

/*
 * Re-enable the irq on the current core once it has been serviced, for
 * chips that have the EN*_W1{S,C} registers.  Skip it if the irq was
 * disabled while it was being handled.
 */
static void octeon_irq_ciu0_eoi_v2(unsigned int irq)
{
	struct irq_desc *desc = irq_desc + irq;
	int index = cvmx_get_core_num() * 2;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

	if ((desc->status & IRQ_DISABLED) == 0)
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
}

/*
 * Disable the irq on all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu0_disable_all_v2(unsigned int irq)
{
	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
	int index;
	int cpu;
	for_each_online_cpu(cpu) {
		index = octeon_coreid_for_cpu(cpu) * 2;
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
	}
}

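/*
 * IRQ affinity is implemented entirely through the enable masks: a
 * source is enabled on every core in the destination mask and disabled
 * on the rest, since the CIU raises an interrupt on any core whose
 * enable bit for that source is set.
 */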
#ifdef CONFIG_SMP
static int octeon_irq_ciu0_set_affinity(unsigned int irq,
					const struct cpumask *dest)
{
	int cpu;
	unsigned long flags;
	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */

	raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		uint64_t en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
		if (cpumask_test_cpu(cpu, dest))
			en0 |= 1ull << bit;
		else
			en0 &= ~(1ull << bit);
		cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
	raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);

	return 0;
}

/*
 * Set affinity for the irq for chips that have the EN*_W1{S,C}
 * registers.
 */
static int octeon_irq_ciu0_set_affinity_v2(unsigned int irq,
					   const struct cpumask *dest)
{
	int cpu;
	int index;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
	for_each_online_cpu(cpu) {
		index = octeon_coreid_for_cpu(cpu) * 2;
		if (cpumask_test_cpu(cpu, dest))
			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
		else
			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
	}
	return 0;
}
#endif

/*
 * Newer octeon chips have support for lockless CIU operation.
 */
static struct irq_chip octeon_irq_chip_ciu0_v2 = {
	.name = "CIU0",
	.enable = octeon_irq_ciu0_enable_v2,
	.disable = octeon_irq_ciu0_disable_all_v2,
	.ack = octeon_irq_ciu0_ack_v2,
	.eoi = octeon_irq_ciu0_eoi_v2,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu0_set_affinity_v2,
#endif
};

static struct irq_chip octeon_irq_chip_ciu0 = {
	.name = "CIU0",
	.enable = octeon_irq_ciu0_enable,
	.disable = octeon_irq_ciu0_disable,
	.ack = octeon_irq_ciu0_ack,
	.eoi = octeon_irq_ciu0_eoi,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu0_set_affinity,
#endif
};

static struct irq_chip octeon_irq_chip_ciu0_timer_v2 = {
	.name = "CIU0-T",
	.enable = octeon_irq_ciu0_enable_v2,
	.disable = octeon_irq_ciu0_disable_all_v2,
	.ack = octeon_irq_ciu0_timer_ack_v2,
	.eoi = octeon_irq_ciu0_eoi_v2,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu0_set_affinity_v2,
#endif
};

static struct irq_chip octeon_irq_chip_ciu0_timer = {
	.name = "CIU0-T",
	.enable = octeon_irq_ciu0_enable,
	.disable = octeon_irq_ciu0_disable,
	.ack = octeon_irq_ciu0_timer_ack_v1,
	.eoi = octeon_irq_ciu0_eoi,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu0_set_affinity,
#endif
};

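/*
 * The CIU1 handlers below mirror the CIU0 handlers above, except that
 * the sources arrive on the core's IP3 line and the enable registers
 * are indexed at coreid * 2 + 1.
 */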
static void octeon_irq_ciu1_ack(unsigned int irq)
{
	/*
	 * In order to avoid any locking accessing the CIU, we
	 * acknowledge CIU interrupts by disabling all of them.  This
	 * way we can use a per core register and avoid any out of
	 * core locking requirements.  This has the side effect that
	 * CIU interrupts can't be processed recursively.  We don't
	 * need to disable IRQs to make these atomic since they are
	 * already disabled earlier in the low level interrupt code.
	 */
	clear_c0_status(0x100 << 3);
}

static void octeon_irq_ciu1_eoi(unsigned int irq)
{
	/*
	 * Enable all CIU interrupts again.  We don't need to disable
	 * IRQs to make these atomic since they are already disabled
	 * earlier in the low level interrupt code.
	 */
	set_c0_status(0x100 << 3);
}

static void octeon_irq_ciu1_enable(unsigned int irq)
{
	int coreid = cvmx_get_core_num();
	unsigned long flags;
	uint64_t en1;
	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */

	raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
	en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
	en1 |= 1ull << bit;
	cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
	cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
	raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
}

static void octeon_irq_ciu1_disable(unsigned int irq)
{
	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */
	unsigned long flags;
	uint64_t en1;
	int cpu;
	raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
		en1 &= ~(1ull << bit);
		cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
	raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
}

/*
 * Enable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu1_enable_v2(unsigned int irq)
{
	int index = cvmx_get_core_num() * 2 + 1;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);

	cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
}

/*
 * Disable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu1_ack_v2(unsigned int irq)
{
	int index = cvmx_get_core_num() * 2 + 1;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);

	cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
}

/*
 * Re-enable the irq on the current core once it has been serviced, for
 * chips that have the EN*_W1{S,C} registers.  Skip it if the irq was
 * disabled while it was being handled.
 */
static void octeon_irq_ciu1_eoi_v2(unsigned int irq)
{
	struct irq_desc *desc = irq_desc + irq;
	int index = cvmx_get_core_num() * 2 + 1;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);

	if ((desc->status & IRQ_DISABLED) == 0)
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
}

/*
 * Disable the irq on all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu1_disable_all_v2(unsigned int irq)
{
	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
	int index;
	int cpu;
	for_each_online_cpu(cpu) {
		index = octeon_coreid_for_cpu(cpu) * 2 + 1;
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
	}
}

#ifdef CONFIG_SMP
static int octeon_irq_ciu1_set_affinity(unsigned int irq,
					const struct cpumask *dest)
{
	int cpu;
	unsigned long flags;
	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */

	raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		uint64_t en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
		if (cpumask_test_cpu(cpu, dest))
			en1 |= 1ull << bit;
		else
			en1 &= ~(1ull << bit);
		cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
	raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);

	return 0;
}

/*
 * Set affinity for the irq for chips that have the EN*_W1{S,C}
 * registers.
 */
static int octeon_irq_ciu1_set_affinity_v2(unsigned int irq,
					   const struct cpumask *dest)
{
	int cpu;
	int index;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
	for_each_online_cpu(cpu) {
		index = octeon_coreid_for_cpu(cpu) * 2 + 1;
		if (cpumask_test_cpu(cpu, dest))
			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
		else
			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
	}
	return 0;
}
#endif

/*
 * Newer octeon chips have support for lockless CIU operation.
 */
static struct irq_chip octeon_irq_chip_ciu1_v2 = {
	.name = "CIU1",
	.enable = octeon_irq_ciu1_enable_v2,
	.disable = octeon_irq_ciu1_disable_all_v2,
	.ack = octeon_irq_ciu1_ack_v2,
	.eoi = octeon_irq_ciu1_eoi_v2,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu1_set_affinity_v2,
#endif
};

static struct irq_chip octeon_irq_chip_ciu1 = {
	.name = "CIU1",
	.enable = octeon_irq_ciu1_enable,
	.disable = octeon_irq_ciu1_disable,
	.ack = octeon_irq_ciu1_ack,
	.eoi = octeon_irq_ciu1_eoi,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu1_set_affinity,
#endif
};

#ifdef CONFIG_PCI_MSI

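/*
 * MSI interrupts OCTEON_IRQ_MSI_BIT0..63 map one-to-one onto bits
 * 0..63 of the MSI receive register: NPI_NPI_MSI_RCV on PCI chips,
 * NPEI_MSI_RCV0 (with the enables in NPEI_MSI_ENB0) on PCIe chips.
 */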
static DEFINE_RAW_SPINLOCK(octeon_irq_msi_lock);

static void octeon_irq_msi_ack(unsigned int irq)
{
	if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
		/* These chips have PCI */
		cvmx_write_csr(CVMX_NPI_NPI_MSI_RCV,
			       1ull << (irq - OCTEON_IRQ_MSI_BIT0));
	} else {
		/*
		 * These chips have PCIe. Thankfully the ACK doesn't
		 * need any locking.
		 */
		cvmx_write_csr(CVMX_PEXP_NPEI_MSI_RCV0,
			       1ull << (irq - OCTEON_IRQ_MSI_BIT0));
	}
}

static void octeon_irq_msi_eoi(unsigned int irq)
{
	/* Nothing needed */
}

static void octeon_irq_msi_enable(unsigned int irq)
{
	if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
		/*
		 * Octeon PCI doesn't have the ability to mask/unmask
		 * MSI interrupts individually.  Instead of
		 * masking/unmasking them in groups of 16, we simply
		 * assume MSI devices are well behaved.  MSI
		 * interrupts are always enabled and the ACK is
		 * assumed to be enough.
		 */
	} else {
		/*
		 * These chips have PCIe.  Note that we only support
		 * the first 64 MSI interrupts.  Unfortunately all the
		 * MSI enables are in the same register.  We use
		 * MSI0's lock to control access to them all.
		 */
		uint64_t en;
		unsigned long flags;
		raw_spin_lock_irqsave(&octeon_irq_msi_lock, flags);
		en = cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
		en |= 1ull << (irq - OCTEON_IRQ_MSI_BIT0);
		cvmx_write_csr(CVMX_PEXP_NPEI_MSI_ENB0, en);
		cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
		raw_spin_unlock_irqrestore(&octeon_irq_msi_lock, flags);
	}
}

static void octeon_irq_msi_disable(unsigned int irq)
{
	if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
		/* See comment in enable */
	} else {
		/*
		 * These chips have PCIe.  Note that we only support
		 * the first 64 MSI interrupts.  Unfortunately all the
		 * MSI enables are in the same register.  We use
		 * MSI0's lock to control access to them all.
		 */
		uint64_t en;
		unsigned long flags;
		raw_spin_lock_irqsave(&octeon_irq_msi_lock, flags);
		en = cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
		en &= ~(1ull << (irq - OCTEON_IRQ_MSI_BIT0));
		cvmx_write_csr(CVMX_PEXP_NPEI_MSI_ENB0, en);
		cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
		raw_spin_unlock_irqrestore(&octeon_irq_msi_lock, flags);
	}
}

static struct irq_chip octeon_irq_chip_msi = {
	.name = "MSI",
	.enable = octeon_irq_msi_enable,
	.disable = octeon_irq_msi_disable,
	.ack = octeon_irq_msi_ack,
	.eoi = octeon_irq_msi_eoi,
};
#endif

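/*
 * Once arch_init_irq() below has registered these chips, drivers claim
 * lines through the generic API.  A sketch (my_handler and my_dev are
 * hypothetical driver-side names):
 *
 *	err = request_irq(OCTEON_IRQ_TIMER0, my_handler, 0,
 *			  "my-timer", my_dev);
 */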
void __init arch_init_irq(void)
{
	int irq;
	struct irq_chip *chip0;
	struct irq_chip *chip0_timer;
	struct irq_chip *chip1;

#ifdef CONFIG_SMP
	/* Set the default affinity to the boot cpu. */
	cpumask_clear(irq_default_affinity);
	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
#endif

	if (NR_IRQS < OCTEON_IRQ_LAST)
		pr_err("octeon_irq_init: NR_IRQS is set too low\n");

	if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) ||
	    OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) ||
	    OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X)) {
		chip0 = &octeon_irq_chip_ciu0_v2;
		chip0_timer = &octeon_irq_chip_ciu0_timer_v2;
		chip1 = &octeon_irq_chip_ciu1_v2;
	} else {
		chip0 = &octeon_irq_chip_ciu0;
		chip0_timer = &octeon_irq_chip_ciu0_timer;
		chip1 = &octeon_irq_chip_ciu1;
	}

	/* 0 - 15 reserved for i8259 master and slave controller. */

	/* 16 - 23 MIPS internal */
	for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++) {
		set_irq_chip_and_handler(irq, &octeon_irq_chip_core,
					 handle_percpu_irq);
	}

	/* 24 - 87 CIU_INT_SUM0 */
	for (irq = OCTEON_IRQ_WORKQ0; irq <= OCTEON_IRQ_BOOTDMA; irq++) {
		switch (irq) {
		case OCTEON_IRQ_GMX_DRP0:
		case OCTEON_IRQ_GMX_DRP1:
		case OCTEON_IRQ_IPD_DRP:
		case OCTEON_IRQ_KEY_ZERO:
		case OCTEON_IRQ_TIMER0:
		case OCTEON_IRQ_TIMER1:
		case OCTEON_IRQ_TIMER2:
		case OCTEON_IRQ_TIMER3:
			set_irq_chip_and_handler(irq, chip0_timer,
						 handle_percpu_irq);
			break;
		default:
			set_irq_chip_and_handler(irq, chip0,
						 handle_percpu_irq);
			break;
		}
	}

	/* 88 - 151 CIU_INT_SUM1 */
	for (irq = OCTEON_IRQ_WDOG0; irq <= OCTEON_IRQ_RESERVED151; irq++) {
		set_irq_chip_and_handler(irq, chip1, handle_percpu_irq);
	}

#ifdef CONFIG_PCI_MSI
	/* 152 - 215 PCI/PCIe MSI interrupts */
	for (irq = OCTEON_IRQ_MSI_BIT0; irq <= OCTEON_IRQ_MSI_BIT63; irq++) {
		set_irq_chip_and_handler(irq, &octeon_irq_chip_msi,
					 handle_percpu_irq);
	}
#endif
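
	/* Enable the CIU lines (IP2 and IP3) in the core. */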
	set_c0_status(0x300 << 2);
}

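/*
 * Top-level dispatcher: loop on CAUSE & STATUS and service the
 * highest-numbered pending source each time around.  IP2 maps to the
 * CIU SUM0 sources, IP3 to the CIU SUM1 sources, and anything else to
 * the MIPS core interrupts; return once nothing is pending.
 */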
asmlinkage void plat_irq_dispatch(void)
{
	const unsigned long core_id = cvmx_get_core_num();
	const uint64_t ciu_sum0_address = CVMX_CIU_INTX_SUM0(core_id * 2);
	const uint64_t ciu_en0_address = CVMX_CIU_INTX_EN0(core_id * 2);
	const uint64_t ciu_sum1_address = CVMX_CIU_INT_SUM1;
	const uint64_t ciu_en1_address = CVMX_CIU_INTX_EN1(core_id * 2 + 1);
	unsigned long cop0_cause;
	unsigned long cop0_status;
	uint64_t ciu_en;
	uint64_t ciu_sum;

	while (1) {
		cop0_cause = read_c0_cause();
		cop0_status = read_c0_status();
		cop0_cause &= cop0_status;
		cop0_cause &= ST0_IM;

		if (unlikely(cop0_cause & STATUSF_IP2)) {
			ciu_sum = cvmx_read_csr(ciu_sum0_address);
			ciu_en = cvmx_read_csr(ciu_en0_address);
			ciu_sum &= ciu_en;
			if (likely(ciu_sum))
				do_IRQ(fls64(ciu_sum) + OCTEON_IRQ_WORKQ0 - 1);
			else
				spurious_interrupt();
		} else if (unlikely(cop0_cause & STATUSF_IP3)) {
			ciu_sum = cvmx_read_csr(ciu_sum1_address);
			ciu_en = cvmx_read_csr(ciu_en1_address);
			ciu_sum &= ciu_en;
			if (likely(ciu_sum))
				do_IRQ(fls64(ciu_sum) + OCTEON_IRQ_WDOG0 - 1);
			else
				spurious_interrupt();
		} else if (likely(cop0_cause)) {
			do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE);
		} else {
			break;
		}
	}
}

#ifdef CONFIG_HOTPLUG_CPU
static int is_irq_enabled_on_cpu(unsigned int irq, unsigned int cpu)
{
	unsigned int isset;
	int coreid = octeon_coreid_for_cpu(cpu);
	int bit = (irq < OCTEON_IRQ_WDOG0) ?
		   irq - OCTEON_IRQ_WORKQ0 : irq - OCTEON_IRQ_WDOG0;

	if (irq < OCTEON_IRQ_WDOG0) {
		isset = (cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2)) &
			(1ull << bit)) >> bit;
	} else {
		isset = (cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1)) &
			(1ull << bit)) >> bit;
	}
	return isset;
}

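/*
 * Called on a CPU that is going offline: mask the per-core lines
 * locally, then push every CIU source still enabled on this core out
 * to the set of online CPUs so it can migrate.
 */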
void fixup_irqs(void)
{
	int irq;

	for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++)
		octeon_irq_core_disable_local(irq);

	for (irq = OCTEON_IRQ_WORKQ0; irq <= OCTEON_IRQ_GPIO15; irq++) {
		if (is_irq_enabled_on_cpu(irq, smp_processor_id())) {
			/* ciu irq migrates to next cpu */
			octeon_irq_chip_ciu0.disable(irq);
			octeon_irq_ciu0_set_affinity(irq, &cpu_online_map);
		}
	}

#if 0
	for (irq = OCTEON_IRQ_MBOX0; irq <= OCTEON_IRQ_MBOX1; irq++)
		octeon_irq_mailbox_mask(irq);
#endif
	for (irq = OCTEON_IRQ_UART0; irq <= OCTEON_IRQ_BOOTDMA; irq++) {
		if (is_irq_enabled_on_cpu(irq, smp_processor_id())) {
			/* ciu irq migrates to next cpu */
			octeon_irq_chip_ciu0.disable(irq);
			octeon_irq_ciu0_set_affinity(irq, &cpu_online_map);
		}
	}

	for (irq = OCTEON_IRQ_UART2; irq <= OCTEON_IRQ_RESERVED135; irq++) {
		if (is_irq_enabled_on_cpu(irq, smp_processor_id())) {
			/* ciu irq migrates to next cpu */
			octeon_irq_chip_ciu1.disable(irq);
			octeon_irq_ciu1_set_affinity(irq, &cpu_online_map);
		}
	}
}

#endif /* CONFIG_HOTPLUG_CPU */