/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004-2008, 2009, 2010 Cavium Networks
 */
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/smp.h>

#include <asm/octeon/octeon.h>

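/*
 * These locks serialize the read-modify-write updates of the CIU
 * enable registers (CIU_INTX_EN0/EN1) done by the non-W1{S,C} code
 * paths below; one lock per register bank.
 */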
static DEFINE_RAW_SPINLOCK(octeon_irq_ciu0_lock);
static DEFINE_RAW_SPINLOCK(octeon_irq_ciu1_lock);

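/*
 * Map a Linux logical CPU number to the hardware core id used to
 * index the per-core CIU registers.  Without SMP only the current
 * core can run this code, so its core number is used directly.
 */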
static int octeon_coreid_for_cpu(int cpu)
{
#ifdef CONFIG_SMP
	return cpu_logical_map(cpu);
#else
	return cvmx_get_core_num();
#endif
}

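/*
 * The eight MIPS core interrupt lines are masked through the CP0
 * Status IM field, whose lowest bit is 0x100 (IP0/SW0); hence the
 * "0x100 << bit" shifts in the core handlers below.
 */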
static void octeon_irq_core_ack(unsigned int irq)
{
	unsigned int bit = irq - OCTEON_IRQ_SW0;
	/*
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	clear_c0_status(0x100 << bit);
	/* The two user interrupts must be cleared manually. */
	if (bit < 2)
		clear_c0_cause(0x100 << bit);
}

static void octeon_irq_core_eoi(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned int bit = irq - OCTEON_IRQ_SW0;
	/*
	 * If an IRQ is being processed while we are disabling it the
	 * handler will attempt to unmask the interrupt after it has
	 * been disabled.
	 */
	if (unlikely(desc->status & IRQ_DISABLED))
		return;
	/*
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	set_c0_status(0x100 << bit);
}

static void octeon_irq_core_enable(unsigned int irq)
{
	unsigned long flags;
	unsigned int bit = irq - OCTEON_IRQ_SW0;

	/*
	 * We need to disable interrupts to make sure our updates are
	 * atomic.
	 */
	local_irq_save(flags);
	set_c0_status(0x100 << bit);
	local_irq_restore(flags);
}

static void octeon_irq_core_disable_local(unsigned int irq)
{
	unsigned long flags;
	unsigned int bit = irq - OCTEON_IRQ_SW0;
	/*
	 * We need to disable interrupts to make sure our updates are
	 * atomic.
	 */
	local_irq_save(flags);
	clear_c0_status(0x100 << bit);
	local_irq_restore(flags);
}

static void octeon_irq_core_disable(unsigned int irq)
{
#ifdef CONFIG_SMP
	on_each_cpu((void (*)(void *)) octeon_irq_core_disable_local,
		    (void *) (long) irq, 1);
#else
	octeon_irq_core_disable_local(irq);
#endif
}

static struct irq_chip octeon_irq_chip_core = {
	.name = "Core",
	.enable = octeon_irq_core_enable,
	.disable = octeon_irq_core_disable,
	.ack = octeon_irq_core_ack,
	.eoi = octeon_irq_core_eoi,
};

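/*
 * CIU0 handlers.  Note the register indexing used throughout: the
 * EN0/SUM0 registers for a core are at index (coreid * 2) and the
 * EN1 registers at (coreid * 2 + 1).
 */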
static void octeon_irq_ciu0_ack(unsigned int irq)
{
	switch (irq) {
	case OCTEON_IRQ_GMX_DRP0:
	case OCTEON_IRQ_GMX_DRP1:
	case OCTEON_IRQ_IPD_DRP:
	case OCTEON_IRQ_KEY_ZERO:
	case OCTEON_IRQ_TIMER0:
	case OCTEON_IRQ_TIMER1:
	case OCTEON_IRQ_TIMER2:
	case OCTEON_IRQ_TIMER3:
	{
		int index = cvmx_get_core_num() * 2;
		u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
		/*
		 * CIU timer type interrupts must be acknowledged by
		 * writing a '1' to their sum0 bit.
		 */
		cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask);
		break;
	}
	default:
		break;
	}

	/*
	 * In order to avoid any locking when accessing the CIU, we
	 * acknowledge CIU interrupts by disabling all of them.  This
	 * way we can use a per-core register and avoid any out of
	 * core locking requirements.  This has the side effect that
	 * CIU interrupts can't be processed recursively.
	 *
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	clear_c0_status(0x100 << 2);
}

static void octeon_irq_ciu0_eoi(unsigned int irq)
{
	/*
	 * Enable all CIU interrupts again.  We don't need to disable
	 * IRQs to make these atomic since they are already disabled
	 * earlier in the low level interrupt code.
	 */
	set_c0_status(0x100 << 2);
}

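/*
 * Pick the core that should take the next occurrence of an irq: with
 * a multi-CPU affinity mask the irq is rotated round-robin through
 * the online CPUs in the mask, with a single-CPU mask that CPU is
 * used, and with an empty mask the current core is used.
 */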
static int next_coreid_for_irq(struct irq_desc *desc)
{
#ifdef CONFIG_SMP
	int coreid;
	int weight = cpumask_weight(desc->affinity);

	if (weight > 1) {
		int cpu = smp_processor_id();
		for (;;) {
			cpu = cpumask_next(cpu, desc->affinity);
			if (cpu >= nr_cpu_ids) {
				cpu = -1;
				continue;
			} else if (cpumask_test_cpu(cpu, cpu_online_mask)) {
				break;
			}
		}
		coreid = octeon_coreid_for_cpu(cpu);
	} else if (weight == 1) {
		coreid = octeon_coreid_for_cpu(cpumask_first(desc->affinity));
	} else {
		coreid = cvmx_get_core_num();
	}
	return coreid;
#else
	return cvmx_get_core_num();
#endif
}

static void octeon_irq_ciu0_enable(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int coreid = next_coreid_for_irq(desc);
	unsigned long flags;
	uint64_t en0;
	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */

	raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
	en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
	en0 |= 1ull << bit;
	cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
	cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
	raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
}

static void octeon_irq_ciu0_enable_mbox(unsigned int irq)
{
	int coreid = cvmx_get_core_num();
	unsigned long flags;
	uint64_t en0;
	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */

	raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
	en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
	en0 |= 1ull << bit;
	cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
	cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
	raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
}

static void octeon_irq_ciu0_disable(unsigned int irq)
{
	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */
	unsigned long flags;
	uint64_t en0;
	int cpu;

	raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
		en0 &= ~(1ull << bit);
		cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
	raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
}

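/*
 * Newer chips provide EN*_W1S ("write one to set") and EN*_W1C
 * ("write one to clear") views of the enable registers, so a single
 * bit can be changed with one write and no read-modify-write
 * sequence; the _v2 variants below therefore need no spinlock.
 */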
/*
 * Enable the irq on the next core in the affinity set for chips that
 * have the EN*_W1{S,C} registers.
 */
static void octeon_irq_ciu0_enable_v2(unsigned int irq)
{
	int index;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
	struct irq_desc *desc = irq_to_desc(irq);

	if ((desc->status & IRQ_DISABLED) == 0) {
		index = next_coreid_for_irq(desc) * 2;
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
	}
}

/*
 * Enable the irq on the current CPU for chips that have the
 * EN*_W1{S,C} registers.
 */
static void octeon_irq_ciu0_enable_mbox_v2(unsigned int irq)
{
	int index;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

	index = cvmx_get_core_num() * 2;
	cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
}

/*
 * Disable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu0_ack_v2(unsigned int irq)
{
	int index = cvmx_get_core_num() * 2;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

	switch (irq) {
	case OCTEON_IRQ_GMX_DRP0:
	case OCTEON_IRQ_GMX_DRP1:
	case OCTEON_IRQ_IPD_DRP:
	case OCTEON_IRQ_KEY_ZERO:
	case OCTEON_IRQ_TIMER0:
	case OCTEON_IRQ_TIMER1:
	case OCTEON_IRQ_TIMER2:
	case OCTEON_IRQ_TIMER3:
		/*
		 * CIU timer type interrupts must be acknowledged by
		 * writing a '1' to their sum0 bit.
		 */
		cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask);
		break;
	default:
		break;
	}

	cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
}

/*
 * Re-enable the irq on the current core at EOI for chips that have
 * the EN*_W1{S,C} registers.
 */
static void octeon_irq_ciu0_eoi_mbox_v2(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int index = cvmx_get_core_num() * 2;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

	if (likely((desc->status & IRQ_DISABLED) == 0))
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
}

/*
 * Disable the irq on all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu0_disable_all_v2(unsigned int irq)
{
	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
	int index;
	int cpu;

	for_each_online_cpu(cpu) {
		index = octeon_coreid_for_cpu(cpu) * 2;
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
	}
}

#ifdef CONFIG_SMP
static int octeon_irq_ciu0_set_affinity(unsigned int irq,
					const struct cpumask *dest)
{
	int cpu;
	struct irq_desc *desc = irq_to_desc(irq);
	int enable_one = (desc->status & IRQ_DISABLED) == 0;
	unsigned long flags;
	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */

	/*
	 * For non-v2 CIU, we will allow only single CPU affinity.
	 * This removes the need to do locking in the .ack/.eoi
	 * functions.
	 */
	if (cpumask_weight(dest) != 1)
		return -EINVAL;

	raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		uint64_t en0 =
			cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
		if (cpumask_test_cpu(cpu, dest) && enable_one) {
			enable_one = 0;
			en0 |= 1ull << bit;
		} else {
			en0 &= ~(1ull << bit);
		}
		cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
	raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);

	return 0;
}

/*
 * Set affinity for the irq for chips that have the EN*_W1{S,C}
 * registers.
 */
static int octeon_irq_ciu0_set_affinity_v2(unsigned int irq,
					   const struct cpumask *dest)
{
	int cpu;
	int index;
	struct irq_desc *desc = irq_to_desc(irq);
	int enable_one = (desc->status & IRQ_DISABLED) == 0;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

	for_each_online_cpu(cpu) {
		index = octeon_coreid_for_cpu(cpu) * 2;
		if (cpumask_test_cpu(cpu, dest) && enable_one) {
			enable_one = 0;
			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
		} else {
			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
		}
	}
	return 0;
}
#endif

/*
 * Newer octeon chips have support for lockless CIU operation.
 */
static struct irq_chip octeon_irq_chip_ciu0_v2 = {
	.name = "CIU0",
	.enable = octeon_irq_ciu0_enable_v2,
	.disable = octeon_irq_ciu0_disable_all_v2,
	.eoi = octeon_irq_ciu0_enable_v2,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu0_set_affinity_v2,
#endif
};

static struct irq_chip octeon_irq_chip_ciu0 = {
	.name = "CIU0",
	.enable = octeon_irq_ciu0_enable,
	.disable = octeon_irq_ciu0_disable,
	.eoi = octeon_irq_ciu0_eoi,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu0_set_affinity,
#endif
};

/* The mbox versions don't do any affinity or round-robin. */
static struct irq_chip octeon_irq_chip_ciu0_mbox_v2 = {
	.name = "CIU0-M",
	.enable = octeon_irq_ciu0_enable_mbox_v2,
	.disable = octeon_irq_ciu0_disable,
	.eoi = octeon_irq_ciu0_eoi_mbox_v2,
};

static struct irq_chip octeon_irq_chip_ciu0_mbox = {
	.name = "CIU0-M",
	.enable = octeon_irq_ciu0_enable_mbox,
	.disable = octeon_irq_ciu0_disable,
	.eoi = octeon_irq_ciu0_eoi,
};

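/*
 * CIU1 handlers.  These mirror the CIU0 handlers above, but service
 * CIU_INT_SUM1 on core interrupt line IP3 rather than IP2 and use
 * bit offsets relative to OCTEON_IRQ_WDOG0.
 */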
static void octeon_irq_ciu1_ack(unsigned int irq)
{
	/*
	 * In order to avoid any locking when accessing the CIU, we
	 * acknowledge CIU interrupts by disabling all of them.  This
	 * way we can use a per-core register and avoid any out of
	 * core locking requirements.  This has the side effect that
	 * CIU interrupts can't be processed recursively.  We don't
	 * need to disable IRQs to make these atomic since they are
	 * already disabled earlier in the low level interrupt code.
	 */
	clear_c0_status(0x100 << 3);
}

static void octeon_irq_ciu1_eoi(unsigned int irq)
{
	/*
	 * Enable all CIU interrupts again.  We don't need to disable
	 * IRQs to make these atomic since they are already disabled
	 * earlier in the low level interrupt code.
	 */
	set_c0_status(0x100 << 3);
}

static void octeon_irq_ciu1_enable(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int coreid = next_coreid_for_irq(desc);
	unsigned long flags;
	uint64_t en1;
	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */

	raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
	en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
	en1 |= 1ull << bit;
	cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
	cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
	raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
}

/*
 * Watchdog interrupts are special.  They are associated with a single
 * core, so we hardwire the affinity to that core.
 */
static void octeon_irq_ciu1_wd_enable(unsigned int irq)
{
	unsigned long flags;
	uint64_t en1;
	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */
	int coreid = bit;

	raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
	en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
	en1 |= 1ull << bit;
	cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
	cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
	raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
}

static void octeon_irq_ciu1_disable(unsigned int irq)
{
	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */
	unsigned long flags;
	uint64_t en1;
	int cpu;

	raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
		en1 &= ~(1ull << bit);
		cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
	raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
}

/*
 * Enable the irq on the next core in the affinity set for chips that
 * have the EN*_W1{S,C} registers.
 */
static void octeon_irq_ciu1_enable_v2(unsigned int irq)
{
	int index;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
	struct irq_desc *desc = irq_to_desc(irq);

	if ((desc->status & IRQ_DISABLED) == 0) {
		index = next_coreid_for_irq(desc) * 2 + 1;
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
	}
}

/*
 * Watchdog interrupts are special.  They are associated with a single
 * core, so we hardwire the affinity to that core.
 */
static void octeon_irq_ciu1_wd_enable_v2(unsigned int irq)
{
	int index;
	int coreid = irq - OCTEON_IRQ_WDOG0;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
	struct irq_desc *desc = irq_to_desc(irq);

	if ((desc->status & IRQ_DISABLED) == 0) {
		index = coreid * 2 + 1;
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
	}
}

/*
 * Disable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu1_ack_v2(unsigned int irq)
{
	int index = cvmx_get_core_num() * 2 + 1;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);

	cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
}

/*
 * Disable the irq on all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu1_disable_all_v2(unsigned int irq)
{
	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
	int index;
	int cpu;

	for_each_online_cpu(cpu) {
		index = octeon_coreid_for_cpu(cpu) * 2 + 1;
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
	}
}

#ifdef CONFIG_SMP
static int octeon_irq_ciu1_set_affinity(unsigned int irq,
					const struct cpumask *dest)
{
	int cpu;
	struct irq_desc *desc = irq_to_desc(irq);
	int enable_one = (desc->status & IRQ_DISABLED) == 0;
	unsigned long flags;
	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */

	/*
	 * For non-v2 CIU, we will allow only single CPU affinity.
	 * This removes the need to do locking in the .ack/.eoi
	 * functions.
	 */
	if (cpumask_weight(dest) != 1)
		return -EINVAL;

	raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		uint64_t en1 =
			cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
		if (cpumask_test_cpu(cpu, dest) && enable_one) {
			enable_one = 0;
			en1 |= 1ull << bit;
		} else {
			en1 &= ~(1ull << bit);
		}
		cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
	raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);

	return 0;
}

/*
 * Set affinity for the irq for chips that have the EN*_W1{S,C}
 * registers.
 */
static int octeon_irq_ciu1_set_affinity_v2(unsigned int irq,
					   const struct cpumask *dest)
{
	int cpu;
	int index;
	struct irq_desc *desc = irq_to_desc(irq);
	int enable_one = (desc->status & IRQ_DISABLED) == 0;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);

	for_each_online_cpu(cpu) {
		index = octeon_coreid_for_cpu(cpu) * 2 + 1;
		if (cpumask_test_cpu(cpu, dest) && enable_one) {
			enable_one = 0;
			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
		} else {
			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
		}
	}
	return 0;
}
#endif

/*
 * Newer octeon chips have support for lockless CIU operation.
 */
static struct irq_chip octeon_irq_chip_ciu1_v2 = {
	.name = "CIU1",
	.enable = octeon_irq_ciu1_enable_v2,
	.disable = octeon_irq_ciu1_disable_all_v2,
	.eoi = octeon_irq_ciu1_enable_v2,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu1_set_affinity_v2,
#endif
};

static struct irq_chip octeon_irq_chip_ciu1 = {
	.name = "CIU1",
	.enable = octeon_irq_ciu1_enable,
	.disable = octeon_irq_ciu1_disable,
	.eoi = octeon_irq_ciu1_eoi,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu1_set_affinity,
#endif
};

static struct irq_chip octeon_irq_chip_ciu1_wd_v2 = {
	.name = "CIU1-W",
	.enable = octeon_irq_ciu1_wd_enable_v2,
	.disable = octeon_irq_ciu1_disable_all_v2,
	.eoi = octeon_irq_ciu1_wd_enable_v2,
};

static struct irq_chip octeon_irq_chip_ciu1_wd = {
	.name = "CIU1-W",
	.enable = octeon_irq_ciu1_wd_enable,
	.disable = octeon_irq_ciu1_disable,
	.eoi = octeon_irq_ciu1_eoi,
};

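/*
 * Ack helpers called from plat_irq_dispatch(); arch_init_irq()
 * points these at either the spinlocked or the W1C implementations
 * depending on the chip model.
 */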
static void (*octeon_ciu0_ack)(unsigned int);
static void (*octeon_ciu1_ack)(unsigned int);

void __init arch_init_irq(void)
{
	unsigned int irq;
	struct irq_chip *chip0;
	struct irq_chip *chip0_mbox;
	struct irq_chip *chip1;
	struct irq_chip *chip1_wd;

#ifdef CONFIG_SMP
	/* Set the default affinity to the boot cpu. */
	cpumask_clear(irq_default_affinity);
	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
#endif

	if (NR_IRQS < OCTEON_IRQ_LAST)
		pr_err("octeon_irq_init: NR_IRQS is set too low\n");

	if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) ||
	    OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) ||
	    OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X)) {
		octeon_ciu0_ack = octeon_irq_ciu0_ack_v2;
		octeon_ciu1_ack = octeon_irq_ciu1_ack_v2;
		chip0 = &octeon_irq_chip_ciu0_v2;
		chip0_mbox = &octeon_irq_chip_ciu0_mbox_v2;
		chip1 = &octeon_irq_chip_ciu1_v2;
		chip1_wd = &octeon_irq_chip_ciu1_wd_v2;
	} else {
		octeon_ciu0_ack = octeon_irq_ciu0_ack;
		octeon_ciu1_ack = octeon_irq_ciu1_ack;
		chip0 = &octeon_irq_chip_ciu0;
		chip0_mbox = &octeon_irq_chip_ciu0_mbox;
		chip1 = &octeon_irq_chip_ciu1;
		chip1_wd = &octeon_irq_chip_ciu1_wd;
	}

	/* 0 - 15 reserved for i8259 master and slave controller. */

	/* 16 - 23 MIPS core interrupts */
	for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++) {
		set_irq_chip_and_handler(irq, &octeon_irq_chip_core,
					 handle_percpu_irq);
	}

	/* 24 - 87 CIU_INT_SUM0 */
	for (irq = OCTEON_IRQ_WORKQ0; irq <= OCTEON_IRQ_BOOTDMA; irq++) {
		switch (irq) {
		case OCTEON_IRQ_MBOX0:
		case OCTEON_IRQ_MBOX1:
			set_irq_chip_and_handler(irq, chip0_mbox,
						 handle_percpu_irq);
			break;
		default:
			set_irq_chip_and_handler(irq, chip0,
						 handle_fasteoi_irq);
			break;
		}
	}

	/* 88 - 151 CIU_INT_SUM1 */
	for (irq = OCTEON_IRQ_WDOG0; irq <= OCTEON_IRQ_WDOG15; irq++)
		set_irq_chip_and_handler(irq, chip1_wd, handle_fasteoi_irq);

	for (irq = OCTEON_IRQ_UART2; irq <= OCTEON_IRQ_RESERVED151; irq++)
		set_irq_chip_and_handler(irq, chip1, handle_fasteoi_irq);

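	/* Unmask the CIU lines (IP2 and IP3) on this core. */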
	set_c0_status(0x300 << 2);
}

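/*
 * Low level dispatch loop: run until no enabled interrupt is
 * pending.  IP2 fans out to the 64 CIU_INT_SUM0 sources, IP3 to the
 * 64 CIU_INT_SUM1 sources, and any remaining core interrupt line is
 * handed to the generic MIPS CPU irq code.  fls64() services the
 * highest numbered pending source first.
 */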
asmlinkage void plat_irq_dispatch(void)
{
	const unsigned long core_id = cvmx_get_core_num();
	const uint64_t ciu_sum0_address = CVMX_CIU_INTX_SUM0(core_id * 2);
	const uint64_t ciu_en0_address = CVMX_CIU_INTX_EN0(core_id * 2);
	const uint64_t ciu_sum1_address = CVMX_CIU_INT_SUM1;
	const uint64_t ciu_en1_address = CVMX_CIU_INTX_EN1(core_id * 2 + 1);
	unsigned long cop0_cause;
	unsigned long cop0_status;
	uint64_t ciu_en;
	uint64_t ciu_sum;
	unsigned int irq;

	while (1) {
		cop0_cause = read_c0_cause();
		cop0_status = read_c0_status();
		cop0_cause &= cop0_status;
		cop0_cause &= ST0_IM;

		if (unlikely(cop0_cause & STATUSF_IP2)) {
			ciu_sum = cvmx_read_csr(ciu_sum0_address);
			ciu_en = cvmx_read_csr(ciu_en0_address);
			ciu_sum &= ciu_en;
			if (likely(ciu_sum)) {
				irq = fls64(ciu_sum) + OCTEON_IRQ_WORKQ0 - 1;
				octeon_ciu0_ack(irq);
				do_IRQ(irq);
			} else {
				spurious_interrupt();
			}
		} else if (unlikely(cop0_cause & STATUSF_IP3)) {
			ciu_sum = cvmx_read_csr(ciu_sum1_address);
			ciu_en = cvmx_read_csr(ciu_en1_address);
			ciu_sum &= ciu_en;
			if (likely(ciu_sum)) {
				irq = fls64(ciu_sum) + OCTEON_IRQ_WDOG0 - 1;
				octeon_ciu1_ack(irq);
				do_IRQ(irq);
			} else {
				spurious_interrupt();
			}
		} else if (likely(cop0_cause)) {
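			/*
			 * IP0 is Cause/Status bit 8, so fls() returns
			 * 9 for the lowest line; subtracting 9 gives
			 * the offset from MIPS_CPU_IRQ_BASE.
			 */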
			do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE);
		} else {
			break;
		}
	}
}

#ifdef CONFIG_HOTPLUG_CPU

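/*
 * Called on a CPU that is going offline: mask the core interrupt
 * lines locally and migrate any irq whose affinity still includes
 * this CPU over to the remaining online CPUs.
 */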
void fixup_irqs(void)
{
	int irq;
	struct irq_desc *desc;
	cpumask_t new_affinity;
	unsigned long flags;
	int do_set_affinity;
	int cpu;

	cpu = smp_processor_id();

	for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++)
		octeon_irq_core_disable_local(irq);

	for (irq = OCTEON_IRQ_WORKQ0; irq < OCTEON_IRQ_LAST; irq++) {
		desc = irq_to_desc(irq);
		switch (irq) {
		case OCTEON_IRQ_MBOX0:
		case OCTEON_IRQ_MBOX1:
			/* The eoi function will disable them on this CPU. */
			desc->chip->eoi(irq);
			break;
		case OCTEON_IRQ_WDOG0:
		case OCTEON_IRQ_WDOG1:
		case OCTEON_IRQ_WDOG2:
		case OCTEON_IRQ_WDOG3:
		case OCTEON_IRQ_WDOG4:
		case OCTEON_IRQ_WDOG5:
		case OCTEON_IRQ_WDOG6:
		case OCTEON_IRQ_WDOG7:
		case OCTEON_IRQ_WDOG8:
		case OCTEON_IRQ_WDOG9:
		case OCTEON_IRQ_WDOG10:
		case OCTEON_IRQ_WDOG11:
		case OCTEON_IRQ_WDOG12:
		case OCTEON_IRQ_WDOG13:
		case OCTEON_IRQ_WDOG14:
		case OCTEON_IRQ_WDOG15:
			/*
			 * These have special per CPU semantics and
			 * are handled in the watchdog driver.
			 */
			break;
		default:
			raw_spin_lock_irqsave(&desc->lock, flags);
			/*
			 * If this irq has an action, it is in use and
			 * must be migrated if it has affinity to this
			 * cpu.
			 */
			if (desc->action &&
			    cpumask_test_cpu(cpu, desc->affinity)) {
				if (cpumask_weight(desc->affinity) > 1) {
					/*
					 * It has multi CPU affinity,
					 * just remove this CPU from
					 * the affinity set.
					 */
					cpumask_copy(&new_affinity,
						     desc->affinity);
					cpumask_clear_cpu(cpu, &new_affinity);
				} else {
					/*
					 * Otherwise, put it on the lowest
					 * numbered online CPU.
					 */
					cpumask_clear(&new_affinity);
					cpumask_set_cpu(
						cpumask_first(cpu_online_mask),
						&new_affinity);
				}
				do_set_affinity = 1;
			} else {
				do_set_affinity = 0;
			}
			raw_spin_unlock_irqrestore(&desc->lock, flags);

			if (do_set_affinity)
				irq_set_affinity(irq, &new_affinity);

			break;
		}
	}
}

#endif /* CONFIG_HOTPLUG_CPU */