/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004-2008 Cavium Networks
 */
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/hardirq.h>

#include <asm/octeon/octeon.h>

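/*
 * The CIU0/CIU1 enable registers are replicated per core.  An enable
 * only touches the local core's registers, so enables may run
 * concurrently under the read lock; disables and affinity changes must
 * update every core's registers, so they take the write lock.  The MSI
 * enables all live in one shared register, protected by the spinlock.
 */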
DEFINE_RWLOCK(octeon_irq_ciu0_rwlock);
DEFINE_RWLOCK(octeon_irq_ciu1_rwlock);
DEFINE_SPINLOCK(octeon_irq_msi_lock);

static void octeon_irq_core_ack(unsigned int irq)
{
	unsigned int bit = irq - OCTEON_IRQ_SW0;
	/*
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	clear_c0_status(0x100 << bit);
	/* The two user interrupts must be cleared manually. */
	if (bit < 2)
		clear_c0_cause(0x100 << bit);
}

static void octeon_irq_core_eoi(unsigned int irq)
{
	struct irq_desc *desc = irq_desc + irq;
	unsigned int bit = irq - OCTEON_IRQ_SW0;
	/*
	 * If an IRQ is being processed while we are disabling it the
	 * handler will attempt to unmask the interrupt after it has
	 * been disabled.
	 */
	if (desc->status & IRQ_DISABLED)
		return;

	/*
	 * There is a race here: the IRQ could be disabled on another
	 * code path between the IRQ_DISABLED check above and the
	 * set_c0_status() below, leaving a disabled interrupt
	 * unmasked.  This should be fixed.
	 */

	/*
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	set_c0_status(0x100 << bit);
}

static void octeon_irq_core_enable(unsigned int irq)
{
	unsigned long flags;
	unsigned int bit = irq - OCTEON_IRQ_SW0;

	/*
	 * We need to disable interrupts to make sure our updates are
	 * atomic.
	 */
	local_irq_save(flags);
	set_c0_status(0x100 << bit);
	local_irq_restore(flags);
}

static void octeon_irq_core_disable_local(unsigned int irq)
{
	unsigned long flags;
	unsigned int bit = irq - OCTEON_IRQ_SW0;
	/*
	 * We need to disable interrupts to make sure our updates are
	 * atomic.
	 */
	local_irq_save(flags);
	clear_c0_status(0x100 << bit);
	local_irq_restore(flags);
}

static void octeon_irq_core_disable(unsigned int irq)
{
#ifdef CONFIG_SMP
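	/*
	 * Run the local disable on every online core; the IRQ number
	 * is smuggled through the void * argument of on_each_cpu().
	 */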
	on_each_cpu((void (*)(void *)) octeon_irq_core_disable_local,
		    (void *) (long) irq, 1);
#else
	octeon_irq_core_disable_local(irq);
#endif
}

static struct irq_chip octeon_irq_chip_core = {
	.name = "Core",
	.enable = octeon_irq_core_enable,
	.disable = octeon_irq_core_disable,
	.ack = octeon_irq_core_ack,
	.eoi = octeon_irq_core_eoi,
};

static void octeon_irq_ciu0_ack(unsigned int irq)
{
	/*
	 * In order to avoid any locking when accessing the CIU, we
	 * acknowledge CIU interrupts by disabling all of them.  This
	 * way we can use a per core register and avoid any out of
	 * core locking requirements.  This has the side effect that
	 * CIU interrupts can't be processed recursively.
	 *
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	clear_c0_status(0x100 << 2);
}

static void octeon_irq_ciu0_eoi(unsigned int irq)
{
	/*
	 * Enable all CIU interrupts again.  We don't need to disable
	 * IRQs to make these atomic since they are already disabled
	 * earlier in the low level interrupt code.
	 */
	set_c0_status(0x100 << 2);
}

static void octeon_irq_ciu0_enable(unsigned int irq)
{
	int coreid = cvmx_get_core_num();
	unsigned long flags;
	uint64_t en0;
	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */

	/*
	 * A read lock is used here: during an enable each core only
	 * updates its own enable register, so concurrent enables
	 * don't interfere with each other.  A disable must update
	 * every core's register, so it takes the write lock to keep
	 * out any enables that might conflict with it.
	 */
	read_lock_irqsave(&octeon_irq_ciu0_rwlock, flags);
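	/*
	 * Each core has two EN0 registers, one routed to IP2 and one
	 * to IP3; coreid * 2 selects the IP2 copy for this core.
	 */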
	en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
	en0 |= 1ull << bit;
	cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
	cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
	read_unlock_irqrestore(&octeon_irq_ciu0_rwlock, flags);
}

static void octeon_irq_ciu0_disable(unsigned int irq)
{
	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */
	unsigned long flags;
	uint64_t en0;
#ifdef CONFIG_SMP
	int cpu;
	write_lock_irqsave(&octeon_irq_ciu0_rwlock, flags);
	for_each_online_cpu(cpu) {
		int coreid = cpu_logical_map(cpu);
		en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
		en0 &= ~(1ull << bit);
		cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
	write_unlock_irqrestore(&octeon_irq_ciu0_rwlock, flags);
#else
	int coreid = cvmx_get_core_num();
	local_irq_save(flags);
	en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
	en0 &= ~(1ull << bit);
	cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
	cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
	local_irq_restore(flags);
#endif
}

#ifdef CONFIG_SMP
static int octeon_irq_ciu0_set_affinity(unsigned int irq,
					const struct cpumask *dest)
{
	int cpu;
	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */

	write_lock(&octeon_irq_ciu0_rwlock);
	for_each_online_cpu(cpu) {
		int coreid = cpu_logical_map(cpu);
		uint64_t en0 =
			cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
		if (cpumask_test_cpu(cpu, dest))
			en0 |= 1ull << bit;
		else
			en0 &= ~(1ull << bit);
		cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
	write_unlock(&octeon_irq_ciu0_rwlock);

	return 0;
}
#endif

static struct irq_chip octeon_irq_chip_ciu0 = {
	.name = "CIU0",
	.enable = octeon_irq_ciu0_enable,
	.disable = octeon_irq_ciu0_disable,
	.ack = octeon_irq_ciu0_ack,
	.eoi = octeon_irq_ciu0_eoi,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu0_set_affinity,
#endif
};

static void octeon_irq_ciu1_ack(unsigned int irq)
{
	/*
	 * In order to avoid any locking when accessing the CIU, we
	 * acknowledge CIU interrupts by disabling all of them.  This
	 * way we can use a per core register and avoid any out of
	 * core locking requirements.  This has the side effect that
	 * CIU interrupts can't be processed recursively.  We don't
	 * need to disable IRQs to make these atomic since they are
	 * already disabled earlier in the low level interrupt code.
	 */
	clear_c0_status(0x100 << 3);
}

static void octeon_irq_ciu1_eoi(unsigned int irq)
{
	/*
	 * Enable all CIU interrupts again.  We don't need to disable
	 * IRQs to make these atomic since they are already disabled
	 * earlier in the low level interrupt code.
	 */
	set_c0_status(0x100 << 3);
}

static void octeon_irq_ciu1_enable(unsigned int irq)
{
	int coreid = cvmx_get_core_num();
	unsigned long flags;
	uint64_t en1;
	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */

	/*
	 * A read lock is used here: during an enable each core only
	 * updates its own enable register, so concurrent enables
	 * don't interfere with each other.  A disable must update
	 * every core's register, so it takes the write lock to keep
	 * out any enables that might conflict with it.
	 */
	read_lock_irqsave(&octeon_irq_ciu1_rwlock, flags);
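	/*
	 * Each core has two EN1 registers, one routed to IP2 and one
	 * to IP3; coreid * 2 + 1 selects the IP3 copy for this core.
	 */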
	en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
	en1 |= 1ull << bit;
	cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
	cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
	read_unlock_irqrestore(&octeon_irq_ciu1_rwlock, flags);
}

static void octeon_irq_ciu1_disable(unsigned int irq)
{
	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */
	unsigned long flags;
	uint64_t en1;
#ifdef CONFIG_SMP
	int cpu;
	write_lock_irqsave(&octeon_irq_ciu1_rwlock, flags);
	for_each_online_cpu(cpu) {
		int coreid = cpu_logical_map(cpu);
		en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
		en1 &= ~(1ull << bit);
		cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
	write_unlock_irqrestore(&octeon_irq_ciu1_rwlock, flags);
#else
	int coreid = cvmx_get_core_num();
	local_irq_save(flags);
	en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
	en1 &= ~(1ull << bit);
	cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
	cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
	local_irq_restore(flags);
#endif
}

#ifdef CONFIG_SMP
static int octeon_irq_ciu1_set_affinity(unsigned int irq,
					const struct cpumask *dest)
{
	int cpu;
	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */

	write_lock(&octeon_irq_ciu1_rwlock);
	for_each_online_cpu(cpu) {
		int coreid = cpu_logical_map(cpu);
		uint64_t en1 =
			cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
		if (cpumask_test_cpu(cpu, dest))
			en1 |= 1ull << bit;
		else
			en1 &= ~(1ull << bit);
		cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
	write_unlock(&octeon_irq_ciu1_rwlock);

	return 0;
}
#endif

static struct irq_chip octeon_irq_chip_ciu1 = {
	.name = "CIU1",
	.enable = octeon_irq_ciu1_enable,
	.disable = octeon_irq_ciu1_disable,
	.ack = octeon_irq_ciu1_ack,
	.eoi = octeon_irq_ciu1_eoi,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu1_set_affinity,
#endif
};

#ifdef CONFIG_PCI_MSI

static void octeon_irq_msi_ack(unsigned int irq)
{
	if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
		/* These chips have PCI */
		cvmx_write_csr(CVMX_NPI_NPI_MSI_RCV,
			       1ull << (irq - OCTEON_IRQ_MSI_BIT0));
	} else {
		/*
		 * These chips have PCIe. Thankfully the ACK doesn't
		 * need any locking.
		 */
		cvmx_write_csr(CVMX_PEXP_NPEI_MSI_RCV0,
			       1ull << (irq - OCTEON_IRQ_MSI_BIT0));
	}
}

static void octeon_irq_msi_eoi(unsigned int irq)
{
	/* Nothing needed */
}

static void octeon_irq_msi_enable(unsigned int irq)
{
	if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
		/*
		 * Octeon PCI doesn't have the ability to mask/unmask
		 * MSI interrupts individually.  Instead of
		 * masking/unmasking them in groups of 16, we simply
		 * assume MSI devices are well behaved.  MSI
		 * interrupts are always enabled and the ACK is
		 * assumed to be enough.
		 */
	} else {
		/*
		 * These chips have PCIe.  Note that we only support
		 * the first 64 MSI interrupts.  Unfortunately all the
		 * MSI enables are in the same register.  We use
		 * MSI0's lock to control access to them all.
		 */
		uint64_t en;
		unsigned long flags;
		spin_lock_irqsave(&octeon_irq_msi_lock, flags);
		en = cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
		en |= 1ull << (irq - OCTEON_IRQ_MSI_BIT0);
		cvmx_write_csr(CVMX_PEXP_NPEI_MSI_ENB0, en);
		cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
		spin_unlock_irqrestore(&octeon_irq_msi_lock, flags);
	}
}

static void octeon_irq_msi_disable(unsigned int irq)
{
	if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
		/* See comment in enable */
	} else {
		/*
		 * These chips have PCIe.  Note that we only support
		 * the first 64 MSI interrupts.  Unfortunately all the
		 * MSI enables are in the same register.  We use
		 * MSI0's lock to control access to them all.
		 */
		uint64_t en;
		unsigned long flags;
		spin_lock_irqsave(&octeon_irq_msi_lock, flags);
		en = cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
		en &= ~(1ull << (irq - OCTEON_IRQ_MSI_BIT0));
		cvmx_write_csr(CVMX_PEXP_NPEI_MSI_ENB0, en);
		cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
		spin_unlock_irqrestore(&octeon_irq_msi_lock, flags);
	}
}

static struct irq_chip octeon_irq_chip_msi = {
	.name = "MSI",
	.enable = octeon_irq_msi_enable,
	.disable = octeon_irq_msi_disable,
	.ack = octeon_irq_msi_ack,
	.eoi = octeon_irq_msi_eoi,
};
#endif

void __init arch_init_irq(void)
{
	int irq;

#ifdef CONFIG_SMP
	/* Set the default affinity to the boot cpu. */
	cpumask_clear(irq_default_affinity);
	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
#endif

	if (NR_IRQS < OCTEON_IRQ_LAST)
		pr_err("arch_init_irq: NR_IRQS is set too low\n");

	/* 0 - 15 reserved for i8259 master and slave controller. */

	/* 16 - 23 MIPS internal */
	for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++) {
		set_irq_chip_and_handler(irq, &octeon_irq_chip_core,
					 handle_percpu_irq);
	}

	/* 24 - 87 CIU_INT_SUM0 */
	for (irq = OCTEON_IRQ_WORKQ0; irq <= OCTEON_IRQ_BOOTDMA; irq++) {
		set_irq_chip_and_handler(irq, &octeon_irq_chip_ciu0,
					 handle_percpu_irq);
	}

	/* 88 - 151 CIU_INT_SUM1 */
	for (irq = OCTEON_IRQ_WDOG0; irq <= OCTEON_IRQ_RESERVED151; irq++) {
		set_irq_chip_and_handler(irq, &octeon_irq_chip_ciu1,
					 handle_percpu_irq);
	}

#ifdef CONFIG_PCI_MSI
	/* 152 - 215 PCI/PCIe MSI interrupts */
	for (irq = OCTEON_IRQ_MSI_BIT0; irq <= OCTEON_IRQ_MSI_BIT63; irq++) {
		set_irq_chip_and_handler(irq, &octeon_irq_chip_msi,
					 handle_percpu_irq);
	}
#endif
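	/* Unmask the two CIU lines, IP2 and IP3 (Status IM bits 10-11). */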
	set_c0_status(0x300 << 2);
}

asmlinkage void plat_irq_dispatch(void)
{
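	/* Precompute the CIU summary and enable CSR addresses used below. */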
	const unsigned long core_id = cvmx_get_core_num();
	const uint64_t ciu_sum0_address = CVMX_CIU_INTX_SUM0(core_id * 2);
	const uint64_t ciu_en0_address = CVMX_CIU_INTX_EN0(core_id * 2);
	const uint64_t ciu_sum1_address = CVMX_CIU_INT_SUM1;
	const uint64_t ciu_en1_address = CVMX_CIU_INTX_EN1(core_id * 2 + 1);
	unsigned long cop0_cause;
	unsigned long cop0_status;
	uint64_t ciu_en;
	uint64_t ciu_sum;

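	/*
	 * Keep dispatching until no enabled interrupt is still
	 * pending: mask Cause down to the pending, unmasked IM bits
	 * and hand the highest-numbered source to do_IRQ().
	 */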
	while (1) {
		cop0_cause = read_c0_cause();
		cop0_status = read_c0_status();
		cop0_cause &= cop0_status;
		cop0_cause &= ST0_IM;

		if (unlikely(cop0_cause & STATUSF_IP2)) {
			ciu_sum = cvmx_read_csr(ciu_sum0_address);
			ciu_en = cvmx_read_csr(ciu_en0_address);
			ciu_sum &= ciu_en;
			if (likely(ciu_sum))
				do_IRQ(fls64(ciu_sum) + OCTEON_IRQ_WORKQ0 - 1);
			else
				spurious_interrupt();
		} else if (unlikely(cop0_cause & STATUSF_IP3)) {
			ciu_sum = cvmx_read_csr(ciu_sum1_address);
			ciu_en = cvmx_read_csr(ciu_en1_address);
			ciu_sum &= ciu_en;
			if (likely(ciu_sum))
				do_IRQ(fls64(ciu_sum) + OCTEON_IRQ_WDOG0 - 1);
			else
				spurious_interrupt();
		} else if (likely(cop0_cause)) {
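			/*
			 * The IM bits start at bit 8 of Cause and
			 * fls() is 1-based, hence the -9 to map IP0
			 * onto MIPS_CPU_IRQ_BASE.
			 */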
			do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE);
		} else {
			break;
		}
	}
}