xref: /openbmc/linux/arch/powerpc/kernel/irq.c (revision 4800cd83)
/*
 *  Derived from arch/i386/kernel/irq.c
 *    Copyright (C) 1992 Linus Torvalds
 *  Adapted from arch/i386 by Gary Thomas
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Updated and modified by Cort Dougan <cort@fsmlabs.com>
 *    Copyright (C) 1996-2001 Cort Dougan
 *  Adapted for Power Macintosh by Paul Mackerras
 *    Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * The MPC8xx has an interrupt mask in the SIU.  If a bit is set, the
 * interrupt is _enabled_.  As expected, IRQ0 is bit 0 in the 32-bit
 * mask register (of which only 16 are defined), hence the weird shifting
 * and complement of the cached_irq_mask.  I want to be able to stuff
 * this right into the SIU SMASK register.
 * Many of the prep/chrp functions are conditionally compiled on CONFIG_8xx
 * to reduce code space and undefined function references.
 */
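
/*
 * Illustrative sketch of the 8xx masking described above (hypothetical;
 * the real code lives in the 8xx PIC support, and the register/variable
 * names here are assumptions).  With PowerPC MSB-first bit numbering,
 * IRQ0 is the top bit of the mask, so enabling source n and pushing the
 * cached mask into the SIU might look like:
 *
 *	cached_irq_mask |= 0x80000000 >> n;	// set bit == enabled
 *	out_be32(&siu_reg->sc_simask, cached_irq_mask);
 */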

#undef DEBUG

#include <linux/module.h>
#include <linux/threads.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/mutex.h>
#include <linux/bootmem.h>
#include <linux/pci.h>
#include <linux/debugfs.h>
#include <linux/of.h>
#include <linux/of_irq.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/cache.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
#include <asm/dbell.h>
#include <asm/smp.h>

#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/firmware.h>
#include <asm/lv1call.h>
#endif
#define CREATE_TRACE_POINTS
#include <asm/trace.h>

DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

int __irq_offset_value;

#ifdef CONFIG_PPC32
EXPORT_SYMBOL(__irq_offset_value);
atomic_t ppc_n_lost_interrupts;

#ifdef CONFIG_TAU_INT
extern int tau_initialized;
extern int tau_interrupts(int);
#endif
#endif /* CONFIG_PPC32 */

#ifdef CONFIG_PPC64

#ifndef CONFIG_SPARSE_IRQ
EXPORT_SYMBOL(irq_desc);
#endif

int distribute_irqs = 1;

static inline notrace unsigned long get_hard_enabled(void)
{
	unsigned long enabled;

	__asm__ __volatile__("lbz %0,%1(13)"
	: "=r" (enabled) : "i" (offsetof(struct paca_struct, hard_enabled)));

	return enabled;
}

static inline notrace void set_soft_enabled(unsigned long enable)
{
	__asm__ __volatile__("stb %0,%1(13)"
	: : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled)));
}
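
/*
 * Context for the two helpers above: ppc64 disables interrupts "lazily".
 * A sketch of what the disable fast path amounts to (hypothetical
 * pseudo-code; the real arch_local_irq_disable() lives in hw_irq.h):
 *
 *	flags = local_paca->soft_enabled;	// save current state
 *	local_paca->soft_enabled = 0;		// soft-disable only
 *	// MSR[EE] stays set; hard_enabled is only cleared if an
 *	// interrupt actually arrives while we are soft-disabled.
 *
 * Hard-coding the r13 access in asm keeps gcc from caching the paca
 * pointer in another register across a preemption point.
 */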

notrace void arch_local_irq_restore(unsigned long en)
{
	/*
	 * get_paca()->soft_enabled = en;
	 * Is it ever valid to use local_irq_restore(0) when soft_enabled is 1?
	 * That was allowed before, and in such a case we do need to take care
	 * that gcc will set soft_enabled directly via r13, not choose to use
	 * an intermediate register, lest we're preempted to a different cpu.
	 */
	set_soft_enabled(en);
	if (!en)
		return;

#ifdef CONFIG_PPC_STD_MMU_64
	if (firmware_has_feature(FW_FEATURE_ISERIES)) {
		/*
		 * Do we need to disable preemption here?  Not really: in the
		 * unlikely event that we're preempted to a different cpu in
		 * between getting r13, loading its lppaca_ptr, and loading
		 * its any_int, we might call iseries_handle_interrupts without
		 * an interrupt pending on the new cpu, but that's no disaster,
		 * is it?  And the business of preempting us off the old cpu
		 * would itself involve a local_irq_restore which handles the
		 * interrupt to that cpu.
		 *
		 * But use "local_paca->lppaca_ptr" instead of "get_lppaca()"
		 * to avoid any preemption checking added into get_paca().
		 */
		if (local_paca->lppaca_ptr->int_dword.any_int)
			iseries_handle_interrupts();
	}
#endif /* CONFIG_PPC_STD_MMU_64 */

	/*
	 * if (get_paca()->hard_enabled) return;
	 * But again we need to take care that gcc gets hard_enabled directly
	 * via r13, not choose to use an intermediate register, lest we're
	 * preempted to a different cpu in between the two instructions.
	 */
	if (get_hard_enabled())
		return;

#if defined(CONFIG_BOOKE) && defined(CONFIG_SMP)
	/* Check for pending doorbell interrupts and resend to ourselves */
	doorbell_check_self();
#endif

	/*
	 * Need to hard-enable interrupts here.  Since currently disabled,
	 * no need to take further asm precautions against preemption; but
	 * use local_paca instead of get_paca() to avoid preemption checking.
	 */
	local_paca->hard_enabled = en;

#ifndef CONFIG_BOOKE
	/* On server, re-trigger the decrementer if it went negative since
	 * some processors only trigger on edge transitions of the sign bit.
	 *
	 * BookE has a level sensitive decrementer (latches in TSR) so we
	 * don't need that.
	 */
	if ((int)mfspr(SPRN_DEC) < 0)
		mtspr(SPRN_DEC, 1);
#endif /* !CONFIG_BOOKE */

	/*
	 * Force the delivery of pending soft-disabled interrupts on PS3.
	 * Any HV call will have this side effect.
	 */
	if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
		u64 tmp;
		lv1_get_version_info(&tmp);
	}

	__hard_irq_enable();
}
EXPORT_SYMBOL(arch_local_irq_restore);
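
/*
 * Typical caller pattern that lands here (standard kernel API, shown
 * only as a usage sketch):
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);		// soft-disables on ppc64
 *	... critical section ...
 *	local_irq_restore(flags);	// may replay and hard-enable, above
 */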
#endif /* CONFIG_PPC64 */

static int show_other_interrupts(struct seq_file *p, int prec)
{
	int j;

#if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT)
	if (tau_initialized) {
		seq_printf(p, "%*s: ", prec, "TAU");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", tau_interrupts(j));
		seq_puts(p, "  PowerPC             Thermal Assist (cpu temp)\n");
	}
#endif /* CONFIG_PPC32 && CONFIG_TAU_INT */

	seq_printf(p, "%*s: ", prec, "LOC");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs);
	seq_printf(p, "  Local timer interrupts\n");

	seq_printf(p, "%*s: ", prec, "SPU");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs);
	seq_printf(p, "  Spurious interrupts\n");

	seq_printf(p, "%*s: ", prec, "CNT");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs);
	seq_printf(p, "  Performance monitoring interrupts\n");

	seq_printf(p, "%*s: ", prec, "MCE");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions);
	seq_printf(p, "  Machine check exceptions\n");

	return 0;
}
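
/*
 * The resulting /proc/interrupts summary lines look roughly like this
 * on a two-CPU box (counts are made up, purely for illustration):
 *
 *	LOC:      4321       1234   Local timer interrupts
 *	SPU:         0          2   Spurious interrupts
 *	CNT:         0          0   Performance monitoring interrupts
 *	MCE:         0          0   Machine check exceptions
 */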

int show_interrupts(struct seq_file *p, void *v)
{
	unsigned long flags, any_count = 0;
	int i = *(loff_t *) v, j, prec;
	struct irqaction *action;
	struct irq_desc *desc;

	if (i > nr_irqs)
		return 0;

	for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
		j *= 10;

	if (i == nr_irqs)
		return show_other_interrupts(p, prec);

	/* print header */
	if (i == 0) {
		seq_printf(p, "%*s", prec + 8, "");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%-8d", j);
		seq_putc(p, '\n');
	}

	desc = irq_to_desc(i);
	if (!desc)
		return 0;

	raw_spin_lock_irqsave(&desc->lock, flags);
	for_each_online_cpu(j)
		any_count |= kstat_irqs_cpu(i, j);
	action = desc->action;
	if (!action && !any_count)
		goto out;

	seq_printf(p, "%*d: ", prec, i);
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));

	if (desc->chip)
		seq_printf(p, "  %-16s", desc->chip->name);
	else
		seq_printf(p, "  %-16s", "None");
	seq_printf(p, " %-8s", (desc->status & IRQ_LEVEL) ? "Level" : "Edge");

	if (action) {
		seq_printf(p, "     %s", action->name);
		while ((action = action->next) != NULL)
			seq_printf(p, ", %s", action->name);
	}

	seq_putc(p, '\n');
out:
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return 0;
}

/*
 * /proc/stat helpers
 */
u64 arch_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = per_cpu(irq_stat, cpu).timer_irqs;

	sum += per_cpu(irq_stat, cpu).pmu_irqs;
	sum += per_cpu(irq_stat, cpu).mce_exceptions;
	sum += per_cpu(irq_stat, cpu).spurious_irqs;

	return sum;
}

#ifdef CONFIG_HOTPLUG_CPU
void fixup_irqs(const struct cpumask *map)
{
	struct irq_desc *desc;
	unsigned int irq;
	static int warned;
	cpumask_var_t mask;

	alloc_cpumask_var(&mask, GFP_KERNEL);

	for_each_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		if (desc->status & IRQ_PER_CPU)
			continue;

		cpumask_and(mask, desc->affinity, map);
		if (cpumask_any(mask) >= nr_cpu_ids) {
			printk("Breaking affinity for irq %i\n", irq);
			cpumask_copy(mask, map);
		}
		if (desc->chip->set_affinity)
			desc->chip->set_affinity(irq, mask);
		else if (desc->action && !(warned++))
			printk("Cannot set affinity for irq %i\n", irq);
	}

	free_cpumask_var(mask);

	local_irq_enable();
	mdelay(1);
	local_irq_disable();
}
#endif
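
/*
 * Usage sketch (hypothetical hotplug path; the variable names are
 * assumptions): when a CPU is unplugged, platform code migrates
 * interrupts to the CPUs that remain online, roughly:
 *
 *	cpumask_clear_cpu(dying_cpu, &surviving_map);
 *	fixup_irqs(&surviving_map);	// rebind IRQs to remaining CPUs
 */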

static inline void handle_one_irq(unsigned int irq)
{
	struct thread_info *curtp, *irqtp;
	unsigned long saved_sp_limit;
	struct irq_desc *desc;

	/* Switch to the irq stack to handle this */
	curtp = current_thread_info();
	irqtp = hardirq_ctx[smp_processor_id()];

	if (curtp == irqtp) {
		/* We're already on the irq stack, just handle it */
		generic_handle_irq(irq);
		return;
	}

	desc = irq_to_desc(irq);
	saved_sp_limit = current->thread.ksp_limit;

	irqtp->task = curtp->task;
	irqtp->flags = 0;

	/* Copy the softirq bits in preempt_count so that the
	 * softirq checks work in the hardirq context. */
	irqtp->preempt_count = (irqtp->preempt_count & ~SOFTIRQ_MASK) |
			       (curtp->preempt_count & SOFTIRQ_MASK);

	current->thread.ksp_limit = (unsigned long)irqtp +
		_ALIGN_UP(sizeof(struct thread_info), 16);

	call_handle_irq(irq, desc, irqtp, desc->handle_irq);
	current->thread.ksp_limit = saved_sp_limit;
	irqtp->task = NULL;

	/* Set any flag that may have been set on the
	 * alternate stack
	 */
	if (irqtp->flags)
		set_bits(irqtp->flags, &curtp->flags);
}
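
/*
 * A rough picture of the switch performed above (illustrative only):
 *
 *	curtp: the interrupted task's own stack, possibly nearly full
 *	irqtp: per-cpu hardirq stack, with a thread_info at its base
 *
 * ksp_limit is raised to just past irqtp's thread_info so that the
 * stack overflow checking in the entry code tracks the irq stack while
 * call_handle_irq() runs on it, then restored on the way out.
 */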

static inline void check_stack_overflow(void)
{
#ifdef CONFIG_DEBUG_STACKOVERFLOW
	long sp;

	sp = __get_SP() & (THREAD_SIZE-1);

	/* check for stack overflow: is there less than 2KB free? */
	if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
		printk("do_IRQ: stack overflow: %ld\n",
			sp - sizeof(struct thread_info));
		dump_stack();
	}
#endif
}

void do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	unsigned int irq;

	trace_irq_entry(regs);

	irq_enter();

	check_stack_overflow();

	irq = ppc_md.get_irq();

	if (irq != NO_IRQ && irq != NO_IRQ_IGNORE)
		handle_one_irq(irq);
	else if (irq != NO_IRQ_IGNORE)
		__get_cpu_var(irq_stat).spurious_irqs++;

	irq_exit();
	set_irq_regs(old_regs);

#ifdef CONFIG_PPC_ISERIES
	if (firmware_has_feature(FW_FEATURE_ISERIES) &&
			get_lppaca()->int_dword.fields.decr_int) {
		get_lppaca()->int_dword.fields.decr_int = 0;
		/* Signal a fake decrementer interrupt */
		timer_interrupt(regs);
	}
#endif

	trace_irq_exit(regs);
}

void __init init_IRQ(void)
{
	if (ppc_md.init_IRQ)
		ppc_md.init_IRQ();

	exc_lvl_ctx_init();

	irq_ctx_init();
}

#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
struct thread_info   *critirq_ctx[NR_CPUS] __read_mostly;
struct thread_info    *dbgirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *mcheckirq_ctx[NR_CPUS] __read_mostly;

void exc_lvl_ctx_init(void)
{
	struct thread_info *tp;
	int i, hw_cpu;

	for_each_possible_cpu(i) {
		hw_cpu = get_hard_smp_processor_id(i);
		memset((void *)critirq_ctx[hw_cpu], 0, THREAD_SIZE);
		tp = critirq_ctx[hw_cpu];
		tp->cpu = i;
		tp->preempt_count = 0;

#ifdef CONFIG_BOOKE
		memset((void *)dbgirq_ctx[hw_cpu], 0, THREAD_SIZE);
		tp = dbgirq_ctx[hw_cpu];
		tp->cpu = i;
		tp->preempt_count = 0;

		memset((void *)mcheckirq_ctx[hw_cpu], 0, THREAD_SIZE);
		tp = mcheckirq_ctx[hw_cpu];
		tp->cpu = i;
		tp->preempt_count = HARDIRQ_OFFSET;
#endif
	}
}
#endif

struct thread_info *softirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly;

void irq_ctx_init(void)
{
	struct thread_info *tp;
	int i;

	for_each_possible_cpu(i) {
		memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
		tp = softirq_ctx[i];
		tp->cpu = i;
		tp->preempt_count = 0;

		memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
		tp = hardirq_ctx[i];
		tp->cpu = i;
		tp->preempt_count = HARDIRQ_OFFSET;
	}
}

static inline void do_softirq_onstack(void)
{
	struct thread_info *curtp, *irqtp;
	unsigned long saved_sp_limit = current->thread.ksp_limit;

	curtp = current_thread_info();
	irqtp = softirq_ctx[smp_processor_id()];
	irqtp->task = curtp->task;
	current->thread.ksp_limit = (unsigned long)irqtp +
				    _ALIGN_UP(sizeof(struct thread_info), 16);
	call_do_softirq(irqtp);
	current->thread.ksp_limit = saved_sp_limit;
	irqtp->task = NULL;
}

void do_softirq(void)
{
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	if (local_softirq_pending())
		do_softirq_onstack();

	local_irq_restore(flags);
}

/*
 * IRQ controller and virtual interrupts
 */

static LIST_HEAD(irq_hosts);
static DEFINE_RAW_SPINLOCK(irq_big_lock);
static unsigned int revmap_trees_allocated;
static DEFINE_MUTEX(revmap_trees_mutex);
struct irq_map_entry irq_map[NR_IRQS];
static unsigned int irq_virq_count = NR_IRQS;
static struct irq_host *irq_default_host;

irq_hw_number_t virq_to_hw(unsigned int virq)
{
	return irq_map[virq].hwirq;
}
EXPORT_SYMBOL_GPL(virq_to_hw);
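
/*
 * Usage sketch (hypothetical driver code): recover the hardware source
 * number behind a virq, e.g. to program a controller routing register:
 *
 *	irq_hw_number_t hw = virq_to_hw(virq);
 */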

static int default_irq_host_match(struct irq_host *h, struct device_node *np)
{
	return h->of_node != NULL && h->of_node == np;
}

struct irq_host *irq_alloc_host(struct device_node *of_node,
				unsigned int revmap_type,
				unsigned int revmap_arg,
				struct irq_host_ops *ops,
				irq_hw_number_t inval_irq)
{
	struct irq_host *host;
	unsigned int size = sizeof(struct irq_host);
	unsigned int i;
	unsigned int *rmap;
	unsigned long flags;

	/* Allocate structure and revmap table if using linear mapping */
	if (revmap_type == IRQ_HOST_MAP_LINEAR)
		size += revmap_arg * sizeof(unsigned int);
	host = zalloc_maybe_bootmem(size, GFP_KERNEL);
	if (host == NULL)
		return NULL;

	/* Fill structure */
	host->revmap_type = revmap_type;
	host->inval_irq = inval_irq;
	host->ops = ops;
	host->of_node = of_node_get(of_node);

	if (host->ops->match == NULL)
		host->ops->match = default_irq_host_match;

	raw_spin_lock_irqsave(&irq_big_lock, flags);

	/* If it's a legacy controller, check for duplicates and
	 * mark it as allocated (we use the irq 0 host pointer for that)
	 */
	if (revmap_type == IRQ_HOST_MAP_LEGACY) {
		if (irq_map[0].host != NULL) {
			raw_spin_unlock_irqrestore(&irq_big_lock, flags);
			/* If we are early boot, we can't free the structure,
			 * too bad...
			 * this will be fixed once slab is made available early
			 * instead of the current cruft
			 */
			if (mem_init_done) {
				of_node_put(host->of_node);
				kfree(host);
			}
			return NULL;
		}
		irq_map[0].host = host;
	}

	list_add(&host->link, &irq_hosts);
	raw_spin_unlock_irqrestore(&irq_big_lock, flags);

	/* Additional setup per revmap type */
	switch (revmap_type) {
	case IRQ_HOST_MAP_LEGACY:
		/* 0 is always the invalid number for legacy */
		host->inval_irq = 0;
		/* setup us as the host for all legacy interrupts */
		for (i = 1; i < NUM_ISA_INTERRUPTS; i++) {
			irq_map[i].hwirq = i;
			smp_wmb();
			irq_map[i].host = host;
			smp_wmb();

			/* Clear norequest flags */
			irq_to_desc(i)->status &= ~IRQ_NOREQUEST;

			/* Legacy flags are left to default at this point,
			 * one can then use irq_create_mapping() to
			 * explicitly change them
			 */
			ops->map(host, i, i);
		}
		break;
	case IRQ_HOST_MAP_LINEAR:
		rmap = (unsigned int *)(host + 1);
		for (i = 0; i < revmap_arg; i++)
			rmap[i] = NO_IRQ;
		host->revmap_data.linear.size = revmap_arg;
		smp_wmb();
		host->revmap_data.linear.revmap = rmap;
		break;
	default:
		break;
	}

	pr_debug("irq: Allocated host of type %d @0x%p\n", revmap_type, host);

	return host;
}
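
/*
 * Usage sketch (hypothetical PIC driver; the "mypic" names and "np" are
 * assumptions): create a linear-revmap host for a controller with 64
 * sources:
 *
 *	static struct irq_host_ops mypic_host_ops = {
 *		.map	= mypic_host_map,
 *		.xlate	= mypic_host_xlate,
 *	};
 *
 *	host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, 64,
 *			      &mypic_host_ops, 0);
 *	if (host == NULL)
 *		return -ENOMEM;
 */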

struct irq_host *irq_find_host(struct device_node *node)
{
	struct irq_host *h, *found = NULL;
	unsigned long flags;

	/* We might want to match the legacy controller last since
	 * it might potentially be set to match all interrupts in
	 * the absence of a device node. This isn't a problem so far
	 * though...
	 */
	raw_spin_lock_irqsave(&irq_big_lock, flags);
	list_for_each_entry(h, &irq_hosts, link)
		if (h->ops->match(h, node)) {
			found = h;
			break;
		}
	raw_spin_unlock_irqrestore(&irq_big_lock, flags);
	return found;
}
EXPORT_SYMBOL_GPL(irq_find_host);

void irq_set_default_host(struct irq_host *host)
{
	pr_debug("irq: Default host set to @0x%p\n", host);

	irq_default_host = host;
}

void irq_set_virq_count(unsigned int count)
{
	pr_debug("irq: Trying to set virq count to %d\n", count);

	BUG_ON(count < NUM_ISA_INTERRUPTS);
	if (count < NR_IRQS)
		irq_virq_count = count;
}

static int irq_setup_virq(struct irq_host *host, unsigned int virq,
			    irq_hw_number_t hwirq)
{
	struct irq_desc *desc;

	desc = irq_to_desc_alloc_node(virq, 0);
	if (!desc) {
		pr_debug("irq: -> allocating desc failed\n");
		goto error;
	}

	/* Clear IRQ_NOREQUEST flag */
	desc->status &= ~IRQ_NOREQUEST;

	/* map it */
	smp_wmb();
	irq_map[virq].hwirq = hwirq;
	smp_mb();

	if (host->ops->map(host, virq, hwirq)) {
		pr_debug("irq: -> mapping failed, freeing\n");
		goto error;
	}

	return 0;

error:
	irq_free_virt(virq, 1);
	return -1;
}

unsigned int irq_create_direct_mapping(struct irq_host *host)
{
	unsigned int virq;

	if (host == NULL)
		host = irq_default_host;

	BUG_ON(host == NULL);
	WARN_ON(host->revmap_type != IRQ_HOST_MAP_NOMAP);

	virq = irq_alloc_virt(host, 1, 0);
	if (virq == NO_IRQ) {
		pr_debug("irq: create_direct virq allocation failed\n");
		return NO_IRQ;
	}

	pr_debug("irq: create_direct obtained virq %d\n", virq);

	if (irq_setup_virq(host, virq, virq))
		return NO_IRQ;

	return virq;
}

unsigned int irq_create_mapping(struct irq_host *host,
				irq_hw_number_t hwirq)
{
	unsigned int virq, hint;

	pr_debug("irq: irq_create_mapping(0x%p, 0x%lx)\n", host, hwirq);

	/* Look for default host if necessary */
	if (host == NULL)
		host = irq_default_host;
	if (host == NULL) {
		printk(KERN_WARNING "irq_create_mapping called for"
		       " NULL host, hwirq=%lx\n", hwirq);
		WARN_ON(1);
		return NO_IRQ;
	}
	pr_debug("irq: -> using host @%p\n", host);

	/* Check if a mapping already exists; if it does, call
	 * host->ops->map() to update the flags
	 */
	virq = irq_find_mapping(host, hwirq);
	if (virq != NO_IRQ) {
		if (host->ops->remap)
			host->ops->remap(host, virq, hwirq);
		pr_debug("irq: -> existing mapping on virq %d\n", virq);
		return virq;
	}

	/* Get a virtual interrupt number */
	if (host->revmap_type == IRQ_HOST_MAP_LEGACY) {
		/* Handle legacy */
		virq = (unsigned int)hwirq;
		if (virq == 0 || virq >= NUM_ISA_INTERRUPTS)
			return NO_IRQ;
		return virq;
	} else {
		/* Allocate a virtual interrupt number */
		hint = hwirq % irq_virq_count;
		virq = irq_alloc_virt(host, 1, hint);
		if (virq == NO_IRQ) {
			pr_debug("irq: -> virq allocation failed\n");
			return NO_IRQ;
		}
	}

	if (irq_setup_virq(host, virq, hwirq))
		return NO_IRQ;

	printk(KERN_DEBUG "irq: irq %lu on host %s mapped to virtual irq %u\n",
		hwirq, host->of_node ? host->of_node->full_name : "null", virq);

	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_mapping);
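
/*
 * Usage sketch (hypothetical): map hardware source 5 of a host and then
 * request it like any other Linux interrupt:
 *
 *	virq = irq_create_mapping(host, 5);
 *	if (virq != NO_IRQ)
 *		rc = request_irq(virq, my_handler, 0, "mydev", dev);
 */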

unsigned int irq_create_of_mapping(struct device_node *controller,
				   const u32 *intspec, unsigned int intsize)
{
	struct irq_host *host;
	irq_hw_number_t hwirq;
	unsigned int type = IRQ_TYPE_NONE;
	unsigned int virq;

	if (controller == NULL)
		host = irq_default_host;
	else
		host = irq_find_host(controller);
	if (host == NULL) {
		printk(KERN_WARNING "irq: no irq host found for %s !\n",
		       controller->full_name);
		return NO_IRQ;
	}

	/* If host has no translation, then we assume interrupt line */
	if (host->ops->xlate == NULL)
		hwirq = intspec[0];
	else {
		if (host->ops->xlate(host, controller, intspec, intsize,
				     &hwirq, &type))
			return NO_IRQ;
	}

	/* Create mapping */
	virq = irq_create_mapping(host, hwirq);
	if (virq == NO_IRQ)
		return virq;

	/* Set type if specified and different from the current one */
	if (type != IRQ_TYPE_NONE &&
	    type != (irq_to_desc(virq)->status & IRQF_TRIGGER_MASK))
		set_irq_type(virq, type);
	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_of_mapping);
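
/*
 * Most drivers reach this through irq_of_parse_and_map(), which decodes
 * the "interrupts" property of a device node and calls here, e.g.:
 *
 *	virq = irq_of_parse_and_map(np, 0);	// first interrupt specifier
 */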

void irq_dispose_mapping(unsigned int virq)
{
	struct irq_host *host;
	irq_hw_number_t hwirq;

	if (virq == NO_IRQ)
		return;

	host = irq_map[virq].host;
	WARN_ON(host == NULL);
	if (host == NULL)
		return;

	/* Never unmap legacy interrupts */
	if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
		return;

	/* remove chip and handler */
	set_irq_chip_and_handler(virq, NULL, NULL);

	/* Make sure it's completed */
	synchronize_irq(virq);

	/* Tell the PIC about it */
	if (host->ops->unmap)
		host->ops->unmap(host, virq);
	smp_mb();

	/* Clear reverse map */
	hwirq = irq_map[virq].hwirq;
	switch (host->revmap_type) {
	case IRQ_HOST_MAP_LINEAR:
		if (hwirq < host->revmap_data.linear.size)
			host->revmap_data.linear.revmap[hwirq] = NO_IRQ;
		break;
	case IRQ_HOST_MAP_TREE:
		/*
		 * Check if the radix tree was allocated yet; if not, then
		 * there is nothing to remove.
		 */
		smp_rmb();
		if (revmap_trees_allocated < 1)
			break;
		mutex_lock(&revmap_trees_mutex);
		radix_tree_delete(&host->revmap_data.tree, hwirq);
		mutex_unlock(&revmap_trees_mutex);
		break;
	}

	/* Destroy map */
	smp_mb();
	irq_map[virq].hwirq = host->inval_irq;

	/* Set some flags */
	irq_to_desc(virq)->status |= IRQ_NOREQUEST;

	/* Free it */
	irq_free_virt(virq, 1);
}
EXPORT_SYMBOL_GPL(irq_dispose_mapping);
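
/*
 * Teardown sketch, pairing with the mapping example above (hypothetical):
 *
 *	free_irq(virq, dev);
 *	irq_dispose_mapping(virq);
 */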

unsigned int irq_find_mapping(struct irq_host *host,
			      irq_hw_number_t hwirq)
{
	unsigned int i;
	unsigned int hint = hwirq % irq_virq_count;

	/* Look for default host if necessary */
	if (host == NULL)
		host = irq_default_host;
	if (host == NULL)
		return NO_IRQ;

	/* legacy -> bail early */
	if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
		return hwirq;

	/* Slow path does a linear search of the map */
	if (hint < NUM_ISA_INTERRUPTS)
		hint = NUM_ISA_INTERRUPTS;
	i = hint;
	do {
		if (irq_map[i].host == host &&
		    irq_map[i].hwirq == hwirq)
			return i;
		i++;
		if (i >= irq_virq_count)
			i = NUM_ISA_INTERRUPTS;
	} while (i != hint);
	return NO_IRQ;
}
EXPORT_SYMBOL_GPL(irq_find_mapping);

unsigned int irq_radix_revmap_lookup(struct irq_host *host,
				     irq_hw_number_t hwirq)
{
	struct irq_map_entry *ptr;
	unsigned int virq;

	WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE);

	/*
	 * Check if the radix tree exists and has been initialized.
	 * If not, we fall back to slow mode
	 */
	if (revmap_trees_allocated < 2)
		return irq_find_mapping(host, hwirq);

	/* Now try to resolve */
	/*
	 * No rcu_read_lock(ing) needed, the ptr returned can't go under us
	 * as it's referencing an entry in the static irq_map table.
	 */
	ptr = radix_tree_lookup(&host->revmap_data.tree, hwirq);

	/*
	 * If found in radix tree, then fine.
	 * Else fall back to linear lookup - this should not happen in practice
	 * as it means that we failed to insert the node in the radix tree.
	 */
	if (ptr)
		virq = ptr - irq_map;
	else
		virq = irq_find_mapping(host, hwirq);

	return virq;
}

void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq,
			     irq_hw_number_t hwirq)
{
	WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE);

	/*
	 * Check if the radix tree exists yet.
	 * If not, then the irq will be inserted into the tree when it gets
	 * initialized.
	 */
	smp_rmb();
	if (revmap_trees_allocated < 1)
		return;

	if (virq != NO_IRQ) {
		mutex_lock(&revmap_trees_mutex);
		radix_tree_insert(&host->revmap_data.tree, hwirq,
				  &irq_map[virq]);
		mutex_unlock(&revmap_trees_mutex);
	}
}

unsigned int irq_linear_revmap(struct irq_host *host,
			       irq_hw_number_t hwirq)
{
	unsigned int *revmap;

	WARN_ON(host->revmap_type != IRQ_HOST_MAP_LINEAR);

	/* Check revmap bounds */
	if (unlikely(hwirq >= host->revmap_data.linear.size))
		return irq_find_mapping(host, hwirq);

	/* Check if revmap was allocated */
	revmap = host->revmap_data.linear.revmap;
	if (unlikely(revmap == NULL))
		return irq_find_mapping(host, hwirq);

	/* Fill up revmap with slow path if no mapping found */
	if (unlikely(revmap[hwirq] == NO_IRQ))
		revmap[hwirq] = irq_find_mapping(host, hwirq);

	return revmap[hwirq];
}
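
/*
 * Usage sketch (hypothetical cascaded controller; the register struct
 * and field names are assumptions): the flow handler of a cascade
 * typically sits on this fast path:
 *
 *	hwirq = in_be32(&mypic_regs->pending);
 *	generic_handle_irq(irq_linear_revmap(host, hwirq));
 */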

unsigned int irq_alloc_virt(struct irq_host *host,
			    unsigned int count,
			    unsigned int hint)
{
	unsigned long flags;
	unsigned int i, j, found = NO_IRQ;

	if (count == 0 || count > (irq_virq_count - NUM_ISA_INTERRUPTS))
		return NO_IRQ;

	raw_spin_lock_irqsave(&irq_big_lock, flags);

	/* Use hint for 1 interrupt if any */
	if (count == 1 && hint >= NUM_ISA_INTERRUPTS &&
	    hint < irq_virq_count && irq_map[hint].host == NULL) {
		found = hint;
		goto hint_found;
	}

	/* Look for count consecutive numbers in the allocatable
	 * (non-legacy) space
	 */
	for (i = NUM_ISA_INTERRUPTS, j = 0; i < irq_virq_count; i++) {
		if (irq_map[i].host != NULL)
			j = 0;
		else
			j++;

		if (j == count) {
			found = i - count + 1;
			break;
		}
	}
	if (found == NO_IRQ) {
		raw_spin_unlock_irqrestore(&irq_big_lock, flags);
		return NO_IRQ;
	}
 hint_found:
	for (i = found; i < (found + count); i++) {
		irq_map[i].hwirq = host->inval_irq;
		smp_wmb();
		irq_map[i].host = host;
	}
	raw_spin_unlock_irqrestore(&irq_big_lock, flags);
	return found;
}

void irq_free_virt(unsigned int virq, unsigned int count)
{
	unsigned long flags;
	unsigned int i;

	WARN_ON(virq < NUM_ISA_INTERRUPTS);
	WARN_ON(count == 0 || (virq + count) > irq_virq_count);

	raw_spin_lock_irqsave(&irq_big_lock, flags);
	for (i = virq; i < (virq + count); i++) {
		struct irq_host *host;

		if (i < NUM_ISA_INTERRUPTS ||
		    (virq + count) > irq_virq_count)
			continue;

		host = irq_map[i].host;
		irq_map[i].hwirq = host->inval_irq;
		smp_wmb();
		irq_map[i].host = NULL;
	}
	raw_spin_unlock_irqrestore(&irq_big_lock, flags);
}

int arch_early_irq_init(void)
{
	struct irq_desc *desc;
	int i;

	for (i = 0; i < NR_IRQS; i++) {
		desc = irq_to_desc(i);
		if (desc)
			desc->status |= IRQ_NOREQUEST;
	}

	return 0;
}

int arch_init_chip_data(struct irq_desc *desc, int node)
{
	desc->status |= IRQ_NOREQUEST;
	return 0;
}

/* We need to create the radix trees late */
static int irq_late_init(void)
{
	struct irq_host *h;
	unsigned int i;

	/*
	 * No mutual exclusion with respect to accessors of the tree is needed
	 * here as the synchronization is done via the state variable
	 * revmap_trees_allocated.
	 */
	list_for_each_entry(h, &irq_hosts, link) {
		if (h->revmap_type == IRQ_HOST_MAP_TREE)
			INIT_RADIX_TREE(&h->revmap_data.tree, GFP_KERNEL);
	}

	/*
	 * Make sure the radix tree inits are visible before setting
	 * the flag
	 */
	smp_wmb();
	revmap_trees_allocated = 1;

	/*
	 * Insert the reverse mapping for those interrupts already present
	 * in irq_map[].
	 */
	mutex_lock(&revmap_trees_mutex);
	for (i = 0; i < irq_virq_count; i++) {
		if (irq_map[i].host &&
		    (irq_map[i].host->revmap_type == IRQ_HOST_MAP_TREE))
			radix_tree_insert(&irq_map[i].host->revmap_data.tree,
					  irq_map[i].hwirq, &irq_map[i]);
	}
	mutex_unlock(&revmap_trees_mutex);

	/*
	 * Make sure the radix tree insertions are visible before setting
	 * the flag
	 */
	smp_wmb();
	revmap_trees_allocated = 2;

	return 0;
}
arch_initcall(irq_late_init);

#ifdef CONFIG_VIRQ_DEBUG
static int virq_debug_show(struct seq_file *m, void *private)
{
	unsigned long flags;
	struct irq_desc *desc;
	const char *p;
	static const char none[] = "none";
	int i;

	seq_printf(m, "%-5s  %-7s  %-15s  %s\n", "virq", "hwirq",
		      "chip name", "host name");

	for (i = 1; i < nr_irqs; i++) {
		desc = irq_to_desc(i);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		if (desc->action && desc->action->handler) {
			seq_printf(m, "%5d  ", i);
			seq_printf(m, "0x%05lx  ", virq_to_hw(i));

			if (desc->chip && desc->chip->name)
				p = desc->chip->name;
			else
				p = none;
			seq_printf(m, "%-15s  ", p);

			if (irq_map[i].host && irq_map[i].host->of_node)
				p = irq_map[i].host->of_node->full_name;
			else
				p = none;
			seq_printf(m, "%s\n", p);
		}

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}

	return 0;
}

static int virq_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, virq_debug_show, inode->i_private);
}

static const struct file_operations virq_debug_fops = {
	.open = virq_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init irq_debugfs_init(void)
{
	if (debugfs_create_file("virq_mapping", S_IRUGO, powerpc_debugfs_root,
				 NULL, &virq_debug_fops) == NULL)
		return -ENOMEM;

	return 0;
}
__initcall(irq_debugfs_init);
#endif /* CONFIG_VIRQ_DEBUG */

#ifdef CONFIG_PPC64
static int __init setup_noirqdistrib(char *str)
{
	distribute_irqs = 0;
	return 1;
}

__setup("noirqdistrib", setup_noirqdistrib);
#endif /* CONFIG_PPC64 */