/*
 *  Derived from arch/i386/kernel/irq.c
 *    Copyright (C) 1992 Linus Torvalds
 *  Adapted from arch/i386 by Gary Thomas
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Updated and modified by Cort Dougan <cort@fsmlabs.com>
 *    Copyright (C) 1996-2001 Cort Dougan
 *  Adapted for Power Macintosh by Paul Mackerras
 *    Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * The MPC8xx has an interrupt mask in the SIU.  If a bit is set, the
 * interrupt is _enabled_.  As expected, IRQ0 is bit 0 in the 32-bit
 * mask register (of which only 16 are defined), hence the weird shifting
 * and complement of the cached_irq_mask.  I want to be able to stuff
 * this right into the SIU SMASK register.
 * Many of the prep/chrp functions are conditionally compiled on
 * CONFIG_PPC_8xx to reduce code space and undefined function references.
 */

#undef DEBUG

#include <linux/export.h>
#include <linux/threads.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/debugfs.h>
#include <linux/of.h>
#include <linux/of_irq.h>

#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/cache.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
#include <asm/smp.h>
#include <asm/livepatch.h>
#include <asm/asm-prototypes.h>
#include <asm/hw_irq.h>

#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/firmware.h>
#include <asm/lv1call.h>
#endif
#define CREATE_TRACE_POINTS
#include <asm/trace.h>
#include <asm/cpu_has_feature.h>

DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

int __irq_offset_value;

#ifdef CONFIG_PPC32
EXPORT_SYMBOL(__irq_offset_value);
atomic_t ppc_n_lost_interrupts;

#ifdef CONFIG_TAU_INT
extern int tau_initialized;
extern int tau_interrupts(int);
#endif
#endif /* CONFIG_PPC32 */

#ifdef CONFIG_PPC64

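/*
 * Whether interrupts should be spread across online CPUs; platform
 * code consults this, and the "noirqdistrib" boot option at the
 * bottom of this file clears it.
 */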
int distribute_irqs = 1;

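/*
 * Fetch paca->irq_happened with a single load through r13 (the PACA
 * pointer), avoiding the debug_smp_processor_id() overhead that
 * get_paca() would add in this low level path.
 */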
static inline notrace unsigned long get_irq_happened(void)
{
	unsigned long happened;

	__asm__ __volatile__("lbz %0,%1(13)"
	: "=r" (happened) : "i" (offsetof(struct paca_struct, irq_happened)));

	return happened;
}

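/*
 * Return true if the decrementer has passed the next scheduled timer
 * event, i.e. a timer interrupt may have been missed while we were
 * hard disabled and must be replayed.
 */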
static inline notrace int decrementer_check_overflow(void)
{
	u64 now = get_tb_or_rtc();
	u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);

	return now >= *next_tb;
}

/* This is called whenever we are re-enabling interrupts
 * and returns either 0 (nothing to do) or a trap vector
 * (0x500, 0x900, 0xf00, 0xe60, 0x280/0xa00) if there's an
 * EE, DEC, PMI, HMI or doorbell to generate.
 *
 * This is called in two contexts: from arch_local_irq_restore()
 * before soft-enabling interrupts, and from the exception exit
 * path when returning from an interrupt from a soft-disabled to
 * a soft-enabled context. In both cases we have interrupts hard
 * disabled.
 *
 * We take care of only clearing the bits we handled in the
 * PACA irq_happened field since we can only re-emit one at a
 * time and we don't want to "lose" one.
 */
notrace unsigned int __check_irq_replay(void)
{
	/*
	 * We use local_paca rather than get_paca() to avoid all
	 * the debug_smp_processor_id() business in this low level
	 * function
	 */
	unsigned char happened = local_paca->irq_happened;

	/*
	 * We are responding to the next interrupt, so interrupt-off
	 * latencies should be reset here.
	 */
	trace_hardirqs_on();
	trace_hardirqs_off();

	if (happened & PACA_IRQ_HARD_DIS) {
		/* Clear bit 0 which we wouldn't clear otherwise */
		local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;

		/*
		 * We may have missed a decrementer interrupt if hard disabled.
		 * Check the decrementer register in case we had a rollover
		 * while hard disabled.
		 */
		if (!(happened & PACA_IRQ_DEC)) {
			if (decrementer_check_overflow()) {
				local_paca->irq_happened |= PACA_IRQ_DEC;
				happened |= PACA_IRQ_DEC;
			}
		}
	}

	/*
	 * Force the delivery of pending soft-disabled interrupts on PS3.
	 * Any HV call will have this side effect.
	 */
	if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
		u64 tmp, tmp2;
		lv1_get_version_info(&tmp, &tmp2);
	}

	/*
	 * Check if a Hypervisor Maintenance Interrupt happened.
	 * This is a higher priority interrupt than the others, so
	 * replay it first.
	 */
	if (happened & PACA_IRQ_HMI) {
		local_paca->irq_happened &= ~PACA_IRQ_HMI;
		return 0xe60;
	}

	if (happened & PACA_IRQ_DEC) {
		local_paca->irq_happened &= ~PACA_IRQ_DEC;
		return 0x900;
	}

	if (happened & PACA_IRQ_PMI) {
		local_paca->irq_happened &= ~PACA_IRQ_PMI;
		return 0xf00;
	}

	if (happened & PACA_IRQ_EE) {
		local_paca->irq_happened &= ~PACA_IRQ_EE;
		return 0x500;
	}

#ifdef CONFIG_PPC_BOOK3E
	/*
	 * Check if an EPR external interrupt happened. This bit is
	 * typically set if we need to handle another "edge" interrupt
	 * from within the MPIC "EPR" handler.
	 */
	if (happened & PACA_IRQ_EE_EDGE) {
		local_paca->irq_happened &= ~PACA_IRQ_EE_EDGE;
		return 0x500;
	}

	if (happened & PACA_IRQ_DBELL) {
		local_paca->irq_happened &= ~PACA_IRQ_DBELL;
		return 0x280;
	}
#else
	if (happened & PACA_IRQ_DBELL) {
		local_paca->irq_happened &= ~PACA_IRQ_DBELL;
		return 0xa00;
	}
#endif /* CONFIG_PPC_BOOK3E */

	/* There should be nothing left! */
	BUG_ON(local_paca->irq_happened != 0);

	return 0;
}

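/*
 * Restore the soft interrupt mask. When this re-enables interrupts,
 * any event that was latched in paca->irq_happened while we were
 * soft-disabled is replayed via __check_irq_replay() before we
 * return.
 */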
notrace void arch_local_irq_restore(unsigned long mask)
{
	unsigned char irq_happened;
	unsigned int replay;

	/* Write the new soft-enabled value */
	irq_soft_mask_set(mask);
	if (mask)
		return;

	/*
	 * From this point onward, we can take interrupts, preempt,
	 * etc... unless we got hard-disabled. We check if an event
	 * happened. If none happened, we know we can just return.
	 *
	 * We may have preempted before the check below, in which case
	 * we are checking the "new" CPU instead of the old one. This
	 * is only a problem if an event happened on the "old" CPU.
	 *
	 * External interrupt events will have caused interrupts to
	 * be hard-disabled, so there is no problem, we
	 * cannot have preempted.
	 */
	irq_happened = get_irq_happened();
	if (!irq_happened)
		return;

	/*
	 * We need to hard disable to get a trusted value from
	 * __check_irq_replay(). We also need to soft-disable
	 * again to avoid warnings in there due to the use of
	 * per-cpu variables.
	 *
	 * We know that if the value in irq_happened is exactly 0x01
	 * then we are already hard disabled (there are other less
	 * common cases that we'll ignore for now), so we skip the
	 * (expensive) mtmsrd.
	 */
	if (unlikely(irq_happened != PACA_IRQ_HARD_DIS))
		__hard_irq_disable();
#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
	else {
		/*
		 * We should already be hard disabled here. We had bugs
		 * where that wasn't the case so let's double-check it
		 * and warn if we are wrong. Only do that when the soft
		 * mask debug code is enabled as mfmsr() can be costly.
		 */
		if (WARN_ON(mfmsr() & MSR_EE))
			__hard_irq_disable();
	}
#endif

	irq_soft_mask_set(IRQS_ALL_DISABLED);
	trace_hardirqs_off();

	/*
	 * Check if anything needs to be re-emitted. We haven't
	 * soft-enabled yet to avoid warnings in decrementer_check_overflow
	 * accessing per-cpu variables.
	 */
	replay = __check_irq_replay();

	/* We can soft-enable now */
	trace_hardirqs_on();
	irq_soft_mask_set(IRQS_ENABLED);

	/*
	 * And replay if we have to. This will return with interrupts
	 * hard-enabled.
	 */
	if (replay) {
		__replay_interrupt(replay);
		return;
	}

	/* Finally, let's ensure we are hard enabled */
	__hard_irq_enable();
}
EXPORT_SYMBOL(arch_local_irq_restore);

/*
 * This is specifically called by assembly code to re-enable interrupts
 * if they are currently disabled. This is typically called before
 * schedule() or do_signal() when returning to userspace. We do it
 * in C to avoid the burden of dealing with lockdep etc...
 *
 * NOTE: This is called with interrupts hard disabled but not marked
 * as such in paca->irq_happened, so we need to resync this.
 */
void notrace restore_interrupts(void)
{
	if (irqs_disabled()) {
		local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
		local_irq_enable();
	} else {
		__hard_irq_enable();
	}
}

/*
 * This is a helper to use when about to go into idle low-power
 * when the latter has the side effect of re-enabling interrupts
 * (such as calling H_CEDE under pHyp).
 *
 * You call this function with interrupts soft-disabled (this is
 * already the case when ppc_md.power_save is called). The function
 * returns true when the caller should enter the low power state,
 * and false when it should just return.
 *
 * In the former case, it will have notified lockdep of interrupts
 * being re-enabled and generally sanitized the lazy irq state,
 * and in the latter case it will leave with interrupts hard
 * disabled and marked as such, so the local_irq_enable() call
 * in arch_cpu_idle() will properly re-enable everything.
 */
bool prep_irq_for_idle(void)
{
	/*
	 * First we need to hard disable to ensure no interrupt
	 * occurs before we effectively enter the low power state
	 */
	__hard_irq_disable();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

	/*
	 * If anything happened while we were soft-disabled,
	 * we return now and do not enter the low power state.
	 */
	if (lazy_irq_pending())
		return false;

	/* Tell lockdep we are about to re-enable */
	trace_hardirqs_on();

	/*
	 * Mark interrupts as soft-enabled and clear the
	 * PACA_IRQ_HARD_DIS from the pending mask since we
	 * are about to hard enable as well as a side effect
	 * of entering the low power state.
	 */
	local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
	irq_soft_mask_set(IRQS_ENABLED);

	/* Tell the caller to enter the low power state */
	return true;
}

#ifdef CONFIG_PPC_BOOK3S
/*
 * This is for idle sequences that return with IRQs off, but the
 * idle state itself wakes on interrupt. Tell the irq tracer that
 * IRQs are enabled for the duration of idle so it does not get long
 * off times. Must be paired with fini_irq_for_idle_irqsoff.
 */
bool prep_irq_for_idle_irqsoff(void)
{
	WARN_ON(!irqs_disabled());

	/*
	 * First we need to hard disable to ensure no interrupt
	 * occurs before we effectively enter the low power state
	 */
	__hard_irq_disable();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

	/*
	 * If anything happened while we were soft-disabled,
	 * we return now and do not enter the low power state.
	 */
	if (lazy_irq_pending())
		return false;

	/* Tell lockdep we are about to re-enable */
	trace_hardirqs_on();

	return true;
}

/*
 * Take the SRR1 wakeup reason, index into this table to find the
 * appropriate irq_happened bit.
 *
 * System reset exceptions taken in idle state also come through here,
 * but they are NMI interrupts so do not need to wait for IRQs to be
 * restored, and should be taken as early as practical. These are marked
 * with 0xff in the table. The Power ISA specifies 0100b as the system
 * reset interrupt reason.
 */
#define IRQ_SYSTEM_RESET	0xff

static const u8 srr1_to_lazyirq[0x10] = {
	0, 0, 0,
	PACA_IRQ_DBELL,		/* 0x3 */
	IRQ_SYSTEM_RESET,	/* 0x4 */
	PACA_IRQ_DBELL,		/* 0x5 */
	PACA_IRQ_DEC,		/* 0x6 */
	0,
	PACA_IRQ_EE,		/* 0x8 */
	PACA_IRQ_EE,		/* 0x9 */
	PACA_IRQ_HMI,		/* 0xa */
	0, 0, 0, 0, 0 };

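/*
 * Replay a system reset that woke us from idle: fake up a pt_regs
 * frame with trap 0x100 and call the handler directly, flagging
 * in_nmi around the call.
 */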
void replay_system_reset(void)
{
	struct pt_regs regs;

	ppc_save_regs(&regs);
	regs.trap = 0x100;
	get_paca()->in_nmi = 1;
	system_reset_exception(&regs);
	get_paca()->in_nmi = 0;
}
EXPORT_SYMBOL_GPL(replay_system_reset);

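/*
 * Translate an SRR1 wake reason into a lazy irq_happened bit to be
 * replayed later; a system reset wake is taken here immediately.
 */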
void irq_set_pending_from_srr1(unsigned long srr1)
{
	unsigned int idx = (srr1 & SRR1_WAKEMASK_P8) >> 18;
	u8 reason = srr1_to_lazyirq[idx];

	/*
	 * Take the system reset now, which is immediately after registers
	 * are restored from idle. It's an NMI, so interrupts need not be
	 * re-enabled before it is taken.
	 */
	if (unlikely(reason == IRQ_SYSTEM_RESET)) {
		replay_system_reset();
		return;
	}

	/*
	 * The 0 index (SRR1[42:45]=b0000) must always evaluate to 0,
	 * so this can be called unconditionally with the SRR1 wake
	 * reason as returned by the idle code, which uses 0 to mean no
	 * interrupt.
	 *
	 * If a future CPU were to designate this as an interrupt reason,
	 * then a new index for no interrupt must be assigned.
	 */
	local_paca->irq_happened |= reason;
}
#endif /* CONFIG_PPC_BOOK3S */

/*
 * Force a replay of the external interrupt handler on this CPU.
 */
void force_external_irq_replay(void)
{
	/*
	 * This must only be called with interrupts soft-disabled,
	 * the replay will happen when re-enabling.
	 */
	WARN_ON(!arch_irqs_disabled());

	/* Indicate in the PACA that we have an interrupt to replay */
	local_paca->irq_happened |= PACA_IRQ_EE;
}

#endif /* CONFIG_PPC64 */

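/*
 * Show the architecture-specific per-CPU interrupt counters in
 * /proc/interrupts.
 */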
int arch_show_interrupts(struct seq_file *p, int prec)
{
	int j;

#if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT)
	if (tau_initialized) {
		seq_printf(p, "%*s: ", prec, "TAU");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", tau_interrupts(j));
		seq_puts(p, "  PowerPC             Thermal Assist (cpu temp)\n");
	}
#endif /* CONFIG_PPC32 && CONFIG_TAU_INT */

	seq_printf(p, "%*s: ", prec, "LOC");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_event);
	seq_printf(p, "  Local timer interrupts for timer event device\n");

	seq_printf(p, "%*s: ", prec, "LOC");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_others);
	seq_printf(p, "  Local timer interrupts for others\n");

	seq_printf(p, "%*s: ", prec, "SPU");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs);
	seq_printf(p, "  Spurious interrupts\n");

	seq_printf(p, "%*s: ", prec, "PMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs);
	seq_printf(p, "  Performance monitoring interrupts\n");

	seq_printf(p, "%*s: ", prec, "MCE");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions);
	seq_printf(p, "  Machine check exceptions\n");

	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		seq_printf(p, "%*s: ", prec, "HMI");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ",
					per_cpu(irq_stat, j).hmi_exceptions);
		seq_printf(p, "  Hypervisor Maintenance Interrupts\n");
	}

	seq_printf(p, "%*s: ", prec, "NMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).sreset_irqs);
	seq_printf(p, "  System Reset interrupts\n");

#ifdef CONFIG_PPC_WATCHDOG
	seq_printf(p, "%*s: ", prec, "WDG");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).soft_nmi_irqs);
	seq_printf(p, "  Watchdog soft-NMI interrupts\n");
#endif

#ifdef CONFIG_PPC_DOORBELL
	if (cpu_has_feature(CPU_FTR_DBELL)) {
		seq_printf(p, "%*s: ", prec, "DBL");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", per_cpu(irq_stat, j).doorbell_irqs);
		seq_printf(p, "  Doorbell interrupts\n");
	}
#endif

	return 0;
}

/*
 * /proc/stat helpers
 */
u64 arch_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = per_cpu(irq_stat, cpu).timer_irqs_event;

	sum += per_cpu(irq_stat, cpu).pmu_irqs;
	sum += per_cpu(irq_stat, cpu).mce_exceptions;
	sum += per_cpu(irq_stat, cpu).spurious_irqs;
	sum += per_cpu(irq_stat, cpu).timer_irqs_others;
	sum += per_cpu(irq_stat, cpu).hmi_exceptions;
	sum += per_cpu(irq_stat, cpu).sreset_irqs;
#ifdef CONFIG_PPC_WATCHDOG
	sum += per_cpu(irq_stat, cpu).soft_nmi_irqs;
#endif
#ifdef CONFIG_PPC_DOORBELL
	sum += per_cpu(irq_stat, cpu).doorbell_irqs;
#endif

	return sum;
}

static inline void check_stack_overflow(void)
{
#ifdef CONFIG_DEBUG_STACKOVERFLOW
	long sp;

	sp = current_stack_pointer() & (THREAD_SIZE - 1);

	/* check for stack overflow: is there less than 2KB free? */
	if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
		pr_err("do_IRQ: stack overflow: %ld\n",
			sp - sizeof(struct thread_info));
		dump_stack();
	}
#endif
}

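/*
 * Core interrupt dispatch: ask the platform PIC which interrupt
 * fired and feed it to the generic IRQ layer. This runs on the IRQ
 * stack when entered via do_IRQ() below.
 */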
void __do_irq(struct pt_regs *regs)
{
	unsigned int irq;

	irq_enter();

	trace_irq_entry(regs);

	check_stack_overflow();

	/*
	 * Query the platform PIC for the interrupt & ack it.
	 *
	 * This will typically lower the interrupt line to the CPU
	 */
	irq = ppc_md.get_irq();

	/* We can hard enable interrupts now to allow perf interrupts */
	may_hard_irq_enable();

	/* And finally process it */
	if (unlikely(!irq))
		__this_cpu_inc(irq_stat.spurious_irqs);
	else
		generic_handle_irq(irq);

	trace_irq_exit(regs);

	irq_exit();
}

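/*
 * External interrupt entry point: switch to the per-CPU hardirq
 * stack (unless we are already on an IRQ stack) and run __do_irq()
 * there.
 */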
void do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	struct thread_info *curtp, *irqtp, *sirqtp;

	/* Switch to the irq stack to handle this */
	curtp = current_thread_info();
	irqtp = hardirq_ctx[raw_smp_processor_id()];
	sirqtp = softirq_ctx[raw_smp_processor_id()];

	/* Already there ? */
	if (unlikely(curtp == irqtp || curtp == sirqtp)) {
		__do_irq(regs);
		set_irq_regs(old_regs);
		return;
	}

	/* Prepare the thread_info in the irq stack */
	irqtp->task = curtp->task;
	irqtp->flags = 0;

	/* Copy the preempt_count so that the [soft]irq checks work. */
	irqtp->preempt_count = curtp->preempt_count;

	/* Switch stack and call */
	call_do_irq(regs, irqtp);

	/* Clear the task pointer on the irq stack again */
	irqtp->task = NULL;

	/* Copy back updates to the thread_info */
	if (irqtp->flags)
		set_bits(irqtp->flags, &curtp->flags);

	set_irq_regs(old_regs);
}

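/*
 * Let the platform install its interrupt controller, then set up the
 * exception-level and soft/hard IRQ stacks.
 */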
void __init init_IRQ(void)
{
	if (ppc_md.init_IRQ)
		ppc_md.init_IRQ();

	exc_lvl_ctx_init();

	irq_ctx_init();
}

#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
struct thread_info   *critirq_ctx[NR_CPUS] __read_mostly;
struct thread_info    *dbgirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *mcheckirq_ctx[NR_CPUS] __read_mostly;

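/*
 * Initialize the thread_info at the base of each per-CPU critical
 * (and, on BookE, debug and machine check) exception stack.
 */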
void exc_lvl_ctx_init(void)
{
	struct thread_info *tp;
	int i, cpu_nr;

	for_each_possible_cpu(i) {
#ifdef CONFIG_PPC64
		cpu_nr = i;
#else
#ifdef CONFIG_SMP
		cpu_nr = get_hard_smp_processor_id(i);
#else
		cpu_nr = 0;
#endif
#endif

		memset((void *)critirq_ctx[cpu_nr], 0, THREAD_SIZE);
		tp = critirq_ctx[cpu_nr];
		tp->cpu = cpu_nr;
		tp->preempt_count = 0;

#ifdef CONFIG_BOOKE
		memset((void *)dbgirq_ctx[cpu_nr], 0, THREAD_SIZE);
		tp = dbgirq_ctx[cpu_nr];
		tp->cpu = cpu_nr;
		tp->preempt_count = 0;

		memset((void *)mcheckirq_ctx[cpu_nr], 0, THREAD_SIZE);
		tp = mcheckirq_ctx[cpu_nr];
		tp->cpu = cpu_nr;
		tp->preempt_count = HARDIRQ_OFFSET;
#endif
	}
}
#endif

struct thread_info *softirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly;

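/*
 * Initialize the thread_info at the base of each per-CPU softirq and
 * hardirq stack.
 */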
void irq_ctx_init(void)
{
	struct thread_info *tp;
	int i;

	for_each_possible_cpu(i) {
		memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
		tp = softirq_ctx[i];
		tp->cpu = i;
		klp_init_thread_info(tp);

		memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
		tp = hardirq_ctx[i];
		tp->cpu = i;
		klp_init_thread_info(tp);
	}
}

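/*
 * Run pending softirqs on the dedicated per-CPU softirq stack rather
 * than on the current task's stack.
 */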
void do_softirq_own_stack(void)
{
	struct thread_info *curtp, *irqtp;

	curtp = current_thread_info();
	irqtp = softirq_ctx[smp_processor_id()];
	irqtp->task = curtp->task;
	irqtp->flags = 0;
	call_do_softirq(irqtp);
	irqtp->task = NULL;

	/*
	 * Set any flag that may have been set on the
	 * alternate stack
	 */
	if (irqtp->flags)
		set_bits(irqtp->flags, &curtp->flags);
}

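/*
 * Map a Linux virtual interrupt number back to the underlying
 * hardware interrupt number; returns 0 (and warns) if the virq has
 * no irq_data.
 */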
irq_hw_number_t virq_to_hw(unsigned int virq)
{
	struct irq_data *irq_data = irq_get_irq_data(virq);
	return WARN_ON(!irq_data) ? 0 : irq_data->hwirq;
}
EXPORT_SYMBOL_GPL(virq_to_hw);

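/*
 * Pick a target CPU for an interrupt: round-robin over the online
 * CPUs when the affinity mask covers all of them, otherwise the
 * first online CPU in the mask. Returns a hard processor id.
 */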
#ifdef CONFIG_SMP
int irq_choose_cpu(const struct cpumask *mask)
{
	int cpuid;

	if (cpumask_equal(mask, cpu_online_mask)) {
		static int irq_rover;
		static DEFINE_RAW_SPINLOCK(irq_rover_lock);
		unsigned long flags;

		/* Round-robin distribution... */
do_round_robin:
		raw_spin_lock_irqsave(&irq_rover_lock, flags);

		irq_rover = cpumask_next(irq_rover, cpu_online_mask);
		if (irq_rover >= nr_cpu_ids)
			irq_rover = cpumask_first(cpu_online_mask);

		cpuid = irq_rover;

		raw_spin_unlock_irqrestore(&irq_rover_lock, flags);
	} else {
		cpuid = cpumask_first_and(mask, cpu_online_mask);
		/* No online CPU in the mask: fall back to round-robin */
		if (cpuid >= nr_cpu_ids)
			goto do_round_robin;
	}

	return get_hard_smp_processor_id(cpuid);
}
#else
int irq_choose_cpu(const struct cpumask *mask)
{
	return hard_smp_processor_id();
}
#endif


int arch_early_irq_init(void)
{
	return 0;
}

#ifdef CONFIG_PPC64
/* "noirqdistrib" on the command line disables spreading interrupts across CPUs. */
static int __init setup_noirqdistrib(char *str)
{
	distribute_irqs = 0;
	return 1;
}

__setup("noirqdistrib", setup_noirqdistrib);
#endif /* CONFIG_PPC64 */
809