/*
 * arch/ppc/kernel/irq.c
 *
 * Derived from arch/i386/kernel/irq.c
 *   Copyright (C) 1992 Linus Torvalds
 * Adapted from arch/i386 by Gary Thomas
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Updated and modified by Cort Dougan <cort@fsmlabs.com>
 *   Copyright (C) 1996-2001 Cort Dougan
 * Adapted for Power Macintosh by Paul Mackerras
 *   Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * The MPC8xx has an interrupt mask in the SIU.  If a bit is set, the
 * interrupt is _enabled_.  As expected, IRQ0 is bit 0 in the 32-bit
 * mask register (of which only 16 are defined), hence the weird shifting
 * and complement of the cached_irq_mask.  I want to be able to stuff
 * this right into the SIU SMASK register.
 * Many of the prep/chrp functions are conditionally compiled on CONFIG_8xx
 * to reduce code space and undefined function references.
 */

#include <linux/module.h>
#include <linux/threads.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/config.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/profile.h>
#include <linux/bitops.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/cache.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
#ifdef CONFIG_PPC_ISERIES
#include <asm/paca.h>
#endif

int __irq_offset_value;
#ifdef CONFIG_PPC32
EXPORT_SYMBOL(__irq_offset_value);
#endif

static int ppc_spurious_interrupts;

#ifdef CONFIG_PPC32
#define NR_MASK_WORDS	((NR_IRQS + 31) / 32)

unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
atomic_t ppc_n_lost_interrupts;

#ifdef CONFIG_TAU_INT
extern int tau_initialized;
extern int tau_interrupts(int);
#endif

#if defined(CONFIG_SMP) && !defined(CONFIG_PPC_MERGE)
extern atomic_t ipi_recv;
extern atomic_t ipi_sent;
#endif
#endif /* CONFIG_PPC32 */

#ifdef CONFIG_PPC64
EXPORT_SYMBOL(irq_desc);

int distribute_irqs = 1;
u64 ppc64_interrupt_controller;
#endif /* CONFIG_PPC64 */

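/*
 * show_interrupts() backs /proc/interrupts.  Illustrative (not verbatim)
 * output on a two-CPU machine might look like:
 *
 *	           CPU0       CPU1
 *	 16:     123456      98765   OpenPIC   Level     eth0
 *	 ...
 *	BAD:         17
 *
 * One row per IRQ with a registered action: a count column per online
 * CPU, then the controller name, the trigger type, and the name of
 * each action sharing the line.
 */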
int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *)v, j;
	struct irqaction *action;
	irq_desc_t *desc;
	unsigned long flags;

	if (i == 0) {
		seq_puts(p, "           ");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%d       ", j);
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		desc = get_irq_desc(i);
		spin_lock_irqsave(&desc->lock, flags);
		action = desc->action;
		if (!action || !action->handler)
			goto skip;
		seq_printf(p, "%3d: ", i);
#ifdef CONFIG_SMP
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#else
		seq_printf(p, "%10u ", kstat_irqs(i));
#endif /* CONFIG_SMP */
		if (desc->handler)
			seq_printf(p, " %s ", desc->handler->typename);
		else
			seq_puts(p, "  None      ");
		seq_printf(p, "%s", (desc->status & IRQ_LEVEL) ? "Level " : "Edge  ");
		seq_printf(p, "    %s", action->name);
		for (action = action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);
		seq_putc(p, '\n');
skip:
		spin_unlock_irqrestore(&desc->lock, flags);
	} else if (i == NR_IRQS) {
#ifdef CONFIG_PPC32
#ifdef CONFIG_TAU_INT
		if (tau_initialized) {
			seq_puts(p, "TAU: ");
			for_each_online_cpu(j)
				seq_printf(p, "%10u ", tau_interrupts(j));
			seq_puts(p, "  PowerPC             Thermal Assist (cpu temp)\n");
		}
#endif
#if defined(CONFIG_SMP) && !defined(CONFIG_PPC_MERGE)
		/* should this be per processor send/receive? */
		seq_printf(p, "IPI (recv/sent): %10u/%u\n",
				atomic_read(&ipi_recv), atomic_read(&ipi_sent));
#endif
#endif /* CONFIG_PPC32 */
		seq_printf(p, "BAD: %10u\n", ppc_spurious_interrupts);
	}
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
void fixup_irqs(cpumask_t map)
{
	unsigned int irq;
	static int warned;

	for_each_irq(irq) {
		cpumask_t mask;

		if (irq_desc[irq].status & IRQ_PER_CPU)
			continue;

		cpus_and(mask, irq_affinity[irq], map);
		if (any_online_cpu(mask) == NR_CPUS) {
			printk("Breaking affinity for irq %i\n", irq);
			mask = map;
		}
		if (irq_desc[irq].handler->set_affinity)
			irq_desc[irq].handler->set_affinity(irq, mask);
		else if (irq_desc[irq].action && !(warned++))
			printk("Cannot set affinity for irq %i\n", irq);
	}

	local_irq_enable();
	mdelay(1);
	local_irq_disable();
}
#endif

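/*
 * do_IRQ() is the common entry point for external interrupts.  It asks
 * the platform which interrupt is pending, optionally switches to a
 * dedicated per-cpu IRQ stack before dispatching to the generic
 * __do_IRQ() layer, and accounts for spurious interrupts.  The sketch
 * below summarizes the flow; it adds no behaviour beyond the code:
 *
 *	irq_enter();
 *	irq = ppc_md.get_irq(regs);	// -1: nothing pending, -2: already handled
 *	if (irq >= 0)
 *		__do_IRQ(irq, regs);	// possibly on the per-cpu IRQ stack
 *	irq_exit();
 */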
void do_IRQ(struct pt_regs *regs)
{
	int irq;
#ifdef CONFIG_IRQSTACKS
	struct thread_info *curtp, *irqtp;
#endif

	irq_enter();

#ifdef CONFIG_DEBUG_STACKOVERFLOW
	/* Debugging check for stack overflow: is there less than 2KB free? */
	{
		long sp;

		sp = __get_SP() & (THREAD_SIZE-1);

		if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
			printk("do_IRQ: stack overflow: %ld\n",
				sp - sizeof(struct thread_info));
			dump_stack();
		}
	}
#endif

	/*
	 * Every platform is required to implement ppc_md.get_irq.
	 * This function will either return an irq number or -1 to
	 * indicate there are no more pending interrupts.
	 * The value -2 is for buggy hardware and means that this IRQ
	 * has already been handled. -- Tom
	 */
	irq = ppc_md.get_irq(regs);

	if (irq >= 0) {
#ifdef CONFIG_IRQSTACKS
		/* Switch to the irq stack to handle this */
		curtp = current_thread_info();
		irqtp = hardirq_ctx[smp_processor_id()];
		if (curtp != irqtp) {
			irqtp->task = curtp->task;
			irqtp->flags = 0;
			call___do_IRQ(irq, regs, irqtp);
			irqtp->task = NULL;
			if (irqtp->flags)
				set_bits(irqtp->flags, &curtp->flags);
		} else
#endif
			__do_IRQ(irq, regs);
	} else if (irq != -2)
		/* That's not SMP safe ... but who cares? */
		ppc_spurious_interrupts++;

	irq_exit();

#ifdef CONFIG_PPC_ISERIES
	{
		struct paca_struct *lpaca = get_paca();

		if (lpaca->lppaca.int_dword.fields.decr_int) {
			lpaca->lppaca.int_dword.fields.decr_int = 0;
			/* Signal a fake decrementer interrupt */
			timer_interrupt(regs);
		}
	}
#endif
}

void __init init_IRQ(void)
{
#ifdef CONFIG_PPC64
	static int once = 0;

	if (once)
		return;

	once++;
#endif
	ppc_md.init_IRQ();
#ifdef CONFIG_PPC64
	irq_ctx_init();
#endif
}

#ifdef CONFIG_PPC64
/*
 * Virtual IRQ mapping code, used on systems with XICS interrupt controllers.
 */

#define UNDEFINED_IRQ 0xffffffff
unsigned int virt_irq_to_real_map[NR_IRQS];

/*
 * Don't use virtual irqs 0, 1, 2 for devices.
 * The pcnet32 driver considers interrupt numbers < 2 to be invalid,
 * and 2 is the XICS IPI interrupt.
 * We limit virtual irqs to 17 less than NR_IRQS so that when we
 * offset them by 16 (to reserve the first 16 for ISA interrupts)
 * we don't end up with an interrupt number >= NR_IRQS.
 */
#define MIN_VIRT_IRQ	3
#define MAX_VIRT_IRQ	(NR_IRQS - NUM_ISA_INTERRUPTS - 1)
#define NR_VIRT_IRQS	(MAX_VIRT_IRQ - MIN_VIRT_IRQ + 1)

void
virt_irq_init(void)
{
	int i;
	for (i = 0; i < NR_IRQS; i++)
		virt_irq_to_real_map[i] = UNDEFINED_IRQ;
}

/* Create a mapping for a real_irq if it doesn't already exist.
 * Return the virtual irq as a convenience.
 */
int virt_irq_create_mapping(unsigned int real_irq)
{
	unsigned int virq, first_virq;
	static int warned;

	if (ppc64_interrupt_controller == IC_OPEN_PIC)
		return real_irq;	/* no mapping for openpic (for now) */

	if (ppc64_interrupt_controller == IC_CELL_PIC)
		return real_irq;	/* no mapping for iic either */

	/* don't map interrupts < MIN_VIRT_IRQ */
	if (real_irq < MIN_VIRT_IRQ) {
		virt_irq_to_real_map[real_irq] = real_irq;
		return real_irq;
	}

	/* map to a number between MIN_VIRT_IRQ and MAX_VIRT_IRQ */
	virq = real_irq;
	if (virq > MAX_VIRT_IRQ)
		virq = (virq % NR_VIRT_IRQS) + MIN_VIRT_IRQ;

	/* search for this number or a free slot */
	first_virq = virq;
	while (virt_irq_to_real_map[virq] != UNDEFINED_IRQ) {
		if (virt_irq_to_real_map[virq] == real_irq)
			return virq;
		if (++virq > MAX_VIRT_IRQ)
			virq = MIN_VIRT_IRQ;
		if (virq == first_virq)
			goto nospace;	/* oops, no free slots */
	}

	virt_irq_to_real_map[virq] = real_irq;
	return virq;

 nospace:
	if (!warned) {
		printk(KERN_CRIT "Interrupt table is full\n");
		printk(KERN_CRIT "Increase NR_IRQS (currently %d) "
				"in your kernel sources and rebuild.\n", NR_IRQS);
		warned = 1;
	}
	return NO_IRQ;
}

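/*
 * The map above is effectively an open-addressed hash table: an
 * out-of-range real irq hashes to (real_irq % NR_VIRT_IRQS) +
 * MIN_VIRT_IRQ, and collisions are resolved by linear probing with
 * wrap-around.  A made-up example, assuming NR_IRQS = 512 and
 * NUM_ISA_INTERRUPTS = 16 (so MIN_VIRT_IRQ = 3, MAX_VIRT_IRQ = 495,
 * NR_VIRT_IRQS = 493):
 *
 *	real_irq = 1000  ->  virq = (1000 % 493) + 3 = 17
 *
 * If slot 17 already maps a different real irq, we probe 18, 19, ...
 * wrapping from MAX_VIRT_IRQ back around to MIN_VIRT_IRQ.
 */
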
/*
 * In most cases we will get a hit on the very first slot checked in
 * virt_irq_to_real_map.  Only when there are a large number of IRQs
 * will this be expensive.
 */
unsigned int real_irq_to_virt_slowpath(unsigned int real_irq)
{
	unsigned int virq;
	unsigned int first_virq;

	virq = real_irq;

	if (virq > MAX_VIRT_IRQ)
		virq = (virq % NR_VIRT_IRQS) + MIN_VIRT_IRQ;

	first_virq = virq;

	do {
		if (virt_irq_to_real_map[virq] == real_irq)
			return virq;

		/* Probe the next slot, wrapping the same way as
		 * virt_irq_create_mapping() does. */
		if (++virq > MAX_VIRT_IRQ)
			virq = MIN_VIRT_IRQ;
	} while (first_virq != virq);

	return NO_IRQ;
}

#ifdef CONFIG_IRQSTACKS
struct thread_info *softirq_ctx[NR_CPUS];
struct thread_info *hardirq_ctx[NR_CPUS];

void irq_ctx_init(void)
{
	struct thread_info *tp;
	int i;

	for_each_cpu(i) {
		memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
		tp = softirq_ctx[i];
		tp->cpu = i;
		tp->preempt_count = SOFTIRQ_OFFSET;

		memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
		tp = hardirq_ctx[i];
		tp->cpu = i;
		tp->preempt_count = HARDIRQ_OFFSET;
	}
}

void do_softirq(void)
{
	unsigned long flags;
	struct thread_info *curtp, *irqtp;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	if (local_softirq_pending()) {
		curtp = current_thread_info();
		irqtp = softirq_ctx[smp_processor_id()];
		irqtp->task = curtp->task;
		call_do_softirq(irqtp);
		irqtp->task = NULL;
	}

	local_irq_restore(flags);
}
EXPORT_SYMBOL(do_softirq);

#endif /* CONFIG_IRQSTACKS */

/* Booting with "noirqdistrib" on the kernel command line keeps
 * interrupts on the boot cpu instead of distributing them. */
static int __init setup_noirqdistrib(char *str)
{
	distribute_irqs = 0;
	return 1;
}

__setup("noirqdistrib", setup_noirqdistrib);
#endif /* CONFIG_PPC64 */