/*
 * linux/kernel/irq/handle.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code.
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 *
 */

#include <linux/irq.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/rculist.h>
#include <linux/hash.h>
#include <trace/irq.h>
#include <linux/bootmem.h>

#include "internals.h"

/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
struct lock_class_key irq_desc_lock_class;

/**
 * handle_bad_irq - handle spurious and unhandled irqs
 * @irq:	the interrupt number
 * @desc:	description of the interrupt
 *
 * Handles spurious and unhandled IRQs. It also prints a debug message.
 */
void handle_bad_irq(unsigned int irq, struct irq_desc *desc)
{
	print_irq_desc(irq, desc);
	kstat_incr_irqs_this_cpu(irq, desc);
	ack_bad_irq(irq);
}

#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
static void __init init_irq_default_affinity(void)
{
	alloc_bootmem_cpumask_var(&irq_default_affinity);
	cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif

/*
 * Linux has a controller-independent interrupt architecture.
 * Every controller has a 'controller-template', that is used
 * by the main code to do the right thing. Each driver-visible
 * interrupt source is transparently wired to the appropriate
 * controller. Thus drivers need not be aware of the
 * interrupt-controller.
 *
 * The code is designed to be easily extended with new/different
 * interrupt controllers, without having to do assembly magic or
 * having to touch the generic code.
 */
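/*
 * Illustrative sketch (editor's note, not part of the original file):
 * because the controller details stay behind the irq_chip abstraction,
 * a driver only ever deals in an interrupt number and a handler. The
 * device structure, IRQ number and helper calls below are hypothetical;
 * only request_irq()/free_irq() and the irqreturn_t codes are real.
 *
 *	static irqreturn_t foo_interrupt(int irq, void *dev_id)
 *	{
 *		struct foo_device *foo = dev_id;
 *
 *		if (!foo_irq_pending(foo))
 *			return IRQ_NONE;	// not ours (shared line)
 *		foo_handle_event(foo);
 *		return IRQ_HANDLED;
 *	}
 *
 *	err = request_irq(foo->irq, foo_interrupt, IRQF_SHARED, "foo", foo);
 *	...
 *	free_irq(foo->irq, foo);
 */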
/*
 * Controller mappings for all interrupt sources:
 */
int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);

#ifdef CONFIG_SPARSE_IRQ

static struct irq_desc irq_desc_init = {
	.irq	    = -1,
	.status	    = IRQ_DISABLED,
	.chip	    = &no_irq_chip,
	.handle_irq = handle_bad_irq,
	.depth      = 1,
	.lock       = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
};

void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr)
{
	int node;
	void *ptr;

	node = cpu_to_node(cpu);
	ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs), GFP_ATOMIC, node);

	/*
	 * Don't overwrite if we can not get a new one:
	 * init_copy_kstat_irqs() could still use the old one.
	 */
	if (ptr) {
		printk(KERN_DEBUG "  alloc kstat_irqs on cpu %d node %d\n",
		       cpu, node);
		desc->kstat_irqs = ptr;
	}
}

static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
{
	memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));

	spin_lock_init(&desc->lock);
	desc->irq = irq;
#ifdef CONFIG_SMP
	desc->cpu = cpu;
#endif
	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
	init_kstat_irqs(desc, cpu, nr_cpu_ids);
	if (!desc->kstat_irqs) {
		printk(KERN_ERR "can not alloc kstat_irqs\n");
		BUG_ON(1);
	}
	if (!init_alloc_desc_masks(desc, cpu, false)) {
		printk(KERN_ERR "can not alloc irq_desc cpumasks\n");
		BUG_ON(1);
	}
	arch_init_chip_data(desc, cpu);
}

/*
 * Protect the sparse_irqs:
 */
DEFINE_SPINLOCK(sparse_irq_lock);

struct irq_desc **irq_desc_ptrs __read_mostly;

static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS_LEGACY-1] = {
		.irq	    = -1,
		.status	    = IRQ_DISABLED,
		.chip	    = &no_irq_chip,
		.handle_irq = handle_bad_irq,
		.depth	    = 1,
		.lock	    = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
	}
};

static unsigned int *kstat_irqs_legacy;

int __init early_irq_init(void)
{
	struct irq_desc *desc;
	int legacy_count;
	int i;

	init_irq_default_affinity();

	/* initialize nr_irqs based on nr_cpu_ids */
	arch_probe_nr_irqs();
	printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d\n", NR_IRQS, nr_irqs);

	desc = irq_desc_legacy;
	legacy_count = ARRAY_SIZE(irq_desc_legacy);

	/* allocate irq_desc_ptrs array based on nr_irqs */
	irq_desc_ptrs = alloc_bootmem(nr_irqs * sizeof(void *));

	/* allocate based on nr_cpu_ids */
	/* FIXME: invert kstat_irqs, and it'd be a per_cpu_alloc'd thing */
	kstat_irqs_legacy = alloc_bootmem(NR_IRQS_LEGACY * nr_cpu_ids *
					  sizeof(int));

	for (i = 0; i < legacy_count; i++) {
		desc[i].irq = i;
		desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids;
		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
		init_alloc_desc_masks(&desc[i], 0, true);
		irq_desc_ptrs[i] = desc + i;
	}

	for (i = legacy_count; i < nr_irqs; i++)
		irq_desc_ptrs[i] = NULL;

	return arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	if (irq_desc_ptrs && irq < nr_irqs)
		return irq_desc_ptrs[irq];

	return NULL;
}
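/*
 * Illustrative sketch (editor's note, not part of the original file):
 * with CONFIG_SPARSE_IRQ a descriptor may simply not have been
 * allocated yet, so code that walks interrupt numbers has to tolerate
 * a NULL return from irq_to_desc(). The loop below is a hypothetical
 * caller; only nr_irqs, irq_to_desc() and the irq_desc fields are real.
 *
 *	unsigned int irq;
 *
 *	for (irq = 0; irq < nr_irqs; irq++) {
 *		struct irq_desc *desc = irq_to_desc(irq);
 *
 *		if (!desc)
 *			continue;	// never allocated, nothing to report
 *		// ... inspect desc->action, desc->status, ...
 *	}
 */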
struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc;
	unsigned long flags;
	int node;

	if (irq >= nr_irqs) {
		WARN(1, "irq (%d) >= nr_irqs (%d) in irq_to_desc_alloc\n",
			irq, nr_irqs);
		return NULL;
	}

	desc = irq_desc_ptrs[irq];
	if (desc)
		return desc;

	spin_lock_irqsave(&sparse_irq_lock, flags);

	/* We have to check it to avoid races with another CPU */
	desc = irq_desc_ptrs[irq];
	if (desc)
		goto out_unlock;

	node = cpu_to_node(cpu);
	desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
	printk(KERN_DEBUG "  alloc irq_desc for %d on cpu %d node %d\n",
	       irq, cpu, node);
	if (!desc) {
		printk(KERN_ERR "can not alloc irq_desc\n");
		BUG_ON(1);
	}
	init_one_irq_desc(irq, desc, cpu);

	irq_desc_ptrs[irq] = desc;

out_unlock:
	spin_unlock_irqrestore(&sparse_irq_lock, flags);

	return desc;
}

#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS-1] = {
		.status	    = IRQ_DISABLED,
		.chip	    = &no_irq_chip,
		.handle_irq = handle_bad_irq,
		.depth	    = 1,
		.lock	    = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
	}
};

static unsigned int kstat_irqs_all[NR_IRQS][NR_CPUS];
int __init early_irq_init(void)
{
	struct irq_desc *desc;
	int count;
	int i;

	init_irq_default_affinity();

	printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);

	desc = irq_desc;
	count = ARRAY_SIZE(irq_desc);

	for (i = 0; i < count; i++) {
		desc[i].irq = i;
		init_alloc_desc_masks(&desc[i], 0, true);
		desc[i].kstat_irqs = kstat_irqs_all[i];
	}
	return arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}

struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
{
	return irq_to_desc(irq);
}
#endif /* !CONFIG_SPARSE_IRQ */
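/*
 * Illustrative sketch (editor's note, not part of the original file):
 * regardless of which irq_to_desc() variant above is in use, a typical
 * low-level architecture interrupt entry path only needs the interrupt
 * number; the descriptor lookup and flow-handler dispatch are generic.
 * The entry function name is hypothetical; irq_enter(), irq_exit(),
 * set_irq_regs() and generic_handle_irq() are real.
 *
 *	void arch_handle_hw_irq(unsigned int irq, struct pt_regs *regs)
 *	{
 *		struct pt_regs *old_regs = set_irq_regs(regs);
 *
 *		irq_enter();
 *		generic_handle_irq(irq);  // dispatches to desc->handle_irq
 *		irq_exit();		  // (or __do_IRQ on legacy setups)
 *		set_irq_regs(old_regs);
 *	}
 */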
void clear_kstat_irqs(struct irq_desc *desc)
{
	memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs)));
}

/*
 * What should we do if we get a hw irq event on an illegal vector?
 * Each architecture has to answer this themselves.
 */
static void ack_bad(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	print_irq_desc(irq, desc);
	ack_bad_irq(irq);
}

/*
 * NOP functions
 */
static void noop(unsigned int irq)
{
}

static unsigned int noop_ret(unsigned int irq)
{
	return 0;
}

/*
 * Generic no controller implementation
 */
struct irq_chip no_irq_chip = {
	.name		= "none",
	.startup	= noop_ret,
	.shutdown	= noop,
	.enable		= noop,
	.disable	= noop,
	.ack		= ack_bad,
	.end		= noop,
};

/*
 * Generic dummy implementation which can be used for
 * real dumb interrupt sources
 */
struct irq_chip dummy_irq_chip = {
	.name		= "dummy",
	.startup	= noop_ret,
	.shutdown	= noop,
	.enable		= noop,
	.disable	= noop,
	.ack		= noop,
	.mask		= noop,
	.unmask		= noop,
	.end		= noop,
};

/*
 * Special, empty irq handler:
 */
irqreturn_t no_action(int cpl, void *dev_id)
{
	return IRQ_NONE;
}

static void warn_no_thread(unsigned int irq, struct irqaction *action)
{
	if (test_and_set_bit(IRQTF_WARNED, &action->thread_flags))
		return;

	printk(KERN_WARNING "IRQ %d device %s returned IRQ_WAKE_THREAD "
	       "but no thread function available.\n", irq, action->name);
}

DEFINE_TRACE(irq_handler_entry);
DEFINE_TRACE(irq_handler_exit);

/**
 * handle_IRQ_event - irq action chain handler
 * @irq:	the interrupt number
 * @action:	the interrupt action chain for this irq
 *
 * Handles the action chain of an irq event
 */
irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
{
	irqreturn_t ret, retval = IRQ_NONE;
	unsigned int status = 0;

	WARN_ONCE(!in_irq(), "BUG: IRQ handler called from non-hardirq context!");

	if (!(action->flags & IRQF_DISABLED))
		local_irq_enable_in_hardirq();

	do {
		trace_irq_handler_entry(irq, action);
		ret = action->handler(irq, action->dev_id);
		trace_irq_handler_exit(irq, action, ret);

		switch (ret) {
		case IRQ_WAKE_THREAD:
			/*
			 * Set result to handled so the spurious check
			 * does not trigger.
			 */
			ret = IRQ_HANDLED;

			/*
			 * Catch drivers which return WAKE_THREAD but
			 * did not set up a thread function
			 */
			if (unlikely(!action->thread_fn)) {
				warn_no_thread(irq, action);
				break;
			}

			/*
			 * Wake up the handler thread for this
			 * action. In case the thread crashed and was
			 * killed we just pretend that we handled the
			 * interrupt. The hardirq handler above has
			 * disabled the device interrupt, so no irq
			 * storm is lurking.
			 */
			if (likely(!test_bit(IRQTF_DIED,
					     &action->thread_flags))) {
				set_bit(IRQTF_RUNTHREAD, &action->thread_flags);
				wake_up_process(action->thread);
			}

			/* Fall through to add to randomness */
		case IRQ_HANDLED:
			status |= action->flags;
			break;

		default:
			break;
		}

		retval |= ret;
		action = action->next;
	} while (action);

	if (status & IRQF_SAMPLE_RANDOM)
		add_interrupt_randomness(irq);
	local_irq_disable();

	return retval;
}
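/*
 * Illustrative sketch (editor's note, not part of the original file):
 * the IRQ_WAKE_THREAD path in handle_IRQ_event() above is what a
 * threaded interrupt setup exercises. The device helpers and names
 * below are hypothetical; request_threaded_irq() and the return codes
 * are real.
 *
 *	static irqreturn_t foo_primary(int irq, void *dev_id)
 *	{
 *		foo_mask_device_irq(dev_id);	// quiesce the device
 *		return IRQ_WAKE_THREAD;		// defer the real work
 *	}
 *
 *	static irqreturn_t foo_thread_fn(int irq, void *dev_id)
 *	{
 *		foo_do_slow_work(dev_id);	// may sleep in this context
 *		foo_unmask_device_irq(dev_id);
 *		return IRQ_HANDLED;
 *	}
 *
 *	err = request_threaded_irq(irq, foo_primary, foo_thread_fn,
 *				   0, "foo", foo);
 */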
#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ

#ifdef CONFIG_ENABLE_WARN_DEPRECATED
# warning __do_IRQ is deprecated. Please convert to proper flow handlers
#endif

/**
 * __do_IRQ - original all in one highlevel IRQ handler
 * @irq:	the interrupt number
 *
 * __do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 *
 * This is the original x86 implementation which is used for every
 * interrupt type.
 */
unsigned int __do_IRQ(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned int status;

	kstat_incr_irqs_this_cpu(irq, desc);

	if (CHECK_IRQ_PER_CPU(desc->status)) {
		irqreturn_t action_ret;

		/*
		 * No locking required for CPU-local interrupts:
		 */
		if (desc->chip->ack) {
			desc->chip->ack(irq);
			/* get new one */
			desc = irq_remap_to_desc(irq, desc);
		}
		if (likely(!(desc->status & IRQ_DISABLED))) {
			action_ret = handle_IRQ_event(irq, desc->action);
			if (!noirqdebug)
				note_interrupt(irq, desc, action_ret);
		}
		desc->chip->end(irq);
		return 1;
	}

	spin_lock(&desc->lock);
	if (desc->chip->ack) {
		desc->chip->ack(irq);
		desc = irq_remap_to_desc(irq, desc);
	}
	/*
	 * REPLAY is when Linux resends an IRQ that was dropped earlier
	 * WAITING is used by probe to mark irqs that are being tested
	 */
	status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
	status |= IRQ_PENDING; /* we _want_ to handle it */

	/*
	 * If the IRQ is disabled for whatever reason, we cannot
	 * use the action we have.
	 */
	action = NULL;
	if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
		action = desc->action;
		status &= ~IRQ_PENDING; /* we commit to handling */
		status |= IRQ_INPROGRESS; /* we are handling it */
	}
	desc->status = status;

	/*
	 * If there is no IRQ handler or it was disabled, exit early.
	 * Since we set PENDING, if another processor is handling
	 * a different instance of this same irq, the other processor
	 * will take care of it.
	 */
	if (unlikely(!action))
		goto out;

	/*
	 * Edge triggered interrupts need to remember
	 * pending events.
	 * This applies to any hw interrupts that allow a second
	 * instance of the same irq to arrive while we are in do_IRQ
	 * or in the handler. But the code here only handles the _second_
	 * instance of the irq, not the third or fourth. So it is mostly
	 * useful for irq hardware that does not mask cleanly in an
	 * SMP environment.
	 */
	for (;;) {
		irqreturn_t action_ret;

		spin_unlock(&desc->lock);

		action_ret = handle_IRQ_event(irq, action);
		if (!noirqdebug)
			note_interrupt(irq, desc, action_ret);

		spin_lock(&desc->lock);
		if (likely(!(desc->status & IRQ_PENDING)))
			break;
		desc->status &= ~IRQ_PENDING;
	}
	desc->status &= ~IRQ_INPROGRESS;

out:
	/*
	 * The ->end() handler has to deal with interrupts which got
	 * disabled while the handler was running.
	 */
	desc->chip->end(irq);
	spin_unlock(&desc->lock);

	return 1;
}
#endif

void early_init_irq_lock_class(void)
{
	struct irq_desc *desc;
	int i;

	for_each_irq_desc(i, desc) {
		lockdep_set_class(&desc->lock, &irq_desc_lock_class);
	}
}

unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc = irq_to_desc(irq);
	return desc ? desc->kstat_irqs[cpu] : 0;
}
EXPORT_SYMBOL(kstat_irqs_cpu);
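/*
 * Illustrative sketch (editor's note, not part of the original file):
 * a per-IRQ total, in the style of /proc/interrupts reporting, can be
 * built by summing the per-CPU counters exported above. The helper
 * name is hypothetical; for_each_possible_cpu() and kstat_irqs_cpu()
 * are real.
 *
 *	static unsigned int example_total_irqs(unsigned int irq)
 *	{
 *		unsigned int sum = 0;
 *		int cpu;
 *
 *		for_each_possible_cpu(cpu)
 *			sum += kstat_irqs_cpu(irq, cpu);
 *		return sum;
 *	}
 */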