/*
 * linux/kernel/irq/handle.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code.
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 *
 */

#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/rculist.h>
#include <linux/hash.h>
#include <linux/bootmem.h>
#include <trace/events/irq.h>

#include "internals.h"

/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
struct lock_class_key irq_desc_lock_class;

/**
 * handle_bad_irq - handle spurious and unhandled irqs
 * @irq:	the interrupt number
 * @desc:	description of the interrupt
 *
 * Handles spurious and unhandled IRQs. It also prints a debug message.
 */
void handle_bad_irq(unsigned int irq, struct irq_desc *desc)
{
	print_irq_desc(irq, desc);
	kstat_incr_irqs_this_cpu(irq, desc);
	ack_bad_irq(irq);
}

#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
static void __init init_irq_default_affinity(void)
{
	alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
	cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif

/*
 * Linux has a controller-independent interrupt architecture.
 * Every controller has a 'controller-template' that is used
 * by the main code to do the right thing. Each driver-visible
 * interrupt source is transparently wired to the appropriate
 * controller. Thus drivers need not be aware of the
 * interrupt-controller.
 *
 * The code is designed to be easily extended with new/different
 * interrupt controllers, without having to do assembly magic or
 * having to touch the generic code.
 *
 * Controller mappings for all interrupt sources:
 */
int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);
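/*
 * Illustrative sketch (not part of this file's logic): because the generic
 * layer hides the interrupt controller, a typical driver only registers a
 * handler against an irq number.  The device, handler and helper names
 * below are hypothetical.
 *
 *	static irqreturn_t foo_interrupt(int irq, void *dev_id)
 *	{
 *		struct foo_device *foo = dev_id;
 *
 *		if (!foo_irq_pending(foo))	// hypothetical register check
 *			return IRQ_NONE;	// not ours (shared line)
 *		foo_handle_event(foo);		// hypothetical device work
 *		return IRQ_HANDLED;
 *	}
 *
 *	err = request_irq(irq, foo_interrupt, IRQF_SHARED, "foo", foo);
 *	...
 *	free_irq(irq, foo);
 */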

#ifdef CONFIG_SPARSE_IRQ

static struct irq_desc irq_desc_init = {
	.irq	    = -1,
	.status	    = IRQ_DISABLED,
	.chip	    = &no_irq_chip,
	.handle_irq = handle_bad_irq,
	.depth	    = 1,
	.lock	    = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
};

void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr)
{
	void *ptr;

	if (slab_is_available())
		ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs),
				   GFP_ATOMIC, node);
	else
		ptr = alloc_bootmem_node(NODE_DATA(node),
					 nr * sizeof(*desc->kstat_irqs));

	/*
	 * Don't overwrite if we can not get a new one.
	 * init_copy_kstat_irqs() could still use the old one.
	 */
	if (ptr) {
		printk(KERN_DEBUG " alloc kstat_irqs on node %d\n", node);
		desc->kstat_irqs = ptr;
	}
}
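/*
 * Layout sketch (illustrative only, restating what init_kstat_irqs() sets up
 * above; "cpu" and "count" below are just placeholders): each descriptor gets
 * a flat array of nr_cpu_ids counters, one slot per possible CPU, so updating
 * and reading a count is plain array indexing:
 *
 *	desc->kstat_irqs[smp_processor_id()]++;	// roughly kstat_incr_irqs_this_cpu()
 *	count = desc->kstat_irqs[cpu];		// what kstat_irqs_cpu() returns
 */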

static void init_one_irq_desc(int irq, struct irq_desc *desc, int node)
{
	memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));

	spin_lock_init(&desc->lock);
	desc->irq = irq;
#ifdef CONFIG_SMP
	desc->node = node;
#endif
	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
	init_kstat_irqs(desc, node, nr_cpu_ids);
	if (!desc->kstat_irqs) {
		printk(KERN_ERR "can not alloc kstat_irqs\n");
		BUG_ON(1);
	}
	if (!alloc_desc_masks(desc, node, false)) {
		printk(KERN_ERR "can not alloc irq_desc cpumasks\n");
		BUG_ON(1);
	}
	init_desc_masks(desc);
	arch_init_chip_data(desc, node);
}

/*
 * Protect the sparse_irqs:
 */
DEFINE_SPINLOCK(sparse_irq_lock);

struct irq_desc **irq_desc_ptrs __read_mostly;

static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS_LEGACY-1] = {
		.irq	    = -1,
		.status	    = IRQ_DISABLED,
		.chip	    = &no_irq_chip,
		.handle_irq = handle_bad_irq,
		.depth	    = 1,
		.lock	    = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
	}
};

static unsigned int *kstat_irqs_legacy;

int __init early_irq_init(void)
{
	struct irq_desc *desc;
	int legacy_count;
	int node;
	int i;

	init_irq_default_affinity();

	/* initialize nr_irqs based on nr_cpu_ids */
	arch_probe_nr_irqs();
	printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d\n", NR_IRQS, nr_irqs);

	desc = irq_desc_legacy;
	legacy_count = ARRAY_SIZE(irq_desc_legacy);
	node = first_online_node;

	/* allocate irq_desc_ptrs array based on nr_irqs */
	irq_desc_ptrs = kcalloc(nr_irqs, sizeof(void *), GFP_NOWAIT);

	/* allocate based on nr_cpu_ids */
	kstat_irqs_legacy = kzalloc_node(NR_IRQS_LEGACY * nr_cpu_ids *
					 sizeof(int), GFP_NOWAIT, node);

	for (i = 0; i < legacy_count; i++) {
		desc[i].irq = i;
#ifdef CONFIG_SMP
		desc[i].node = node;
#endif
		desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids;
		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
		alloc_desc_masks(&desc[i], node, true);
		init_desc_masks(&desc[i]);
		irq_desc_ptrs[i] = desc + i;
	}

	for (i = legacy_count; i < nr_irqs; i++)
		irq_desc_ptrs[i] = NULL;

	return arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	if (irq_desc_ptrs && irq < nr_irqs)
		return irq_desc_ptrs[irq];

	return NULL;
}

struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
{
	struct irq_desc *desc;
	unsigned long flags;

	if (irq >= nr_irqs) {
		WARN(1, "irq (%d) >= nr_irqs (%d) in irq_to_desc_alloc\n",
			irq, nr_irqs);
		return NULL;
	}

	desc = irq_desc_ptrs[irq];
	if (desc)
		return desc;

	spin_lock_irqsave(&sparse_irq_lock, flags);

	/* We have to check it to avoid races with another CPU */
	desc = irq_desc_ptrs[irq];
	if (desc)
		goto out_unlock;

	if (slab_is_available())
		desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
	else
		desc = alloc_bootmem_node(NODE_DATA(node), sizeof(*desc));

	printk(KERN_DEBUG " alloc irq_desc for %d on node %d\n", irq, node);
	if (!desc) {
		printk(KERN_ERR "can not alloc irq_desc\n");
		BUG_ON(1);
	}
	init_one_irq_desc(irq, desc, node);

	irq_desc_ptrs[irq] = desc;

out_unlock:
	spin_unlock_irqrestore(&sparse_irq_lock, flags);

	return desc;
}
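/*
 * Usage sketch (illustrative only, not code from this file): callers that may
 * see an irq number for the first time, e.g. arch code wiring up a newly
 * discovered interrupt, go through the allocating lookup above and must cope
 * with a NULL return for out-of-range numbers:
 *
 *	struct irq_desc *desc = irq_to_desc_alloc_node(irq, node);
 *
 *	if (!desc)
 *		return -EINVAL;		// irq >= nr_irqs, nothing allocated
 */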

#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS-1] = {
		.status	    = IRQ_DISABLED,
		.chip	    = &no_irq_chip,
		.handle_irq = handle_bad_irq,
		.depth	    = 1,
		.lock	    = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
	}
};

static unsigned int kstat_irqs_all[NR_IRQS][NR_CPUS];
int __init early_irq_init(void)
{
	struct irq_desc *desc;
	int count;
	int i;

	init_irq_default_affinity();

	printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);

	desc = irq_desc;
	count = ARRAY_SIZE(irq_desc);

	for (i = 0; i < count; i++) {
		desc[i].irq = i;
		alloc_desc_masks(&desc[i], 0, true);
		init_desc_masks(&desc[i]);
		desc[i].kstat_irqs = kstat_irqs_all[i];
	}
	return arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}

struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node)
{
	return irq_to_desc(irq);
}
#endif /* !CONFIG_SPARSE_IRQ */

void clear_kstat_irqs(struct irq_desc *desc)
{
	memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs)));
}

/*
 * What should we do if we get a hw irq event on an illegal vector?
 * Each architecture has to answer this itself.
 */
static void ack_bad(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	print_irq_desc(irq, desc);
	ack_bad_irq(irq);
}

/*
 * NOP functions
 */
static void noop(unsigned int irq)
{
}

static unsigned int noop_ret(unsigned int irq)
{
	return 0;
}

/*
 * Generic no controller implementation
 */
struct irq_chip no_irq_chip = {
	.name		= "none",
	.startup	= noop_ret,
	.shutdown	= noop,
	.enable		= noop,
	.disable	= noop,
	.ack		= ack_bad,
	.end		= noop,
};

/*
 * Generic dummy implementation which can be used for
 * real dumb interrupt sources
 */
struct irq_chip dummy_irq_chip = {
	.name		= "dummy",
	.startup	= noop_ret,
	.shutdown	= noop,
	.enable		= noop,
	.disable	= noop,
	.ack		= noop,
	.mask		= noop,
	.unmask		= noop,
	.end		= noop,
};

/*
 * Special, empty irq handler:
 */
irqreturn_t no_action(int cpl, void *dev_id)
{
	return IRQ_NONE;
}

static void warn_no_thread(unsigned int irq, struct irqaction *action)
{
	if (test_and_set_bit(IRQTF_WARNED, &action->thread_flags))
		return;

	printk(KERN_WARNING "IRQ %d device %s returned IRQ_WAKE_THREAD "
	       "but no thread function available.\n", irq, action->name);
}

/**
 * handle_IRQ_event - irq action chain handler
 * @irq:	the interrupt number
 * @action:	the interrupt action chain for this irq
 *
 * Handles the action chain of an irq event
 */
irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
{
	irqreturn_t ret, retval = IRQ_NONE;
	unsigned int status = 0;

	if (!(action->flags & IRQF_DISABLED))
		local_irq_enable_in_hardirq();

	do {
		trace_irq_handler_entry(irq, action);
		ret = action->handler(irq, action->dev_id);
		trace_irq_handler_exit(irq, action, ret);

		switch (ret) {
		case IRQ_WAKE_THREAD:
			/*
			 * Set result to handled so the spurious check
			 * does not trigger.
			 */
			ret = IRQ_HANDLED;

			/*
			 * Catch drivers which return WAKE_THREAD but
			 * did not set up a thread function
			 */
			if (unlikely(!action->thread_fn)) {
				warn_no_thread(irq, action);
				break;
			}

			/*
			 * Wake up the handler thread for this
			 * action. In case the thread crashed and was
			 * killed we just pretend that we handled the
			 * interrupt. The hardirq handler above has
			 * disabled the device interrupt, so no irq
			 * storm is lurking.
			 */
			if (likely(!test_bit(IRQTF_DIED,
					     &action->thread_flags))) {
				set_bit(IRQTF_RUNTHREAD, &action->thread_flags);
				wake_up_process(action->thread);
			}

			/* Fall through to add to randomness */
		case IRQ_HANDLED:
			status |= action->flags;
			break;

		default:
			break;
		}

		retval |= ret;
		action = action->next;
	} while (action);

	if (status & IRQF_SAMPLE_RANDOM)
		add_interrupt_randomness(irq);
	local_irq_disable();

	return retval;
}
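/*
 * Illustrative sketch of the IRQ_WAKE_THREAD path handled above (not code
 * from this file; the device, helper and function names are hypothetical).
 * A driver splits its work between a hard irq handler that quiesces the
 * device and returns IRQ_WAKE_THREAD, and a thread function that does the
 * slow work in process context:
 *
 *	static irqreturn_t foo_quick_check(int irq, void *dev_id)
 *	{
 *		struct foo_device *foo = dev_id;
 *
 *		if (!foo_irq_pending(foo))	// hypothetical helper
 *			return IRQ_NONE;
 *		foo_mask_device_irq(foo);	// keep the line quiet
 *		return IRQ_WAKE_THREAD;		// finish in the thread
 *	}
 *
 *	static irqreturn_t foo_thread_fn(int irq, void *dev_id)
 *	{
 *		struct foo_device *foo = dev_id;
 *
 *		foo_process_events(foo);	// may sleep here
 *		foo_unmask_device_irq(foo);
 *		return IRQ_HANDLED;
 *	}
 *
 *	err = request_threaded_irq(irq, foo_quick_check, foo_thread_fn,
 *				   0, "foo", foo);
 */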

#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ

#ifdef CONFIG_ENABLE_WARN_DEPRECATED
# warning __do_IRQ is deprecated. Please convert to proper flow handlers
#endif

/**
 * __do_IRQ - original all in one high-level IRQ handler
 * @irq:	the interrupt number
 *
 * __do_IRQ handles all normal device IRQs (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 *
 * This is the original x86 implementation which is used for every
 * interrupt type.
 */
unsigned int __do_IRQ(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned int status;

	kstat_incr_irqs_this_cpu(irq, desc);

	if (CHECK_IRQ_PER_CPU(desc->status)) {
		irqreturn_t action_ret;

		/*
		 * No locking required for CPU-local interrupts:
		 */
		if (desc->chip->ack)
			desc->chip->ack(irq);
		if (likely(!(desc->status & IRQ_DISABLED))) {
			action_ret = handle_IRQ_event(irq, desc->action);
			if (!noirqdebug)
				note_interrupt(irq, desc, action_ret);
		}
		desc->chip->end(irq);
		return 1;
	}

	spin_lock(&desc->lock);
	if (desc->chip->ack)
		desc->chip->ack(irq);
	/*
	 * REPLAY is when Linux resends an IRQ that was dropped earlier
	 * WAITING is used by probe to mark irqs that are being tested
	 */
	status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
	status |= IRQ_PENDING; /* we _want_ to handle it */

	/*
	 * If the IRQ is disabled for whatever reason, we cannot
	 * use the action we have.
	 */
	action = NULL;
	if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
		action = desc->action;
		status &= ~IRQ_PENDING; /* we commit to handling */
		status |= IRQ_INPROGRESS; /* we are handling it */
	}
	desc->status = status;

	/*
	 * If there is no IRQ handler or it was disabled, exit early.
	 * Since we set PENDING, if another processor is handling
	 * a different instance of this same irq, the other processor
	 * will take care of it.
	 */
	if (unlikely(!action))
		goto out;

	/*
	 * Edge triggered interrupts need to remember
	 * pending events.
	 * This applies to any hw interrupts that allow a second
	 * instance of the same irq to arrive while we are in do_IRQ
	 * or in the handler. But the code here only handles the _second_
	 * instance of the irq, not the third or fourth. So it is mostly
	 * useful for irq hardware that does not mask cleanly in an
	 * SMP environment.
	 */
	for (;;) {
		irqreturn_t action_ret;

		spin_unlock(&desc->lock);

		action_ret = handle_IRQ_event(irq, action);
		if (!noirqdebug)
			note_interrupt(irq, desc, action_ret);

		spin_lock(&desc->lock);
		if (likely(!(desc->status & IRQ_PENDING)))
			break;
		desc->status &= ~IRQ_PENDING;
	}
	desc->status &= ~IRQ_INPROGRESS;

out:
	/*
	 * The ->end() handler has to deal with interrupts which got
	 * disabled while the handler was running.
	 */
	desc->chip->end(irq);
	spin_unlock(&desc->lock);

	return 1;
}
#endif
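/*
 * Migration sketch for the deprecation warning above (illustrative only;
 * the chip name is hypothetical): instead of routing every interrupt through
 * __do_IRQ(), an architecture installs a flow handler per interrupt and lets
 * its low-level entry code hand the irq number to the generic layer:
 *
 *	set_irq_chip_and_handler(irq, &foo_pic_chip, handle_level_irq);
 *	...
 *	// in the arch interrupt entry path:
 *	generic_handle_irq(irq);
 */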

void early_init_irq_lock_class(void)
{
	struct irq_desc *desc;
	int i;

	for_each_irq_desc(i, desc) {
		lockdep_set_class(&desc->lock, &irq_desc_lock_class);
	}
}

unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc = irq_to_desc(irq);
	return desc ? desc->kstat_irqs[cpu] : 0;
}
EXPORT_SYMBOL(kstat_irqs_cpu);
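/*
 * Usage sketch (illustrative only, not code from this file): consumers such
 * as /proc/interrupts style reporting sum the per-CPU counters exposed by
 * kstat_irqs_cpu() to get a total for one irq:
 *
 *	unsigned int sum = 0;
 *	int cpu;
 *
 *	for_each_possible_cpu(cpu)
 *		sum += kstat_irqs_cpu(irq, cpu);
 */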