/*
 * linux/kernel/irq/handle.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code.
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 *
 */

#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/rculist.h>
#include <linux/hash.h>
#include <linux/bootmem.h>
#include <trace/events/irq.h>

#include "internals.h"

/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
struct lock_class_key irq_desc_lock_class;

/**
 * handle_bad_irq - handle spurious and unhandled irqs
 * @irq:	the interrupt number
 * @desc:	description of the interrupt
 *
 * Handles spurious and unhandled IRQs. It also prints a debug message.
 */
void handle_bad_irq(unsigned int irq, struct irq_desc *desc)
{
	print_irq_desc(irq, desc);
	kstat_incr_irqs_this_cpu(irq, desc);
	ack_bad_irq(irq);
}

#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
static void __init init_irq_default_affinity(void)
{
	alloc_bootmem_cpumask_var(&irq_default_affinity);
	cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif

/*
 * Linux has a controller-independent interrupt architecture.
 * Every controller has a 'controller-template' that is used
 * by the main code to do the right thing. Each driver-visible
 * interrupt source is transparently wired to the appropriate
 * controller. Thus drivers need not be aware of the
 * interrupt-controller.
 *
 * The code is designed to be easily extended with new/different
 * interrupt controllers, without having to do assembly magic or
 * having to touch the generic code.
 *
 * Controller mappings for all interrupt sources:
 */
int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);

#ifdef CONFIG_SPARSE_IRQ

static struct irq_desc irq_desc_init = {
	.irq	    = -1,
	.status	    = IRQ_DISABLED,
	.chip	    = &no_irq_chip,
	.handle_irq = handle_bad_irq,
	.depth	    = 1,
	.lock	    = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
};

void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr)
{
	void *ptr;

	if (slab_is_available())
		ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs),
				   GFP_ATOMIC, node);
	else
		ptr = alloc_bootmem_node(NODE_DATA(node),
					 nr * sizeof(*desc->kstat_irqs));

	/*
	 * Don't overwrite the pointer if we could not get a new one;
	 * init_copy_kstat_irqs() can still use the old allocation.
	 */
	if (ptr) {
		printk(KERN_DEBUG " alloc kstat_irqs on node %d\n", node);
		desc->kstat_irqs = ptr;
	}
}

static void init_one_irq_desc(int irq, struct irq_desc *desc, int node)
{
	memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));

	spin_lock_init(&desc->lock);
	desc->irq = irq;
#ifdef CONFIG_SMP
	desc->node = node;
#endif
	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
	init_kstat_irqs(desc, node, nr_cpu_ids);
	if (!desc->kstat_irqs) {
		printk(KERN_ERR "can not alloc kstat_irqs\n");
		BUG_ON(1);
	}
	if (!alloc_desc_masks(desc, node, false)) {
		printk(KERN_ERR "can not alloc irq_desc cpumasks\n");
		BUG_ON(1);
	}
	init_desc_masks(desc);
	arch_init_chip_data(desc, node);
}

/*
 * Protect the sparse_irqs:
 */
DEFINE_SPINLOCK(sparse_irq_lock);

struct irq_desc **irq_desc_ptrs __read_mostly;
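
/*
 * Descriptors for the first NR_IRQS_LEGACY interrupts are allocated
 * statically, and their per-CPU counters come from the kstat_irqs_legacy
 * block set up in early_irq_init(), so the legacy range never depends on
 * per-descriptor dynamic allocation. early_irq_init() wires these entries
 * into irq_desc_ptrs[].
 */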
static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS_LEGACY-1] = {
		.irq	    = -1,
		.status	    = IRQ_DISABLED,
		.chip	    = &no_irq_chip,
		.handle_irq = handle_bad_irq,
		.depth	    = 1,
		.lock	    = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
	}
};

static unsigned int *kstat_irqs_legacy;

int __init early_irq_init(void)
{
	struct irq_desc *desc;
	int legacy_count;
	int node;
	int i;

	init_irq_default_affinity();

	/* initialize nr_irqs based on nr_cpu_ids */
	arch_probe_nr_irqs();
	printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d\n", NR_IRQS, nr_irqs);

	desc = irq_desc_legacy;
	legacy_count = ARRAY_SIZE(irq_desc_legacy);
	node = first_online_node;

	/* allocate irq_desc_ptrs array based on nr_irqs */
	irq_desc_ptrs = kcalloc(nr_irqs, sizeof(void *), GFP_NOWAIT);

	/* allocate based on nr_cpu_ids */
	kstat_irqs_legacy = kzalloc_node(NR_IRQS_LEGACY * nr_cpu_ids *
					 sizeof(int), GFP_NOWAIT, node);

	for (i = 0; i < legacy_count; i++) {
		desc[i].irq = i;
		desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids;
		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
		alloc_desc_masks(&desc[i], node, true);
		init_desc_masks(&desc[i]);
		irq_desc_ptrs[i] = desc + i;
	}

	for (i = legacy_count; i < nr_irqs; i++)
		irq_desc_ptrs[i] = NULL;

	return arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	if (irq_desc_ptrs && irq < nr_irqs)
		return irq_desc_ptrs[irq];

	return NULL;
}

struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
{
	struct irq_desc *desc;
	unsigned long flags;

	if (irq >= nr_irqs) {
		WARN(1, "irq (%d) >= nr_irqs (%d) in irq_to_desc_alloc\n",
			irq, nr_irqs);
		return NULL;
	}

	desc = irq_desc_ptrs[irq];
	if (desc)
		return desc;

	spin_lock_irqsave(&sparse_irq_lock, flags);

	/* We have to check it to avoid races with another CPU */
	desc = irq_desc_ptrs[irq];
	if (desc)
		goto out_unlock;

	if (slab_is_available())
		desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
	else
		desc = alloc_bootmem_node(NODE_DATA(node), sizeof(*desc));

	printk(KERN_DEBUG " alloc irq_desc for %d on node %d\n", irq, node);
	if (!desc) {
		printk(KERN_ERR "can not alloc irq_desc\n");
		BUG_ON(1);
	}
	init_one_irq_desc(irq, desc, node);

	irq_desc_ptrs[irq] = desc;

out_unlock:
	spin_unlock_irqrestore(&sparse_irq_lock, flags);

	return desc;
}

#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS-1] = {
		.status	    = IRQ_DISABLED,
		.chip	    = &no_irq_chip,
		.handle_irq = handle_bad_irq,
		.depth	    = 1,
		.lock	    = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
	}
};

static unsigned int kstat_irqs_all[NR_IRQS][NR_CPUS];

int __init early_irq_init(void)
{
	struct irq_desc *desc;
	int count;
	int i;

	init_irq_default_affinity();

	printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);

	desc = irq_desc;
	count = ARRAY_SIZE(irq_desc);

	for (i = 0; i < count; i++) {
		desc[i].irq = i;
		alloc_desc_masks(&desc[i], 0, true);
		init_desc_masks(&desc[i]);
		desc[i].kstat_irqs = kstat_irqs_all[i];
	}
	return arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}
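
/*
 * With a fixed-size irq_desc[] array every descriptor already exists,
 * so the "allocating" lookup degenerates to a plain irq_to_desc().
 */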
struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node)
{
	return irq_to_desc(irq);
}
#endif /* !CONFIG_SPARSE_IRQ */

void clear_kstat_irqs(struct irq_desc *desc)
{
	memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs)));
}

/*
 * What should we do if we get a hw irq event on an illegal vector?
 * Each architecture has to answer this for itself.
 */
static void ack_bad(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	print_irq_desc(irq, desc);
	ack_bad_irq(irq);
}

/*
 * NOP functions
 */
static void noop(unsigned int irq)
{
}

static unsigned int noop_ret(unsigned int irq)
{
	return 0;
}

/*
 * Generic no controller implementation
 */
struct irq_chip no_irq_chip = {
	.name		= "none",
	.startup	= noop_ret,
	.shutdown	= noop,
	.enable		= noop,
	.disable	= noop,
	.ack		= ack_bad,
	.end		= noop,
};

/*
 * Generic dummy implementation which can be used for
 * really dumb interrupt sources
 */
struct irq_chip dummy_irq_chip = {
	.name		= "dummy",
	.startup	= noop_ret,
	.shutdown	= noop,
	.enable		= noop,
	.disable	= noop,
	.ack		= noop,
	.mask		= noop,
	.unmask		= noop,
	.end		= noop,
};

/*
 * Special, empty irq handler:
 */
irqreturn_t no_action(int cpl, void *dev_id)
{
	return IRQ_NONE;
}

static void warn_no_thread(unsigned int irq, struct irqaction *action)
{
	if (test_and_set_bit(IRQTF_WARNED, &action->thread_flags))
		return;

	printk(KERN_WARNING "IRQ %d device %s returned IRQ_WAKE_THREAD "
	       "but no thread function available.", irq, action->name);
}

/**
 * handle_IRQ_event - irq action chain handler
 * @irq:	the interrupt number
 * @action:	the interrupt action chain for this irq
 *
 * Handles the action chain of an irq event
 */
irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
{
	irqreturn_t ret, retval = IRQ_NONE;
	unsigned int status = 0;

	if (!(action->flags & IRQF_DISABLED))
		local_irq_enable_in_hardirq();

	do {
		trace_irq_handler_entry(irq, action);
		ret = action->handler(irq, action->dev_id);
		trace_irq_handler_exit(irq, action, ret);

		switch (ret) {
		case IRQ_WAKE_THREAD:
			/*
			 * Set result to handled so the spurious check
			 * does not trigger.
			 */
			ret = IRQ_HANDLED;

			/*
			 * Catch drivers which return WAKE_THREAD but
			 * did not set up a thread function
			 */
			if (unlikely(!action->thread_fn)) {
				warn_no_thread(irq, action);
				break;
			}

			/*
			 * Wake up the handler thread for this
			 * action. In case the thread crashed and was
			 * killed we just pretend that we handled the
			 * interrupt. The hardirq handler above has
			 * disabled the device interrupt, so no irq
			 * storm is lurking.
			 */
			if (likely(!test_bit(IRQTF_DIED,
					     &action->thread_flags))) {
				set_bit(IRQTF_RUNTHREAD, &action->thread_flags);
				wake_up_process(action->thread);
			}

			/* Fall through to add to randomness */
		case IRQ_HANDLED:
			status |= action->flags;
			break;

		default:
			break;
		}

		retval |= ret;
		action = action->next;
	} while (action);

	if (status & IRQF_SAMPLE_RANDOM)
		add_interrupt_randomness(irq);
	local_irq_disable();

	return retval;
}

#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ

#ifdef CONFIG_ENABLE_WARN_DEPRECATED
# warning __do_IRQ is deprecated. Please convert to proper flow handlers
#endif

/**
 * __do_IRQ - original all-in-one high-level IRQ handler
 * @irq:	the interrupt number
 *
 *	__do_IRQ handles all normal device IRQs (the special
 *	SMP cross-CPU interrupts have their own specific
 *	handlers).
 *
 *	This is the original x86 implementation which is used for every
 *	interrupt type.
 */
unsigned int __do_IRQ(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned int status;

	kstat_incr_irqs_this_cpu(irq, desc);

	if (CHECK_IRQ_PER_CPU(desc->status)) {
		irqreturn_t action_ret;

		/*
		 * No locking required for CPU-local interrupts:
		 */
		if (desc->chip->ack)
			desc->chip->ack(irq);
		if (likely(!(desc->status & IRQ_DISABLED))) {
			action_ret = handle_IRQ_event(irq, desc->action);
			if (!noirqdebug)
				note_interrupt(irq, desc, action_ret);
		}
		desc->chip->end(irq);
		return 1;
	}

	spin_lock(&desc->lock);
	if (desc->chip->ack)
		desc->chip->ack(irq);
	/*
	 * REPLAY is when Linux resends an IRQ that was dropped earlier;
	 * WAITING is used by probe to mark irqs that are being tested.
	 */
	status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
	status |= IRQ_PENDING; /* we _want_ to handle it */

	/*
	 * If the IRQ is disabled for whatever reason, we cannot
	 * use the action we have.
	 */
	action = NULL;
	if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
		action = desc->action;
		status &= ~IRQ_PENDING; /* we commit to handling */
		status |= IRQ_INPROGRESS; /* we are handling it */
	}
	desc->status = status;

	/*
	 * If there is no IRQ handler or it was disabled, exit early.
	 * Since we set PENDING, if another processor is handling
	 * a different instance of this same irq, the other processor
	 * will take care of it.
	 */
	if (unlikely(!action))
		goto out;

	/*
	 * Edge-triggered interrupts need to remember
	 * pending events.
	 * This applies to any hw interrupts that allow a second
	 * instance of the same irq to arrive while we are in do_IRQ
	 * or in the handler. But the code here only handles the _second_
	 * instance of the irq, not the third or fourth. So it is mostly
	 * useful for irq hardware that does not mask cleanly in an
	 * SMP environment.
	 */
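	/*
	 * Run the handler chain with desc->lock dropped. If the interrupt
	 * was marked IRQ_PENDING again while we were in the handlers (for
	 * example by another CPU taking the early-exit path above), loop
	 * and service that instance too before clearing IRQ_INPROGRESS.
	 */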
	for (;;) {
		irqreturn_t action_ret;

		spin_unlock(&desc->lock);

		action_ret = handle_IRQ_event(irq, action);
		if (!noirqdebug)
			note_interrupt(irq, desc, action_ret);

		spin_lock(&desc->lock);
		if (likely(!(desc->status & IRQ_PENDING)))
			break;
		desc->status &= ~IRQ_PENDING;
	}
	desc->status &= ~IRQ_INPROGRESS;

out:
	/*
	 * The ->end() handler has to deal with interrupts which got
	 * disabled while the handler was running.
	 */
	desc->chip->end(irq);
	spin_unlock(&desc->lock);

	return 1;
}
#endif

void early_init_irq_lock_class(void)
{
	struct irq_desc *desc;
	int i;

	for_each_irq_desc(i, desc) {
		lockdep_set_class(&desc->lock, &irq_desc_lock_class);
	}
}

unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc = irq_to_desc(irq);
	return desc ? desc->kstat_irqs[cpu] : 0;
}
EXPORT_SYMBOL(kstat_irqs_cpu);