1 /* 2 * linux/kernel/irq/chip.c 3 * 4 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar 5 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King 6 * 7 * This file contains the core interrupt handling code, for irq-chip 8 * based architectures. 9 * 10 * Detailed information is available in Documentation/DocBook/genericirq 11 */ 12 13 #include <linux/irq.h> 14 #include <linux/msi.h> 15 #include <linux/module.h> 16 #include <linux/interrupt.h> 17 #include <linux/kernel_stat.h> 18 19 #include "internals.h" 20 21 /** 22 * irq_set_chip - set the irq chip for an irq 23 * @irq: irq number 24 * @chip: pointer to irq chip description structure 25 */ 26 int irq_set_chip(unsigned int irq, struct irq_chip *chip) 27 { 28 struct irq_desc *desc = irq_to_desc(irq); 29 unsigned long flags; 30 31 if (!desc) { 32 WARN(1, KERN_ERR "Trying to install chip for IRQ%d\n", irq); 33 return -EINVAL; 34 } 35 36 if (!chip) 37 chip = &no_irq_chip; 38 39 raw_spin_lock_irqsave(&desc->lock, flags); 40 irq_chip_set_defaults(chip); 41 desc->irq_data.chip = chip; 42 raw_spin_unlock_irqrestore(&desc->lock, flags); 43 44 return 0; 45 } 46 EXPORT_SYMBOL(irq_set_chip); 47 48 /** 49 * irq_set_type - set the irq trigger type for an irq 50 * @irq: irq number 51 * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h 52 */ 53 int irq_set_irq_type(unsigned int irq, unsigned int type) 54 { 55 struct irq_desc *desc = irq_to_desc(irq); 56 unsigned long flags; 57 int ret = -ENXIO; 58 59 if (!desc) { 60 printk(KERN_ERR "Trying to set irq type for IRQ%d\n", irq); 61 return -ENODEV; 62 } 63 64 type &= IRQ_TYPE_SENSE_MASK; 65 if (type == IRQ_TYPE_NONE) 66 return 0; 67 68 chip_bus_lock(desc); 69 raw_spin_lock_irqsave(&desc->lock, flags); 70 ret = __irq_set_trigger(desc, irq, type); 71 raw_spin_unlock_irqrestore(&desc->lock, flags); 72 chip_bus_sync_unlock(desc); 73 return ret; 74 } 75 EXPORT_SYMBOL(irq_set_irq_type); 76 77 /** 78 * irq_set_handler_data - set irq handler data for an irq 79 * @irq: 
Interrupt number 80 * @data: Pointer to interrupt specific data 81 * 82 * Set the hardware irq controller data for an irq 83 */ 84 int irq_set_handler_data(unsigned int irq, void *data) 85 { 86 struct irq_desc *desc = irq_to_desc(irq); 87 unsigned long flags; 88 89 if (!desc) { 90 printk(KERN_ERR 91 "Trying to install controller data for IRQ%d\n", irq); 92 return -EINVAL; 93 } 94 95 raw_spin_lock_irqsave(&desc->lock, flags); 96 desc->irq_data.handler_data = data; 97 raw_spin_unlock_irqrestore(&desc->lock, flags); 98 return 0; 99 } 100 EXPORT_SYMBOL(irq_set_handler_data); 101 102 /** 103 * irq_set_msi_desc - set MSI descriptor data for an irq 104 * @irq: Interrupt number 105 * @entry: Pointer to MSI descriptor data 106 * 107 * Set the MSI descriptor entry for an irq 108 */ 109 int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry) 110 { 111 struct irq_desc *desc = irq_to_desc(irq); 112 unsigned long flags; 113 114 if (!desc) { 115 printk(KERN_ERR 116 "Trying to install msi data for IRQ%d\n", irq); 117 return -EINVAL; 118 } 119 120 raw_spin_lock_irqsave(&desc->lock, flags); 121 desc->irq_data.msi_desc = entry; 122 if (entry) 123 entry->irq = irq; 124 raw_spin_unlock_irqrestore(&desc->lock, flags); 125 return 0; 126 } 127 128 /** 129 * irq_set_chip_data - set irq chip data for an irq 130 * @irq: Interrupt number 131 * @data: Pointer to chip specific data 132 * 133 * Set the hardware irq chip data for an irq 134 */ 135 int irq_set_chip_data(unsigned int irq, void *data) 136 { 137 struct irq_desc *desc = irq_to_desc(irq); 138 unsigned long flags; 139 140 if (!desc) { 141 printk(KERN_ERR 142 "Trying to install chip data for IRQ%d\n", irq); 143 return -EINVAL; 144 } 145 146 if (!desc->irq_data.chip) { 147 printk(KERN_ERR "BUG: bad set_irq_chip_data(IRQ#%d)\n", irq); 148 return -EINVAL; 149 } 150 151 raw_spin_lock_irqsave(&desc->lock, flags); 152 desc->irq_data.chip_data = data; 153 raw_spin_unlock_irqrestore(&desc->lock, flags); 154 155 return 0; 156 } 157 
EXPORT_SYMBOL(irq_set_chip_data); 158 159 struct irq_data *irq_get_irq_data(unsigned int irq) 160 { 161 struct irq_desc *desc = irq_to_desc(irq); 162 163 return desc ? &desc->irq_data : NULL; 164 } 165 EXPORT_SYMBOL_GPL(irq_get_irq_data); 166 167 static void irq_state_clr_disabled(struct irq_desc *desc) 168 { 169 desc->istate &= ~IRQS_DISABLED; 170 irq_compat_clr_disabled(desc); 171 } 172 173 static void irq_state_set_disabled(struct irq_desc *desc) 174 { 175 desc->istate |= IRQS_DISABLED; 176 irq_compat_set_disabled(desc); 177 } 178 179 static void irq_state_clr_masked(struct irq_desc *desc) 180 { 181 desc->istate &= ~IRQS_MASKED; 182 irq_compat_clr_masked(desc); 183 } 184 185 static void irq_state_set_masked(struct irq_desc *desc) 186 { 187 desc->istate |= IRQS_MASKED; 188 irq_compat_set_masked(desc); 189 } 190 191 int irq_startup(struct irq_desc *desc) 192 { 193 irq_state_clr_disabled(desc); 194 desc->depth = 0; 195 196 if (desc->irq_data.chip->irq_startup) { 197 int ret = desc->irq_data.chip->irq_startup(&desc->irq_data); 198 irq_state_clr_masked(desc); 199 return ret; 200 } 201 202 irq_enable(desc); 203 return 0; 204 } 205 206 void irq_shutdown(struct irq_desc *desc) 207 { 208 irq_state_set_disabled(desc); 209 desc->depth = 1; 210 if (desc->irq_data.chip->irq_shutdown) 211 desc->irq_data.chip->irq_shutdown(&desc->irq_data); 212 if (desc->irq_data.chip->irq_disable) 213 desc->irq_data.chip->irq_disable(&desc->irq_data); 214 else 215 desc->irq_data.chip->irq_mask(&desc->irq_data); 216 irq_state_set_masked(desc); 217 } 218 219 void irq_enable(struct irq_desc *desc) 220 { 221 irq_state_clr_disabled(desc); 222 if (desc->irq_data.chip->irq_enable) 223 desc->irq_data.chip->irq_enable(&desc->irq_data); 224 else 225 desc->irq_data.chip->irq_unmask(&desc->irq_data); 226 irq_state_clr_masked(desc); 227 } 228 229 void irq_disable(struct irq_desc *desc) 230 { 231 irq_state_set_disabled(desc); 232 if (desc->irq_data.chip->irq_disable) { 233 
desc->irq_data.chip->irq_disable(&desc->irq_data); 234 } 235 irq_state_set_masked(desc); 236 } 237 238 #ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED 239 /* Temporary migration helpers */ 240 static void compat_irq_mask(struct irq_data *data) 241 { 242 data->chip->mask(data->irq); 243 } 244 245 static void compat_irq_unmask(struct irq_data *data) 246 { 247 data->chip->unmask(data->irq); 248 } 249 250 static void compat_irq_ack(struct irq_data *data) 251 { 252 data->chip->ack(data->irq); 253 } 254 255 static void compat_irq_mask_ack(struct irq_data *data) 256 { 257 data->chip->mask_ack(data->irq); 258 } 259 260 static void compat_irq_eoi(struct irq_data *data) 261 { 262 data->chip->eoi(data->irq); 263 } 264 265 static void compat_irq_enable(struct irq_data *data) 266 { 267 data->chip->enable(data->irq); 268 } 269 270 static void compat_irq_disable(struct irq_data *data) 271 { 272 data->chip->disable(data->irq); 273 } 274 275 static void compat_irq_shutdown(struct irq_data *data) 276 { 277 data->chip->shutdown(data->irq); 278 } 279 280 static unsigned int compat_irq_startup(struct irq_data *data) 281 { 282 return data->chip->startup(data->irq); 283 } 284 285 static int compat_irq_set_affinity(struct irq_data *data, 286 const struct cpumask *dest, bool force) 287 { 288 return data->chip->set_affinity(data->irq, dest); 289 } 290 291 static int compat_irq_set_type(struct irq_data *data, unsigned int type) 292 { 293 return data->chip->set_type(data->irq, type); 294 } 295 296 static int compat_irq_set_wake(struct irq_data *data, unsigned int on) 297 { 298 return data->chip->set_wake(data->irq, on); 299 } 300 301 static int compat_irq_retrigger(struct irq_data *data) 302 { 303 return data->chip->retrigger(data->irq); 304 } 305 306 static void compat_bus_lock(struct irq_data *data) 307 { 308 data->chip->bus_lock(data->irq); 309 } 310 311 static void compat_bus_sync_unlock(struct irq_data *data) 312 { 313 data->chip->bus_sync_unlock(data->irq); 314 } 315 #endif 316 317 /* 318 
* Fixup enable/disable function pointers 319 */ 320 void irq_chip_set_defaults(struct irq_chip *chip) 321 { 322 #ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED 323 if (chip->enable) 324 chip->irq_enable = compat_irq_enable; 325 if (chip->disable) 326 chip->irq_disable = compat_irq_disable; 327 if (chip->shutdown) 328 chip->irq_shutdown = compat_irq_shutdown; 329 if (chip->startup) 330 chip->irq_startup = compat_irq_startup; 331 if (!chip->end) 332 chip->end = dummy_irq_chip.end; 333 if (chip->bus_lock) 334 chip->irq_bus_lock = compat_bus_lock; 335 if (chip->bus_sync_unlock) 336 chip->irq_bus_sync_unlock = compat_bus_sync_unlock; 337 if (chip->mask) 338 chip->irq_mask = compat_irq_mask; 339 if (chip->unmask) 340 chip->irq_unmask = compat_irq_unmask; 341 if (chip->ack) 342 chip->irq_ack = compat_irq_ack; 343 if (chip->mask_ack) 344 chip->irq_mask_ack = compat_irq_mask_ack; 345 if (chip->eoi) 346 chip->irq_eoi = compat_irq_eoi; 347 if (chip->set_affinity) 348 chip->irq_set_affinity = compat_irq_set_affinity; 349 if (chip->set_type) 350 chip->irq_set_type = compat_irq_set_type; 351 if (chip->set_wake) 352 chip->irq_set_wake = compat_irq_set_wake; 353 if (chip->retrigger) 354 chip->irq_retrigger = compat_irq_retrigger; 355 #endif 356 } 357 358 static inline void mask_ack_irq(struct irq_desc *desc) 359 { 360 if (desc->irq_data.chip->irq_mask_ack) 361 desc->irq_data.chip->irq_mask_ack(&desc->irq_data); 362 else { 363 desc->irq_data.chip->irq_mask(&desc->irq_data); 364 if (desc->irq_data.chip->irq_ack) 365 desc->irq_data.chip->irq_ack(&desc->irq_data); 366 } 367 irq_state_set_masked(desc); 368 } 369 370 void mask_irq(struct irq_desc *desc) 371 { 372 if (desc->irq_data.chip->irq_mask) { 373 desc->irq_data.chip->irq_mask(&desc->irq_data); 374 irq_state_set_masked(desc); 375 } 376 } 377 378 void unmask_irq(struct irq_desc *desc) 379 { 380 if (desc->irq_data.chip->irq_unmask) { 381 desc->irq_data.chip->irq_unmask(&desc->irq_data); 382 irq_state_clr_masked(desc); 383 } 384 } 385 
/**
 * handle_nested_irq - Handle a nested irq from a irq thread
 * @irq:	the interrupt number
 *
 * Handle interrupts which are nested into a threaded interrupt
 * handler. The handler function is called inside the calling
 * threads context. May sleep; must not be called from hard irq
 * context.
 */
void handle_nested_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	irqreturn_t action_ret;

	might_sleep();

	raw_spin_lock_irq(&desc->lock);

	kstat_incr_irqs_this_cpu(irq, desc);

	action = desc->action;
	if (unlikely(!action || (desc->istate & IRQS_DISABLED)))
		goto out_unlock;

	irq_compat_set_progress(desc);
	desc->istate |= IRQS_INPROGRESS;
	/* Drop the lock across the thread handler invocation */
	raw_spin_unlock_irq(&desc->lock);

	action_ret = action->thread_fn(action->irq, action->dev_id);
	if (!noirqdebug)
		note_interrupt(irq, desc, action_ret);

	raw_spin_lock_irq(&desc->lock);
	desc->istate &= ~IRQS_INPROGRESS;
	irq_compat_clr_progress(desc);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_nested_irq);

/*
 * If a poll of this interrupt is in progress, wait for it to finish.
 * Returns true when the caller should (re)handle the interrupt,
 * false when it should bail out.
 */
static bool irq_check_poll(struct irq_desc *desc)
{
	if (!(desc->istate & IRQS_POLL_INPROGRESS))
		return false;
	return irq_wait_for_poll(desc);
}

/**
 * handle_simple_irq - Simple and software-decoded IRQs.
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Simple interrupts are either sent from a demultiplexing interrupt
 * handler or come from hardware, where no interrupt hardware control
 * is necessary.
 *
 * Note: The caller is expected to handle the ack, clear, mask and
 * unmask issues if necessary.
 */
void
handle_simple_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	if (unlikely(desc->istate & IRQS_INPROGRESS))
		if (!irq_check_poll(desc))
			goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	if (unlikely(!desc->action || (desc->istate & IRQS_DISABLED)))
		goto out_unlock;

	handle_irq_event(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}

/**
 * handle_level_irq - Level type irq handler
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Level type interrupts are active as long as the hardware line has
 * the active level. This may require to mask the interrupt and unmask
 * it after the associated handler has acknowledged the device, so the
 * interrupt line is back to inactive.
 */
void
handle_level_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);
	/* Mask first: the line stays active until the device is acked */
	mask_ack_irq(desc);

	if (unlikely(desc->istate & IRQS_INPROGRESS))
		if (!irq_check_poll(desc))
			goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	/*
	 * If it is disabled or no action is available,
	 * keep it masked and get out of here
	 */
	if (unlikely(!desc->action || (desc->istate & IRQS_DISABLED)))
		goto out_unlock;

	handle_irq_event(desc);

	/* Unmask again unless disabled meanwhile or oneshot-threaded */
	if (!(desc->istate & (IRQS_DISABLED | IRQS_ONESHOT)))
		unmask_irq(desc);
out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_level_irq);

/**
 * handle_fasteoi_irq - irq handler for transparent controllers
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Only a single callback will be issued to the chip: an ->eoi()
 * call when the interrupt has been serviced. This enables support
 * for modern forms of interrupt handlers, which handle the flow
 * details in hardware, transparently.
 */
void
handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	if (unlikely(desc->istate & IRQS_INPROGRESS))
		if (!irq_check_poll(desc))
			goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	/*
	 * If it is disabled or no action is available,
	 * then mask it and get out of here:
	 */
	if (unlikely(!desc->action || (desc->istate & IRQS_DISABLED))) {
		irq_compat_set_pending(desc);
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}
	handle_irq_event(desc);
out:
	/* EOI is issued on every path, even when the irq was not handled */
	desc->irq_data.chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}

/**
 * handle_edge_irq - edge type IRQ handler
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Interrupt occurs on the falling and/or rising edge of a hardware
 * signal. The occurrence is latched into the irq controller hardware
 * and must be acked in order to be re-enabled. After the ack another
 * interrupt can happen on the same source even before the first one
 * is handled by the associated event handler. If this happens it
 * might be necessary to disable (mask) the interrupt depending on the
 * controller hardware. This requires to re-enable the interrupt inside
 * of the loop which handles the interrupts which have arrived while
 * the handler was running. If all pending interrupts are handled, the
 * loop is left.
 */
void
handle_edge_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	/*
	 * If we're currently running this IRQ, or its disabled,
	 * we shouldn't process the IRQ. Mark it pending, handle
	 * the necessary masking and go out
	 */
	if (unlikely((desc->istate & (IRQS_DISABLED | IRQS_INPROGRESS) ||
		      !desc->action))) {
		if (!irq_check_poll(desc)) {
			irq_compat_set_pending(desc);
			desc->istate |= IRQS_PENDING;
			mask_ack_irq(desc);
			goto out_unlock;
		}
	}
	kstat_incr_irqs_this_cpu(irq, desc);

	/* Start handling the irq: ack so the next edge can be latched */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	do {
		if (unlikely(!desc->action)) {
			mask_irq(desc);
			goto out_unlock;
		}

		/*
		 * When another irq arrived while we were handling
		 * one, we could have masked the irq.
		 * Re-enable it, if it was not disabled in the meantime.
		 */
		if (unlikely(desc->istate & IRQS_PENDING)) {
			if (!(desc->istate & IRQS_DISABLED) &&
			    (desc->istate & IRQS_MASKED))
				unmask_irq(desc);
		}

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !(desc->istate & IRQS_DISABLED));

out_unlock:
	raw_spin_unlock(&desc->lock);
}

/**
 * handle_percpu_irq - Per CPU local irq handler
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements
 */
void
handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	kstat_incr_irqs_this_cpu(irq, desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	handle_irq_event_percpu(desc, desc->action);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}

/*
 * __set_irq_handler - install a flow handler for an irq
 * @irq:	the interrupt number
 * @handle:	flow handler; NULL or handle_bad_irq uninstalls
 * @is_chained:	nonzero for a chained (demux) handler, which is
 *		marked noprobe/norequest and started immediately
 * @name:	name shown for this irq
 */
void
__set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
		  const char *name)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	if (!desc) {
		printk(KERN_ERR
		       "Trying to install type control for IRQ%d\n", irq);
		return;
	}

	if (!handle) {
		handle = handle_bad_irq;
	} else {
		if (WARN_ON(desc->irq_data.chip == &no_irq_chip))
			return;
	}

	chip_bus_lock(desc);
	raw_spin_lock_irqsave(&desc->lock, flags);

	/* Uninstall? */
	if (handle == handle_bad_irq) {
		if (desc->irq_data.chip != &no_irq_chip)
			mask_ack_irq(desc);
		irq_compat_set_disabled(desc);
		desc->istate |= IRQS_DISABLED;
		desc->depth = 1;
	}
	desc->handle_irq = handle;
	desc->name = name;

	if (handle != handle_bad_irq && is_chained) {
		irq_settings_set_noprobe(desc);
		irq_settings_set_norequest(desc);
		irq_startup(desc);
	}
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	chip_bus_sync_unlock(desc);
}
EXPORT_SYMBOL_GPL(__set_irq_handler);

/* Convenience wrapper: install chip and flow handler in one go */
void
set_irq_chip_and_handler(unsigned int irq, struct irq_chip *chip,
			 irq_flow_handler_t handle)
{
	irq_set_chip(irq, chip);
	__set_irq_handler(irq, handle, 0, NULL);
}

/* Same as set_irq_chip_and_handler(), with an irq name attached */
void
set_irq_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
			      irq_flow_handler_t handle, const char *name)
{
	irq_set_chip(irq, chip);
	__set_irq_handler(irq, handle, 0, name);
}

/*
 * irq_modify_status - clear/set irq settings and mirror them into
 * the irq_data state bits. @clr bits are cleared first, then @set
 * bits are applied.
 */
void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	if (!desc)
		return;

	raw_spin_lock_irqsave(&desc->lock, flags);

	irq_settings_clr_and_set(desc, clr, set);

	/*
	 * Rebuild the irqd state bits from the settings. NOTE(review):
	 * IRQD_LEVEL and IRQD_MOVE_PCNTXT are cleared here but IRQD_LEVEL
	 * is only re-established via the trigger mask below — confirm
	 * that is intentional for this tree.
	 */
	irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
		   IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
	if (irq_settings_has_no_balance_set(desc))
		irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
	if (irq_settings_is_per_cpu(desc))
		irqd_set(&desc->irq_data, IRQD_PER_CPU);
	if (irq_settings_can_move_pcntxt(desc))
		irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);

	irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc));

	raw_spin_unlock_irqrestore(&desc->lock, flags);
}