1 /* 2 * linux/kernel/irq/chip.c 3 * 4 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar 5 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King 6 * 7 * This file contains the core interrupt handling code, for irq-chip 8 * based architectures. 9 * 10 * Detailed information is available in Documentation/DocBook/genericirq 11 */ 12 13 #include <linux/irq.h> 14 #include <linux/msi.h> 15 #include <linux/module.h> 16 #include <linux/interrupt.h> 17 #include <linux/kernel_stat.h> 18 19 #include "internals.h" 20 21 /** 22 * irq_set_chip - set the irq chip for an irq 23 * @irq: irq number 24 * @chip: pointer to irq chip description structure 25 */ 26 int irq_set_chip(unsigned int irq, struct irq_chip *chip) 27 { 28 unsigned long flags; 29 struct irq_desc *desc = irq_get_desc_lock(irq, &flags); 30 31 if (!desc) 32 return -EINVAL; 33 34 if (!chip) 35 chip = &no_irq_chip; 36 37 irq_chip_set_defaults(chip); 38 desc->irq_data.chip = chip; 39 irq_put_desc_unlock(desc, flags); 40 return 0; 41 } 42 EXPORT_SYMBOL(irq_set_chip); 43 44 /** 45 * irq_set_type - set the irq trigger type for an irq 46 * @irq: irq number 47 * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h 48 */ 49 int irq_set_irq_type(unsigned int irq, unsigned int type) 50 { 51 unsigned long flags; 52 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags); 53 int ret = 0; 54 55 if (!desc) 56 return -EINVAL; 57 58 type &= IRQ_TYPE_SENSE_MASK; 59 if (type != IRQ_TYPE_NONE) 60 ret = __irq_set_trigger(desc, irq, type); 61 irq_put_desc_busunlock(desc, flags); 62 return ret; 63 } 64 EXPORT_SYMBOL(irq_set_irq_type); 65 66 /** 67 * irq_set_handler_data - set irq handler data for an irq 68 * @irq: Interrupt number 69 * @data: Pointer to interrupt specific data 70 * 71 * Set the hardware irq controller data for an irq 72 */ 73 int irq_set_handler_data(unsigned int irq, void *data) 74 { 75 unsigned long flags; 76 struct irq_desc *desc = irq_get_desc_lock(irq, &flags); 77 78 if (!desc) 79 return 
-EINVAL; 80 desc->irq_data.handler_data = data; 81 irq_put_desc_unlock(desc, flags); 82 return 0; 83 } 84 EXPORT_SYMBOL(irq_set_handler_data); 85 86 /** 87 * irq_set_msi_desc - set MSI descriptor data for an irq 88 * @irq: Interrupt number 89 * @entry: Pointer to MSI descriptor data 90 * 91 * Set the MSI descriptor entry for an irq 92 */ 93 int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry) 94 { 95 unsigned long flags; 96 struct irq_desc *desc = irq_get_desc_lock(irq, &flags); 97 98 if (!desc) 99 return -EINVAL; 100 desc->irq_data.msi_desc = entry; 101 if (entry) 102 entry->irq = irq; 103 irq_put_desc_unlock(desc, flags); 104 return 0; 105 } 106 107 /** 108 * irq_set_chip_data - set irq chip data for an irq 109 * @irq: Interrupt number 110 * @data: Pointer to chip specific data 111 * 112 * Set the hardware irq chip data for an irq 113 */ 114 int irq_set_chip_data(unsigned int irq, void *data) 115 { 116 unsigned long flags; 117 struct irq_desc *desc = irq_get_desc_lock(irq, &flags); 118 119 if (!desc) 120 return -EINVAL; 121 desc->irq_data.chip_data = data; 122 irq_put_desc_unlock(desc, flags); 123 return 0; 124 } 125 EXPORT_SYMBOL(irq_set_chip_data); 126 127 struct irq_data *irq_get_irq_data(unsigned int irq) 128 { 129 struct irq_desc *desc = irq_to_desc(irq); 130 131 return desc ? 
&desc->irq_data : NULL; 132 } 133 EXPORT_SYMBOL_GPL(irq_get_irq_data); 134 135 static void irq_state_clr_disabled(struct irq_desc *desc) 136 { 137 desc->istate &= ~IRQS_DISABLED; 138 irq_compat_clr_disabled(desc); 139 } 140 141 static void irq_state_set_disabled(struct irq_desc *desc) 142 { 143 desc->istate |= IRQS_DISABLED; 144 irq_compat_set_disabled(desc); 145 } 146 147 static void irq_state_clr_masked(struct irq_desc *desc) 148 { 149 desc->istate &= ~IRQS_MASKED; 150 irq_compat_clr_masked(desc); 151 } 152 153 static void irq_state_set_masked(struct irq_desc *desc) 154 { 155 desc->istate |= IRQS_MASKED; 156 irq_compat_set_masked(desc); 157 } 158 159 int irq_startup(struct irq_desc *desc) 160 { 161 irq_state_clr_disabled(desc); 162 desc->depth = 0; 163 164 if (desc->irq_data.chip->irq_startup) { 165 int ret = desc->irq_data.chip->irq_startup(&desc->irq_data); 166 irq_state_clr_masked(desc); 167 return ret; 168 } 169 170 irq_enable(desc); 171 return 0; 172 } 173 174 void irq_shutdown(struct irq_desc *desc) 175 { 176 irq_state_set_disabled(desc); 177 desc->depth = 1; 178 if (desc->irq_data.chip->irq_shutdown) 179 desc->irq_data.chip->irq_shutdown(&desc->irq_data); 180 if (desc->irq_data.chip->irq_disable) 181 desc->irq_data.chip->irq_disable(&desc->irq_data); 182 else 183 desc->irq_data.chip->irq_mask(&desc->irq_data); 184 irq_state_set_masked(desc); 185 } 186 187 void irq_enable(struct irq_desc *desc) 188 { 189 irq_state_clr_disabled(desc); 190 if (desc->irq_data.chip->irq_enable) 191 desc->irq_data.chip->irq_enable(&desc->irq_data); 192 else 193 desc->irq_data.chip->irq_unmask(&desc->irq_data); 194 irq_state_clr_masked(desc); 195 } 196 197 void irq_disable(struct irq_desc *desc) 198 { 199 irq_state_set_disabled(desc); 200 if (desc->irq_data.chip->irq_disable) { 201 desc->irq_data.chip->irq_disable(&desc->irq_data); 202 } 203 irq_state_set_masked(desc); 204 } 205 206 #ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED 207 /* Temporary migration helpers */ 208 static 
void compat_irq_mask(struct irq_data *data) 209 { 210 data->chip->mask(data->irq); 211 } 212 213 static void compat_irq_unmask(struct irq_data *data) 214 { 215 data->chip->unmask(data->irq); 216 } 217 218 static void compat_irq_ack(struct irq_data *data) 219 { 220 data->chip->ack(data->irq); 221 } 222 223 static void compat_irq_mask_ack(struct irq_data *data) 224 { 225 data->chip->mask_ack(data->irq); 226 } 227 228 static void compat_irq_eoi(struct irq_data *data) 229 { 230 data->chip->eoi(data->irq); 231 } 232 233 static void compat_irq_enable(struct irq_data *data) 234 { 235 data->chip->enable(data->irq); 236 } 237 238 static void compat_irq_disable(struct irq_data *data) 239 { 240 data->chip->disable(data->irq); 241 } 242 243 static void compat_irq_shutdown(struct irq_data *data) 244 { 245 data->chip->shutdown(data->irq); 246 } 247 248 static unsigned int compat_irq_startup(struct irq_data *data) 249 { 250 return data->chip->startup(data->irq); 251 } 252 253 static int compat_irq_set_affinity(struct irq_data *data, 254 const struct cpumask *dest, bool force) 255 { 256 return data->chip->set_affinity(data->irq, dest); 257 } 258 259 static int compat_irq_set_type(struct irq_data *data, unsigned int type) 260 { 261 return data->chip->set_type(data->irq, type); 262 } 263 264 static int compat_irq_set_wake(struct irq_data *data, unsigned int on) 265 { 266 return data->chip->set_wake(data->irq, on); 267 } 268 269 static int compat_irq_retrigger(struct irq_data *data) 270 { 271 return data->chip->retrigger(data->irq); 272 } 273 274 static void compat_bus_lock(struct irq_data *data) 275 { 276 data->chip->bus_lock(data->irq); 277 } 278 279 static void compat_bus_sync_unlock(struct irq_data *data) 280 { 281 data->chip->bus_sync_unlock(data->irq); 282 } 283 #endif 284 285 /* 286 * Fixup enable/disable function pointers 287 */ 288 void irq_chip_set_defaults(struct irq_chip *chip) 289 { 290 #ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED 291 if (chip->enable) 292 
chip->irq_enable = compat_irq_enable; 293 if (chip->disable) 294 chip->irq_disable = compat_irq_disable; 295 if (chip->shutdown) 296 chip->irq_shutdown = compat_irq_shutdown; 297 if (chip->startup) 298 chip->irq_startup = compat_irq_startup; 299 if (!chip->end) 300 chip->end = dummy_irq_chip.end; 301 if (chip->bus_lock) 302 chip->irq_bus_lock = compat_bus_lock; 303 if (chip->bus_sync_unlock) 304 chip->irq_bus_sync_unlock = compat_bus_sync_unlock; 305 if (chip->mask) 306 chip->irq_mask = compat_irq_mask; 307 if (chip->unmask) 308 chip->irq_unmask = compat_irq_unmask; 309 if (chip->ack) 310 chip->irq_ack = compat_irq_ack; 311 if (chip->mask_ack) 312 chip->irq_mask_ack = compat_irq_mask_ack; 313 if (chip->eoi) 314 chip->irq_eoi = compat_irq_eoi; 315 if (chip->set_affinity) 316 chip->irq_set_affinity = compat_irq_set_affinity; 317 if (chip->set_type) 318 chip->irq_set_type = compat_irq_set_type; 319 if (chip->set_wake) 320 chip->irq_set_wake = compat_irq_set_wake; 321 if (chip->retrigger) 322 chip->irq_retrigger = compat_irq_retrigger; 323 #endif 324 } 325 326 static inline void mask_ack_irq(struct irq_desc *desc) 327 { 328 if (desc->irq_data.chip->irq_mask_ack) 329 desc->irq_data.chip->irq_mask_ack(&desc->irq_data); 330 else { 331 desc->irq_data.chip->irq_mask(&desc->irq_data); 332 if (desc->irq_data.chip->irq_ack) 333 desc->irq_data.chip->irq_ack(&desc->irq_data); 334 } 335 irq_state_set_masked(desc); 336 } 337 338 void mask_irq(struct irq_desc *desc) 339 { 340 if (desc->irq_data.chip->irq_mask) { 341 desc->irq_data.chip->irq_mask(&desc->irq_data); 342 irq_state_set_masked(desc); 343 } 344 } 345 346 void unmask_irq(struct irq_desc *desc) 347 { 348 if (desc->irq_data.chip->irq_unmask) { 349 desc->irq_data.chip->irq_unmask(&desc->irq_data); 350 irq_state_clr_masked(desc); 351 } 352 } 353 354 /* 355 * handle_nested_irq - Handle a nested irq from a irq thread 356 * @irq: the interrupt number 357 * 358 * Handle interrupts which are nested into a threaded interrupt 359 * 
handler. The handler function is called inside the calling
 * threads context.
 */
void handle_nested_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	irqreturn_t action_ret;

	/* Runs in task context; the thread_fn may block. */
	might_sleep();

	raw_spin_lock_irq(&desc->lock);

	kstat_incr_irqs_this_cpu(irq, desc);

	action = desc->action;
	if (unlikely(!action || (desc->istate & IRQS_DISABLED)))
		goto out_unlock;

	/*
	 * Flag the irq INPROGRESS and drop the lock while the handler
	 * runs; the flag is cleared under the lock afterwards.
	 */
	irq_compat_set_progress(desc);
	desc->istate |= IRQS_INPROGRESS;
	raw_spin_unlock_irq(&desc->lock);

	action_ret = action->thread_fn(action->irq, action->dev_id);
	if (!noirqdebug)
		note_interrupt(irq, desc, action_ret);

	raw_spin_lock_irq(&desc->lock);
	desc->istate &= ~IRQS_INPROGRESS;
	irq_compat_clr_progress(desc);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_nested_irq);

/*
 * Returns false when no spurious-irq poll is in progress; otherwise
 * waits for the poll to finish (see irq_wait_for_poll()).
 */
static bool irq_check_poll(struct irq_desc *desc)
{
	if (!(desc->istate & IRQS_POLL_INPROGRESS))
		return false;
	return irq_wait_for_poll(desc);
}

/**
 * handle_simple_irq - Simple and software-decoded IRQs.
 * @irq: the interrupt number
 * @desc: the interrupt description structure for this irq
 *
 * Simple interrupts are either sent from a demultiplexing interrupt
 * handler or come from hardware, where no interrupt hardware control
 * is necessary.
 *
 * Note: The caller is expected to handle the ack, clear, mask and
 * unmask issues if necessary.
 */
void
handle_simple_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	/* If already in progress, only proceed when the poller waved us on. */
	if (unlikely(desc->istate & IRQS_INPROGRESS))
		if (!irq_check_poll(desc))
			goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	if (unlikely(!desc->action || (desc->istate & IRQS_DISABLED)))
		goto out_unlock;

	handle_irq_event(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}

/**
 * handle_level_irq - Level type irq handler
 * @irq: the interrupt number
 * @desc: the interrupt description structure for this irq
 *
 * Level type interrupts are active as long as the hardware line has
 * the active level. This may require to mask the interrupt and unmask
 * it after the associated handler has acknowledged the device, so the
 * interrupt line is back to inactive.
 */
void
handle_level_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);
	/* Mask (and ack) first: the line stays active until the device is serviced. */
	mask_ack_irq(desc);

	if (unlikely(desc->istate & IRQS_INPROGRESS))
		if (!irq_check_poll(desc))
			goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	/*
	 * If its disabled or no action available
	 * keep it masked and get out of here
	 */
	if (unlikely(!desc->action || (desc->istate & IRQS_DISABLED)))
		goto out_unlock;

	handle_irq_event(desc);

	/* Unmask again unless disabled meanwhile or ONESHOT defers it. */
	if (!(desc->istate & (IRQS_DISABLED | IRQS_ONESHOT)))
		unmask_irq(desc);
out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_level_irq);

/**
 * handle_fasteoi_irq - irq handler for transparent controllers
 * @irq: the interrupt number
 * @desc: the interrupt description structure for this irq
 *
 * Only a single callback will be issued to the chip: an ->eoi()
 * call when the interrupt has been serviced. This enables support
 * for modern forms of interrupt handlers, which handle the flow
 * details in hardware, transparently.
 */
void
handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	if (unlikely(desc->istate & IRQS_INPROGRESS))
		if (!irq_check_poll(desc))
			goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	/*
	 * If its disabled or no action available
	 * then mask it and get out of here:
	 */
	if (unlikely(!desc->action || (desc->istate & IRQS_DISABLED))) {
		irq_compat_set_pending(desc);
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}
	handle_irq_event(desc);
out:
	/* ->irq_eoi() is issued on every path, handled or not. */
	desc->irq_data.chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}

/**
 * handle_edge_irq - edge type IRQ handler
 * @irq: the interrupt number
 * @desc: the interrupt description structure for this irq
 *
 * Interrupt occurs on the falling and/or rising edge of a hardware
 * signal. The occurrence is latched into the irq controller hardware
 * and must be acked in order to be reenabled. After the ack another
 * interrupt can happen on the same source even before the first one
 * is handled by the associated event handler. If this happens it
 * might be necessary to disable (mask) the interrupt depending on the
 * controller hardware. This requires to reenable the interrupt inside
 * of the loop which handles the interrupts which have arrived while
 * the handler was running. If all pending interrupts are handled, the
 * loop is left.
 */
void
handle_edge_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	/*
	 * If we're currently running this IRQ, or its disabled,
	 * we shouldn't process the IRQ. 
Mark it pending, handle 537 * the necessary masking and go out 538 */ 539 if (unlikely((desc->istate & (IRQS_DISABLED | IRQS_INPROGRESS) || 540 !desc->action))) { 541 if (!irq_check_poll(desc)) { 542 irq_compat_set_pending(desc); 543 desc->istate |= IRQS_PENDING; 544 mask_ack_irq(desc); 545 goto out_unlock; 546 } 547 } 548 kstat_incr_irqs_this_cpu(irq, desc); 549 550 /* Start handling the irq */ 551 desc->irq_data.chip->irq_ack(&desc->irq_data); 552 553 do { 554 if (unlikely(!desc->action)) { 555 mask_irq(desc); 556 goto out_unlock; 557 } 558 559 /* 560 * When another irq arrived while we were handling 561 * one, we could have masked the irq. 562 * Renable it, if it was not disabled in meantime. 563 */ 564 if (unlikely(desc->istate & IRQS_PENDING)) { 565 if (!(desc->istate & IRQS_DISABLED) && 566 (desc->istate & IRQS_MASKED)) 567 unmask_irq(desc); 568 } 569 570 handle_irq_event(desc); 571 572 } while ((desc->istate & IRQS_PENDING) && 573 !(desc->istate & IRQS_DISABLED)); 574 575 out_unlock: 576 raw_spin_unlock(&desc->lock); 577 } 578 579 /** 580 * handle_percpu_irq - Per CPU local irq handler 581 * @irq: the interrupt number 582 * @desc: the interrupt description structure for this irq 583 * 584 * Per CPU interrupts on SMP machines without locking requirements 585 */ 586 void 587 handle_percpu_irq(unsigned int irq, struct irq_desc *desc) 588 { 589 struct irq_chip *chip = irq_desc_get_chip(desc); 590 591 kstat_incr_irqs_this_cpu(irq, desc); 592 593 if (chip->irq_ack) 594 chip->irq_ack(&desc->irq_data); 595 596 handle_irq_event_percpu(desc, desc->action); 597 598 if (chip->irq_eoi) 599 chip->irq_eoi(&desc->irq_data); 600 } 601 602 void 603 __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, 604 const char *name) 605 { 606 unsigned long flags; 607 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags); 608 609 if (!desc) 610 return; 611 612 if (!handle) { 613 handle = handle_bad_irq; 614 } else { 615 if (WARN_ON(desc->irq_data.chip == 
&no_irq_chip)) 616 goto out; 617 } 618 619 /* Uninstall? */ 620 if (handle == handle_bad_irq) { 621 if (desc->irq_data.chip != &no_irq_chip) 622 mask_ack_irq(desc); 623 irq_compat_set_disabled(desc); 624 desc->istate |= IRQS_DISABLED; 625 desc->depth = 1; 626 } 627 desc->handle_irq = handle; 628 desc->name = name; 629 630 if (handle != handle_bad_irq && is_chained) { 631 irq_settings_set_noprobe(desc); 632 irq_settings_set_norequest(desc); 633 irq_startup(desc); 634 } 635 out: 636 irq_put_desc_busunlock(desc, flags); 637 } 638 EXPORT_SYMBOL_GPL(__set_irq_handler); 639 640 void 641 set_irq_chip_and_handler(unsigned int irq, struct irq_chip *chip, 642 irq_flow_handler_t handle) 643 { 644 irq_set_chip(irq, chip); 645 __set_irq_handler(irq, handle, 0, NULL); 646 } 647 648 void 649 set_irq_chip_and_handler_name(unsigned int irq, struct irq_chip *chip, 650 irq_flow_handler_t handle, const char *name) 651 { 652 irq_set_chip(irq, chip); 653 __set_irq_handler(irq, handle, 0, name); 654 } 655 656 void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set) 657 { 658 unsigned long flags; 659 struct irq_desc *desc = irq_get_desc_lock(irq, &flags); 660 661 if (!desc) 662 return; 663 irq_settings_clr_and_set(desc, clr, set); 664 665 irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU | 666 IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT); 667 if (irq_settings_has_no_balance_set(desc)) 668 irqd_set(&desc->irq_data, IRQD_NO_BALANCING); 669 if (irq_settings_is_per_cpu(desc)) 670 irqd_set(&desc->irq_data, IRQD_PER_CPU); 671 if (irq_settings_can_move_pcntxt(desc)) 672 irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT); 673 674 irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc)); 675 676 irq_put_desc_unlock(desc, flags); 677 } 678