/*
 * linux/kernel/irq/chip.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code, for irq-chip
 * based architectures.
 *
 * Detailed information is available in Documentation/core-api/genericirq.rst
 */

#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/irqdomain.h>

#include <trace/events/irq.h>

#include "internals.h"

static irqreturn_t bad_chained_irq(int irq, void *dev_id)
{
	WARN_ONCE(1, "Chained irq %d should not call an action\n", irq);
	return IRQ_NONE;
}

/*
 * Chained handlers should never call an action on their IRQ. This default
 * action will emit a warning if that happens.
 */
struct irqaction chained_action = {
	.handler = bad_chained_irq,
};

/**
 * irq_set_chip - set the irq chip for an irq
 * @irq:	irq number
 * @chip:	pointer to irq chip description structure
 */
int irq_set_chip(unsigned int irq, struct irq_chip *chip)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	if (!chip)
		chip = &no_irq_chip;

	desc->irq_data.chip = chip;
	irq_put_desc_unlock(desc, flags);
	/*
	 * For !CONFIG_SPARSE_IRQ make the irq show up in
	 * allocated_irqs.
	 */
	irq_mark_irq(irq);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip);

/**
 * irq_set_irq_type - set the irq trigger type for an irq
 * @irq:	irq number
 * @type:	IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 */
int irq_set_irq_type(unsigned int irq, unsigned int type)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags,
						     IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	ret = __irq_set_trigger(desc, type);
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_type);

/**
 * irq_set_handler_data - set irq handler data for an irq
 * @irq:	Interrupt number
 * @data:	Pointer to interrupt specific data
 *
 * Set the hardware irq controller data for an irq
 */
int irq_set_handler_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_common_data.handler_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_handler_data);

/**
 * irq_set_msi_desc_off - set MSI descriptor data for an irq at offset
 * @irq_base:	Interrupt number base
 * @irq_offset:	Interrupt number offset
 * @entry:	Pointer to MSI descriptor data
 *
 * Set the MSI descriptor entry for an irq at offset
 */
int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
			 struct msi_desc *entry)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq_base + irq_offset, &flags,
						  IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->irq_common_data.msi_desc = entry;
	if (entry && !irq_offset)
		entry->irq = irq_base;
	irq_put_desc_unlock(desc, flags);
	return 0;
}

/**
 * irq_set_msi_desc - set MSI descriptor data for an irq
 * @irq:	Interrupt number
 * @entry:	Pointer to MSI descriptor data
 *
 * Set the MSI descriptor entry for an irq
 */
int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
{
	return irq_set_msi_desc_off(irq, 0, entry);
}

/**
 * irq_set_chip_data - set irq chip data for an irq
 * @irq:	Interrupt number
 * @data:	Pointer to chip specific data
 *
 * Set the hardware irq chip data for an irq
 */
int irq_set_chip_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_data.chip_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip_data);
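
/*
 * Example (hypothetical driver code, for illustration only): an irqchip
 * driver would typically use the setters above from its irq_domain
 * .map() callback to wire up each virtual irq. The "foo" names below
 * are made up for this sketch.
 *
 *	static int foo_irq_map(struct irq_domain *d, unsigned int virq,
 *			       irq_hw_number_t hwirq)
 *	{
 *		struct foo_chip *foo = d->host_data;
 *
 *		irq_set_chip_data(virq, foo);
 *		irq_set_chip_and_handler(virq, &foo_irq_chip, handle_level_irq);
 *		return 0;
 *	}
 */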

struct irq_data *irq_get_irq_data(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc ? &desc->irq_data : NULL;
}
EXPORT_SYMBOL_GPL(irq_get_irq_data);

static void irq_state_clr_disabled(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_clr_masked(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
}

static void irq_state_clr_started(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_STARTED);
}

static void irq_state_set_started(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_STARTED);
}

enum {
	IRQ_STARTUP_NORMAL,
	IRQ_STARTUP_MANAGED,
	IRQ_STARTUP_ABORT,
};

#ifdef CONFIG_SMP
static int
__irq_startup_managed(struct irq_desc *desc, struct cpumask *aff, bool force)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);

	if (!irqd_affinity_is_managed(d))
		return IRQ_STARTUP_NORMAL;

	irqd_clr_managed_shutdown(d);

	if (cpumask_any_and(aff, cpu_online_mask) >= nr_cpu_ids) {
		/*
		 * Catch code which fiddles with enable_irq() on a managed
		 * and potentially shutdown IRQ. Chained interrupt
		 * installment or irq auto probing should not happen on
		 * managed irqs either.
		 */
		if (WARN_ON_ONCE(force))
			return IRQ_STARTUP_ABORT;
		/*
		 * The interrupt was requested, but there is no online CPU
		 * in its affinity mask. Put it into managed shutdown
		 * state and let the cpu hotplug mechanism start it up once
		 * a CPU in the mask becomes available.
		 */
		return IRQ_STARTUP_ABORT;
	}
	/*
	 * Managed interrupts have reserved resources, so this should not
	 * happen.
	 */
	if (WARN_ON(irq_domain_activate_irq(d, false)))
		return IRQ_STARTUP_ABORT;
	return IRQ_STARTUP_MANAGED;
}
#else
static __always_inline int
__irq_startup_managed(struct irq_desc *desc, struct cpumask *aff, bool force)
{
	return IRQ_STARTUP_NORMAL;
}
#endif

static int __irq_startup(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	int ret = 0;

	/* Warn if this interrupt is not activated but try nevertheless */
	WARN_ON_ONCE(!irqd_is_activated(d));

	if (d->chip->irq_startup) {
		ret = d->chip->irq_startup(d);
		irq_state_clr_disabled(desc);
		irq_state_clr_masked(desc);
	} else {
		irq_enable(desc);
	}
	irq_state_set_started(desc);
	return ret;
}

int irq_startup(struct irq_desc *desc, bool resend, bool force)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	struct cpumask *aff = irq_data_get_affinity_mask(d);
	int ret = 0;

	desc->depth = 0;

	if (irqd_is_started(d)) {
		irq_enable(desc);
	} else {
		switch (__irq_startup_managed(desc, aff, force)) {
		case IRQ_STARTUP_NORMAL:
			ret = __irq_startup(desc);
			irq_setup_affinity(desc);
			break;
		case IRQ_STARTUP_MANAGED:
			irq_do_set_affinity(d, aff, false);
			ret = __irq_startup(desc);
			break;
		case IRQ_STARTUP_ABORT:
			irqd_set_managed_shutdown(d);
			return 0;
		}
	}
	if (resend)
		check_irq_resend(desc);

	return ret;
}

int irq_activate(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);

	if (!irqd_affinity_is_managed(d))
		return irq_domain_activate_irq(d, false);
	return 0;
}

int irq_activate_and_startup(struct irq_desc *desc, bool resend)
{
	if (WARN_ON(irq_activate(desc)))
		return 0;
	return irq_startup(desc, resend, IRQ_START_FORCE);
}

static void __irq_disable(struct irq_desc *desc, bool mask);

void irq_shutdown(struct irq_desc *desc)
{
	if (irqd_is_started(&desc->irq_data)) {
		desc->depth = 1;
		if (desc->irq_data.chip->irq_shutdown) {
			desc->irq_data.chip->irq_shutdown(&desc->irq_data);
			irq_state_set_disabled(desc);
			irq_state_set_masked(desc);
		} else {
			__irq_disable(desc, true);
		}
		irq_state_clr_started(desc);
	}
	/*
	 * This must be called even if the interrupt was never started up,
	 * because the activation can happen before the interrupt is
	 * available for request/startup. It has its own state tracking so
	 * it's safe to call it unconditionally.
	 */
	irq_domain_deactivate_irq(&desc->irq_data);
}

void irq_enable(struct irq_desc *desc)
{
	if (!irqd_irq_disabled(&desc->irq_data)) {
		unmask_irq(desc);
	} else {
		irq_state_clr_disabled(desc);
		if (desc->irq_data.chip->irq_enable) {
			desc->irq_data.chip->irq_enable(&desc->irq_data);
			irq_state_clr_masked(desc);
		} else {
			unmask_irq(desc);
		}
	}
}

static void __irq_disable(struct irq_desc *desc, bool mask)
{
	if (irqd_irq_disabled(&desc->irq_data)) {
		if (mask)
			mask_irq(desc);
	} else {
		irq_state_set_disabled(desc);
		if (desc->irq_data.chip->irq_disable) {
			desc->irq_data.chip->irq_disable(&desc->irq_data);
			irq_state_set_masked(desc);
		} else if (mask) {
			mask_irq(desc);
		}
	}
}

/**
 * irq_disable - Mark interrupt disabled
 * @desc:	irq descriptor which should be disabled
 *
 * If the chip does not implement the irq_disable callback, we
 * use a lazy disable approach. That means we mark the interrupt
 * disabled, but leave the hardware unmasked. That's an
 * optimization because we avoid the hardware access for the
 * common case where no interrupt happens after we marked it
 * disabled. If an interrupt happens, then the interrupt flow
 * handler masks the line at the hardware level and marks it
 * pending.
 *
 * If the interrupt chip does not implement the irq_disable callback,
 * a driver can disable the lazy approach for a particular irq line by
 * calling 'irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY)'. This can
 * be used for devices which cannot disable the interrupt at the
 * device level under certain circumstances and have to use
 * disable_irq[_nosync] instead.
 */
void irq_disable(struct irq_desc *desc)
{
	__irq_disable(desc, irq_settings_disable_unlazy(desc));
}
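
/*
 * Example (hypothetical driver code, for illustration only): a device
 * which cannot gate its interrupt at the device level can opt out of
 * the lazy disable optimization described above, so that disable_irq()
 * masks the line in hardware immediately:
 *
 *	irq_set_status_flags(foo->irq, IRQ_DISABLE_UNLAZY);
 *	disable_irq(foo->irq);
 */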

void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	cpumask_set_cpu(cpu, desc->percpu_enabled);
}

void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	cpumask_clear_cpu(cpu, desc->percpu_enabled);
}

static inline void mask_ack_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask_ack) {
		desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
		irq_state_set_masked(desc);
	} else {
		mask_irq(desc);
		if (desc->irq_data.chip->irq_ack)
			desc->irq_data.chip->irq_ack(&desc->irq_data);
	}
}

void mask_irq(struct irq_desc *desc)
{
	if (irqd_irq_masked(&desc->irq_data))
		return;

	if (desc->irq_data.chip->irq_mask) {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		irq_state_set_masked(desc);
	}
}

void unmask_irq(struct irq_desc *desc)
{
	if (!irqd_irq_masked(&desc->irq_data))
		return;

	if (desc->irq_data.chip->irq_unmask) {
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
		irq_state_clr_masked(desc);
	}
}

void unmask_threaded_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	if (chip->flags & IRQCHIP_EOI_THREADED)
		chip->irq_eoi(&desc->irq_data);

	unmask_irq(desc);
}

/*
 * handle_nested_irq - Handle a nested irq from an irq thread
 * @irq:	the interrupt number
 *
 * Handle interrupts which are nested into a threaded interrupt
 * handler. The handler function is called inside the calling
 * thread's context.
 */
void handle_nested_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	irqreturn_t action_ret;

	might_sleep();

	raw_spin_lock_irq(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	action = desc->action;
	if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);
	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock_irq(&desc->lock);

	action_ret = IRQ_NONE;
	for_each_action_of_desc(desc, action)
		action_ret |= action->thread_fn(action->irq, action->dev_id);

	if (!noirqdebug)
		note_interrupt(desc, action_ret);

	raw_spin_lock_irq(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_nested_irq);

static bool irq_check_poll(struct irq_desc *desc)
{
	if (!(desc->istate & IRQS_POLL_INPROGRESS))
		return false;
	return irq_wait_for_poll(desc);
}

static bool irq_may_run(struct irq_desc *desc)
{
	unsigned int mask = IRQD_IRQ_INPROGRESS | IRQD_WAKEUP_ARMED;

	/*
	 * If the interrupt is not in progress and is not an armed
	 * wakeup interrupt, proceed.
	 */
	if (!irqd_has_set(&desc->irq_data, mask))
		return true;

	/*
	 * If the interrupt is an armed wakeup source, mark it pending
	 * and suspended, disable it and notify the pm core about the
	 * event.
	 */
	if (irq_pm_check_wakeup(desc))
		return false;

	/*
	 * Handle a potential concurrent poll on a different core.
	 */
	return irq_check_poll(desc);
}

/**
 * handle_simple_irq - Simple and software-decoded IRQs.
 * @desc:	the interrupt description structure for this irq
 *
 * Simple interrupts are either sent from a demultiplexing interrupt
 * handler or come from hardware, where no interrupt hardware control
 * is necessary.
 *
 * Note: The caller is expected to handle the ack, clear, mask and
 * unmask issues if necessary.
 */
void handle_simple_irq(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);
	handle_irq_event(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_simple_irq);
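
/*
 * Example (hypothetical driver code, for illustration only): a
 * demultiplexing handler as mentioned in the handle_simple_irq()
 * comment above. A "foo" controller reads its status register and
 * invokes the per-child flow handler via generic_handle_irq(). The
 * register layout and the foo_chip structure are made up for this
 * sketch.
 *
 *	static void foo_demux_handler(struct irq_desc *desc)
 *	{
 *		struct foo_chip *foo = irq_desc_get_handler_data(desc);
 *		struct irq_chip *chip = irq_desc_get_chip(desc);
 *		unsigned long pending;
 *		int bit;
 *
 *		chained_irq_enter(chip, desc);
 *		pending = readl(foo->regs + FOO_STATUS);
 *		for_each_set_bit(bit, &pending, 32)
 *			generic_handle_irq(irq_find_mapping(foo->domain, bit));
 *		chained_irq_exit(chip, desc);
 *	}
 */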

/**
 * handle_untracked_irq - Simple and software-decoded IRQs.
 * @desc:	the interrupt description structure for this irq
 *
 * Untracked interrupts are sent from a demultiplexing interrupt
 * handler when the demultiplexer does not know which device in its
 * multiplexed irq domain generated the interrupt. IRQs handled
 * through here are not subjected to stats tracking, randomness, or
 * spurious interrupt detection.
 *
 * Note: Like handle_simple_irq, the caller is expected to handle
 * the ack, clear, mask and unmask issues if necessary.
 */
void handle_untracked_irq(struct irq_desc *desc)
{
	unsigned int flags = 0;

	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	desc->istate &= ~IRQS_PENDING;
	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock(&desc->lock);

	__handle_irq_event_percpu(desc, &flags);

	raw_spin_lock(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_untracked_irq);

/*
 * Called unconditionally from handle_level_irq() and only for oneshot
 * interrupts from handle_fasteoi_irq()
 */
static void cond_unmask_irq(struct irq_desc *desc)
{
	/*
	 * We need to unmask in the following cases:
	 * - Standard level irq (IRQF_ONESHOT is not set)
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
		unmask_irq(desc);
}

/**
 * handle_level_irq - Level type irq handler
 * @desc:	the interrupt description structure for this irq
 *
 * Level type interrupts are active as long as the hardware line has
 * the active level. This may require masking the interrupt and
 * unmasking it after the associated handler has acknowledged the
 * device, so the interrupt line is back to inactive.
 */
void handle_level_irq(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);
	mask_ack_irq(desc);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	/*
	 * If it's disabled or no action is available,
	 * keep it masked and get out of here.
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);
	handle_irq_event(desc);

	cond_unmask_irq(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_level_irq);
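
/*
 * Example (hypothetical driver code, for illustration only): the
 * oneshot case handled by cond_unmask_irq() above arises from threaded
 * requests such as the following; the line stays masked until
 * foo_thread_fn() returns.
 *
 *	err = request_threaded_irq(irq, NULL, foo_thread_fn,
 *				   IRQF_ONESHOT | IRQF_TRIGGER_HIGH,
 *				   "foo", foo);
 */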

#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
static inline void preflow_handler(struct irq_desc *desc)
{
	if (desc->preflow_handler)
		desc->preflow_handler(&desc->irq_data);
}
#else
static inline void preflow_handler(struct irq_desc *desc) { }
#endif

static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip)
{
	if (!(desc->istate & IRQS_ONESHOT)) {
		chip->irq_eoi(&desc->irq_data);
		return;
	}
	/*
	 * We need to unmask in the following cases:
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) {
		chip->irq_eoi(&desc->irq_data);
		unmask_irq(desc);
	} else if (!(chip->flags & IRQCHIP_EOI_THREADED)) {
		chip->irq_eoi(&desc->irq_data);
	}
}

/**
 * handle_fasteoi_irq - irq handler for transparent controllers
 * @desc:	the interrupt description structure for this irq
 *
 * Only a single callback will be issued to the chip: an ->eoi()
 * call when the interrupt has been serviced. This enables support
 * for modern forms of interrupt handlers, which handle the flow
 * details in hardware, transparently.
 */
void handle_fasteoi_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	/*
	 * If it's disabled or no action is available,
	 * then mask it and get out of here:
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}

	kstat_incr_irqs_this_cpu(desc);
	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	preflow_handler(desc);
	handle_irq_event(desc);

	cond_unmask_eoi_irq(desc, chip);

	raw_spin_unlock(&desc->lock);
	return;
out:
	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
		chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_irq);

/**
 * handle_edge_irq - edge type IRQ handler
 * @desc:	the interrupt description structure for this irq
 *
 * The interrupt occurs on the falling and/or rising edge of a hardware
 * signal. The occurrence is latched into the irq controller hardware
 * and must be acked in order to be re-enabled. After the ack another
 * interrupt can happen on the same source even before the first one
 * is handled by the associated event handler. If this happens it
 * might be necessary to disable (mask) the interrupt depending on the
 * controller hardware. This requires re-enabling the interrupt inside
 * the loop which handles the interrupts which have arrived while
 * the handler was running. If all pending interrupts are handled, the
 * loop is left.
 */
void handle_edge_irq(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (!irq_may_run(desc)) {
		desc->istate |= IRQS_PENDING;
		mask_ack_irq(desc);
		goto out_unlock;
	}

	/*
	 * If it's disabled or no action is available, then mask it and
	 * get out of here.
	 */
	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
		desc->istate |= IRQS_PENDING;
		mask_ack_irq(desc);
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);

	/* Start handling the irq */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	do {
		if (unlikely(!desc->action)) {
			mask_irq(desc);
			goto out_unlock;
		}

		/*
		 * When another irq arrived while we were handling
		 * one, we could have masked the irq.
		 * Re-enable it, if it was not disabled in the meantime.
		 */
		if (unlikely(desc->istate & IRQS_PENDING)) {
			if (!irqd_irq_disabled(&desc->irq_data) &&
			    irqd_irq_masked(&desc->irq_data))
				unmask_irq(desc);
		}

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL(handle_edge_irq);

#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
/**
 * handle_edge_eoi_irq - edge eoi type IRQ handler
 * @desc:	the interrupt description structure for this irq
 *
 * Similar to handle_edge_irq() above, but using eoi and without the
 * mask/unmask logic.
 */
void handle_edge_eoi_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (!irq_may_run(desc)) {
		desc->istate |= IRQS_PENDING;
		goto out_eoi;
	}

	/*
	 * If it's disabled or no action is available, then mask it and
	 * get out of here.
	 */
	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
		desc->istate |= IRQS_PENDING;
		goto out_eoi;
	}

	kstat_incr_irqs_this_cpu(desc);

	do {
		if (unlikely(!desc->action))
			goto out_eoi;

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_eoi:
	chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
#endif

/**
 * handle_percpu_irq - Per CPU local irq handler
 * @desc:	the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements
 */
void handle_percpu_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	kstat_incr_irqs_this_cpu(desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	handle_irq_event_percpu(desc);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}

/**
 * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
 * @desc:	the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements. Same as
 * handle_percpu_irq() above but with the following extras:
 *
 * action->percpu_dev_id is a pointer to percpu variables which
 * contain the real device id for the cpu on which this handler is
 * called
 */
void handle_percpu_devid_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irqaction *action = desc->action;
	unsigned int irq = irq_desc_get_irq(desc);
	irqreturn_t res;

	kstat_incr_irqs_this_cpu(desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	if (likely(action)) {
		trace_irq_handler_entry(irq, action);
		res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
		trace_irq_handler_exit(irq, action, res);
	} else {
		unsigned int cpu = smp_processor_id();
		bool enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);

		if (enabled)
			irq_percpu_disable(desc, cpu);

		pr_err_once("Spurious%s percpu IRQ%u on CPU%u\n",
			    enabled ? " and unmasked" : "", irq, cpu);
	}

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}

static void
__irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
		     int is_chained, const char *name)
{
	if (!handle) {
		handle = handle_bad_irq;
	} else {
		struct irq_data *irq_data = &desc->irq_data;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		/*
		 * With hierarchical domains we might run into a
		 * situation where the outermost chip is not yet set
		 * up, but the inner chips are there. Instead of
		 * bailing we install the handler, but obviously we
		 * cannot enable/startup the interrupt at this point.
		 */
		while (irq_data) {
			if (irq_data->chip != &no_irq_chip)
				break;
			/*
			 * Bail out if the outer chip is not set up
			 * and the interrupt is supposed to be started
			 * right away.
			 */
			if (WARN_ON(is_chained))
				return;
			/* Try the parent */
			irq_data = irq_data->parent_data;
		}
#endif
		if (WARN_ON(!irq_data || irq_data->chip == &no_irq_chip))
			return;
	}

	/* Uninstall? */
	if (handle == handle_bad_irq) {
		if (desc->irq_data.chip != &no_irq_chip)
			mask_ack_irq(desc);
		irq_state_set_disabled(desc);
		if (is_chained)
			desc->action = NULL;
		desc->depth = 1;
	}
	desc->handle_irq = handle;
	desc->name = name;

	if (handle != handle_bad_irq && is_chained) {
		unsigned int type = irqd_get_trigger_type(&desc->irq_data);

		/*
		 * We're about to start this interrupt immediately,
		 * hence the need to set the trigger configuration.
		 * But the .set_type callback may have overridden the
		 * flow handler, ignoring that we're dealing with a
		 * chained interrupt. Reset it immediately because we
		 * do know better.
		 */
		if (type != IRQ_TYPE_NONE) {
			__irq_set_trigger(desc, type);
			desc->handle_irq = handle;
		}

		irq_settings_set_noprobe(desc);
		irq_settings_set_norequest(desc);
		irq_settings_set_nothread(desc);
		desc->action = &chained_action;
		irq_activate_and_startup(desc, IRQ_RESEND);
	}
}

void
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
		  const char *name)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);

	if (!desc)
		return;

	__irq_do_set_handler(desc, handle, is_chained, name);
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(__irq_set_handler);

void
irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle,
				 void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);

	if (!desc)
		return;

	desc->irq_common_data.handler_data = data;
	__irq_do_set_handler(desc, handle, 1, NULL);

	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_set_chained_handler_and_data);

void
irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
			      irq_flow_handler_t handle, const char *name)
{
	irq_set_chip(irq, chip);
	__irq_set_handler(irq, handle, 0, name);
}
EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name);
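
/*
 * Example (hypothetical driver code, for illustration only): installing
 * the demultiplexing handler sketched near handle_simple_irq() as a
 * chained handler on the parent line:
 *
 *	irq_set_chained_handler_and_data(parent_irq, foo_demux_handler, foo);
 *
 * The child interrupts of such a controller are then set up with
 * irq_set_chip_and_handler() plus irq_set_chip_data(), as sketched
 * earlier in this file.
 */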

void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
{
	unsigned long flags, trigger, tmp;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return;

	/*
	 * Warn when a driver sets the no autoenable flag on an already
	 * active interrupt.
	 */
	WARN_ON_ONCE(!desc->depth && (set & _IRQ_NOAUTOEN));

	irq_settings_clr_and_set(desc, clr, set);

	trigger = irqd_get_trigger_type(&desc->irq_data);

	irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
		   IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
	if (irq_settings_has_no_balance_set(desc))
		irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
	if (irq_settings_is_per_cpu(desc))
		irqd_set(&desc->irq_data, IRQD_PER_CPU);
	if (irq_settings_can_move_pcntxt(desc))
		irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
	if (irq_settings_is_level(desc))
		irqd_set(&desc->irq_data, IRQD_LEVEL);

	tmp = irq_settings_get_trigger_mask(desc);
	if (tmp != IRQ_TYPE_NONE)
		trigger = tmp;

	irqd_set(&desc->irq_data, trigger);

	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_modify_status);

/**
 * irq_cpu_online - Invoke all irq_cpu_online functions.
 *
 * Iterate through all irqs and invoke the chip.irq_cpu_online()
 * for each.
 */
void irq_cpu_online(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_online &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_online(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}

/**
 * irq_cpu_offline - Invoke all irq_cpu_offline functions.
 *
 * Iterate through all irqs and invoke the chip.irq_cpu_offline()
 * for each.
 */
void irq_cpu_offline(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_offline &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_offline(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}

#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY

#ifdef CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS
/**
 * handle_fasteoi_ack_irq - irq handler for edge hierarchy
 *	stacked on transparent controllers
 *
 * @desc:	the interrupt description structure for this irq
 *
 * Like handle_fasteoi_irq(), but for use with hierarchy where
 * the irq_chip also needs to have its ->irq_ack() function
 * called.
 */
void handle_fasteoi_ack_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	/*
	 * If it's disabled or no action is available,
	 * then mask it and get out of here:
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}

	kstat_incr_irqs_this_cpu(desc);
	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	/* Start handling the irq */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	preflow_handler(desc);
	handle_irq_event(desc);

	cond_unmask_eoi_irq(desc, chip);

	raw_spin_unlock(&desc->lock);
	return;
out:
	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
		chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_ack_irq);

/**
 * handle_fasteoi_mask_irq - irq handler for level hierarchy
 *	stacked on transparent controllers
 *
 * @desc:	the interrupt description structure for this irq
 *
 * Like handle_fasteoi_irq(), but for use with hierarchy where
 * the irq_chip also needs to have its ->irq_mask_ack() function
 * called.
 */
void handle_fasteoi_mask_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	raw_spin_lock(&desc->lock);
	mask_ack_irq(desc);

	if (!irq_may_run(desc))
		goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	/*
	 * If it's disabled or no action is available,
	 * then mask it and get out of here:
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}

	kstat_incr_irqs_this_cpu(desc);
	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	preflow_handler(desc);
	handle_irq_event(desc);

	cond_unmask_eoi_irq(desc, chip);

	raw_spin_unlock(&desc->lock);
	return;
out:
	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
		chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_mask_irq);

#endif /* CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS */

/**
 * irq_chip_enable_parent - Enable the parent interrupt (defaults to unmask if
 * NULL)
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_enable_parent(struct irq_data *data)
{
	data = data->parent_data;
	if (data->chip->irq_enable)
		data->chip->irq_enable(data);
	else
		data->chip->irq_unmask(data);
}
EXPORT_SYMBOL_GPL(irq_chip_enable_parent);

/**
 * irq_chip_disable_parent - Disable the parent interrupt (defaults to mask if
 * NULL)
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_disable_parent(struct irq_data *data)
{
	data = data->parent_data;
	if (data->chip->irq_disable)
		data->chip->irq_disable(data);
	else
		data->chip->irq_mask(data);
}
EXPORT_SYMBOL_GPL(irq_chip_disable_parent);

/**
 * irq_chip_ack_parent - Acknowledge the parent interrupt
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_ack_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_ack(data);
}
EXPORT_SYMBOL_GPL(irq_chip_ack_parent);

/**
 * irq_chip_mask_parent - Mask the parent interrupt
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_mask_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_mask(data);
}
EXPORT_SYMBOL_GPL(irq_chip_mask_parent);

/**
 * irq_chip_unmask_parent - Unmask the parent interrupt
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_unmask_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_unmask(data);
}
EXPORT_SYMBOL_GPL(irq_chip_unmask_parent);

/**
 * irq_chip_eoi_parent - Invoke EOI on the parent interrupt
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_eoi_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_eoi(data);
}
EXPORT_SYMBOL_GPL(irq_chip_eoi_parent);
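
/*
 * Example (hypothetical driver code, for illustration only): a chip
 * stacked on a parent domain which does not modify the flow itself can
 * simply forward these operations, e.g. an MSI-style chip:
 *
 *	static struct irq_chip foo_msi_chip = {
 *		.name			= "foo-msi",
 *		.irq_mask		= irq_chip_mask_parent,
 *		.irq_unmask		= irq_chip_unmask_parent,
 *		.irq_eoi		= irq_chip_eoi_parent,
 *		.irq_set_affinity	= irq_chip_set_affinity_parent,
 *	};
 */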

/**
 * irq_chip_set_affinity_parent - Set affinity on the parent interrupt
 * @data:	Pointer to interrupt specific data
 * @dest:	The affinity mask to set
 * @force:	Flag to enforce setting (disable online checks)
 *
 * Conditional, as the underlying parent chip might not implement it.
 */
int irq_chip_set_affinity_parent(struct irq_data *data,
				 const struct cpumask *dest, bool force)
{
	data = data->parent_data;
	if (data->chip->irq_set_affinity)
		return data->chip->irq_set_affinity(data, dest, force);

	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(irq_chip_set_affinity_parent);

/**
 * irq_chip_set_type_parent - Set IRQ type on the parent interrupt
 * @data:	Pointer to interrupt specific data
 * @type:	IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 *
 * Conditional, as the underlying parent chip might not implement it.
 */
int irq_chip_set_type_parent(struct irq_data *data, unsigned int type)
{
	data = data->parent_data;

	if (data->chip->irq_set_type)
		return data->chip->irq_set_type(data, type);

	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(irq_chip_set_type_parent);

/**
 * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware
 * @data:	Pointer to interrupt specific data
 *
 * Iterate through the domain hierarchy of the interrupt and check
 * whether a hw retrigger function exists. If yes, invoke it.
 */
int irq_chip_retrigger_hierarchy(struct irq_data *data)
{
	for (data = data->parent_data; data; data = data->parent_data)
		if (data->chip && data->chip->irq_retrigger)
			return data->chip->irq_retrigger(data);

	return 0;
}

/**
 * irq_chip_set_vcpu_affinity_parent - Set vcpu affinity on the parent interrupt
 * @data:	Pointer to interrupt specific data
 * @vcpu_info:	The vcpu affinity information
 */
int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, void *vcpu_info)
{
	data = data->parent_data;
	if (data->chip->irq_set_vcpu_affinity)
		return data->chip->irq_set_vcpu_affinity(data, vcpu_info);

	return -ENOSYS;
}

/**
 * irq_chip_set_wake_parent - Set/reset wake-up on the parent interrupt
 * @data:	Pointer to interrupt specific data
 * @on:		Whether to set or reset the wake-up capability of this irq
 *
 * Conditional, as the underlying parent chip might not implement it.
 */
int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on)
{
	data = data->parent_data;
	if (data->chip->irq_set_wake)
		return data->chip->irq_set_wake(data, on);

	return -ENOSYS;
}
#endif

/**
 * irq_chip_compose_msi_msg - Compose MSI message for an irq chip
 * @data:	Pointer to interrupt specific data
 * @msg:	Pointer to the MSI message
 *
 * For hierarchical domains we find the first chip in the hierarchy
 * which implements the irq_compose_msi_msg callback. For non-hierarchical
 * domains we use the top level chip.
 */
int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct irq_data *pos = NULL;

#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
	for (; data; data = data->parent_data)
#endif
		if (data->chip && data->chip->irq_compose_msi_msg)
			pos = data;
	if (!pos)
		return -ENOSYS;

	pos->chip->irq_compose_msi_msg(pos, msg);

	return 0;
}

/**
 * irq_chip_pm_get - Enable power for an IRQ chip
 * @data:	Pointer to interrupt specific data
 *
 * Enable the power to the IRQ chip referenced by the interrupt data
 * structure.
 */
int irq_chip_pm_get(struct irq_data *data)
{
	int retval;

	if (IS_ENABLED(CONFIG_PM) && data->chip->parent_device) {
		retval = pm_runtime_get_sync(data->chip->parent_device);
		if (retval < 0) {
			pm_runtime_put_noidle(data->chip->parent_device);
			return retval;
		}
	}

	return 0;
}

/**
 * irq_chip_pm_put - Disable power for an IRQ chip
 * @data:	Pointer to interrupt specific data
 *
 * Disable the power to the IRQ chip referenced by the interrupt data
 * structure. Note that power will only be disabled once this
 * function has been called for all IRQs that have called irq_chip_pm_get().
 */
int irq_chip_pm_put(struct irq_data *data)
{
	int retval = 0;

	if (IS_ENABLED(CONFIG_PM) && data->chip->parent_device)
		retval = pm_runtime_put(data->chip->parent_device);

	return (retval < 0) ? retval : 0;
}
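
/*
 * Example (hypothetical driver code, for illustration only):
 * irq_chip_pm_get() and irq_chip_pm_put() are invoked by the core
 * around the request and free paths of an interrupt. An irqchip driver
 * only has to point its chip at the struct device that provides the
 * power, e.g.:
 *
 *	foo->chip.parent_device = &pdev->dev;
 *
 * The core then holds a runtime PM reference on that device for as
 * long as any of the chip's interrupts are requested.
 */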