// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code, for irq-chip based
 * architectures. Detailed information is available in
 * Documentation/core-api/genericirq.rst
 */

#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/irqdomain.h>

#include <trace/events/irq.h>

#include "internals.h"

static irqreturn_t bad_chained_irq(int irq, void *dev_id)
{
	WARN_ONCE(1, "Chained irq %d should not call an action\n", irq);
	return IRQ_NONE;
}

/*
 * Chained handlers should never call action on their IRQ. This default
 * action will emit a warning if such a thing happens.
 */
struct irqaction chained_action = {
	.handler = bad_chained_irq,
};

/**
 * irq_set_chip - set the irq chip for an irq
 * @irq: irq number
 * @chip: pointer to irq chip description structure
 */
int irq_set_chip(unsigned int irq, struct irq_chip *chip)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	if (!chip)
		chip = &no_irq_chip;

	desc->irq_data.chip = chip;
	irq_put_desc_unlock(desc, flags);
	/*
	 * For !CONFIG_SPARSE_IRQ make the irq show up in
	 * allocated_irqs.
	 */
	irq_mark_irq(irq);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip);

/**
 * irq_set_irq_type - set the irq trigger type for an irq
 * @irq: irq number
 * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 */
int irq_set_irq_type(unsigned int irq, unsigned int type)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	ret = __irq_set_trigger(desc, type);
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_type);

/**
 * irq_set_handler_data - set irq handler data for an irq
 * @irq: Interrupt number
 * @data: Pointer to interrupt specific data
 *
 * Set the hardware irq controller data for an irq
 */
int irq_set_handler_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_common_data.handler_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_handler_data);

/**
 * irq_set_msi_desc_off - set MSI descriptor data for an irq at offset
 * @irq_base: Interrupt number base
 * @irq_offset: Interrupt number offset
 * @entry: Pointer to MSI descriptor data
 *
 * Set the MSI descriptor entry for an irq at offset
 */
int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
			 struct msi_desc *entry)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq_base + irq_offset, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->irq_common_data.msi_desc = entry;
	if (entry && !irq_offset)
		entry->irq = irq_base;
	irq_put_desc_unlock(desc, flags);
	return 0;
}

/**
 * irq_set_msi_desc - set MSI descriptor data for an irq
 * @irq: Interrupt number
 * @entry: Pointer to MSI descriptor data
 *
 * Set the MSI descriptor entry for an irq
 */
int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
{
	return irq_set_msi_desc_off(irq, 0, entry);
}

/**
 * irq_set_chip_data - set irq chip data for an irq
 * @irq: Interrupt number
 * @data: Pointer to chip specific data
 *
 * Set the hardware irq chip data for an irq
 */
int irq_set_chip_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_data.chip_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip_data);

struct irq_data *irq_get_irq_data(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc ? &desc->irq_data : NULL;
}
EXPORT_SYMBOL_GPL(irq_get_irq_data);

static void irq_state_clr_disabled(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_clr_masked(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
}

static void irq_state_clr_started(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_STARTED);
}

static void irq_state_set_started(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_STARTED);
}

enum {
	IRQ_STARTUP_NORMAL,
	IRQ_STARTUP_MANAGED,
	IRQ_STARTUP_ABORT,
};

#ifdef CONFIG_SMP
static int
__irq_startup_managed(struct irq_desc *desc, struct cpumask *aff, bool force)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);

	if (!irqd_affinity_is_managed(d))
		return IRQ_STARTUP_NORMAL;

	irqd_clr_managed_shutdown(d);

	if (cpumask_any_and(aff, cpu_online_mask) >= nr_cpu_ids) {
		/*
		 * Catch code which fiddles with enable_irq() on a managed
		 * and potentially shutdown IRQ. Chained interrupt
		 * installment or irq auto probing should not happen on
		 * managed irqs either.
		 */
		if (WARN_ON_ONCE(force))
			return IRQ_STARTUP_ABORT;
		/*
		 * The interrupt was requested, but there is no online CPU
		 * in its affinity mask. Put it into managed shutdown
		 * state and let the cpu hotplug mechanism start it up once
		 * a CPU in the mask becomes available.
		 */
		return IRQ_STARTUP_ABORT;
	}
	/*
	 * Managed interrupts have reserved resources, so this should not
	 * happen.
	 */
	if (WARN_ON(irq_domain_activate_irq(d, false)))
		return IRQ_STARTUP_ABORT;
	return IRQ_STARTUP_MANAGED;
}
#else
static __always_inline int
__irq_startup_managed(struct irq_desc *desc, struct cpumask *aff, bool force)
{
	return IRQ_STARTUP_NORMAL;
}
#endif

static int __irq_startup(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	int ret = 0;

	/* Warn if this interrupt is not activated but try nevertheless */
	WARN_ON_ONCE(!irqd_is_activated(d));

	if (d->chip->irq_startup) {
		ret = d->chip->irq_startup(d);
		irq_state_clr_disabled(desc);
		irq_state_clr_masked(desc);
	} else {
		irq_enable(desc);
	}
	irq_state_set_started(desc);
	return ret;
}

int irq_startup(struct irq_desc *desc, bool resend, bool force)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	struct cpumask *aff = irq_data_get_affinity_mask(d);
	int ret = 0;

	desc->depth = 0;

	if (irqd_is_started(d)) {
		irq_enable(desc);
	} else {
		switch (__irq_startup_managed(desc, aff, force)) {
		case IRQ_STARTUP_NORMAL:
			ret = __irq_startup(desc);
			irq_setup_affinity(desc);
			break;
		case IRQ_STARTUP_MANAGED:
			irq_do_set_affinity(d, aff, false);
			ret = __irq_startup(desc);
			break;
		case IRQ_STARTUP_ABORT:
			irqd_set_managed_shutdown(d);
			return 0;
		}
	}
	if (resend)
		check_irq_resend(desc);

	return ret;
}

int irq_activate(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);

	if (!irqd_affinity_is_managed(d))
		return irq_domain_activate_irq(d, false);
	return 0;
}

int irq_activate_and_startup(struct irq_desc *desc, bool resend)
{
	if (WARN_ON(irq_activate(desc)))
		return 0;
	return irq_startup(desc, resend, IRQ_START_FORCE);
}

static void __irq_disable(struct irq_desc *desc, bool mask);

void irq_shutdown(struct irq_desc *desc)
{
	if (irqd_is_started(&desc->irq_data)) {
		desc->depth = 1;
		if (desc->irq_data.chip->irq_shutdown) {
			desc->irq_data.chip->irq_shutdown(&desc->irq_data);
			irq_state_set_disabled(desc);
			irq_state_set_masked(desc);
		} else {
			__irq_disable(desc, true);
		}
		irq_state_clr_started(desc);
	}
	/*
	 * This must be called even if the interrupt was never started up,
	 * because the activation can happen before the interrupt is
	 * available for request/startup. It has its own state tracking so
	 * it's safe to call it unconditionally.
	 */
	irq_domain_deactivate_irq(&desc->irq_data);
}

void irq_enable(struct irq_desc *desc)
{
	if (!irqd_irq_disabled(&desc->irq_data)) {
		unmask_irq(desc);
	} else {
		irq_state_clr_disabled(desc);
		if (desc->irq_data.chip->irq_enable) {
			desc->irq_data.chip->irq_enable(&desc->irq_data);
			irq_state_clr_masked(desc);
		} else {
			unmask_irq(desc);
		}
	}
}

static void __irq_disable(struct irq_desc *desc, bool mask)
{
	if (irqd_irq_disabled(&desc->irq_data)) {
		if (mask)
			mask_irq(desc);
	} else {
		irq_state_set_disabled(desc);
		if (desc->irq_data.chip->irq_disable) {
			desc->irq_data.chip->irq_disable(&desc->irq_data);
			irq_state_set_masked(desc);
		} else if (mask) {
			mask_irq(desc);
		}
	}
}

/**
 * irq_disable - Mark interrupt disabled
 * @desc: irq descriptor which should be disabled
 *
 * If the chip does not implement the irq_disable callback, we
 * use a lazy disable approach. That means we mark the interrupt
 * disabled, but leave the hardware unmasked. That's an
 * optimization because we avoid the hardware access for the
 * common case where no interrupt happens after we marked it
 * disabled. If an interrupt happens, then the interrupt flow
 * handler masks the line at the hardware level and marks it
 * pending.
 *
 * If the interrupt chip does not implement the irq_disable callback,
 * a driver can disable the lazy approach for a particular irq line by
 * calling 'irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY)'. This can
 * be used for devices which cannot disable the interrupt at the
 * device level under certain circumstances and have to use
 * disable_irq[_nosync] instead.
 */
void irq_disable(struct irq_desc *desc)
{
	__irq_disable(desc, irq_settings_disable_unlazy(desc));
}
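
/*
 * Usage sketch for the unlazy case described above ("my_handler" and
 * "mydev" are placeholder names): a driver which cannot stop the device
 * from raising interrupts opts out of the lazy disable optimization
 * before requesting the line, so that disable_irq() really masks it at
 * the irq chip:
 *
 *	irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
 *	ret = request_irq(irq, my_handler, 0, "mydev", mydev);
 */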

void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	cpumask_set_cpu(cpu, desc->percpu_enabled);
}

void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	cpumask_clear_cpu(cpu, desc->percpu_enabled);
}

static inline void mask_ack_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask_ack) {
		desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
		irq_state_set_masked(desc);
	} else {
		mask_irq(desc);
		if (desc->irq_data.chip->irq_ack)
			desc->irq_data.chip->irq_ack(&desc->irq_data);
	}
}

void mask_irq(struct irq_desc *desc)
{
	if (irqd_irq_masked(&desc->irq_data))
		return;

	if (desc->irq_data.chip->irq_mask) {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		irq_state_set_masked(desc);
	}
}

void unmask_irq(struct irq_desc *desc)
{
	if (!irqd_irq_masked(&desc->irq_data))
		return;

	if (desc->irq_data.chip->irq_unmask) {
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
		irq_state_clr_masked(desc);
	}
}

void unmask_threaded_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	if (chip->flags & IRQCHIP_EOI_THREADED)
		chip->irq_eoi(&desc->irq_data);

	unmask_irq(desc);
}

/*
 * handle_nested_irq - Handle a nested irq from an irq thread
 * @irq: the interrupt number
 *
 * Handle interrupts which are nested into a threaded interrupt
 * handler. The handler function is called inside the calling
 * thread's context.
 */
void handle_nested_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	irqreturn_t action_ret;

	might_sleep();

	raw_spin_lock_irq(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	action = desc->action;
	if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);
	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock_irq(&desc->lock);

	action_ret = IRQ_NONE;
	for_each_action_of_desc(desc, action)
		action_ret |= action->thread_fn(action->irq, action->dev_id);

	if (!noirqdebug)
		note_interrupt(desc, action_ret);

	raw_spin_lock_irq(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_nested_irq);
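
/*
 * Usage sketch ("my_demux_thread_fn", "my_chip_read_status" and the struct
 * are placeholders): drivers for interrupt controllers behind a slow bus
 * (e.g. I2C) typically request their parent interrupt with a threaded
 * handler, read the controller's status register there, and hand each
 * pending child interrupt to handle_nested_irq():
 *
 *	static irqreturn_t my_demux_thread_fn(int irq, void *data)
 *	{
 *		struct my_chip *chip = data;
 *		unsigned long pending = my_chip_read_status(chip);
 *		int bit;
 *
 *		for_each_set_bit(bit, &pending, chip->num_irqs)
 *			handle_nested_irq(irq_find_mapping(chip->domain, bit));
 *		return IRQ_HANDLED;
 *	}
 */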

static bool irq_check_poll(struct irq_desc *desc)
{
	if (!(desc->istate & IRQS_POLL_INPROGRESS))
		return false;
	return irq_wait_for_poll(desc);
}

static bool irq_may_run(struct irq_desc *desc)
{
	unsigned int mask = IRQD_IRQ_INPROGRESS | IRQD_WAKEUP_ARMED;

	/*
	 * If the interrupt is not in progress and is not an armed
	 * wakeup interrupt, proceed.
	 */
	if (!irqd_has_set(&desc->irq_data, mask))
		return true;

	/*
	 * If the interrupt is an armed wakeup source, mark it pending
	 * and suspended, disable it and notify the pm core about the
	 * event.
	 */
	if (irq_pm_check_wakeup(desc))
		return false;

	/*
	 * Handle a potential concurrent poll on a different core.
	 */
	return irq_check_poll(desc);
}

/**
 * handle_simple_irq - Simple and software-decoded IRQs.
 * @desc: the interrupt description structure for this irq
 *
 * Simple interrupts are either sent from a demultiplexing interrupt
 * handler or come from hardware, where no interrupt hardware control
 * is necessary.
 *
 * Note: The caller is expected to handle the ack, clear, mask and
 * unmask issues if necessary.
 */
void handle_simple_irq(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);
	handle_irq_event(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_simple_irq);

/**
 * handle_untracked_irq - Simple and software-decoded IRQs.
 * @desc: the interrupt description structure for this irq
 *
 * Untracked interrupts are sent from a demultiplexing interrupt
 * handler when the demultiplexer does not know which device in its
 * multiplexed irq domain generated the interrupt. IRQs handled
 * through here are not subjected to stats tracking, randomness, or
 * spurious interrupt detection.
 *
 * Note: Like handle_simple_irq, the caller is expected to handle
 * the ack, clear, mask and unmask issues if necessary.
 */
void handle_untracked_irq(struct irq_desc *desc)
{
	unsigned int flags = 0;

	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	desc->istate &= ~IRQS_PENDING;
	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock(&desc->lock);

	__handle_irq_event_percpu(desc, &flags);

	raw_spin_lock(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_untracked_irq);

/*
 * Called unconditionally from handle_level_irq() and only for oneshot
 * interrupts from handle_fasteoi_irq()
 */
static void cond_unmask_irq(struct irq_desc *desc)
{
	/*
	 * We need to unmask in the following cases:
	 * - Standard level irq (IRQF_ONESHOT is not set)
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
		unmask_irq(desc);
}

/**
 * handle_level_irq - Level type irq handler
 * @desc: the interrupt description structure for this irq
 *
 * Level type interrupts are active as long as the hardware line has
 * the active level. This may require masking the interrupt and
 * unmasking it after the associated handler has acknowledged the
 * device, so the interrupt line is back to inactive.
 */
void handle_level_irq(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);
	mask_ack_irq(desc);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	/*
	 * If it's disabled or no action is available,
	 * keep it masked and get out of here
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);
	handle_irq_event(desc);

	cond_unmask_irq(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_level_irq);
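
/*
 * Usage sketch ("my_chip", "my_domain_map" and "my_hwirq_is_level" are
 * placeholders): an irqdomain's ->map() callback commonly picks the flow
 * handler to match the trigger type of the hardware line, e.g.
 * handle_level_irq for level triggered and handle_edge_irq for edge
 * triggered inputs:
 *
 *	static int my_domain_map(struct irq_domain *d, unsigned int virq,
 *				 irq_hw_number_t hwirq)
 *	{
 *		if (my_hwirq_is_level(hwirq))
 *			irq_set_chip_and_handler(virq, &my_chip, handle_level_irq);
 *		else
 *			irq_set_chip_and_handler(virq, &my_chip, handle_edge_irq);
 *		return 0;
 *	}
 */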

#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
static inline void preflow_handler(struct irq_desc *desc)
{
	if (desc->preflow_handler)
		desc->preflow_handler(&desc->irq_data);
}
#else
static inline void preflow_handler(struct irq_desc *desc) { }
#endif

static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip)
{
	if (!(desc->istate & IRQS_ONESHOT)) {
		chip->irq_eoi(&desc->irq_data);
		return;
	}
	/*
	 * We need to unmask in the following cases:
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) {
		chip->irq_eoi(&desc->irq_data);
		unmask_irq(desc);
	} else if (!(chip->flags & IRQCHIP_EOI_THREADED)) {
		chip->irq_eoi(&desc->irq_data);
	}
}

/**
 * handle_fasteoi_irq - irq handler for transparent controllers
 * @desc: the interrupt description structure for this irq
 *
 * Only a single callback will be issued to the chip: an ->eoi()
 * call when the interrupt has been serviced. This enables support
 * for modern forms of interrupt handlers, which handle the flow
 * details in hardware, transparently.
 */
void handle_fasteoi_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	/*
	 * If it's disabled or no action is available
	 * then mask it and get out of here:
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}

	kstat_incr_irqs_this_cpu(desc);
	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	preflow_handler(desc);
	handle_irq_event(desc);

	cond_unmask_eoi_irq(desc, chip);

	raw_spin_unlock(&desc->lock);
	return;
out:
	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
		chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_irq);

/**
 * handle_fasteoi_nmi - irq handler for NMI interrupt lines
 * @desc: the interrupt description structure for this irq
 *
 * A simple NMI-safe handler, considering the restrictions
 * from request_nmi.
 *
 * Only a single callback will be issued to the chip: an ->eoi()
 * call when the interrupt has been serviced. This enables support
 * for modern forms of interrupt handlers, which handle the flow
 * details in hardware, transparently.
 */
void handle_fasteoi_nmi(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irqaction *action = desc->action;
	unsigned int irq = irq_desc_get_irq(desc);
	irqreturn_t res;

	trace_irq_handler_entry(irq, action);
	/*
	 * NMIs cannot be shared, there is only one action.
	 */
	res = action->handler(irq, action->dev_id);
	trace_irq_handler_exit(irq, action, res);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_nmi);
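
/*
 * Usage sketch ("my_nmi_handler" and "my_dev" are placeholders, and NMI
 * delivery must be supported by the underlying irqchip): a line set up
 * for NMI delivery is requested with request_nmi() rather than
 * request_irq(); the handler then runs in NMI context via
 * handle_fasteoi_nmi() and must not sleep or take regular locks:
 *
 *	err = request_nmi(irq, my_nmi_handler, 0, "my-nmi", my_dev);
 */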

/**
 * handle_edge_irq - edge type IRQ handler
 * @desc: the interrupt description structure for this irq
 *
 * Interrupts occur on the falling and/or rising edge of a hardware
 * signal. The occurrence is latched into the irq controller hardware
 * and must be acked in order to be re-enabled. After the ack another
 * interrupt can happen on the same source even before the first one
 * is handled by the associated event handler. If this happens it
 * might be necessary to disable (mask) the interrupt depending on the
 * controller hardware. This requires re-enabling the interrupt inside
 * the loop which handles the interrupts which have arrived while
 * the handler was running. If all pending interrupts are handled, the
 * loop is left.
 */
void handle_edge_irq(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (!irq_may_run(desc)) {
		desc->istate |= IRQS_PENDING;
		mask_ack_irq(desc);
		goto out_unlock;
	}

	/*
	 * If it's disabled or no action is available then mask it and get
	 * out of here.
	 */
	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
		desc->istate |= IRQS_PENDING;
		mask_ack_irq(desc);
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);

	/* Start handling the irq */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	do {
		if (unlikely(!desc->action)) {
			mask_irq(desc);
			goto out_unlock;
		}

		/*
		 * When another irq arrived while we were handling
		 * one, we could have masked the irq.
		 * Re-enable it, if it was not disabled in the meantime.
		 */
		if (unlikely(desc->istate & IRQS_PENDING)) {
			if (!irqd_irq_disabled(&desc->irq_data) &&
			    irqd_irq_masked(&desc->irq_data))
				unmask_irq(desc);
		}

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL(handle_edge_irq);

#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
/**
 * handle_edge_eoi_irq - edge eoi type IRQ handler
 * @desc: the interrupt description structure for this irq
 *
 * Similar to handle_edge_irq above, but using eoi and without the
 * mask/unmask logic.
 */
void handle_edge_eoi_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (!irq_may_run(desc)) {
		desc->istate |= IRQS_PENDING;
		goto out_eoi;
	}

	/*
	 * If it's disabled or no action is available then mask it and get
	 * out of here.
	 */
	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
		desc->istate |= IRQS_PENDING;
		goto out_eoi;
	}

	kstat_incr_irqs_this_cpu(desc);

	do {
		if (unlikely(!desc->action))
			goto out_eoi;

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_eoi:
	chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
#endif

/**
 * handle_percpu_irq - Per CPU local irq handler
 * @desc: the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements
 */
void handle_percpu_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	/*
	 * PER CPU interrupts are not serialized. Do not touch
	 * desc->tot_count.
	 */
	__kstat_incr_irqs_this_cpu(desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	handle_irq_event_percpu(desc);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}

/**
 * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
 * @desc: the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements. Same as
 * handle_percpu_irq() above but with the following extras:
 *
 * action->percpu_dev_id is a pointer to percpu variables which
 * contain the real device id for the cpu on which this handler is
 * called
 */
void handle_percpu_devid_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irqaction *action = desc->action;
	unsigned int irq = irq_desc_get_irq(desc);
	irqreturn_t res;

	/*
	 * PER CPU interrupts are not serialized. Do not touch
	 * desc->tot_count.
	 */
	__kstat_incr_irqs_this_cpu(desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	if (likely(action)) {
		trace_irq_handler_entry(irq, action);
		res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
		trace_irq_handler_exit(irq, action, res);
	} else {
		unsigned int cpu = smp_processor_id();
		bool enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);

		if (enabled)
			irq_percpu_disable(desc, cpu);

		pr_err_once("Spurious%s percpu IRQ%u on CPU%u\n",
			    enabled ? " and unmasked" : "", irq, cpu);
	}

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}
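
/*
 * Usage sketch ("my_percpu_handler" and "my_percpu_data" are placeholders):
 * per-CPU interrupts handled by the flow handler above are requested once
 * with a percpu cookie and then enabled on each CPU, typically from a CPU
 * hotplug callback:
 *
 *	err = request_percpu_irq(irq, my_percpu_handler, "my-timer",
 *				 &my_percpu_data);
 *	...
 *	enable_percpu_irq(irq, IRQ_TYPE_NONE);	// on each CPU
 */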

/**
 * handle_percpu_devid_fasteoi_nmi - Per CPU local NMI handler with per cpu
 *				     dev ids
 * @desc: the interrupt description structure for this irq
 *
 * Similar to handle_fasteoi_nmi, but handling the dev_id cookie
 * as a percpu pointer.
 */
void handle_percpu_devid_fasteoi_nmi(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irqaction *action = desc->action;
	unsigned int irq = irq_desc_get_irq(desc);
	irqreturn_t res;

	trace_irq_handler_entry(irq, action);
	res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
	trace_irq_handler_exit(irq, action, res);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}

static void
__irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
		     int is_chained, const char *name)
{
	if (!handle) {
		handle = handle_bad_irq;
	} else {
		struct irq_data *irq_data = &desc->irq_data;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		/*
		 * With hierarchical domains we might run into a
		 * situation where the outermost chip is not yet set
		 * up, but the inner chips are there. Instead of
		 * bailing we install the handler, but obviously we
		 * cannot enable/startup the interrupt at this point.
		 */
		while (irq_data) {
			if (irq_data->chip != &no_irq_chip)
				break;
			/*
			 * Bail out if the outer chip is not set up
			 * and the interrupt is supposed to be started
			 * right away.
			 */
			if (WARN_ON(is_chained))
				return;
			/* Try the parent */
			irq_data = irq_data->parent_data;
		}
#endif
		if (WARN_ON(!irq_data || irq_data->chip == &no_irq_chip))
			return;
	}

	/* Uninstall? */
	if (handle == handle_bad_irq) {
		if (desc->irq_data.chip != &no_irq_chip)
			mask_ack_irq(desc);
		irq_state_set_disabled(desc);
		if (is_chained)
			desc->action = NULL;
		desc->depth = 1;
	}
	desc->handle_irq = handle;
	desc->name = name;

	if (handle != handle_bad_irq && is_chained) {
		unsigned int type = irqd_get_trigger_type(&desc->irq_data);

		/*
		 * We're about to start this interrupt immediately,
		 * hence the need to set the trigger configuration.
		 * But the .set_type callback may have overridden the
		 * flow handler, ignoring that we're dealing with a
		 * chained interrupt. Reset it immediately because we
		 * do know better.
		 */
		if (type != IRQ_TYPE_NONE) {
			__irq_set_trigger(desc, type);
			desc->handle_irq = handle;
		}

		irq_settings_set_noprobe(desc);
		irq_settings_set_norequest(desc);
		irq_settings_set_nothread(desc);
		desc->action = &chained_action;
		irq_activate_and_startup(desc, IRQ_RESEND);
	}
}

void
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
		  const char *name)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);

	if (!desc)
		return;

	__irq_do_set_handler(desc, handle, is_chained, name);
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(__irq_set_handler);

void
irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle,
				 void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);

	if (!desc)
		return;

	desc->irq_common_data.handler_data = data;
	__irq_do_set_handler(desc, handle, 1, NULL);

	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_set_chained_handler_and_data);
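
/*
 * Usage sketch ("my_mux_handler", "my_gpio" and "my_gpio_pending" are
 * placeholders): a GPIO or other demultiplexing driver installs a chained
 * flow handler on its parent line; the handler decodes the pending children
 * and forwards each one with generic_handle_irq(). Chained lines never get
 * an action of their own, which is what the bad_chained_irq()/chained_action
 * machinery above enforces.
 *
 *	static void my_mux_handler(struct irq_desc *desc)
 *	{
 *		struct my_gpio *gpio = irq_desc_get_handler_data(desc);
 *		struct irq_chip *chip = irq_desc_get_chip(desc);
 *		unsigned long pending = my_gpio_pending(gpio);
 *		int bit;
 *
 *		chained_irq_enter(chip, desc);
 *		for_each_set_bit(bit, &pending, gpio->ngpio)
 *			generic_handle_irq(irq_find_mapping(gpio->domain, bit));
 *		chained_irq_exit(chip, desc);
 *	}
 *
 *	irq_set_chained_handler_and_data(parent_irq, my_mux_handler, gpio);
 */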

void
irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
			      irq_flow_handler_t handle, const char *name)
{
	irq_set_chip(irq, chip);
	__irq_set_handler(irq, handle, 0, name);
}
EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name);

void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
{
	unsigned long flags, trigger, tmp;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return;

	/*
	 * Warn when a driver sets the no autoenable flag on an already
	 * active interrupt.
	 */
	WARN_ON_ONCE(!desc->depth && (set & _IRQ_NOAUTOEN));

	irq_settings_clr_and_set(desc, clr, set);

	trigger = irqd_get_trigger_type(&desc->irq_data);

	irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
		   IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
	if (irq_settings_has_no_balance_set(desc))
		irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
	if (irq_settings_is_per_cpu(desc))
		irqd_set(&desc->irq_data, IRQD_PER_CPU);
	if (irq_settings_can_move_pcntxt(desc))
		irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
	if (irq_settings_is_level(desc))
		irqd_set(&desc->irq_data, IRQD_LEVEL);

	tmp = irq_settings_get_trigger_mask(desc);
	if (tmp != IRQ_TYPE_NONE)
		trigger = tmp;

	irqd_set(&desc->irq_data, trigger);

	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_modify_status);

/**
 * irq_cpu_online - Invoke all irq_cpu_online functions.
 *
 * Iterate through all irqs and invoke the chip.irq_cpu_online()
 * for each.
 */
void irq_cpu_online(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_online &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_online(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}

/**
 * irq_cpu_offline - Invoke all irq_cpu_offline functions.
 *
 * Iterate through all irqs and invoke the chip.irq_cpu_offline()
 * for each.
 */
void irq_cpu_offline(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_offline &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_offline(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}

#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY

#ifdef CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS
/**
 * handle_fasteoi_ack_irq - irq handler for edge hierarchy
 *			    stacked on transparent controllers
 *
 * @desc: the interrupt description structure for this irq
 *
 * Like handle_fasteoi_irq(), but for use with hierarchy where
 * the irq_chip also needs to have its ->irq_ack() function
 * called.
 */
void handle_fasteoi_ack_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	/*
	 * If it's disabled or no action is available
	 * then mask it and get out of here:
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}

	kstat_incr_irqs_this_cpu(desc);
	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	/* Start handling the irq */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	preflow_handler(desc);
	handle_irq_event(desc);

	cond_unmask_eoi_irq(desc, chip);

	raw_spin_unlock(&desc->lock);
	return;
out:
	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
		chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_ack_irq);

/**
 * handle_fasteoi_mask_irq - irq handler for level hierarchy
 *			     stacked on transparent controllers
 *
 * @desc: the interrupt description structure for this irq
 *
 * Like handle_fasteoi_irq(), but for use with hierarchy where
 * the irq_chip also needs to have its ->irq_mask_ack() function
 * called.
 */
void handle_fasteoi_mask_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	raw_spin_lock(&desc->lock);
	mask_ack_irq(desc);

	if (!irq_may_run(desc))
		goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	/*
	 * If it's disabled or no action is available
	 * then mask it and get out of here:
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}

	kstat_incr_irqs_this_cpu(desc);
	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	preflow_handler(desc);
	handle_irq_event(desc);

	cond_unmask_eoi_irq(desc, chip);

	raw_spin_unlock(&desc->lock);
	return;
out:
	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
		chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_mask_irq);

#endif /* CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS */

/**
 * irq_chip_enable_parent - Enable the parent interrupt (defaults to unmask if
 *			    NULL)
 * @data: Pointer to interrupt specific data
 */
void irq_chip_enable_parent(struct irq_data *data)
{
	data = data->parent_data;
	if (data->chip->irq_enable)
		data->chip->irq_enable(data);
	else
		data->chip->irq_unmask(data);
}
EXPORT_SYMBOL_GPL(irq_chip_enable_parent);

/**
 * irq_chip_disable_parent - Disable the parent interrupt (defaults to mask if
 *			     NULL)
 * @data: Pointer to interrupt specific data
 */
void irq_chip_disable_parent(struct irq_data *data)
{
	data = data->parent_data;
	if (data->chip->irq_disable)
		data->chip->irq_disable(data);
	else
		data->chip->irq_mask(data);
}
EXPORT_SYMBOL_GPL(irq_chip_disable_parent);

/**
 * irq_chip_ack_parent - Acknowledge the parent interrupt
 * @data: Pointer to interrupt specific data
 */
void irq_chip_ack_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_ack(data);
}
EXPORT_SYMBOL_GPL(irq_chip_ack_parent);

/**
 * irq_chip_mask_parent - Mask the parent interrupt
 * @data: Pointer to interrupt specific data
 */
void irq_chip_mask_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_mask(data);
}
EXPORT_SYMBOL_GPL(irq_chip_mask_parent);

/**
 * irq_chip_mask_ack_parent - Mask and acknowledge the parent interrupt
 * @data: Pointer to interrupt specific data
 */
void irq_chip_mask_ack_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_mask_ack(data);
}
EXPORT_SYMBOL_GPL(irq_chip_mask_ack_parent);

/**
 * irq_chip_unmask_parent - Unmask the parent interrupt
 * @data: Pointer to interrupt specific data
 */
void irq_chip_unmask_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_unmask(data);
}
EXPORT_SYMBOL_GPL(irq_chip_unmask_parent);

/**
 * irq_chip_eoi_parent - Invoke EOI on the parent interrupt
 * @data: Pointer to interrupt specific data
 */
void irq_chip_eoi_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_eoi(data);
}
EXPORT_SYMBOL_GPL(irq_chip_eoi_parent);
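
/*
 * Usage sketch ("my_msi_chip" is a placeholder): an irq_chip in a
 * hierarchical domain that has nothing to do itself for mask/unmask/ack/eoi
 * simply points its callbacks at the *_parent helpers above, so the
 * operations are forwarded to the chip one level down the hierarchy:
 *
 *	static struct irq_chip my_msi_chip = {
 *		.name		= "my-msi",
 *		.irq_mask	= irq_chip_mask_parent,
 *		.irq_unmask	= irq_chip_unmask_parent,
 *		.irq_ack	= irq_chip_ack_parent,
 *		.irq_eoi	= irq_chip_eoi_parent,
 *	};
 */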

/**
 * irq_chip_set_affinity_parent - Set affinity on the parent interrupt
 * @data: Pointer to interrupt specific data
 * @dest: The affinity mask to set
 * @force: Flag to enforce setting (disable online checks)
 *
 * Conditional, as the underlying parent chip might not implement it.
 */
int irq_chip_set_affinity_parent(struct irq_data *data,
				 const struct cpumask *dest, bool force)
{
	data = data->parent_data;
	if (data->chip->irq_set_affinity)
		return data->chip->irq_set_affinity(data, dest, force);

	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(irq_chip_set_affinity_parent);

/**
 * irq_chip_set_type_parent - Set IRQ type on the parent interrupt
 * @data: Pointer to interrupt specific data
 * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 *
 * Conditional, as the underlying parent chip might not implement it.
 */
int irq_chip_set_type_parent(struct irq_data *data, unsigned int type)
{
	data = data->parent_data;

	if (data->chip->irq_set_type)
		return data->chip->irq_set_type(data, type);

	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(irq_chip_set_type_parent);

/**
 * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware
 * @data: Pointer to interrupt specific data
 *
 * Iterate through the domain hierarchy of the interrupt and check
 * whether a hw retrigger function exists. If yes, invoke it.
 */
int irq_chip_retrigger_hierarchy(struct irq_data *data)
{
	for (data = data->parent_data; data; data = data->parent_data)
		if (data->chip && data->chip->irq_retrigger)
			return data->chip->irq_retrigger(data);

	return 0;
}

/**
 * irq_chip_set_vcpu_affinity_parent - Set vcpu affinity on the parent interrupt
 * @data: Pointer to interrupt specific data
 * @vcpu_info: The vcpu affinity information
 */
int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, void *vcpu_info)
{
	data = data->parent_data;
	if (data->chip->irq_set_vcpu_affinity)
		return data->chip->irq_set_vcpu_affinity(data, vcpu_info);

	return -ENOSYS;
}

/**
 * irq_chip_set_wake_parent - Set/reset wake-up on the parent interrupt
 * @data: Pointer to interrupt specific data
 * @on: Whether to set or reset the wake-up capability of this irq
 *
 * Conditional, as the underlying parent chip might not implement it.
 */
int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on)
{
	data = data->parent_data;

	if (data->chip->flags & IRQCHIP_SKIP_SET_WAKE)
		return 0;

	if (data->chip->irq_set_wake)
		return data->chip->irq_set_wake(data, on);

	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(irq_chip_set_wake_parent);

/**
 * irq_chip_request_resources_parent - Request resources on the parent interrupt
 * @data: Pointer to interrupt specific data
 */
int irq_chip_request_resources_parent(struct irq_data *data)
{
	data = data->parent_data;

	if (data->chip->irq_request_resources)
		return data->chip->irq_request_resources(data);

	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(irq_chip_request_resources_parent);

/**
 * irq_chip_release_resources_parent - Release resources on the parent interrupt
 * @data: Pointer to interrupt specific data
 */
void irq_chip_release_resources_parent(struct irq_data *data)
{
	data = data->parent_data;
	if (data->chip->irq_release_resources)
		data->chip->irq_release_resources(data);
}
EXPORT_SYMBOL_GPL(irq_chip_release_resources_parent);
#endif

/**
 * irq_chip_compose_msi_msg - Compose an MSI message for an irq chip
 * @data: Pointer to interrupt specific data
 * @msg: Pointer to the MSI message
 *
 * For hierarchical domains we find the first chip in the hierarchy
 * which implements the irq_compose_msi_msg callback. For non-hierarchical
 * domains we use the top level chip.
 */
int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct irq_data *pos = NULL;

#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
	for (; data; data = data->parent_data)
#endif
		if (data->chip && data->chip->irq_compose_msi_msg)
			pos = data;
	if (!pos)
		return -ENOSYS;

	pos->chip->irq_compose_msi_msg(pos, msg);

	return 0;
}

/**
 * irq_chip_pm_get - Enable power for an IRQ chip
 * @data: Pointer to interrupt specific data
 *
 * Enable the power to the IRQ chip referenced by the interrupt data
 * structure.
 */
int irq_chip_pm_get(struct irq_data *data)
{
	int retval;

	if (IS_ENABLED(CONFIG_PM) && data->chip->parent_device) {
		retval = pm_runtime_get_sync(data->chip->parent_device);
		if (retval < 0) {
			pm_runtime_put_noidle(data->chip->parent_device);
			return retval;
		}
	}

	return 0;
}

/**
 * irq_chip_pm_put - Disable power for an IRQ chip
 * @data: Pointer to interrupt specific data
 *
 * Disable the power to the IRQ chip referenced by the interrupt data
 * structure. Note that power will only be disabled once this function
 * has been called for all IRQs that have called irq_chip_pm_get().
 */
int irq_chip_pm_put(struct irq_data *data)
{
	int retval = 0;

	if (IS_ENABLED(CONFIG_PM) && data->chip->parent_device)
		retval = pm_runtime_put(data->chip->parent_device);

	return (retval < 0) ? retval : 0;
}