1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar 4 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King 5 * 6 * This file contains the core interrupt handling code, for irq-chip based 7 * architectures. Detailed information is available in 8 * Documentation/core-api/genericirq.rst 9 */ 10 11 #include <linux/irq.h> 12 #include <linux/msi.h> 13 #include <linux/module.h> 14 #include <linux/interrupt.h> 15 #include <linux/kernel_stat.h> 16 #include <linux/irqdomain.h> 17 18 #include <trace/events/irq.h> 19 20 #include "internals.h" 21 22 static irqreturn_t bad_chained_irq(int irq, void *dev_id) 23 { 24 WARN_ONCE(1, "Chained irq %d should not call an action\n", irq); 25 return IRQ_NONE; 26 } 27 28 /* 29 * Chained handlers should never call action on their IRQ. This default 30 * action will emit warning if such thing happens. 31 */ 32 struct irqaction chained_action = { 33 .handler = bad_chained_irq, 34 }; 35 36 /** 37 * irq_set_chip - set the irq chip for an irq 38 * @irq: irq number 39 * @chip: pointer to irq chip description structure 40 */ 41 int irq_set_chip(unsigned int irq, struct irq_chip *chip) 42 { 43 unsigned long flags; 44 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); 45 46 if (!desc) 47 return -EINVAL; 48 49 if (!chip) 50 chip = &no_irq_chip; 51 52 desc->irq_data.chip = chip; 53 irq_put_desc_unlock(desc, flags); 54 /* 55 * For !CONFIG_SPARSE_IRQ make the irq show up in 56 * allocated_irqs. 57 */ 58 irq_mark_irq(irq); 59 return 0; 60 } 61 EXPORT_SYMBOL(irq_set_chip); 62 63 /** 64 * irq_set_type - set the irq trigger type for an irq 65 * @irq: irq number 66 * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h 67 */ 68 int irq_set_irq_type(unsigned int irq, unsigned int type) 69 { 70 unsigned long flags; 71 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); 72 int ret = 0; 73 74 if (!desc) 75 return -EINVAL; 76 77 ret = __irq_set_trigger(desc, type); 78 irq_put_desc_busunlock(desc, flags); 79 return ret; 80 } 81 EXPORT_SYMBOL(irq_set_irq_type); 82 83 /** 84 * irq_set_handler_data - set irq handler data for an irq 85 * @irq: Interrupt number 86 * @data: Pointer to interrupt specific data 87 * 88 * Set the hardware irq controller data for an irq 89 */ 90 int irq_set_handler_data(unsigned int irq, void *data) 91 { 92 unsigned long flags; 93 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); 94 95 if (!desc) 96 return -EINVAL; 97 desc->irq_common_data.handler_data = data; 98 irq_put_desc_unlock(desc, flags); 99 return 0; 100 } 101 EXPORT_SYMBOL(irq_set_handler_data); 102 103 /** 104 * irq_set_msi_desc_off - set MSI descriptor data for an irq at offset 105 * @irq_base: Interrupt number base 106 * @irq_offset: Interrupt number offset 107 * @entry: Pointer to MSI descriptor data 108 * 109 * Set the MSI descriptor entry for an irq at offset 110 */ 111 int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset, 112 struct msi_desc *entry) 113 { 114 unsigned long flags; 115 struct irq_desc *desc = irq_get_desc_lock(irq_base + irq_offset, &flags, IRQ_GET_DESC_CHECK_GLOBAL); 116 117 if (!desc) 118 return -EINVAL; 119 desc->irq_common_data.msi_desc = entry; 120 if (entry && !irq_offset) 121 entry->irq = irq_base; 122 irq_put_desc_unlock(desc, flags); 123 return 0; 124 } 125 126 /** 127 * irq_set_msi_desc - set MSI descriptor data for an irq 128 * @irq: Interrupt number 129 * @entry: Pointer to MSI descriptor data 130 * 131 * Set the MSI descriptor entry for an 
irq 132 */ 133 int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry) 134 { 135 return irq_set_msi_desc_off(irq, 0, entry); 136 } 137 138 /** 139 * irq_set_chip_data - set irq chip data for an irq 140 * @irq: Interrupt number 141 * @data: Pointer to chip specific data 142 * 143 * Set the hardware irq chip data for an irq 144 */ 145 int irq_set_chip_data(unsigned int irq, void *data) 146 { 147 unsigned long flags; 148 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); 149 150 if (!desc) 151 return -EINVAL; 152 desc->irq_data.chip_data = data; 153 irq_put_desc_unlock(desc, flags); 154 return 0; 155 } 156 EXPORT_SYMBOL(irq_set_chip_data); 157 158 struct irq_data *irq_get_irq_data(unsigned int irq) 159 { 160 struct irq_desc *desc = irq_to_desc(irq); 161 162 return desc ? &desc->irq_data : NULL; 163 } 164 EXPORT_SYMBOL_GPL(irq_get_irq_data); 165 166 static void irq_state_clr_disabled(struct irq_desc *desc) 167 { 168 irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED); 169 } 170 171 static void irq_state_clr_masked(struct irq_desc *desc) 172 { 173 irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED); 174 } 175 176 static void irq_state_clr_started(struct irq_desc *desc) 177 { 178 irqd_clear(&desc->irq_data, IRQD_IRQ_STARTED); 179 } 180 181 static void irq_state_set_started(struct irq_desc *desc) 182 { 183 irqd_set(&desc->irq_data, IRQD_IRQ_STARTED); 184 } 185 186 enum { 187 IRQ_STARTUP_NORMAL, 188 IRQ_STARTUP_MANAGED, 189 IRQ_STARTUP_ABORT, 190 }; 191 192 #ifdef CONFIG_SMP 193 static int 194 __irq_startup_managed(struct irq_desc *desc, struct cpumask *aff, bool force) 195 { 196 struct irq_data *d = irq_desc_get_irq_data(desc); 197 198 if (!irqd_affinity_is_managed(d)) 199 return IRQ_STARTUP_NORMAL; 200 201 irqd_clr_managed_shutdown(d); 202 203 if (cpumask_any_and(aff, cpu_online_mask) >= nr_cpu_ids) { 204 /* 205 * Catch code which fiddles with enable_irq() on a managed 206 * and potentially shutdown IRQ. Chained interrupt 207 * installment or irq auto probing should not happen on 208 * managed irqs either. 209 */ 210 if (WARN_ON_ONCE(force)) 211 return IRQ_STARTUP_ABORT; 212 /* 213 * The interrupt was requested, but there is no online CPU 214 * in it's affinity mask. Put it into managed shutdown 215 * state and let the cpu hotplug mechanism start it up once 216 * a CPU in the mask becomes available. 217 */ 218 return IRQ_STARTUP_ABORT; 219 } 220 /* 221 * Managed interrupts have reserved resources, so this should not 222 * happen. 
223 */ 224 if (WARN_ON(irq_domain_activate_irq(d, false))) 225 return IRQ_STARTUP_ABORT; 226 return IRQ_STARTUP_MANAGED; 227 } 228 #else 229 static __always_inline int 230 __irq_startup_managed(struct irq_desc *desc, struct cpumask *aff, bool force) 231 { 232 return IRQ_STARTUP_NORMAL; 233 } 234 #endif 235 236 static int __irq_startup(struct irq_desc *desc) 237 { 238 struct irq_data *d = irq_desc_get_irq_data(desc); 239 int ret = 0; 240 241 /* Warn if this interrupt is not activated but try nevertheless */ 242 WARN_ON_ONCE(!irqd_is_activated(d)); 243 244 if (d->chip->irq_startup) { 245 ret = d->chip->irq_startup(d); 246 irq_state_clr_disabled(desc); 247 irq_state_clr_masked(desc); 248 } else { 249 irq_enable(desc); 250 } 251 irq_state_set_started(desc); 252 return ret; 253 } 254 255 int irq_startup(struct irq_desc *desc, bool resend, bool force) 256 { 257 struct irq_data *d = irq_desc_get_irq_data(desc); 258 struct cpumask *aff = irq_data_get_affinity_mask(d); 259 int ret = 0; 260 261 desc->depth = 0; 262 263 if (irqd_is_started(d)) { 264 irq_enable(desc); 265 } else { 266 switch (__irq_startup_managed(desc, aff, force)) { 267 case IRQ_STARTUP_NORMAL: 268 ret = __irq_startup(desc); 269 irq_setup_affinity(desc); 270 break; 271 case IRQ_STARTUP_MANAGED: 272 irq_do_set_affinity(d, aff, false); 273 ret = __irq_startup(desc); 274 break; 275 case IRQ_STARTUP_ABORT: 276 irqd_set_managed_shutdown(d); 277 return 0; 278 } 279 } 280 if (resend) 281 check_irq_resend(desc, false); 282 283 return ret; 284 } 285 286 int irq_activate(struct irq_desc *desc) 287 { 288 struct irq_data *d = irq_desc_get_irq_data(desc); 289 290 if (!irqd_affinity_is_managed(d)) 291 return irq_domain_activate_irq(d, false); 292 return 0; 293 } 294 295 int irq_activate_and_startup(struct irq_desc *desc, bool resend) 296 { 297 if (WARN_ON(irq_activate(desc))) 298 return 0; 299 return irq_startup(desc, resend, IRQ_START_FORCE); 300 } 301 302 static void __irq_disable(struct irq_desc *desc, bool mask); 303 304 void irq_shutdown(struct irq_desc *desc) 305 { 306 if (irqd_is_started(&desc->irq_data)) { 307 desc->depth = 1; 308 if (desc->irq_data.chip->irq_shutdown) { 309 desc->irq_data.chip->irq_shutdown(&desc->irq_data); 310 irq_state_set_disabled(desc); 311 irq_state_set_masked(desc); 312 } else { 313 __irq_disable(desc, true); 314 } 315 irq_state_clr_started(desc); 316 } 317 } 318 319 320 void irq_shutdown_and_deactivate(struct irq_desc *desc) 321 { 322 irq_shutdown(desc); 323 /* 324 * This must be called even if the interrupt was never started up, 325 * because the activation can happen before the interrupt is 326 * available for request/startup. It has it's own state tracking so 327 * it's safe to call it unconditionally. 
328 */ 329 irq_domain_deactivate_irq(&desc->irq_data); 330 } 331 332 void irq_enable(struct irq_desc *desc) 333 { 334 if (!irqd_irq_disabled(&desc->irq_data)) { 335 unmask_irq(desc); 336 } else { 337 irq_state_clr_disabled(desc); 338 if (desc->irq_data.chip->irq_enable) { 339 desc->irq_data.chip->irq_enable(&desc->irq_data); 340 irq_state_clr_masked(desc); 341 } else { 342 unmask_irq(desc); 343 } 344 } 345 } 346 347 static void __irq_disable(struct irq_desc *desc, bool mask) 348 { 349 if (irqd_irq_disabled(&desc->irq_data)) { 350 if (mask) 351 mask_irq(desc); 352 } else { 353 irq_state_set_disabled(desc); 354 if (desc->irq_data.chip->irq_disable) { 355 desc->irq_data.chip->irq_disable(&desc->irq_data); 356 irq_state_set_masked(desc); 357 } else if (mask) { 358 mask_irq(desc); 359 } 360 } 361 } 362 363 /** 364 * irq_disable - Mark interrupt disabled 365 * @desc: irq descriptor which should be disabled 366 * 367 * If the chip does not implement the irq_disable callback, we 368 * use a lazy disable approach. That means we mark the interrupt 369 * disabled, but leave the hardware unmasked. That's an 370 * optimization because we avoid the hardware access for the 371 * common case where no interrupt happens after we marked it 372 * disabled. If an interrupt happens, then the interrupt flow 373 * handler masks the line at the hardware level and marks it 374 * pending. 375 * 376 * If the interrupt chip does not implement the irq_disable callback, 377 * a driver can disable the lazy approach for a particular irq line by 378 * calling 'irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY)'. This can 379 * be used for devices which cannot disable the interrupt at the 380 * device level under certain circumstances and have to use 381 * disable_irq[_nosync] instead. 382 */ 383 void irq_disable(struct irq_desc *desc) 384 { 385 __irq_disable(desc, irq_settings_disable_unlazy(desc)); 386 } 387 388 void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu) 389 { 390 if (desc->irq_data.chip->irq_enable) 391 desc->irq_data.chip->irq_enable(&desc->irq_data); 392 else 393 desc->irq_data.chip->irq_unmask(&desc->irq_data); 394 cpumask_set_cpu(cpu, desc->percpu_enabled); 395 } 396 397 void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu) 398 { 399 if (desc->irq_data.chip->irq_disable) 400 desc->irq_data.chip->irq_disable(&desc->irq_data); 401 else 402 desc->irq_data.chip->irq_mask(&desc->irq_data); 403 cpumask_clear_cpu(cpu, desc->percpu_enabled); 404 } 405 406 static inline void mask_ack_irq(struct irq_desc *desc) 407 { 408 if (desc->irq_data.chip->irq_mask_ack) { 409 desc->irq_data.chip->irq_mask_ack(&desc->irq_data); 410 irq_state_set_masked(desc); 411 } else { 412 mask_irq(desc); 413 if (desc->irq_data.chip->irq_ack) 414 desc->irq_data.chip->irq_ack(&desc->irq_data); 415 } 416 } 417 418 void mask_irq(struct irq_desc *desc) 419 { 420 if (irqd_irq_masked(&desc->irq_data)) 421 return; 422 423 if (desc->irq_data.chip->irq_mask) { 424 desc->irq_data.chip->irq_mask(&desc->irq_data); 425 irq_state_set_masked(desc); 426 } 427 } 428 429 void unmask_irq(struct irq_desc *desc) 430 { 431 if (!irqd_irq_masked(&desc->irq_data)) 432 return; 433 434 if (desc->irq_data.chip->irq_unmask) { 435 desc->irq_data.chip->irq_unmask(&desc->irq_data); 436 irq_state_clr_masked(desc); 437 } 438 } 439 440 void unmask_threaded_irq(struct irq_desc *desc) 441 { 442 struct irq_chip *chip = desc->irq_data.chip; 443 444 if (chip->flags & IRQCHIP_EOI_THREADED) 445 chip->irq_eoi(&desc->irq_data); 446 447 unmask_irq(desc); 448 } 449 450 
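/*
 * Illustrative, driver-side sketch (not part of this file, "foo" names are
 * hypothetical): a device which cannot gate interrupt generation at the
 * device level can opt out of the lazy disable optimization described
 * above by setting IRQ_DISABLE_UNLAZY before requesting the line:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		int irq = platform_get_irq(pdev, 0);
 *
 *		if (irq < 0)
 *			return irq;
 *
 *		irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
 *		return devm_request_irq(&pdev->dev, irq, foo_isr, 0,
 *					"foo", pdev);
 *	}
 */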
/*
 * handle_nested_irq - Handle a nested irq from an irq thread
 * @irq:	the interrupt number
 *
 * Handle interrupts which are nested into a threaded interrupt
 * handler. The handler function is called inside the calling
 * thread's context.
 */
void handle_nested_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	irqreturn_t action_ret;

	might_sleep();

	raw_spin_lock_irq(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	action = desc->action;
	if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);
	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock_irq(&desc->lock);

	action_ret = IRQ_NONE;
	for_each_action_of_desc(desc, action)
		action_ret |= action->thread_fn(action->irq, action->dev_id);

	if (!noirqdebug)
		note_interrupt(desc, action_ret);

	raw_spin_lock_irq(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_nested_irq);

static bool irq_check_poll(struct irq_desc *desc)
{
	if (!(desc->istate & IRQS_POLL_INPROGRESS))
		return false;
	return irq_wait_for_poll(desc);
}

static bool irq_may_run(struct irq_desc *desc)
{
	unsigned int mask = IRQD_IRQ_INPROGRESS | IRQD_WAKEUP_ARMED;

	/*
	 * If the interrupt is not in progress and is not an armed
	 * wakeup interrupt, proceed.
	 */
	if (!irqd_has_set(&desc->irq_data, mask))
		return true;

	/*
	 * If the interrupt is an armed wakeup source, mark it pending
	 * and suspended, disable it and notify the pm core about the
	 * event.
	 */
	if (irq_pm_check_wakeup(desc))
		return false;

	/*
	 * Handle a potential concurrent poll on a different core.
	 */
	return irq_check_poll(desc);
}

/**
 * handle_simple_irq - Simple and software-decoded IRQs.
 * @desc:	the interrupt description structure for this irq
 *
 * Simple interrupts are either sent from a demultiplexing interrupt
 * handler or come from hardware, where no interrupt hardware control
 * is necessary.
 *
 * Note: The caller is expected to handle the ack, clear, mask and
 * unmask issues if necessary.
 */
void handle_simple_irq(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);
	handle_irq_event(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_simple_irq);
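/*
 * Illustrative sketch (not part of this file, "foo" names are hypothetical):
 * child interrupts decoded in software are typically wired up with
 * handle_simple_irq() and dispatched from the demultiplexer via
 * generic_handle_irq(), roughly like this:
 *
 *	static void foo_setup_child(unsigned int child_irq)
 *	{
 *		irq_set_chip_and_handler(child_irq, &foo_child_chip,
 *					 handle_simple_irq);
 *	}
 *
 *	static irqreturn_t foo_parent_handler(int irq, void *dev_id)
 *	{
 *		struct foo *foo = dev_id;
 *		unsigned long pending = readl(foo->base + FOO_STATUS);
 *		unsigned int bit;
 *
 *		for_each_set_bit(bit, &pending, FOO_NR_CHILDREN)
 *			generic_handle_irq(foo->child_irq_base + bit);
 *		return IRQ_HANDLED;
 *	}
 */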
/**
 * handle_untracked_irq - Simple and software-decoded IRQs.
 * @desc:	the interrupt description structure for this irq
 *
 * Untracked interrupts are sent from a demultiplexing interrupt
 * handler when the demultiplexer does not know which device in its
 * multiplexed irq domain generated the interrupt. IRQs handled
 * through here are not subjected to stats tracking, randomness, or
 * spurious interrupt detection.
 *
 * Note: Like handle_simple_irq, the caller is expected to handle
 * the ack, clear, mask and unmask issues if necessary.
 */
void handle_untracked_irq(struct irq_desc *desc)
{
	unsigned int flags = 0;

	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	desc->istate &= ~IRQS_PENDING;
	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock(&desc->lock);

	__handle_irq_event_percpu(desc, &flags);

	raw_spin_lock(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_untracked_irq);

/*
 * Called unconditionally from handle_level_irq() and only for oneshot
 * interrupts from handle_fasteoi_irq()
 */
static void cond_unmask_irq(struct irq_desc *desc)
{
	/*
	 * We need to unmask in the following cases:
	 * - Standard level irq (IRQF_ONESHOT is not set)
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
		unmask_irq(desc);
}

/**
 * handle_level_irq - Level type irq handler
 * @desc:	the interrupt description structure for this irq
 *
 * Level type interrupts are active as long as the hardware line has
 * the active level. This may require masking the interrupt and
 * unmasking it after the associated handler has acknowledged the
 * device, so the interrupt line is back to inactive.
 */
void handle_level_irq(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);
	mask_ack_irq(desc);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	/*
	 * If it is disabled or no action is available,
	 * keep it masked and get out of here.
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);
	handle_irq_event(desc);

	cond_unmask_irq(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_level_irq);

static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip)
{
	if (!(desc->istate & IRQS_ONESHOT)) {
		chip->irq_eoi(&desc->irq_data);
		return;
	}
	/*
	 * We need to unmask in the following cases:
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) {
		chip->irq_eoi(&desc->irq_data);
		unmask_irq(desc);
	} else if (!(chip->flags & IRQCHIP_EOI_THREADED)) {
		chip->irq_eoi(&desc->irq_data);
	}
}
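/*
 * Illustrative sketch (hypothetical "foo" irqchip, not part of this file):
 * an irqchip's .irq_set_type() callback commonly switches a line between
 * the level and edge flow handlers according to the requested trigger:
 *
 *	static int foo_irq_set_type(struct irq_data *d, unsigned int type)
 *	{
 *		foo_hw_set_trigger(d->hwirq, type);
 *
 *		if (type & IRQ_TYPE_LEVEL_MASK)
 *			irq_set_handler_locked(d, handle_level_irq);
 *		else
 *			irq_set_handler_locked(d, handle_edge_irq);
 *		return 0;
 *	}
 */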
/**
 * handle_fasteoi_irq - irq handler for transparent controllers
 * @desc:	the interrupt description structure for this irq
 *
 * Only a single callback will be issued to the chip: an ->eoi()
 * call when the interrupt has been serviced. This enables support
 * for modern forms of interrupt handlers, which handle the flow
 * details in hardware, transparently.
 */
void handle_fasteoi_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	/*
	 * If it is disabled or no action is available,
	 * then mask it and get out of here:
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}

	kstat_incr_irqs_this_cpu(desc);
	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	handle_irq_event(desc);

	cond_unmask_eoi_irq(desc, chip);

	raw_spin_unlock(&desc->lock);
	return;
out:
	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
		chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_irq);

/**
 * handle_fasteoi_nmi - irq handler for NMI interrupt lines
 * @desc:	the interrupt description structure for this irq
 *
 * A simple NMI-safe handler, considering the restrictions
 * from request_nmi.
 *
 * Only a single callback will be issued to the chip: an ->eoi()
 * call when the interrupt has been serviced. This enables support
 * for modern forms of interrupt handlers, which handle the flow
 * details in hardware, transparently.
 */
void handle_fasteoi_nmi(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irqaction *action = desc->action;
	unsigned int irq = irq_desc_get_irq(desc);
	irqreturn_t res;

	__kstat_incr_irqs_this_cpu(desc);

	trace_irq_handler_entry(irq, action);
	/*
	 * NMIs cannot be shared, there is only one action.
	 */
	res = action->handler(irq, action->dev_id);
	trace_irq_handler_exit(irq, action, res);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_nmi);
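/*
 * Illustrative sketch (hypothetical "foo" names, not part of this file):
 * irqchip drivers whose hardware only needs an EOI typically install the
 * fasteoi flow when mapping an interrupt in their domain's .map() path:
 *
 *	static int foo_domain_map(struct irq_domain *d, unsigned int virq,
 *				  irq_hw_number_t hwirq)
 *	{
 *		irq_set_chip_and_handler(virq, &foo_chip, handle_fasteoi_irq);
 *		irq_set_chip_data(virq, d->host_data);
 *		return 0;
 *	}
 */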
/**
 * handle_edge_irq - edge type IRQ handler
 * @desc:	the interrupt description structure for this irq
 *
 * The interrupt occurs on the falling and/or rising edge of a hardware
 * signal. The occurrence is latched into the irq controller hardware
 * and must be acked in order to be reenabled. After the ack another
 * interrupt can happen on the same source even before the first one
 * is handled by the associated event handler. If this happens it
 * might be necessary to disable (mask) the interrupt depending on the
 * controller hardware. This requires reenabling the interrupt inside
 * the loop which handles the interrupts which have arrived while
 * the handler was running. If all pending interrupts are handled, the
 * loop is left.
 */
void handle_edge_irq(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (!irq_may_run(desc)) {
		desc->istate |= IRQS_PENDING;
		mask_ack_irq(desc);
		goto out_unlock;
	}

	/*
	 * If it is disabled or no action is available, then mask it
	 * and get out of here.
	 */
	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
		desc->istate |= IRQS_PENDING;
		mask_ack_irq(desc);
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);

	/* Start handling the irq */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	do {
		if (unlikely(!desc->action)) {
			mask_irq(desc);
			goto out_unlock;
		}

		/*
		 * When another irq arrived while we were handling
		 * one, we could have masked the irq.
		 * Re-enable it, if it was not disabled in the meantime.
		 */
		if (unlikely(desc->istate & IRQS_PENDING)) {
			if (!irqd_irq_disabled(&desc->irq_data) &&
			    irqd_irq_masked(&desc->irq_data))
				unmask_irq(desc);
		}

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL(handle_edge_irq);

#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
/**
 * handle_edge_eoi_irq - edge eoi type IRQ handler
 * @desc:	the interrupt description structure for this irq
 *
 * Similar to the above handle_edge_irq, but using eoi and w/o the
 * mask/unmask logic.
 */
void handle_edge_eoi_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (!irq_may_run(desc)) {
		desc->istate |= IRQS_PENDING;
		goto out_eoi;
	}

	/*
	 * If it is disabled or no action is available, then mask it
	 * and get out of here.
	 */
	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
		desc->istate |= IRQS_PENDING;
		goto out_eoi;
	}

	kstat_incr_irqs_this_cpu(desc);

	do {
		if (unlikely(!desc->action))
			goto out_eoi;

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_eoi:
	chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
#endif

/**
 * handle_percpu_irq - Per CPU local irq handler
 * @desc:	the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements
 */
void handle_percpu_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	/*
	 * PER CPU interrupts are not serialized. Do not touch
	 * desc->tot_count.
	 */
	__kstat_incr_irqs_this_cpu(desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	handle_irq_event_percpu(desc);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}

/**
 * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
 * @desc:	the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements. Same as
 * handle_percpu_irq() above but with the following extras:
 *
 * action->percpu_dev_id is a pointer to percpu variables which
 * contain the real device id for the cpu on which this handler is
 * called
 */
void handle_percpu_devid_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irqaction *action = desc->action;
	unsigned int irq = irq_desc_get_irq(desc);
	irqreturn_t res;

	/*
	 * PER CPU interrupts are not serialized. Do not touch
	 * desc->tot_count.
922 */ 923 __kstat_incr_irqs_this_cpu(desc); 924 925 if (chip->irq_ack) 926 chip->irq_ack(&desc->irq_data); 927 928 if (likely(action)) { 929 trace_irq_handler_entry(irq, action); 930 res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id)); 931 trace_irq_handler_exit(irq, action, res); 932 } else { 933 unsigned int cpu = smp_processor_id(); 934 bool enabled = cpumask_test_cpu(cpu, desc->percpu_enabled); 935 936 if (enabled) 937 irq_percpu_disable(desc, cpu); 938 939 pr_err_once("Spurious%s percpu IRQ%u on CPU%u\n", 940 enabled ? " and unmasked" : "", irq, cpu); 941 } 942 943 if (chip->irq_eoi) 944 chip->irq_eoi(&desc->irq_data); 945 } 946 947 /** 948 * handle_percpu_devid_fasteoi_ipi - Per CPU local IPI handler with per cpu 949 * dev ids 950 * @desc: the interrupt description structure for this irq 951 * 952 * The biggest difference with the IRQ version is that the interrupt is 953 * EOIed early, as the IPI could result in a context switch, and we need to 954 * make sure the IPI can fire again. We also assume that the arch code has 955 * registered an action. If not, we are positively doomed. 956 */ 957 void handle_percpu_devid_fasteoi_ipi(struct irq_desc *desc) 958 { 959 struct irq_chip *chip = irq_desc_get_chip(desc); 960 struct irqaction *action = desc->action; 961 unsigned int irq = irq_desc_get_irq(desc); 962 irqreturn_t res; 963 964 __kstat_incr_irqs_this_cpu(desc); 965 966 if (chip->irq_eoi) 967 chip->irq_eoi(&desc->irq_data); 968 969 trace_irq_handler_entry(irq, action); 970 res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id)); 971 trace_irq_handler_exit(irq, action, res); 972 } 973 974 /** 975 * handle_percpu_devid_fasteoi_nmi - Per CPU local NMI handler with per cpu 976 * dev ids 977 * @desc: the interrupt description structure for this irq 978 * 979 * Similar to handle_fasteoi_nmi, but handling the dev_id cookie 980 * as a percpu pointer. 981 */ 982 void handle_percpu_devid_fasteoi_nmi(struct irq_desc *desc) 983 { 984 struct irq_chip *chip = irq_desc_get_chip(desc); 985 struct irqaction *action = desc->action; 986 unsigned int irq = irq_desc_get_irq(desc); 987 irqreturn_t res; 988 989 __kstat_incr_irqs_this_cpu(desc); 990 991 trace_irq_handler_entry(irq, action); 992 res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id)); 993 trace_irq_handler_exit(irq, action, res); 994 995 if (chip->irq_eoi) 996 chip->irq_eoi(&desc->irq_data); 997 } 998 999 static void 1000 __irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle, 1001 int is_chained, const char *name) 1002 { 1003 if (!handle) { 1004 handle = handle_bad_irq; 1005 } else { 1006 struct irq_data *irq_data = &desc->irq_data; 1007 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY 1008 /* 1009 * With hierarchical domains we might run into a 1010 * situation where the outermost chip is not yet set 1011 * up, but the inner chips are there. Instead of 1012 * bailing we install the handler, but obviously we 1013 * cannot enable/startup the interrupt at this point. 1014 */ 1015 while (irq_data) { 1016 if (irq_data->chip != &no_irq_chip) 1017 break; 1018 /* 1019 * Bail out if the outer chip is not set up 1020 * and the interrupt supposed to be started 1021 * right away. 1022 */ 1023 if (WARN_ON(is_chained)) 1024 return; 1025 /* Try the parent */ 1026 irq_data = irq_data->parent_data; 1027 } 1028 #endif 1029 if (WARN_ON(!irq_data || irq_data->chip == &no_irq_chip)) 1030 return; 1031 } 1032 1033 /* Uninstall? 
*/ 1034 if (handle == handle_bad_irq) { 1035 if (desc->irq_data.chip != &no_irq_chip) 1036 mask_ack_irq(desc); 1037 irq_state_set_disabled(desc); 1038 if (is_chained) 1039 desc->action = NULL; 1040 desc->depth = 1; 1041 } 1042 desc->handle_irq = handle; 1043 desc->name = name; 1044 1045 if (handle != handle_bad_irq && is_chained) { 1046 unsigned int type = irqd_get_trigger_type(&desc->irq_data); 1047 1048 /* 1049 * We're about to start this interrupt immediately, 1050 * hence the need to set the trigger configuration. 1051 * But the .set_type callback may have overridden the 1052 * flow handler, ignoring that we're dealing with a 1053 * chained interrupt. Reset it immediately because we 1054 * do know better. 1055 */ 1056 if (type != IRQ_TYPE_NONE) { 1057 __irq_set_trigger(desc, type); 1058 desc->handle_irq = handle; 1059 } 1060 1061 irq_settings_set_noprobe(desc); 1062 irq_settings_set_norequest(desc); 1063 irq_settings_set_nothread(desc); 1064 desc->action = &chained_action; 1065 irq_activate_and_startup(desc, IRQ_RESEND); 1066 } 1067 } 1068 1069 void 1070 __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, 1071 const char *name) 1072 { 1073 unsigned long flags; 1074 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0); 1075 1076 if (!desc) 1077 return; 1078 1079 __irq_do_set_handler(desc, handle, is_chained, name); 1080 irq_put_desc_busunlock(desc, flags); 1081 } 1082 EXPORT_SYMBOL_GPL(__irq_set_handler); 1083 1084 void 1085 irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle, 1086 void *data) 1087 { 1088 unsigned long flags; 1089 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0); 1090 1091 if (!desc) 1092 return; 1093 1094 desc->irq_common_data.handler_data = data; 1095 __irq_do_set_handler(desc, handle, 1, NULL); 1096 1097 irq_put_desc_busunlock(desc, flags); 1098 } 1099 EXPORT_SYMBOL_GPL(irq_set_chained_handler_and_data); 1100 1101 void 1102 irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip, 1103 irq_flow_handler_t handle, const char *name) 1104 { 1105 irq_set_chip(irq, chip); 1106 __irq_set_handler(irq, handle, 0, name); 1107 } 1108 EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name); 1109 1110 void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set) 1111 { 1112 unsigned long flags, trigger, tmp; 1113 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); 1114 1115 if (!desc) 1116 return; 1117 1118 /* 1119 * Warn when a driver sets the no autoenable flag on an already 1120 * active interrupt. 1121 */ 1122 WARN_ON_ONCE(!desc->depth && (set & _IRQ_NOAUTOEN)); 1123 1124 irq_settings_clr_and_set(desc, clr, set); 1125 1126 trigger = irqd_get_trigger_type(&desc->irq_data); 1127 1128 irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU | 1129 IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT); 1130 if (irq_settings_has_no_balance_set(desc)) 1131 irqd_set(&desc->irq_data, IRQD_NO_BALANCING); 1132 if (irq_settings_is_per_cpu(desc)) 1133 irqd_set(&desc->irq_data, IRQD_PER_CPU); 1134 if (irq_settings_can_move_pcntxt(desc)) 1135 irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT); 1136 if (irq_settings_is_level(desc)) 1137 irqd_set(&desc->irq_data, IRQD_LEVEL); 1138 1139 tmp = irq_settings_get_trigger_mask(desc); 1140 if (tmp != IRQ_TYPE_NONE) 1141 trigger = tmp; 1142 1143 irqd_set(&desc->irq_data, trigger); 1144 1145 irq_put_desc_unlock(desc, flags); 1146 } 1147 EXPORT_SYMBOL_GPL(irq_modify_status); 1148 1149 /** 1150 * irq_cpu_online - Invoke all irq_cpu_online functions. 
1151 * 1152 * Iterate through all irqs and invoke the chip.irq_cpu_online() 1153 * for each. 1154 */ 1155 void irq_cpu_online(void) 1156 { 1157 struct irq_desc *desc; 1158 struct irq_chip *chip; 1159 unsigned long flags; 1160 unsigned int irq; 1161 1162 for_each_active_irq(irq) { 1163 desc = irq_to_desc(irq); 1164 if (!desc) 1165 continue; 1166 1167 raw_spin_lock_irqsave(&desc->lock, flags); 1168 1169 chip = irq_data_get_irq_chip(&desc->irq_data); 1170 if (chip && chip->irq_cpu_online && 1171 (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) || 1172 !irqd_irq_disabled(&desc->irq_data))) 1173 chip->irq_cpu_online(&desc->irq_data); 1174 1175 raw_spin_unlock_irqrestore(&desc->lock, flags); 1176 } 1177 } 1178 1179 /** 1180 * irq_cpu_offline - Invoke all irq_cpu_offline functions. 1181 * 1182 * Iterate through all irqs and invoke the chip.irq_cpu_offline() 1183 * for each. 1184 */ 1185 void irq_cpu_offline(void) 1186 { 1187 struct irq_desc *desc; 1188 struct irq_chip *chip; 1189 unsigned long flags; 1190 unsigned int irq; 1191 1192 for_each_active_irq(irq) { 1193 desc = irq_to_desc(irq); 1194 if (!desc) 1195 continue; 1196 1197 raw_spin_lock_irqsave(&desc->lock, flags); 1198 1199 chip = irq_data_get_irq_chip(&desc->irq_data); 1200 if (chip && chip->irq_cpu_offline && 1201 (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) || 1202 !irqd_irq_disabled(&desc->irq_data))) 1203 chip->irq_cpu_offline(&desc->irq_data); 1204 1205 raw_spin_unlock_irqrestore(&desc->lock, flags); 1206 } 1207 } 1208 1209 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY 1210 1211 #ifdef CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS 1212 /** 1213 * handle_fasteoi_ack_irq - irq handler for edge hierarchy 1214 * stacked on transparent controllers 1215 * 1216 * @desc: the interrupt description structure for this irq 1217 * 1218 * Like handle_fasteoi_irq(), but for use with hierarchy where 1219 * the irq_chip also needs to have its ->irq_ack() function 1220 * called. 1221 */ 1222 void handle_fasteoi_ack_irq(struct irq_desc *desc) 1223 { 1224 struct irq_chip *chip = desc->irq_data.chip; 1225 1226 raw_spin_lock(&desc->lock); 1227 1228 if (!irq_may_run(desc)) 1229 goto out; 1230 1231 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); 1232 1233 /* 1234 * If its disabled or no action available 1235 * then mask it and get out of here: 1236 */ 1237 if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { 1238 desc->istate |= IRQS_PENDING; 1239 mask_irq(desc); 1240 goto out; 1241 } 1242 1243 kstat_incr_irqs_this_cpu(desc); 1244 if (desc->istate & IRQS_ONESHOT) 1245 mask_irq(desc); 1246 1247 /* Start handling the irq */ 1248 desc->irq_data.chip->irq_ack(&desc->irq_data); 1249 1250 handle_irq_event(desc); 1251 1252 cond_unmask_eoi_irq(desc, chip); 1253 1254 raw_spin_unlock(&desc->lock); 1255 return; 1256 out: 1257 if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED)) 1258 chip->irq_eoi(&desc->irq_data); 1259 raw_spin_unlock(&desc->lock); 1260 } 1261 EXPORT_SYMBOL_GPL(handle_fasteoi_ack_irq); 1262 1263 /** 1264 * handle_fasteoi_mask_irq - irq handler for level hierarchy 1265 * stacked on transparent controllers 1266 * 1267 * @desc: the interrupt description structure for this irq 1268 * 1269 * Like handle_fasteoi_irq(), but for use with hierarchy where 1270 * the irq_chip also needs to have its ->irq_mask_ack() function 1271 * called. 
1272 */ 1273 void handle_fasteoi_mask_irq(struct irq_desc *desc) 1274 { 1275 struct irq_chip *chip = desc->irq_data.chip; 1276 1277 raw_spin_lock(&desc->lock); 1278 mask_ack_irq(desc); 1279 1280 if (!irq_may_run(desc)) 1281 goto out; 1282 1283 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); 1284 1285 /* 1286 * If its disabled or no action available 1287 * then mask it and get out of here: 1288 */ 1289 if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { 1290 desc->istate |= IRQS_PENDING; 1291 mask_irq(desc); 1292 goto out; 1293 } 1294 1295 kstat_incr_irqs_this_cpu(desc); 1296 if (desc->istate & IRQS_ONESHOT) 1297 mask_irq(desc); 1298 1299 handle_irq_event(desc); 1300 1301 cond_unmask_eoi_irq(desc, chip); 1302 1303 raw_spin_unlock(&desc->lock); 1304 return; 1305 out: 1306 if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED)) 1307 chip->irq_eoi(&desc->irq_data); 1308 raw_spin_unlock(&desc->lock); 1309 } 1310 EXPORT_SYMBOL_GPL(handle_fasteoi_mask_irq); 1311 1312 #endif /* CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS */ 1313 1314 /** 1315 * irq_chip_set_parent_state - set the state of a parent interrupt. 1316 * 1317 * @data: Pointer to interrupt specific data 1318 * @which: State to be restored (one of IRQCHIP_STATE_*) 1319 * @val: Value corresponding to @which 1320 * 1321 * Conditional success, if the underlying irqchip does not implement it. 1322 */ 1323 int irq_chip_set_parent_state(struct irq_data *data, 1324 enum irqchip_irq_state which, 1325 bool val) 1326 { 1327 data = data->parent_data; 1328 1329 if (!data || !data->chip->irq_set_irqchip_state) 1330 return 0; 1331 1332 return data->chip->irq_set_irqchip_state(data, which, val); 1333 } 1334 EXPORT_SYMBOL_GPL(irq_chip_set_parent_state); 1335 1336 /** 1337 * irq_chip_get_parent_state - get the state of a parent interrupt. 1338 * 1339 * @data: Pointer to interrupt specific data 1340 * @which: one of IRQCHIP_STATE_* the caller wants to know 1341 * @state: a pointer to a boolean where the state is to be stored 1342 * 1343 * Conditional success, if the underlying irqchip does not implement it. 
1344 */ 1345 int irq_chip_get_parent_state(struct irq_data *data, 1346 enum irqchip_irq_state which, 1347 bool *state) 1348 { 1349 data = data->parent_data; 1350 1351 if (!data || !data->chip->irq_get_irqchip_state) 1352 return 0; 1353 1354 return data->chip->irq_get_irqchip_state(data, which, state); 1355 } 1356 EXPORT_SYMBOL_GPL(irq_chip_get_parent_state); 1357 1358 /** 1359 * irq_chip_enable_parent - Enable the parent interrupt (defaults to unmask if 1360 * NULL) 1361 * @data: Pointer to interrupt specific data 1362 */ 1363 void irq_chip_enable_parent(struct irq_data *data) 1364 { 1365 data = data->parent_data; 1366 if (data->chip->irq_enable) 1367 data->chip->irq_enable(data); 1368 else 1369 data->chip->irq_unmask(data); 1370 } 1371 EXPORT_SYMBOL_GPL(irq_chip_enable_parent); 1372 1373 /** 1374 * irq_chip_disable_parent - Disable the parent interrupt (defaults to mask if 1375 * NULL) 1376 * @data: Pointer to interrupt specific data 1377 */ 1378 void irq_chip_disable_parent(struct irq_data *data) 1379 { 1380 data = data->parent_data; 1381 if (data->chip->irq_disable) 1382 data->chip->irq_disable(data); 1383 else 1384 data->chip->irq_mask(data); 1385 } 1386 EXPORT_SYMBOL_GPL(irq_chip_disable_parent); 1387 1388 /** 1389 * irq_chip_ack_parent - Acknowledge the parent interrupt 1390 * @data: Pointer to interrupt specific data 1391 */ 1392 void irq_chip_ack_parent(struct irq_data *data) 1393 { 1394 data = data->parent_data; 1395 data->chip->irq_ack(data); 1396 } 1397 EXPORT_SYMBOL_GPL(irq_chip_ack_parent); 1398 1399 /** 1400 * irq_chip_mask_parent - Mask the parent interrupt 1401 * @data: Pointer to interrupt specific data 1402 */ 1403 void irq_chip_mask_parent(struct irq_data *data) 1404 { 1405 data = data->parent_data; 1406 data->chip->irq_mask(data); 1407 } 1408 EXPORT_SYMBOL_GPL(irq_chip_mask_parent); 1409 1410 /** 1411 * irq_chip_mask_ack_parent - Mask and acknowledge the parent interrupt 1412 * @data: Pointer to interrupt specific data 1413 */ 1414 void irq_chip_mask_ack_parent(struct irq_data *data) 1415 { 1416 data = data->parent_data; 1417 data->chip->irq_mask_ack(data); 1418 } 1419 EXPORT_SYMBOL_GPL(irq_chip_mask_ack_parent); 1420 1421 /** 1422 * irq_chip_unmask_parent - Unmask the parent interrupt 1423 * @data: Pointer to interrupt specific data 1424 */ 1425 void irq_chip_unmask_parent(struct irq_data *data) 1426 { 1427 data = data->parent_data; 1428 data->chip->irq_unmask(data); 1429 } 1430 EXPORT_SYMBOL_GPL(irq_chip_unmask_parent); 1431 1432 /** 1433 * irq_chip_eoi_parent - Invoke EOI on the parent interrupt 1434 * @data: Pointer to interrupt specific data 1435 */ 1436 void irq_chip_eoi_parent(struct irq_data *data) 1437 { 1438 data = data->parent_data; 1439 data->chip->irq_eoi(data); 1440 } 1441 EXPORT_SYMBOL_GPL(irq_chip_eoi_parent); 1442 1443 /** 1444 * irq_chip_set_affinity_parent - Set affinity on the parent interrupt 1445 * @data: Pointer to interrupt specific data 1446 * @dest: The affinity mask to set 1447 * @force: Flag to enforce setting (disable online checks) 1448 * 1449 * Conditinal, as the underlying parent chip might not implement it. 
1450 */ 1451 int irq_chip_set_affinity_parent(struct irq_data *data, 1452 const struct cpumask *dest, bool force) 1453 { 1454 data = data->parent_data; 1455 if (data->chip->irq_set_affinity) 1456 return data->chip->irq_set_affinity(data, dest, force); 1457 1458 return -ENOSYS; 1459 } 1460 EXPORT_SYMBOL_GPL(irq_chip_set_affinity_parent); 1461 1462 /** 1463 * irq_chip_set_type_parent - Set IRQ type on the parent interrupt 1464 * @data: Pointer to interrupt specific data 1465 * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h 1466 * 1467 * Conditional, as the underlying parent chip might not implement it. 1468 */ 1469 int irq_chip_set_type_parent(struct irq_data *data, unsigned int type) 1470 { 1471 data = data->parent_data; 1472 1473 if (data->chip->irq_set_type) 1474 return data->chip->irq_set_type(data, type); 1475 1476 return -ENOSYS; 1477 } 1478 EXPORT_SYMBOL_GPL(irq_chip_set_type_parent); 1479 1480 /** 1481 * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware 1482 * @data: Pointer to interrupt specific data 1483 * 1484 * Iterate through the domain hierarchy of the interrupt and check 1485 * whether a hw retrigger function exists. If yes, invoke it. 1486 */ 1487 int irq_chip_retrigger_hierarchy(struct irq_data *data) 1488 { 1489 for (data = data->parent_data; data; data = data->parent_data) 1490 if (data->chip && data->chip->irq_retrigger) 1491 return data->chip->irq_retrigger(data); 1492 1493 return 0; 1494 } 1495 EXPORT_SYMBOL_GPL(irq_chip_retrigger_hierarchy); 1496 1497 /** 1498 * irq_chip_set_vcpu_affinity_parent - Set vcpu affinity on the parent interrupt 1499 * @data: Pointer to interrupt specific data 1500 * @vcpu_info: The vcpu affinity information 1501 */ 1502 int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, void *vcpu_info) 1503 { 1504 data = data->parent_data; 1505 if (data->chip->irq_set_vcpu_affinity) 1506 return data->chip->irq_set_vcpu_affinity(data, vcpu_info); 1507 1508 return -ENOSYS; 1509 } 1510 EXPORT_SYMBOL_GPL(irq_chip_set_vcpu_affinity_parent); 1511 /** 1512 * irq_chip_set_wake_parent - Set/reset wake-up on the parent interrupt 1513 * @data: Pointer to interrupt specific data 1514 * @on: Whether to set or reset the wake-up capability of this irq 1515 * 1516 * Conditional, as the underlying parent chip might not implement it. 
1517 */ 1518 int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on) 1519 { 1520 data = data->parent_data; 1521 1522 if (data->chip->flags & IRQCHIP_SKIP_SET_WAKE) 1523 return 0; 1524 1525 if (data->chip->irq_set_wake) 1526 return data->chip->irq_set_wake(data, on); 1527 1528 return -ENOSYS; 1529 } 1530 EXPORT_SYMBOL_GPL(irq_chip_set_wake_parent); 1531 1532 /** 1533 * irq_chip_request_resources_parent - Request resources on the parent interrupt 1534 * @data: Pointer to interrupt specific data 1535 */ 1536 int irq_chip_request_resources_parent(struct irq_data *data) 1537 { 1538 data = data->parent_data; 1539 1540 if (data->chip->irq_request_resources) 1541 return data->chip->irq_request_resources(data); 1542 1543 return -ENOSYS; 1544 } 1545 EXPORT_SYMBOL_GPL(irq_chip_request_resources_parent); 1546 1547 /** 1548 * irq_chip_release_resources_parent - Release resources on the parent interrupt 1549 * @data: Pointer to interrupt specific data 1550 */ 1551 void irq_chip_release_resources_parent(struct irq_data *data) 1552 { 1553 data = data->parent_data; 1554 if (data->chip->irq_release_resources) 1555 data->chip->irq_release_resources(data); 1556 } 1557 EXPORT_SYMBOL_GPL(irq_chip_release_resources_parent); 1558 #endif 1559 1560 /** 1561 * irq_chip_compose_msi_msg - Componse msi message for a irq chip 1562 * @data: Pointer to interrupt specific data 1563 * @msg: Pointer to the MSI message 1564 * 1565 * For hierarchical domains we find the first chip in the hierarchy 1566 * which implements the irq_compose_msi_msg callback. For non 1567 * hierarchical we use the top level chip. 1568 */ 1569 int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) 1570 { 1571 struct irq_data *pos; 1572 1573 for (pos = NULL; !pos && data; data = irqd_get_parent_data(data)) { 1574 if (data->chip && data->chip->irq_compose_msi_msg) 1575 pos = data; 1576 } 1577 1578 if (!pos) 1579 return -ENOSYS; 1580 1581 pos->chip->irq_compose_msi_msg(pos, msg); 1582 return 0; 1583 } 1584 1585 /** 1586 * irq_chip_pm_get - Enable power for an IRQ chip 1587 * @data: Pointer to interrupt specific data 1588 * 1589 * Enable the power to the IRQ chip referenced by the interrupt data 1590 * structure. 1591 */ 1592 int irq_chip_pm_get(struct irq_data *data) 1593 { 1594 int retval; 1595 1596 if (IS_ENABLED(CONFIG_PM) && data->chip->parent_device) { 1597 retval = pm_runtime_get_sync(data->chip->parent_device); 1598 if (retval < 0) { 1599 pm_runtime_put_noidle(data->chip->parent_device); 1600 return retval; 1601 } 1602 } 1603 1604 return 0; 1605 } 1606 1607 /** 1608 * irq_chip_pm_put - Disable power for an IRQ chip 1609 * @data: Pointer to interrupt specific data 1610 * 1611 * Disable the power to the IRQ chip referenced by the interrupt data 1612 * structure, belongs. Note that power will only be disabled, once this 1613 * function has been called for all IRQs that have called irq_chip_pm_get(). 1614 */ 1615 int irq_chip_pm_put(struct irq_data *data) 1616 { 1617 int retval = 0; 1618 1619 if (IS_ENABLED(CONFIG_PM) && data->chip->parent_device) 1620 retval = pm_runtime_put(data->chip->parent_device); 1621 1622 return (retval < 0) ? retval : 0; 1623 } 1624