// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code, for irq-chip based
 * architectures. Detailed information is available in
 * Documentation/core-api/genericirq.rst
 */

#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/irqdomain.h>

#include <trace/events/irq.h>

#include "internals.h"

static irqreturn_t bad_chained_irq(int irq, void *dev_id)
{
	WARN_ONCE(1, "Chained irq %d should not call an action\n", irq);
	return IRQ_NONE;
}

/*
 * Chained handlers should never call action on their IRQ. This default
 * action will emit a warning if such a thing happens.
 */
struct irqaction chained_action = {
	.handler = bad_chained_irq,
};

/**
 * irq_set_chip - set the irq chip for an irq
 * @irq: irq number
 * @chip: pointer to irq chip description structure
 */
int irq_set_chip(unsigned int irq, const struct irq_chip *chip)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	desc->irq_data.chip = (struct irq_chip *)(chip ?: &no_irq_chip);
	irq_put_desc_unlock(desc, flags);
	/*
	 * For !CONFIG_SPARSE_IRQ make the irq show up in
	 * allocated_irqs.
	 */
	irq_mark_irq(irq);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip);

/**
 * irq_set_irq_type - set the irq trigger type for an irq
 * @irq: irq number
 * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 */
int irq_set_irq_type(unsigned int irq, unsigned int type)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	ret = __irq_set_trigger(desc, type);
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_type);

/**
 * irq_set_handler_data - set irq handler data for an irq
 * @irq: Interrupt number
 * @data: Pointer to interrupt specific data
 *
 * Set the hardware irq controller data for an irq
 */
int irq_set_handler_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_common_data.handler_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_handler_data);

/**
 * irq_set_msi_desc_off - set MSI descriptor data for an irq at offset
 * @irq_base: Interrupt number base
 * @irq_offset: Interrupt number offset
 * @entry: Pointer to MSI descriptor data
 *
 * Set the MSI descriptor entry for an irq at offset
 */
int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
			 struct msi_desc *entry)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq_base + irq_offset, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->irq_common_data.msi_desc = entry;
	if (entry && !irq_offset)
		entry->irq = irq_base;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
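
/*
 * Usage sketch for the setters above (illustrative only, not part of this
 * file; MY_IRQ, &my_chip and my_dev are placeholders): platform or irqchip
 * setup code typically points an irq at its chip, picks a trigger type and
 * attaches controller-specific data before handlers are installed:
 *
 *	irq_set_chip(MY_IRQ, &my_chip);
 *	irq_set_irq_type(MY_IRQ, IRQ_TYPE_EDGE_RISING);
 *	irq_set_handler_data(MY_IRQ, my_dev);
 */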

/**
 * irq_set_msi_desc - set MSI descriptor data for an irq
 * @irq: Interrupt number
 * @entry: Pointer to MSI descriptor data
 *
 * Set the MSI descriptor entry for an irq
 */
int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
{
	return irq_set_msi_desc_off(irq, 0, entry);
}

/**
 * irq_set_chip_data - set irq chip data for an irq
 * @irq: Interrupt number
 * @data: Pointer to chip specific data
 *
 * Set the hardware irq chip data for an irq
 */
int irq_set_chip_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_data.chip_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip_data);

struct irq_data *irq_get_irq_data(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc ? &desc->irq_data : NULL;
}
EXPORT_SYMBOL_GPL(irq_get_irq_data);

static void irq_state_clr_disabled(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_clr_masked(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
}

static void irq_state_clr_started(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_STARTED);
}

static void irq_state_set_started(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_STARTED);
}

enum {
	IRQ_STARTUP_NORMAL,
	IRQ_STARTUP_MANAGED,
	IRQ_STARTUP_ABORT,
};

#ifdef CONFIG_SMP
static int
__irq_startup_managed(struct irq_desc *desc, const struct cpumask *aff,
		      bool force)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);

	if (!irqd_affinity_is_managed(d))
		return IRQ_STARTUP_NORMAL;

	irqd_clr_managed_shutdown(d);

	if (cpumask_any_and(aff, cpu_online_mask) >= nr_cpu_ids) {
		/*
		 * Catch code which fiddles with enable_irq() on a managed
		 * and potentially shutdown IRQ. Chained interrupt
		 * installment or irq auto probing should not happen on
		 * managed irqs either.
		 */
		if (WARN_ON_ONCE(force))
			return IRQ_STARTUP_ABORT;
		/*
		 * The interrupt was requested, but there is no online CPU
		 * in its affinity mask. Put it into managed shutdown
		 * state and let the cpu hotplug mechanism start it up once
		 * a CPU in the mask becomes available.
		 */
		return IRQ_STARTUP_ABORT;
	}
	/*
	 * Managed interrupts have reserved resources, so this should not
	 * happen.
	 */
	if (WARN_ON(irq_domain_activate_irq(d, false)))
		return IRQ_STARTUP_ABORT;
	return IRQ_STARTUP_MANAGED;
}
#else
static __always_inline int
__irq_startup_managed(struct irq_desc *desc, const struct cpumask *aff,
		      bool force)
{
	return IRQ_STARTUP_NORMAL;
}
#endif

static int __irq_startup(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	int ret = 0;

	/* Warn if this interrupt is not activated but try nevertheless */
	WARN_ON_ONCE(!irqd_is_activated(d));

	if (d->chip->irq_startup) {
		ret = d->chip->irq_startup(d);
		irq_state_clr_disabled(desc);
		irq_state_clr_masked(desc);
	} else {
		irq_enable(desc);
	}
	irq_state_set_started(desc);
	return ret;
}

int irq_startup(struct irq_desc *desc, bool resend, bool force)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	const struct cpumask *aff = irq_data_get_affinity_mask(d);
	int ret = 0;

	desc->depth = 0;

	if (irqd_is_started(d)) {
		irq_enable(desc);
	} else {
		switch (__irq_startup_managed(desc, aff, force)) {
		case IRQ_STARTUP_NORMAL:
			if (d->chip->flags & IRQCHIP_AFFINITY_PRE_STARTUP)
				irq_setup_affinity(desc);
			ret = __irq_startup(desc);
			if (!(d->chip->flags & IRQCHIP_AFFINITY_PRE_STARTUP))
				irq_setup_affinity(desc);
			break;
		case IRQ_STARTUP_MANAGED:
			irq_do_set_affinity(d, aff, false);
			ret = __irq_startup(desc);
			break;
		case IRQ_STARTUP_ABORT:
			irqd_set_managed_shutdown(d);
			return 0;
		}
	}
	if (resend)
		check_irq_resend(desc, false);

	return ret;
}

int irq_activate(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);

	if (!irqd_affinity_is_managed(d))
		return irq_domain_activate_irq(d, false);
	return 0;
}

int irq_activate_and_startup(struct irq_desc *desc, bool resend)
{
	if (WARN_ON(irq_activate(desc)))
		return 0;
	return irq_startup(desc, resend, IRQ_START_FORCE);
}

static void __irq_disable(struct irq_desc *desc, bool mask);

void irq_shutdown(struct irq_desc *desc)
{
	if (irqd_is_started(&desc->irq_data)) {
		desc->depth = 1;
		if (desc->irq_data.chip->irq_shutdown) {
			desc->irq_data.chip->irq_shutdown(&desc->irq_data);
			irq_state_set_disabled(desc);
			irq_state_set_masked(desc);
		} else {
			__irq_disable(desc, true);
		}
		irq_state_clr_started(desc);
	}
}


void irq_shutdown_and_deactivate(struct irq_desc *desc)
{
	irq_shutdown(desc);
	/*
	 * This must be called even if the interrupt was never started up,
	 * because the activation can happen before the interrupt is
	 * available for request/startup. It has its own state tracking so
	 * it's safe to call it unconditionally.
	 */
	irq_domain_deactivate_irq(&desc->irq_data);
}

void irq_enable(struct irq_desc *desc)
{
	if (!irqd_irq_disabled(&desc->irq_data)) {
		unmask_irq(desc);
	} else {
		irq_state_clr_disabled(desc);
		if (desc->irq_data.chip->irq_enable) {
			desc->irq_data.chip->irq_enable(&desc->irq_data);
			irq_state_clr_masked(desc);
		} else {
			unmask_irq(desc);
		}
	}
}

static void __irq_disable(struct irq_desc *desc, bool mask)
{
	if (irqd_irq_disabled(&desc->irq_data)) {
		if (mask)
			mask_irq(desc);
	} else {
		irq_state_set_disabled(desc);
		if (desc->irq_data.chip->irq_disable) {
			desc->irq_data.chip->irq_disable(&desc->irq_data);
			irq_state_set_masked(desc);
		} else if (mask) {
			mask_irq(desc);
		}
	}
}

/**
 * irq_disable - Mark interrupt disabled
 * @desc: irq descriptor which should be disabled
 *
 * If the chip does not implement the irq_disable callback, we
 * use a lazy disable approach. That means we mark the interrupt
 * disabled, but leave the hardware unmasked. That's an
 * optimization because we avoid the hardware access for the
 * common case where no interrupt happens after we marked it
 * disabled. If an interrupt happens, then the interrupt flow
 * handler masks the line at the hardware level and marks it
 * pending.
 *
 * If the interrupt chip does not implement the irq_disable callback,
 * a driver can disable the lazy approach for a particular irq line by
 * calling 'irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY)'. This can
 * be used for devices which cannot disable the interrupt at the
 * device level under certain circumstances and have to use
 * disable_irq[_nosync] instead.
 */
void irq_disable(struct irq_desc *desc)
{
	__irq_disable(desc, irq_settings_disable_unlazy(desc));
}

void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	cpumask_set_cpu(cpu, desc->percpu_enabled);
}

void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	cpumask_clear_cpu(cpu, desc->percpu_enabled);
}

static inline void mask_ack_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask_ack) {
		desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
		irq_state_set_masked(desc);
	} else {
		mask_irq(desc);
		if (desc->irq_data.chip->irq_ack)
			desc->irq_data.chip->irq_ack(&desc->irq_data);
	}
}

void mask_irq(struct irq_desc *desc)
{
	if (irqd_irq_masked(&desc->irq_data))
		return;

	if (desc->irq_data.chip->irq_mask) {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		irq_state_set_masked(desc);
	}
}

void unmask_irq(struct irq_desc *desc)
{
	if (!irqd_irq_masked(&desc->irq_data))
		return;

	if (desc->irq_data.chip->irq_unmask) {
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
		irq_state_clr_masked(desc);
	}
}

void unmask_threaded_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	if (chip->flags & IRQCHIP_EOI_THREADED)
		chip->irq_eoi(&desc->irq_data);

	unmask_irq(desc);
}
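
/*
 * Usage sketch for the lazy-disable behaviour documented at irq_disable()
 * above (illustrative only; MY_IRQ is a placeholder). A driver whose device
 * cannot gate the interrupt at the device level can opt out of the lazy
 * approach, so that disable_irq() masks the line at the chip right away:
 *
 *	irq_set_status_flags(MY_IRQ, IRQ_DISABLE_UNLAZY);
 *	...
 *	disable_irq(MY_IRQ);
 */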
/*
 * handle_nested_irq - Handle a nested irq from an irq thread
 * @irq: the interrupt number
 *
 * Handle interrupts which are nested into a threaded interrupt
 * handler. The handler function is called inside the calling
 * threads context.
 */
void handle_nested_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	irqreturn_t action_ret;

	might_sleep();

	raw_spin_lock_irq(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	action = desc->action;
	if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);
	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock_irq(&desc->lock);

	action_ret = IRQ_NONE;
	for_each_action_of_desc(desc, action)
		action_ret |= action->thread_fn(action->irq, action->dev_id);

	if (!irq_settings_no_debug(desc))
		note_interrupt(desc, action_ret);

	raw_spin_lock_irq(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_nested_irq);

static bool irq_check_poll(struct irq_desc *desc)
{
	if (!(desc->istate & IRQS_POLL_INPROGRESS))
		return false;
	return irq_wait_for_poll(desc);
}

static bool irq_may_run(struct irq_desc *desc)
{
	unsigned int mask = IRQD_IRQ_INPROGRESS | IRQD_WAKEUP_ARMED;

	/*
	 * If the interrupt is not in progress and is not an armed
	 * wakeup interrupt, proceed.
	 */
	if (!irqd_has_set(&desc->irq_data, mask))
		return true;

	/*
	 * If the interrupt is an armed wakeup source, mark it pending
	 * and suspended, disable it and notify the pm core about the
	 * event.
	 */
	if (irq_pm_check_wakeup(desc))
		return false;

	/*
	 * Handle a potential concurrent poll on a different core.
	 */
	return irq_check_poll(desc);
}

/**
 * handle_simple_irq - Simple and software-decoded IRQs.
 * @desc: the interrupt description structure for this irq
 *
 * Simple interrupts are either sent from a demultiplexing interrupt
 * handler or come from hardware, where no interrupt hardware control
 * is necessary.
 *
 * Note: The caller is expected to handle the ack, clear, mask and
 * unmask issues if necessary.
 */
void handle_simple_irq(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);
	handle_irq_event(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_simple_irq);
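
/*
 * Usage sketch (illustrative only, not part of this file): handle_simple_irq()
 * is a typical flow handler for child interrupts of a demultiplexing
 * ("chained") parent. A demux driver would map its children with something
 * like the following, where virq, &my_child_chip, my_demux_domain and hwirq
 * are placeholders:
 *
 *	irq_set_chip_and_handler(virq, &my_child_chip, handle_simple_irq);
 *
 * and the chained parent handler then dispatches each pending child via
 * generic_handle_domain_irq(my_demux_domain, hwirq).
 */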

/**
 * handle_untracked_irq - Simple and software-decoded IRQs.
 * @desc: the interrupt description structure for this irq
 *
 * Untracked interrupts are sent from a demultiplexing interrupt
 * handler when the demultiplexer does not know which device in its
 * multiplexed irq domain generated the interrupt. IRQs handled
 * through here are not subjected to stats tracking, randomness, or
 * spurious interrupt detection.
 *
 * Note: Like handle_simple_irq, the caller is expected to handle
 * the ack, clear, mask and unmask issues if necessary.
 */
void handle_untracked_irq(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	desc->istate &= ~IRQS_PENDING;
	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock(&desc->lock);

	__handle_irq_event_percpu(desc);

	raw_spin_lock(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_untracked_irq);

/*
 * Called unconditionally from handle_level_irq() and only for oneshot
 * interrupts from handle_fasteoi_irq()
 */
static void cond_unmask_irq(struct irq_desc *desc)
{
	/*
	 * We need to unmask in the following cases:
	 * - Standard level irq (IRQF_ONESHOT is not set)
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
		unmask_irq(desc);
}

/**
 * handle_level_irq - Level type irq handler
 * @desc: the interrupt description structure for this irq
 *
 * Level type interrupts are active as long as the hardware line has
 * the active level. This may require masking the interrupt and unmasking
 * it after the associated handler has acknowledged the device, so the
 * interrupt line is back to inactive.
 */
void handle_level_irq(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);
	mask_ack_irq(desc);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	/*
	 * If it's disabled or no action is available,
	 * keep it masked and get out of here
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);
	handle_irq_event(desc);

	cond_unmask_irq(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_level_irq);

static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip)
{
	if (!(desc->istate & IRQS_ONESHOT)) {
		chip->irq_eoi(&desc->irq_data);
		return;
	}
	/*
	 * We need to unmask in the following cases:
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) {
		chip->irq_eoi(&desc->irq_data);
		unmask_irq(desc);
	} else if (!(chip->flags & IRQCHIP_EOI_THREADED)) {
		chip->irq_eoi(&desc->irq_data);
	}
}

/**
 * handle_fasteoi_irq - irq handler for transparent controllers
 * @desc: the interrupt description structure for this irq
 *
 * Only a single callback will be issued to the chip: an ->eoi()
 * call when the interrupt has been serviced. This enables support
 * for modern forms of interrupt handlers, which handle the flow
 * details in hardware, transparently.
 */
void handle_fasteoi_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	/*
	 * If it's disabled or no action is available,
	 * then mask it and get out of here:
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}

	kstat_incr_irqs_this_cpu(desc);
	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	handle_irq_event(desc);

	cond_unmask_eoi_irq(desc, chip);

	raw_spin_unlock(&desc->lock);
	return;
out:
	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
		chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_irq);

/**
 * handle_fasteoi_nmi - irq handler for NMI interrupt lines
 * @desc: the interrupt description structure for this irq
 *
 * A simple NMI-safe handler, considering the restrictions
 * from request_nmi.
 *
 * Only a single callback will be issued to the chip: an ->eoi()
 * call when the interrupt has been serviced. This enables support
 * for modern forms of interrupt handlers, which handle the flow
 * details in hardware, transparently.
 */
void handle_fasteoi_nmi(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irqaction *action = desc->action;
	unsigned int irq = irq_desc_get_irq(desc);
	irqreturn_t res;

	__kstat_incr_irqs_this_cpu(desc);

	trace_irq_handler_entry(irq, action);
	/*
	 * NMIs cannot be shared, there is only one action.
	 */
	res = action->handler(irq, action->dev_id);
	trace_irq_handler_exit(irq, action, res);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_nmi);

/**
 * handle_edge_irq - edge type IRQ handler
 * @desc: the interrupt description structure for this irq
 *
 * Interrupt occurs on the falling and/or rising edge of a hardware
 * signal. The occurrence is latched into the irq controller hardware
 * and must be acked in order to be reenabled. After the ack another
 * interrupt can happen on the same source even before the first one
 * is handled by the associated event handler. If this happens it
 * might be necessary to disable (mask) the interrupt depending on the
 * controller hardware. This requires reenabling the interrupt inside
 * of the loop which handles the interrupts which have arrived while
 * the handler was running. If all pending interrupts are handled, the
 * loop is left.
 */
void handle_edge_irq(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (!irq_may_run(desc)) {
		desc->istate |= IRQS_PENDING;
		mask_ack_irq(desc);
		goto out_unlock;
	}

	/*
	 * If it's disabled or no action is available, then mask it and get
	 * out of here.
	 */
	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
		desc->istate |= IRQS_PENDING;
		mask_ack_irq(desc);
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);

	/* Start handling the irq */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	do {
		if (unlikely(!desc->action)) {
			mask_irq(desc);
			goto out_unlock;
		}

		/*
		 * When another irq arrived while we were handling
		 * one, we could have masked the irq.
		 * Reenable it, if it was not disabled in the meantime.
		 */
		if (unlikely(desc->istate & IRQS_PENDING)) {
			if (!irqd_irq_disabled(&desc->irq_data) &&
			    irqd_irq_masked(&desc->irq_data))
				unmask_irq(desc);
		}

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL(handle_edge_irq);

#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
/**
 * handle_edge_eoi_irq - edge eoi type IRQ handler
 * @desc: the interrupt description structure for this irq
 *
 * Similar to handle_edge_irq above, but using eoi and w/o the
 * mask/unmask logic.
 */
void handle_edge_eoi_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (!irq_may_run(desc)) {
		desc->istate |= IRQS_PENDING;
		goto out_eoi;
	}

	/*
	 * If it's disabled or no action is available, then mark it
	 * pending and get out of here.
	 */
	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
		desc->istate |= IRQS_PENDING;
		goto out_eoi;
	}

	kstat_incr_irqs_this_cpu(desc);

	do {
		if (unlikely(!desc->action))
			goto out_eoi;

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_eoi:
	chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
#endif

/**
 * handle_percpu_irq - Per CPU local irq handler
 * @desc: the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements
 */
void handle_percpu_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	/*
	 * PER CPU interrupts are not serialized. Do not touch
	 * desc->tot_count.
	 */
	__kstat_incr_irqs_this_cpu(desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	handle_irq_event_percpu(desc);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}

/**
 * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
 * @desc: the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements. Same as
 * handle_percpu_irq() above but with the following extras:
 *
 * action->percpu_dev_id is a pointer to percpu variables which
 * contain the real device id for the cpu on which this handler is
 * called
 */
void handle_percpu_devid_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irqaction *action = desc->action;
	unsigned int irq = irq_desc_get_irq(desc);
	irqreturn_t res;

	/*
	 * PER CPU interrupts are not serialized. Do not touch
	 * desc->tot_count.
	 */
	__kstat_incr_irqs_this_cpu(desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	if (likely(action)) {
		trace_irq_handler_entry(irq, action);
		res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
		trace_irq_handler_exit(irq, action, res);
	} else {
		unsigned int cpu = smp_processor_id();
		bool enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);

		if (enabled)
			irq_percpu_disable(desc, cpu);

		pr_err_once("Spurious%s percpu IRQ%u on CPU%u\n",
			    enabled ? " and unmasked" : "", irq, cpu);
	}

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}

/**
 * handle_percpu_devid_fasteoi_nmi - Per CPU local NMI handler with per cpu
 *				     dev ids
 * @desc: the interrupt description structure for this irq
 *
 * Similar to handle_fasteoi_nmi, but handling the dev_id cookie
 * as a percpu pointer.
 */
void handle_percpu_devid_fasteoi_nmi(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irqaction *action = desc->action;
	unsigned int irq = irq_desc_get_irq(desc);
	irqreturn_t res;

	__kstat_incr_irqs_this_cpu(desc);

	trace_irq_handler_entry(irq, action);
	res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
	trace_irq_handler_exit(irq, action, res);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}

static void
__irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
		     int is_chained, const char *name)
{
	if (!handle) {
		handle = handle_bad_irq;
	} else {
		struct irq_data *irq_data = &desc->irq_data;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		/*
		 * With hierarchical domains we might run into a
		 * situation where the outermost chip is not yet set
		 * up, but the inner chips are there. Instead of
		 * bailing we install the handler, but obviously we
		 * cannot enable/startup the interrupt at this point.
		 */
		while (irq_data) {
			if (irq_data->chip != &no_irq_chip)
				break;
			/*
			 * Bail out if the outer chip is not set up
			 * and the interrupt is supposed to be started
			 * right away.
			 */
			if (WARN_ON(is_chained))
				return;
			/* Try the parent */
			irq_data = irq_data->parent_data;
		}
#endif
		if (WARN_ON(!irq_data || irq_data->chip == &no_irq_chip))
			return;
	}

	/* Uninstall? */
	if (handle == handle_bad_irq) {
		if (desc->irq_data.chip != &no_irq_chip)
			mask_ack_irq(desc);
		irq_state_set_disabled(desc);
		if (is_chained) {
			desc->action = NULL;
			WARN_ON(irq_chip_pm_put(irq_desc_get_irq_data(desc)));
		}
		desc->depth = 1;
	}
	desc->handle_irq = handle;
	desc->name = name;

	if (handle != handle_bad_irq && is_chained) {
		unsigned int type = irqd_get_trigger_type(&desc->irq_data);

		/*
		 * We're about to start this interrupt immediately,
		 * hence the need to set the trigger configuration.
		 * But the .set_type callback may have overridden the
		 * flow handler, ignoring that we're dealing with a
		 * chained interrupt. Reset it immediately because we
		 * do know better.
		 */
		if (type != IRQ_TYPE_NONE) {
			__irq_set_trigger(desc, type);
			desc->handle_irq = handle;
		}

		irq_settings_set_noprobe(desc);
		irq_settings_set_norequest(desc);
		irq_settings_set_nothread(desc);
		desc->action = &chained_action;
		WARN_ON(irq_chip_pm_get(irq_desc_get_irq_data(desc)));
		irq_activate_and_startup(desc, IRQ_RESEND);
	}
}

void
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
		  const char *name)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);

	if (!desc)
		return;

	__irq_do_set_handler(desc, handle, is_chained, name);
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(__irq_set_handler);

void
irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle,
				 void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);

	if (!desc)
		return;

	desc->irq_common_data.handler_data = data;
	__irq_do_set_handler(desc, handle, 1, NULL);

	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_set_chained_handler_and_data);

void
irq_set_chip_and_handler_name(unsigned int irq, const struct irq_chip *chip,
			      irq_flow_handler_t handle, const char *name)
{
	irq_set_chip(irq, chip);
	__irq_set_handler(irq, handle, 0, name);
}
EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name);

void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
{
	unsigned long flags, trigger, tmp;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return;

	/*
	 * Warn when a driver sets the no autoenable flag on an already
	 * active interrupt.
	 */
	WARN_ON_ONCE(!desc->depth && (set & _IRQ_NOAUTOEN));

	irq_settings_clr_and_set(desc, clr, set);

	trigger = irqd_get_trigger_type(&desc->irq_data);

	irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
		   IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
	if (irq_settings_has_no_balance_set(desc))
		irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
	if (irq_settings_is_per_cpu(desc))
		irqd_set(&desc->irq_data, IRQD_PER_CPU);
	if (irq_settings_can_move_pcntxt(desc))
		irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
	if (irq_settings_is_level(desc))
		irqd_set(&desc->irq_data, IRQD_LEVEL);

	tmp = irq_settings_get_trigger_mask(desc);
	if (tmp != IRQ_TYPE_NONE)
		trigger = tmp;

	irqd_set(&desc->irq_data, trigger);

	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_modify_status);
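
/*
 * Usage sketch for the chained-handler setters above (illustrative only;
 * parent_irq, my_gpio_demux_handler and priv are placeholders): a GPIO or
 * similar demux driver installs its parent flow handler and private data in
 * one call, which also marks the line noprobe/norequest and starts it up:
 *
 *	irq_set_chained_handler_and_data(parent_irq, my_gpio_demux_handler,
 *					 priv);
 */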

#ifdef CONFIG_DEPRECATED_IRQ_CPU_ONOFFLINE
/**
 * irq_cpu_online - Invoke all irq_cpu_online functions.
 *
 * Iterate through all irqs and invoke the chip.irq_cpu_online()
 * for each.
 */
void irq_cpu_online(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_online &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_online(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}

/**
 * irq_cpu_offline - Invoke all irq_cpu_offline functions.
 *
 * Iterate through all irqs and invoke the chip.irq_cpu_offline()
 * for each.
 */
void irq_cpu_offline(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_offline &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_offline(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}
#endif

#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY

#ifdef CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS
/**
 * handle_fasteoi_ack_irq - irq handler for edge hierarchy
 *			    stacked on transparent controllers
 *
 * @desc: the interrupt description structure for this irq
 *
 * Like handle_fasteoi_irq(), but for use with hierarchy where
 * the irq_chip also needs to have its ->irq_ack() function
 * called.
 */
void handle_fasteoi_ack_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	/*
	 * If it's disabled or no action is available,
	 * then mask it and get out of here:
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}

	kstat_incr_irqs_this_cpu(desc);
	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	/* Start handling the irq */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	handle_irq_event(desc);

	cond_unmask_eoi_irq(desc, chip);

	raw_spin_unlock(&desc->lock);
	return;
out:
	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
		chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_ack_irq);

/**
 * handle_fasteoi_mask_irq - irq handler for level hierarchy
 *			     stacked on transparent controllers
 *
 * @desc: the interrupt description structure for this irq
 *
 * Like handle_fasteoi_irq(), but for use with hierarchy where
 * the irq_chip also needs to have its ->irq_mask_ack() function
 * called.
 */
void handle_fasteoi_mask_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	raw_spin_lock(&desc->lock);
	mask_ack_irq(desc);

	if (!irq_may_run(desc))
		goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	/*
	 * If it's disabled or no action is available,
	 * then mask it and get out of here:
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}

	kstat_incr_irqs_this_cpu(desc);
	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	handle_irq_event(desc);

	cond_unmask_eoi_irq(desc, chip);

	raw_spin_unlock(&desc->lock);
	return;
out:
	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
		chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_mask_irq);

#endif /* CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS */

/**
 * irq_chip_set_parent_state - set the state of a parent interrupt.
 *
 * @data: Pointer to interrupt specific data
 * @which: State to be restored (one of IRQCHIP_STATE_*)
 * @val: Value corresponding to @which
 *
 * Conditional success: returns 0 if the underlying irqchip does not
 * implement it.
 */
int irq_chip_set_parent_state(struct irq_data *data,
			      enum irqchip_irq_state which,
			      bool val)
{
	data = data->parent_data;

	if (!data || !data->chip->irq_set_irqchip_state)
		return 0;

	return data->chip->irq_set_irqchip_state(data, which, val);
}
EXPORT_SYMBOL_GPL(irq_chip_set_parent_state);

/**
 * irq_chip_get_parent_state - get the state of a parent interrupt.
 *
 * @data: Pointer to interrupt specific data
 * @which: one of IRQCHIP_STATE_* the caller wants to know
 * @state: a pointer to a boolean where the state is to be stored
 *
 * Conditional success: returns 0 if the underlying irqchip does not
 * implement it.
 */
int irq_chip_get_parent_state(struct irq_data *data,
			      enum irqchip_irq_state which,
			      bool *state)
{
	data = data->parent_data;

	if (!data || !data->chip->irq_get_irqchip_state)
		return 0;

	return data->chip->irq_get_irqchip_state(data, which, state);
}
EXPORT_SYMBOL_GPL(irq_chip_get_parent_state);

/**
 * irq_chip_enable_parent - Enable the parent interrupt (defaults to unmask if
 * NULL)
 * @data: Pointer to interrupt specific data
 */
void irq_chip_enable_parent(struct irq_data *data)
{
	data = data->parent_data;
	if (data->chip->irq_enable)
		data->chip->irq_enable(data);
	else
		data->chip->irq_unmask(data);
}
EXPORT_SYMBOL_GPL(irq_chip_enable_parent);

/**
 * irq_chip_disable_parent - Disable the parent interrupt (defaults to mask if
 * NULL)
 * @data: Pointer to interrupt specific data
 */
void irq_chip_disable_parent(struct irq_data *data)
{
	data = data->parent_data;
	if (data->chip->irq_disable)
		data->chip->irq_disable(data);
	else
		data->chip->irq_mask(data);
}
EXPORT_SYMBOL_GPL(irq_chip_disable_parent);

/**
 * irq_chip_ack_parent - Acknowledge the parent interrupt
 * @data: Pointer to interrupt specific data
 */
void irq_chip_ack_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_ack(data);
}
EXPORT_SYMBOL_GPL(irq_chip_ack_parent);

/**
 * irq_chip_mask_parent - Mask the parent interrupt
 * @data: Pointer to interrupt specific data
 */
void irq_chip_mask_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_mask(data);
}
EXPORT_SYMBOL_GPL(irq_chip_mask_parent);

/**
 * irq_chip_mask_ack_parent - Mask and acknowledge the parent interrupt
 * @data: Pointer to interrupt specific data
 */
void irq_chip_mask_ack_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_mask_ack(data);
}
EXPORT_SYMBOL_GPL(irq_chip_mask_ack_parent);

/**
 * irq_chip_unmask_parent - Unmask the parent interrupt
 * @data: Pointer to interrupt specific data
 */
void irq_chip_unmask_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_unmask(data);
}
EXPORT_SYMBOL_GPL(irq_chip_unmask_parent);

/**
 * irq_chip_eoi_parent - Invoke EOI on the parent interrupt
 * @data: Pointer to interrupt specific data
 */
void irq_chip_eoi_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_eoi(data);
}
EXPORT_SYMBOL_GPL(irq_chip_eoi_parent);

/**
 * irq_chip_set_affinity_parent - Set affinity on the parent interrupt
 * @data: Pointer to interrupt specific data
 * @dest: The affinity mask to set
 * @force: Flag to enforce setting (disable online checks)
 *
 * Conditional, as the underlying parent chip might not implement it.
 */
int irq_chip_set_affinity_parent(struct irq_data *data,
				 const struct cpumask *dest, bool force)
{
	data = data->parent_data;
	if (data->chip->irq_set_affinity)
		return data->chip->irq_set_affinity(data, dest, force);

	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(irq_chip_set_affinity_parent);

/**
 * irq_chip_set_type_parent - Set IRQ type on the parent interrupt
 * @data: Pointer to interrupt specific data
 * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 *
 * Conditional, as the underlying parent chip might not implement it.
 */
int irq_chip_set_type_parent(struct irq_data *data, unsigned int type)
{
	data = data->parent_data;

	if (data->chip->irq_set_type)
		return data->chip->irq_set_type(data, type);

	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(irq_chip_set_type_parent);

/**
 * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware
 * @data: Pointer to interrupt specific data
 *
 * Iterate through the domain hierarchy of the interrupt and check
 * whether a hw retrigger function exists. If yes, invoke it.
 */
int irq_chip_retrigger_hierarchy(struct irq_data *data)
{
	for (data = data->parent_data; data; data = data->parent_data)
		if (data->chip && data->chip->irq_retrigger)
			return data->chip->irq_retrigger(data);

	return 0;
}
EXPORT_SYMBOL_GPL(irq_chip_retrigger_hierarchy);

/**
 * irq_chip_set_vcpu_affinity_parent - Set vcpu affinity on the parent interrupt
 * @data: Pointer to interrupt specific data
 * @vcpu_info: The vcpu affinity information
 */
int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, void *vcpu_info)
{
	data = data->parent_data;
	if (data->chip->irq_set_vcpu_affinity)
		return data->chip->irq_set_vcpu_affinity(data, vcpu_info);

	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(irq_chip_set_vcpu_affinity_parent);

/**
 * irq_chip_set_wake_parent - Set/reset wake-up on the parent interrupt
 * @data: Pointer to interrupt specific data
 * @on: Whether to set or reset the wake-up capability of this irq
 *
 * Conditional, as the underlying parent chip might not implement it.
 */
int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on)
{
	data = data->parent_data;

	if (data->chip->flags & IRQCHIP_SKIP_SET_WAKE)
		return 0;

	if (data->chip->irq_set_wake)
		return data->chip->irq_set_wake(data, on);

	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(irq_chip_set_wake_parent);

/**
 * irq_chip_request_resources_parent - Request resources on the parent interrupt
 * @data: Pointer to interrupt specific data
 */
int irq_chip_request_resources_parent(struct irq_data *data)
{
	data = data->parent_data;

	if (data->chip->irq_request_resources)
		return data->chip->irq_request_resources(data);

	/* no error on missing optional irq_chip::irq_request_resources */
	return 0;
}
EXPORT_SYMBOL_GPL(irq_chip_request_resources_parent);

/**
 * irq_chip_release_resources_parent - Release resources on the parent interrupt
 * @data: Pointer to interrupt specific data
 */
void irq_chip_release_resources_parent(struct irq_data *data)
{
	data = data->parent_data;
	if (data->chip->irq_release_resources)
		data->chip->irq_release_resources(data);
}
EXPORT_SYMBOL_GPL(irq_chip_release_resources_parent);
#endif

/**
 * irq_chip_compose_msi_msg - Compose msi message for an irq chip
 * @data: Pointer to interrupt specific data
 * @msg: Pointer to the MSI message
 *
 * For hierarchical domains we find the first chip in the hierarchy
 * which implements the irq_compose_msi_msg callback. For non
 * hierarchical we use the top level chip.
 */
int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct irq_data *pos;

	for (pos = NULL; !pos && data; data = irqd_get_parent_data(data)) {
		if (data->chip && data->chip->irq_compose_msi_msg)
			pos = data;
	}

	if (!pos)
		return -ENOSYS;

	pos->chip->irq_compose_msi_msg(pos, msg);
	return 0;
}

static struct device *irq_get_pm_device(struct irq_data *data)
{
	if (data->domain)
		return data->domain->pm_dev;

	return NULL;
}

/**
 * irq_chip_pm_get - Enable power for an IRQ chip
 * @data: Pointer to interrupt specific data
 *
 * Enable the power to the IRQ chip referenced by the interrupt data
 * structure.
 */
int irq_chip_pm_get(struct irq_data *data)
{
	struct device *dev = irq_get_pm_device(data);
	int retval = 0;

	if (IS_ENABLED(CONFIG_PM) && dev)
		retval = pm_runtime_resume_and_get(dev);

	return retval;
}

/**
 * irq_chip_pm_put - Disable power for an IRQ chip
 * @data: Pointer to interrupt specific data
 *
 * Disable the power to the IRQ chip referenced by the interrupt data
 * structure. Note that power will only be disabled once this function has
 * been called for all IRQs that have called irq_chip_pm_get().
 */
int irq_chip_pm_put(struct irq_data *data)
{
	struct device *dev = irq_get_pm_device(data);
	int retval = 0;

	if (IS_ENABLED(CONFIG_PM) && dev)
		retval = pm_runtime_put(dev);

	return (retval < 0) ? retval : 0;
}
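
/*
 * Usage sketch for the *_parent helpers above (illustrative only; the chip
 * below is a placeholder, not part of this file): a child irq_chip in a
 * hierarchical domain that has no hardware of its own for masking or EOI can
 * simply forward those operations to its parent:
 *
 *	static struct irq_chip my_stacked_chip = {
 *		.name			= "my-stacked",
 *		.irq_mask		= irq_chip_mask_parent,
 *		.irq_unmask		= irq_chip_unmask_parent,
 *		.irq_eoi		= irq_chip_eoi_parent,
 *		.irq_set_affinity	= irq_chip_set_affinity_parent,
 *	};
 */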