// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code, for irq-chip based
 * architectures. Detailed information is available in
 * Documentation/core-api/genericirq.rst
 */

#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/irqdomain.h>

#include <trace/events/irq.h>

#include "internals.h"

static irqreturn_t bad_chained_irq(int irq, void *dev_id)
{
        WARN_ONCE(1, "Chained irq %d should not call an action\n", irq);
        return IRQ_NONE;
}

/*
 * Chained handlers should never call an action on their IRQ. This default
 * action will emit a warning if such a thing happens.
 */
struct irqaction chained_action = {
        .handler = bad_chained_irq,
};

/**
 * irq_set_chip - set the irq chip for an irq
 * @irq:        irq number
 * @chip:       pointer to irq chip description structure
 */
int irq_set_chip(unsigned int irq, struct irq_chip *chip)
{
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

        if (!desc)
                return -EINVAL;

        if (!chip)
                chip = &no_irq_chip;

        desc->irq_data.chip = chip;
        irq_put_desc_unlock(desc, flags);
        /*
         * For !CONFIG_SPARSE_IRQ make the irq show up in
         * allocated_irqs.
         */
        irq_mark_irq(irq);
        return 0;
}
EXPORT_SYMBOL(irq_set_chip);

/**
 * irq_set_irq_type - set the irq trigger type for an irq
 * @irq:        irq number
 * @type:       IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 */
int irq_set_irq_type(unsigned int irq, unsigned int type)
{
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
        int ret = 0;

        if (!desc)
                return -EINVAL;

        ret = __irq_set_trigger(desc, type);
        irq_put_desc_busunlock(desc, flags);
        return ret;
}
EXPORT_SYMBOL(irq_set_irq_type);

/**
 * irq_set_handler_data - set irq handler data for an irq
 * @irq:        Interrupt number
 * @data:       Pointer to interrupt specific data
 *
 * Set the hardware irq controller data for an irq
 */
int irq_set_handler_data(unsigned int irq, void *data)
{
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

        if (!desc)
                return -EINVAL;
        desc->irq_common_data.handler_data = data;
        irq_put_desc_unlock(desc, flags);
        return 0;
}
EXPORT_SYMBOL(irq_set_handler_data);
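
/*
 * Illustrative sketch (not part of the original file): an irqchip or
 * platform driver typically combines the setters above when wiring up
 * a freshly allocated interrupt. 'virq', 'my_chip' and 'my_cookie' are
 * hypothetical driver-side names:
 *
 *      irq_set_chip(virq, &my_chip);
 *      irq_set_irq_type(virq, IRQ_TYPE_LEVEL_HIGH);
 *      irq_set_handler_data(virq, my_cookie);
 */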

/**
 * irq_set_msi_desc_off - set MSI descriptor data for an irq at offset
 * @irq_base:   Interrupt number base
 * @irq_offset: Interrupt number offset
 * @entry:      Pointer to MSI descriptor data
 *
 * Set the MSI descriptor entry for an irq at offset
 */
int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
                         struct msi_desc *entry)
{
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_lock(irq_base + irq_offset, &flags,
                                                  IRQ_GET_DESC_CHECK_GLOBAL);

        if (!desc)
                return -EINVAL;
        desc->irq_common_data.msi_desc = entry;
        if (entry && !irq_offset)
                entry->irq = irq_base;
        irq_put_desc_unlock(desc, flags);
        return 0;
}

/**
 * irq_set_msi_desc - set MSI descriptor data for an irq
 * @irq:        Interrupt number
 * @entry:      Pointer to MSI descriptor data
 *
 * Set the MSI descriptor entry for an irq
 */
int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
{
        return irq_set_msi_desc_off(irq, 0, entry);
}

/**
 * irq_set_chip_data - set irq chip data for an irq
 * @irq:        Interrupt number
 * @data:       Pointer to chip specific data
 *
 * Set the hardware irq chip data for an irq
 */
int irq_set_chip_data(unsigned int irq, void *data)
{
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

        if (!desc)
                return -EINVAL;
        desc->irq_data.chip_data = data;
        irq_put_desc_unlock(desc, flags);
        return 0;
}
EXPORT_SYMBOL(irq_set_chip_data);

struct irq_data *irq_get_irq_data(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);

        return desc ? &desc->irq_data : NULL;
}
EXPORT_SYMBOL_GPL(irq_get_irq_data);

static void irq_state_clr_disabled(struct irq_desc *desc)
{
        irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_clr_masked(struct irq_desc *desc)
{
        irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
}

static void irq_state_clr_started(struct irq_desc *desc)
{
        irqd_clear(&desc->irq_data, IRQD_IRQ_STARTED);
}

static void irq_state_set_started(struct irq_desc *desc)
{
        irqd_set(&desc->irq_data, IRQD_IRQ_STARTED);
}
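
/*
 * Startup classification used by irq_startup(), computed by
 * __irq_startup_managed() below:
 *
 * IRQ_STARTUP_NORMAL:  regular, non-managed interrupt; apply the
 *                      default affinity handling and start it up.
 * IRQ_STARTUP_MANAGED: managed interrupt with at least one online CPU
 *                      in its affinity mask; start it on that mask.
 * IRQ_STARTUP_ABORT:   managed interrupt with no online CPU in its
 *                      affinity mask; leave it in managed shutdown
 *                      state until a CPU in the mask comes online.
 */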

enum {
        IRQ_STARTUP_NORMAL,
        IRQ_STARTUP_MANAGED,
        IRQ_STARTUP_ABORT,
};

#ifdef CONFIG_SMP
static int
__irq_startup_managed(struct irq_desc *desc, struct cpumask *aff, bool force)
{
        struct irq_data *d = irq_desc_get_irq_data(desc);

        if (!irqd_affinity_is_managed(d))
                return IRQ_STARTUP_NORMAL;

        irqd_clr_managed_shutdown(d);

        if (cpumask_any_and(aff, cpu_online_mask) >= nr_cpu_ids) {
                /*
                 * Catch code which fiddles with enable_irq() on a managed
                 * and potentially shutdown IRQ. Chained interrupt
                 * installment or irq auto probing should not happen on
                 * managed irqs either.
                 */
                if (WARN_ON_ONCE(force))
                        return IRQ_STARTUP_ABORT;
                /*
                 * The interrupt was requested, but there is no online CPU
                 * in its affinity mask. Put it into managed shutdown
                 * state and let the cpu hotplug mechanism start it up once
                 * a CPU in the mask becomes available.
                 */
                return IRQ_STARTUP_ABORT;
        }
        /*
         * Managed interrupts have reserved resources, so this should not
         * happen.
         */
        if (WARN_ON(irq_domain_activate_irq(d, false)))
                return IRQ_STARTUP_ABORT;
        return IRQ_STARTUP_MANAGED;
}
#else
static __always_inline int
__irq_startup_managed(struct irq_desc *desc, struct cpumask *aff, bool force)
{
        return IRQ_STARTUP_NORMAL;
}
#endif

static int __irq_startup(struct irq_desc *desc)
{
        struct irq_data *d = irq_desc_get_irq_data(desc);
        int ret = 0;

        /* Warn if this interrupt is not activated but try nevertheless */
        WARN_ON_ONCE(!irqd_is_activated(d));

        if (d->chip->irq_startup) {
                ret = d->chip->irq_startup(d);
                irq_state_clr_disabled(desc);
                irq_state_clr_masked(desc);
        } else {
                irq_enable(desc);
        }
        irq_state_set_started(desc);
        return ret;
}

int irq_startup(struct irq_desc *desc, bool resend, bool force)
{
        struct irq_data *d = irq_desc_get_irq_data(desc);
        struct cpumask *aff = irq_data_get_affinity_mask(d);
        int ret = 0;

        desc->depth = 0;

        if (irqd_is_started(d)) {
                irq_enable(desc);
        } else {
                switch (__irq_startup_managed(desc, aff, force)) {
                case IRQ_STARTUP_NORMAL:
                        if (d->chip->flags & IRQCHIP_AFFINITY_PRE_STARTUP)
                                irq_setup_affinity(desc);
                        ret = __irq_startup(desc);
                        if (!(d->chip->flags & IRQCHIP_AFFINITY_PRE_STARTUP))
                                irq_setup_affinity(desc);
                        break;
                case IRQ_STARTUP_MANAGED:
                        irq_do_set_affinity(d, aff, false);
                        ret = __irq_startup(desc);
                        break;
                case IRQ_STARTUP_ABORT:
                        irqd_set_managed_shutdown(d);
                        return 0;
                }
        }
        if (resend)
                check_irq_resend(desc, false);

        return ret;
}

int irq_activate(struct irq_desc *desc)
{
        struct irq_data *d = irq_desc_get_irq_data(desc);

        if (!irqd_affinity_is_managed(d))
                return irq_domain_activate_irq(d, false);
        return 0;
}

int irq_activate_and_startup(struct irq_desc *desc, bool resend)
{
        if (WARN_ON(irq_activate(desc)))
                return 0;
        return irq_startup(desc, resend, IRQ_START_FORCE);
}

static void __irq_disable(struct irq_desc *desc, bool mask);

void irq_shutdown(struct irq_desc *desc)
{
        if (irqd_is_started(&desc->irq_data)) {
                desc->depth = 1;
                if (desc->irq_data.chip->irq_shutdown) {
                        desc->irq_data.chip->irq_shutdown(&desc->irq_data);
                        irq_state_set_disabled(desc);
                        irq_state_set_masked(desc);
                } else {
                        __irq_disable(desc, true);
                }
                irq_state_clr_started(desc);
        }
}

void irq_shutdown_and_deactivate(struct irq_desc *desc)
{
        irq_shutdown(desc);
        /*
         * This must be called even if the interrupt was never started up,
         * because the activation can happen before the interrupt is
         * available for request/startup. It has its own state tracking so
         * it's safe to call it unconditionally.
         */
        irq_domain_deactivate_irq(&desc->irq_data);
}

void irq_enable(struct irq_desc *desc)
{
        if (!irqd_irq_disabled(&desc->irq_data)) {
                unmask_irq(desc);
        } else {
                irq_state_clr_disabled(desc);
                if (desc->irq_data.chip->irq_enable) {
                        desc->irq_data.chip->irq_enable(&desc->irq_data);
                        irq_state_clr_masked(desc);
                } else {
                        unmask_irq(desc);
                }
        }
}

static void __irq_disable(struct irq_desc *desc, bool mask)
{
        if (irqd_irq_disabled(&desc->irq_data)) {
                if (mask)
                        mask_irq(desc);
        } else {
                irq_state_set_disabled(desc);
                if (desc->irq_data.chip->irq_disable) {
                        desc->irq_data.chip->irq_disable(&desc->irq_data);
                        irq_state_set_masked(desc);
                } else if (mask) {
                        mask_irq(desc);
                }
        }
}

/**
 * irq_disable - Mark interrupt disabled
 * @desc:       irq descriptor which should be disabled
 *
 * If the chip does not implement the irq_disable callback, we
 * use a lazy disable approach. That means we mark the interrupt
 * disabled, but leave the hardware unmasked. That's an
 * optimization because we avoid the hardware access for the
 * common case where no interrupt happens after we marked it
 * disabled. If an interrupt happens, then the interrupt flow
 * handler masks the line at the hardware level and marks it
 * pending.
 *
 * If the interrupt chip does not implement the irq_disable callback,
 * a driver can disable the lazy approach for a particular irq line by
 * calling 'irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY)'. This can
 * be used for devices which cannot disable the interrupt at the
 * device level under certain circumstances and have to use
 * disable_irq[_nosync] instead.
 */
void irq_disable(struct irq_desc *desc)
{
        __irq_disable(desc, irq_settings_disable_unlazy(desc));
}

void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
{
        if (desc->irq_data.chip->irq_enable)
                desc->irq_data.chip->irq_enable(&desc->irq_data);
        else
                desc->irq_data.chip->irq_unmask(&desc->irq_data);
        cpumask_set_cpu(cpu, desc->percpu_enabled);
}

void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
{
        if (desc->irq_data.chip->irq_disable)
                desc->irq_data.chip->irq_disable(&desc->irq_data);
        else
                desc->irq_data.chip->irq_mask(&desc->irq_data);
        cpumask_clear_cpu(cpu, desc->percpu_enabled);
}

static inline void mask_ack_irq(struct irq_desc *desc)
{
        if (desc->irq_data.chip->irq_mask_ack) {
                desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
                irq_state_set_masked(desc);
        } else {
                mask_irq(desc);
                if (desc->irq_data.chip->irq_ack)
                        desc->irq_data.chip->irq_ack(&desc->irq_data);
        }
}

void mask_irq(struct irq_desc *desc)
{
        if (irqd_irq_masked(&desc->irq_data))
                return;

        if (desc->irq_data.chip->irq_mask) {
                desc->irq_data.chip->irq_mask(&desc->irq_data);
                irq_state_set_masked(desc);
        }
}

void unmask_irq(struct irq_desc *desc)
{
        if (!irqd_irq_masked(&desc->irq_data))
                return;

        if (desc->irq_data.chip->irq_unmask) {
                desc->irq_data.chip->irq_unmask(&desc->irq_data);
                irq_state_clr_masked(desc);
        }
}

void unmask_threaded_irq(struct irq_desc *desc)
{
        struct irq_chip *chip = desc->irq_data.chip;

        if (chip->flags & IRQCHIP_EOI_THREADED)
                chip->irq_eoi(&desc->irq_data);

        unmask_irq(desc);
}
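
/*
 * Illustrative sketch (not part of the original file): handle_nested_irq()
 * below is what e.g. an I2C GPIO expander driver calls from the parent
 * interrupt's thread function after marking the child interrupts with
 * irq_set_nested_thread(). 'my_expander_thread_fn' and the child irq
 * decoding are hypothetical:
 *
 *      static irqreturn_t my_expander_thread_fn(int irq, void *data)
 *      {
 *              unsigned int child_irq;
 *
 *              child_irq = ...;        // decoded from the device registers
 *              handle_nested_irq(child_irq);
 *              return IRQ_HANDLED;
 *      }
 */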

/**
 * handle_nested_irq - Handle a nested irq from an irq thread
 * @irq:        the interrupt number
 *
 * Handle interrupts which are nested into a threaded interrupt
 * handler. The handler function is called inside the calling
 * thread's context.
 */
void handle_nested_irq(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        struct irqaction *action;
        irqreturn_t action_ret;

        might_sleep();

        raw_spin_lock_irq(&desc->lock);

        desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

        action = desc->action;
        if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
                desc->istate |= IRQS_PENDING;
                goto out_unlock;
        }

        kstat_incr_irqs_this_cpu(desc);
        irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
        raw_spin_unlock_irq(&desc->lock);

        action_ret = IRQ_NONE;
        for_each_action_of_desc(desc, action)
                action_ret |= action->thread_fn(action->irq, action->dev_id);

        if (!irq_settings_no_debug(desc))
                note_interrupt(desc, action_ret);

        raw_spin_lock_irq(&desc->lock);
        irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
        raw_spin_unlock_irq(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_nested_irq);

static bool irq_check_poll(struct irq_desc *desc)
{
        if (!(desc->istate & IRQS_POLL_INPROGRESS))
                return false;
        return irq_wait_for_poll(desc);
}

static bool irq_may_run(struct irq_desc *desc)
{
        unsigned int mask = IRQD_IRQ_INPROGRESS | IRQD_WAKEUP_ARMED;

        /*
         * If the interrupt is not in progress and is not an armed
         * wakeup interrupt, proceed.
         */
        if (!irqd_has_set(&desc->irq_data, mask))
                return true;

        /*
         * If the interrupt is an armed wakeup source, mark it pending
         * and suspended, disable it and notify the pm core about the
         * event.
         */
        if (irq_pm_check_wakeup(desc))
                return false;

        /*
         * Handle a potential concurrent poll on a different core.
         */
        return irq_check_poll(desc);
}

/**
 * handle_simple_irq - Simple and software-decoded IRQs.
 * @desc:       the interrupt description structure for this irq
 *
 * Simple interrupts are either sent from a demultiplexing interrupt
 * handler or come from hardware, where no interrupt hardware control
 * is necessary.
 *
 * Note: The caller is expected to handle the ack, clear, mask and
 * unmask issues if necessary.
 */
void handle_simple_irq(struct irq_desc *desc)
{
        raw_spin_lock(&desc->lock);

        if (!irq_may_run(desc))
                goto out_unlock;

        desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

        if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
                desc->istate |= IRQS_PENDING;
                goto out_unlock;
        }

        kstat_incr_irqs_this_cpu(desc);
        handle_irq_event(desc);

out_unlock:
        raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_simple_irq);
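
/*
 * Illustrative sketch (not part of the original file): handle_simple_irq()
 * above is typically installed for child interrupts which are
 * demultiplexed in software. A hypothetical demux flow handler for the
 * parent line might look like:
 *
 *      static void my_demux_handler(struct irq_desc *desc)
 *      {
 *              struct irq_chip *chip = irq_desc_get_chip(desc);
 *
 *              chained_irq_enter(chip, desc);
 *              generic_handle_irq(...);        // once per pending child
 *              chained_irq_exit(chip, desc);
 *      }
 */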

/**
 * handle_untracked_irq - Simple and software-decoded IRQs.
 * @desc:       the interrupt description structure for this irq
 *
 * Untracked interrupts are sent from a demultiplexing interrupt
 * handler when the demultiplexer does not know which device in its
 * multiplexed irq domain generated the interrupt. IRQs handled
 * through here are not subjected to stats tracking, randomness, or
 * spurious interrupt detection.
 *
 * Note: Like handle_simple_irq, the caller is expected to handle
 * the ack, clear, mask and unmask issues if necessary.
 */
void handle_untracked_irq(struct irq_desc *desc)
{
        raw_spin_lock(&desc->lock);

        if (!irq_may_run(desc))
                goto out_unlock;

        desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

        if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
                desc->istate |= IRQS_PENDING;
                goto out_unlock;
        }

        desc->istate &= ~IRQS_PENDING;
        irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
        raw_spin_unlock(&desc->lock);

        __handle_irq_event_percpu(desc);

        raw_spin_lock(&desc->lock);
        irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
        raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_untracked_irq);

/*
 * Called unconditionally from handle_level_irq() and only for oneshot
 * interrupts from handle_fasteoi_irq()
 */
static void cond_unmask_irq(struct irq_desc *desc)
{
        /*
         * We need to unmask in the following cases:
         * - Standard level irq (IRQF_ONESHOT is not set)
         * - Oneshot irq which did not wake the thread (caused by a
         *   spurious interrupt or a primary handler handling it
         *   completely).
         */
        if (!irqd_irq_disabled(&desc->irq_data) &&
            irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
                unmask_irq(desc);
}

/**
 * handle_level_irq - Level type irq handler
 * @desc:       the interrupt description structure for this irq
 *
 * Level type interrupts are active as long as the hardware line has
 * the active level. This may require masking the interrupt and
 * unmasking it after the associated handler has acknowledged the
 * device, so the interrupt line is back to inactive.
 */
void handle_level_irq(struct irq_desc *desc)
{
        raw_spin_lock(&desc->lock);
        mask_ack_irq(desc);

        if (!irq_may_run(desc))
                goto out_unlock;

        desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

        /*
         * If it's disabled or no action is available,
         * keep it masked and get out of here.
         */
        if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
                desc->istate |= IRQS_PENDING;
                goto out_unlock;
        }

        kstat_incr_irqs_this_cpu(desc);
        handle_irq_event(desc);

        cond_unmask_irq(desc);

out_unlock:
        raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_level_irq);

static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip)
{
        if (!(desc->istate & IRQS_ONESHOT)) {
                chip->irq_eoi(&desc->irq_data);
                return;
        }
        /*
         * We need to unmask in the following cases:
         * - Oneshot irq which did not wake the thread (caused by a
         *   spurious interrupt or a primary handler handling it
         *   completely).
         */
        if (!irqd_irq_disabled(&desc->irq_data) &&
            irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) {
                chip->irq_eoi(&desc->irq_data);
                unmask_irq(desc);
        } else if (!(chip->flags & IRQCHIP_EOI_THREADED)) {
                chip->irq_eoi(&desc->irq_data);
        }
}
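
/*
 * Illustrative sketch (not part of the original file): an irqchip driver
 * picks the flow handler matching its hardware, e.g.:
 *
 *      irq_set_chip_and_handler(virq, &my_chip, handle_level_irq);
 *
 * for a mask/ack style level controller, or handle_fasteoi_irq (below)
 * for a controller which only requires an ->irq_eoi() callback.
 * 'virq' and 'my_chip' are hypothetical.
 */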

/**
 * handle_fasteoi_irq - irq handler for transparent controllers
 * @desc:       the interrupt description structure for this irq
 *
 * Only a single callback will be issued to the chip: an ->eoi()
 * call when the interrupt has been serviced. This enables support
 * for modern forms of interrupt handlers, which handle the flow
 * details in hardware, transparently.
 */
void handle_fasteoi_irq(struct irq_desc *desc)
{
        struct irq_chip *chip = desc->irq_data.chip;

        raw_spin_lock(&desc->lock);

        if (!irq_may_run(desc))
                goto out;

        desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

        /*
         * If it's disabled or no action is available,
         * then mask it and get out of here:
         */
        if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
                desc->istate |= IRQS_PENDING;
                mask_irq(desc);
                goto out;
        }

        kstat_incr_irqs_this_cpu(desc);
        if (desc->istate & IRQS_ONESHOT)
                mask_irq(desc);

        handle_irq_event(desc);

        cond_unmask_eoi_irq(desc, chip);

        raw_spin_unlock(&desc->lock);
        return;
out:
        if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
                chip->irq_eoi(&desc->irq_data);
        raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_irq);

/**
 * handle_fasteoi_nmi - irq handler for NMI interrupt lines
 * @desc:       the interrupt description structure for this irq
 *
 * A simple NMI-safe handler, considering the restrictions
 * from request_nmi.
 *
 * Only a single callback will be issued to the chip: an ->eoi()
 * call when the interrupt has been serviced. This enables support
 * for modern forms of interrupt handlers, which handle the flow
 * details in hardware, transparently.
 */
void handle_fasteoi_nmi(struct irq_desc *desc)
{
        struct irq_chip *chip = irq_desc_get_chip(desc);
        struct irqaction *action = desc->action;
        unsigned int irq = irq_desc_get_irq(desc);
        irqreturn_t res;

        __kstat_incr_irqs_this_cpu(desc);

        trace_irq_handler_entry(irq, action);
        /*
         * NMIs cannot be shared, there is only one action.
         */
        res = action->handler(irq, action->dev_id);
        trace_irq_handler_exit(irq, action, res);

        if (chip->irq_eoi)
                chip->irq_eoi(&desc->irq_data);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_nmi);
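
/*
 * Illustrative sketch (not part of the original file): a line flowing
 * through handle_fasteoi_nmi() above is set up via the NMI request
 * interface, subject to the restrictions documented for request_nmi()
 * (no sharing, no threading). 'my_nmi_handler' and 'my_dev' are
 * hypothetical:
 *
 *      ret = request_nmi(irq, my_nmi_handler, 0, "my-nmi", my_dev);
 */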

/**
 * handle_edge_irq - edge type IRQ handler
 * @desc:       the interrupt description structure for this irq
 *
 * The interrupt occurs on the falling and/or rising edge of a hardware
 * signal. The occurrence is latched into the irq controller hardware
 * and must be acked in order to be reenabled. After the ack another
 * interrupt can happen on the same source even before the first one
 * is handled by the associated event handler. If this happens it
 * might be necessary to disable (mask) the interrupt depending on the
 * controller hardware. This requires reenabling the interrupt inside
 * of the loop which handles the interrupts which have arrived while
 * the handler was running. If all pending interrupts are handled, the
 * loop is left.
 */
void handle_edge_irq(struct irq_desc *desc)
{
        raw_spin_lock(&desc->lock);

        desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

        if (!irq_may_run(desc)) {
                desc->istate |= IRQS_PENDING;
                mask_ack_irq(desc);
                goto out_unlock;
        }

        /*
         * If it's disabled or no action is available, then mask it and
         * get out of here.
         */
        if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
                desc->istate |= IRQS_PENDING;
                mask_ack_irq(desc);
                goto out_unlock;
        }

        kstat_incr_irqs_this_cpu(desc);

        /* Start handling the irq */
        desc->irq_data.chip->irq_ack(&desc->irq_data);

        do {
                if (unlikely(!desc->action)) {
                        mask_irq(desc);
                        goto out_unlock;
                }

                /*
                 * When another irq arrived while we were handling
                 * one, we could have masked the irq.
                 * Reenable it, if it was not disabled in the meantime.
                 */
                if (unlikely(desc->istate & IRQS_PENDING)) {
                        if (!irqd_irq_disabled(&desc->irq_data) &&
                            irqd_irq_masked(&desc->irq_data))
                                unmask_irq(desc);
                }

                handle_irq_event(desc);

        } while ((desc->istate & IRQS_PENDING) &&
                 !irqd_irq_disabled(&desc->irq_data));

out_unlock:
        raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL(handle_edge_irq);

#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
/**
 * handle_edge_eoi_irq - edge eoi type IRQ handler
 * @desc:       the interrupt description structure for this irq
 *
 * Similar to handle_edge_irq above, but using eoi and without the
 * mask/unmask logic.
 */
void handle_edge_eoi_irq(struct irq_desc *desc)
{
        struct irq_chip *chip = irq_desc_get_chip(desc);

        raw_spin_lock(&desc->lock);

        desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

        if (!irq_may_run(desc)) {
                desc->istate |= IRQS_PENDING;
                goto out_eoi;
        }

        /*
         * If it's disabled or no action is available, then mark it
         * pending and get out of here.
         */
        if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
                desc->istate |= IRQS_PENDING;
                goto out_eoi;
        }

        kstat_incr_irqs_this_cpu(desc);

        do {
                if (unlikely(!desc->action))
                        goto out_eoi;

                handle_irq_event(desc);

        } while ((desc->istate & IRQS_PENDING) &&
                 !irqd_irq_disabled(&desc->irq_data));

out_eoi:
        chip->irq_eoi(&desc->irq_data);
        raw_spin_unlock(&desc->lock);
}
#endif

/**
 * handle_percpu_irq - Per CPU local irq handler
 * @desc:       the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements
 */
void handle_percpu_irq(struct irq_desc *desc)
{
        struct irq_chip *chip = irq_desc_get_chip(desc);

        /*
         * PER CPU interrupts are not serialized. Do not touch
         * desc->tot_count.
         */
        __kstat_incr_irqs_this_cpu(desc);

        if (chip->irq_ack)
                chip->irq_ack(&desc->irq_data);

        handle_irq_event_percpu(desc);

        if (chip->irq_eoi)
                chip->irq_eoi(&desc->irq_data);
}
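
/*
 * Illustrative sketch (not part of the original file): per-CPU lines
 * handled by handle_percpu_devid_irq() below are usually set up by the
 * irqchip driver and then requested with the percpu variants:
 *
 *      irq_set_percpu_devid(irq);
 *      irq_set_chip_and_handler(irq, &my_chip, handle_percpu_devid_irq);
 *      ...
 *      ret = request_percpu_irq(irq, my_handler, "my-percpu",
 *                               &my_percpu_dev_id);
 *
 * 'my_chip', 'my_handler' and 'my_percpu_dev_id' are hypothetical; the
 * dev_id passed to request_percpu_irq() must be a percpu pointer.
 */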

/**
 * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
 * @desc:       the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements. Same as
 * handle_percpu_irq() above but with the following extras:
 *
 * action->percpu_dev_id is a pointer to percpu variables which
 * contain the real device id for the cpu on which this handler is
 * called
 */
void handle_percpu_devid_irq(struct irq_desc *desc)
{
        struct irq_chip *chip = irq_desc_get_chip(desc);
        struct irqaction *action = desc->action;
        unsigned int irq = irq_desc_get_irq(desc);
        irqreturn_t res;

        /*
         * PER CPU interrupts are not serialized. Do not touch
         * desc->tot_count.
         */
        __kstat_incr_irqs_this_cpu(desc);

        if (chip->irq_ack)
                chip->irq_ack(&desc->irq_data);

        if (likely(action)) {
                trace_irq_handler_entry(irq, action);
                res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
                trace_irq_handler_exit(irq, action, res);
        } else {
                unsigned int cpu = smp_processor_id();
                bool enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);

                if (enabled)
                        irq_percpu_disable(desc, cpu);

                pr_err_once("Spurious%s percpu IRQ%u on CPU%u\n",
                            enabled ? " and unmasked" : "", irq, cpu);
        }

        if (chip->irq_eoi)
                chip->irq_eoi(&desc->irq_data);
}

/**
 * handle_percpu_devid_fasteoi_nmi - Per CPU local NMI handler with per cpu
 *                                   dev ids
 * @desc:       the interrupt description structure for this irq
 *
 * Similar to handle_fasteoi_nmi, but handling the dev_id cookie
 * as a percpu pointer.
 */
void handle_percpu_devid_fasteoi_nmi(struct irq_desc *desc)
{
        struct irq_chip *chip = irq_desc_get_chip(desc);
        struct irqaction *action = desc->action;
        unsigned int irq = irq_desc_get_irq(desc);
        irqreturn_t res;

        __kstat_incr_irqs_this_cpu(desc);

        trace_irq_handler_entry(irq, action);
        res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
        trace_irq_handler_exit(irq, action, res);

        if (chip->irq_eoi)
                chip->irq_eoi(&desc->irq_data);
}
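
/*
 * Common implementation for installing and uninstalling a flow handler.
 * Called with the descriptor lock held by the __irq_set_handler() and
 * irq_set_chained_handler_and_data() wrappers below. Passing NULL (or
 * handle_bad_irq) uninstalls the handler and masks the line.
 */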
static void
__irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
                     int is_chained, const char *name)
{
        if (!handle) {
                handle = handle_bad_irq;
        } else {
                struct irq_data *irq_data = &desc->irq_data;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
                /*
                 * With hierarchical domains we might run into a
                 * situation where the outermost chip is not yet set
                 * up, but the inner chips are there. Instead of
                 * bailing we install the handler, but obviously we
                 * cannot enable/startup the interrupt at this point.
                 */
                while (irq_data) {
                        if (irq_data->chip != &no_irq_chip)
                                break;
                        /*
                         * Bail out if the outer chip is not set up
                         * and the interrupt is supposed to be started
                         * right away.
                         */
                        if (WARN_ON(is_chained))
                                return;
                        /* Try the parent */
                        irq_data = irq_data->parent_data;
                }
#endif
                if (WARN_ON(!irq_data || irq_data->chip == &no_irq_chip))
                        return;
        }

        /* Uninstall? */
        if (handle == handle_bad_irq) {
                if (desc->irq_data.chip != &no_irq_chip)
                        mask_ack_irq(desc);
                irq_state_set_disabled(desc);
                if (is_chained)
                        desc->action = NULL;
                desc->depth = 1;
        }
        desc->handle_irq = handle;
        desc->name = name;

        if (handle != handle_bad_irq && is_chained) {
                unsigned int type = irqd_get_trigger_type(&desc->irq_data);

                /*
                 * We're about to start this interrupt immediately,
                 * hence the need to set the trigger configuration.
                 * But the .set_type callback may have overridden the
                 * flow handler, ignoring that we're dealing with a
                 * chained interrupt. Reset it immediately because we
                 * do know better.
                 */
                if (type != IRQ_TYPE_NONE) {
                        __irq_set_trigger(desc, type);
                        desc->handle_irq = handle;
                }

                irq_settings_set_noprobe(desc);
                irq_settings_set_norequest(desc);
                irq_settings_set_nothread(desc);
                desc->action = &chained_action;
                irq_activate_and_startup(desc, IRQ_RESEND);
        }
}

void
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
                  const char *name)
{
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);

        if (!desc)
                return;

        __irq_do_set_handler(desc, handle, is_chained, name);
        irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(__irq_set_handler);

void
irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle,
                                 void *data)
{
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);

        if (!desc)
                return;

        desc->irq_common_data.handler_data = data;
        __irq_do_set_handler(desc, handle, 1, NULL);

        irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_set_chained_handler_and_data);

void
irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
                              irq_flow_handler_t handle, const char *name)
{
        irq_set_chip(irq, chip);
        __irq_set_handler(irq, handle, 0, name);
}
EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name);

void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
{
        unsigned long flags, trigger, tmp;
        struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

        if (!desc)
                return;

        /*
         * Warn when a driver sets the no autoenable flag on an already
         * active interrupt.
         */
        WARN_ON_ONCE(!desc->depth && (set & _IRQ_NOAUTOEN));

        irq_settings_clr_and_set(desc, clr, set);

        trigger = irqd_get_trigger_type(&desc->irq_data);

        irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
                   IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
        if (irq_settings_has_no_balance_set(desc))
                irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
        if (irq_settings_is_per_cpu(desc))
                irqd_set(&desc->irq_data, IRQD_PER_CPU);
        if (irq_settings_can_move_pcntxt(desc))
                irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
        if (irq_settings_is_level(desc))
                irqd_set(&desc->irq_data, IRQD_LEVEL);

        tmp = irq_settings_get_trigger_mask(desc);
        if (tmp != IRQ_TYPE_NONE)
                trigger = tmp;

        irqd_set(&desc->irq_data, trigger);

        irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_modify_status);
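
/*
 * Illustrative sketch (not part of the original file): drivers normally
 * reach irq_modify_status() through the irq_set_status_flags() and
 * irq_clear_status_flags() wrappers, e.g. to keep a line disabled until
 * it is explicitly enabled:
 *
 *      irq_set_status_flags(irq, IRQ_NOAUTOEN);
 */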

#ifdef CONFIG_DEPRECATED_IRQ_CPU_ONOFFLINE
/**
 * irq_cpu_online - Invoke all irq_cpu_online functions.
 *
 * Iterate through all irqs and invoke the chip.irq_cpu_online()
 * for each.
 */
void irq_cpu_online(void)
{
        struct irq_desc *desc;
        struct irq_chip *chip;
        unsigned long flags;
        unsigned int irq;

        for_each_active_irq(irq) {
                desc = irq_to_desc(irq);
                if (!desc)
                        continue;

                raw_spin_lock_irqsave(&desc->lock, flags);

                chip = irq_data_get_irq_chip(&desc->irq_data);
                if (chip && chip->irq_cpu_online &&
                    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
                     !irqd_irq_disabled(&desc->irq_data)))
                        chip->irq_cpu_online(&desc->irq_data);

                raw_spin_unlock_irqrestore(&desc->lock, flags);
        }
}

/**
 * irq_cpu_offline - Invoke all irq_cpu_offline functions.
 *
 * Iterate through all irqs and invoke the chip.irq_cpu_offline()
 * for each.
 */
void irq_cpu_offline(void)
{
        struct irq_desc *desc;
        struct irq_chip *chip;
        unsigned long flags;
        unsigned int irq;

        for_each_active_irq(irq) {
                desc = irq_to_desc(irq);
                if (!desc)
                        continue;

                raw_spin_lock_irqsave(&desc->lock, flags);

                chip = irq_data_get_irq_chip(&desc->irq_data);
                if (chip && chip->irq_cpu_offline &&
                    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
                     !irqd_irq_disabled(&desc->irq_data)))
                        chip->irq_cpu_offline(&desc->irq_data);

                raw_spin_unlock_irqrestore(&desc->lock, flags);
        }
}
#endif

#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY

#ifdef CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS
/**
 * handle_fasteoi_ack_irq - irq handler for edge hierarchy
 *                          stacked on transparent controllers
 * @desc:       the interrupt description structure for this irq
 *
 * Like handle_fasteoi_irq(), but for use with hierarchy where
 * the irq_chip also needs to have its ->irq_ack() function
 * called.
 */
void handle_fasteoi_ack_irq(struct irq_desc *desc)
{
        struct irq_chip *chip = desc->irq_data.chip;

        raw_spin_lock(&desc->lock);

        if (!irq_may_run(desc))
                goto out;

        desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

        /*
         * If it's disabled or no action is available,
         * then mask it and get out of here:
         */
        if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
                desc->istate |= IRQS_PENDING;
                mask_irq(desc);
                goto out;
        }

        kstat_incr_irqs_this_cpu(desc);
        if (desc->istate & IRQS_ONESHOT)
                mask_irq(desc);

        /* Start handling the irq */
        desc->irq_data.chip->irq_ack(&desc->irq_data);

        handle_irq_event(desc);

        cond_unmask_eoi_irq(desc, chip);

        raw_spin_unlock(&desc->lock);
        return;
out:
        if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
                chip->irq_eoi(&desc->irq_data);
        raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_ack_irq);

/**
 * handle_fasteoi_mask_irq - irq handler for level hierarchy
 *                           stacked on transparent controllers
 * @desc:       the interrupt description structure for this irq
 *
 * Like handle_fasteoi_irq(), but for use with hierarchy where
 * the irq_chip also needs to have its ->irq_mask_ack() function
 * called.
 */
void handle_fasteoi_mask_irq(struct irq_desc *desc)
{
        struct irq_chip *chip = desc->irq_data.chip;

        raw_spin_lock(&desc->lock);
        mask_ack_irq(desc);

        if (!irq_may_run(desc))
                goto out;

        desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

        /*
         * If it's disabled or no action is available,
         * then mask it and get out of here:
         */
        if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
                desc->istate |= IRQS_PENDING;
                mask_irq(desc);
                goto out;
        }

        kstat_incr_irqs_this_cpu(desc);
        if (desc->istate & IRQS_ONESHOT)
                mask_irq(desc);

        handle_irq_event(desc);

        cond_unmask_eoi_irq(desc, chip);

        raw_spin_unlock(&desc->lock);
        return;
out:
        if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
                chip->irq_eoi(&desc->irq_data);
        raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_mask_irq);

#endif /* CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS */

/**
 * irq_chip_set_parent_state - set the state of a parent interrupt.
 *
 * @data:       Pointer to interrupt specific data
 * @which:      State to be restored (one of IRQCHIP_STATE_*)
 * @val:        Value corresponding to @which
 *
 * Conditional success: returns 0 if the underlying irqchip does not
 * implement it.
 */
int irq_chip_set_parent_state(struct irq_data *data,
                              enum irqchip_irq_state which,
                              bool val)
{
        data = data->parent_data;

        if (!data || !data->chip->irq_set_irqchip_state)
                return 0;

        return data->chip->irq_set_irqchip_state(data, which, val);
}
EXPORT_SYMBOL_GPL(irq_chip_set_parent_state);

/**
 * irq_chip_get_parent_state - get the state of a parent interrupt.
 *
 * @data:       Pointer to interrupt specific data
 * @which:      one of IRQCHIP_STATE_* the caller wants to know
 * @state:      a pointer to a boolean where the state is to be stored
 *
 * Conditional success: returns 0 if the underlying irqchip does not
 * implement it.
 */
int irq_chip_get_parent_state(struct irq_data *data,
                              enum irqchip_irq_state which,
                              bool *state)
{
        data = data->parent_data;

        if (!data || !data->chip->irq_get_irqchip_state)
                return 0;

        return data->chip->irq_get_irqchip_state(data, which, state);
}
EXPORT_SYMBOL_GPL(irq_chip_get_parent_state);

/**
 * irq_chip_enable_parent - Enable the parent interrupt (defaults to unmask if
 *                          NULL)
 * @data:       Pointer to interrupt specific data
 */
void irq_chip_enable_parent(struct irq_data *data)
{
        data = data->parent_data;
        if (data->chip->irq_enable)
                data->chip->irq_enable(data);
        else
                data->chip->irq_unmask(data);
}
EXPORT_SYMBOL_GPL(irq_chip_enable_parent);

/**
 * irq_chip_disable_parent - Disable the parent interrupt (defaults to mask if
 *                           NULL)
 * @data:       Pointer to interrupt specific data
 */
void irq_chip_disable_parent(struct irq_data *data)
{
        data = data->parent_data;
        if (data->chip->irq_disable)
                data->chip->irq_disable(data);
        else
                data->chip->irq_mask(data);
}
EXPORT_SYMBOL_GPL(irq_chip_disable_parent);

/**
 * irq_chip_ack_parent - Acknowledge the parent interrupt
 * @data:       Pointer to interrupt specific data
 */
void irq_chip_ack_parent(struct irq_data *data)
{
        data = data->parent_data;
        data->chip->irq_ack(data);
}
EXPORT_SYMBOL_GPL(irq_chip_ack_parent);

/**
 * irq_chip_mask_parent - Mask the parent interrupt
 * @data:       Pointer to interrupt specific data
 */
void irq_chip_mask_parent(struct irq_data *data)
{
        data = data->parent_data;
        data->chip->irq_mask(data);
}
EXPORT_SYMBOL_GPL(irq_chip_mask_parent);

/**
 * irq_chip_mask_ack_parent - Mask and acknowledge the parent interrupt
 * @data:       Pointer to interrupt specific data
 */
void irq_chip_mask_ack_parent(struct irq_data *data)
{
        data = data->parent_data;
        data->chip->irq_mask_ack(data);
}
EXPORT_SYMBOL_GPL(irq_chip_mask_ack_parent);

/**
 * irq_chip_unmask_parent - Unmask the parent interrupt
 * @data:       Pointer to interrupt specific data
 */
void irq_chip_unmask_parent(struct irq_data *data)
{
        data = data->parent_data;
        data->chip->irq_unmask(data);
}
EXPORT_SYMBOL_GPL(irq_chip_unmask_parent);

/**
 * irq_chip_eoi_parent - Invoke EOI on the parent interrupt
 * @data:       Pointer to interrupt specific data
 */
void irq_chip_eoi_parent(struct irq_data *data)
{
        data = data->parent_data;
        data->chip->irq_eoi(data);
}
EXPORT_SYMBOL_GPL(irq_chip_eoi_parent);
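
/*
 * Illustrative sketch (not part of the original file): a chip in a
 * hierarchical domain which adds no behaviour of its own simply
 * forwards the callbacks to its parent using the helpers above.
 * 'my_child_chip' is hypothetical:
 *
 *      static struct irq_chip my_child_chip = {
 *              .name             = "my-child",
 *              .irq_mask         = irq_chip_mask_parent,
 *              .irq_unmask       = irq_chip_unmask_parent,
 *              .irq_eoi          = irq_chip_eoi_parent,
 *              .irq_set_affinity = irq_chip_set_affinity_parent,
 *      };
 */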

/**
 * irq_chip_set_affinity_parent - Set affinity on the parent interrupt
 * @data:       Pointer to interrupt specific data
 * @dest:       The affinity mask to set
 * @force:      Flag to enforce setting (disable online checks)
 *
 * Conditional, as the underlying parent chip might not implement it.
 */
int irq_chip_set_affinity_parent(struct irq_data *data,
                                 const struct cpumask *dest, bool force)
{
        data = data->parent_data;
        if (data->chip->irq_set_affinity)
                return data->chip->irq_set_affinity(data, dest, force);

        return -ENOSYS;
}
EXPORT_SYMBOL_GPL(irq_chip_set_affinity_parent);

/**
 * irq_chip_set_type_parent - Set IRQ type on the parent interrupt
 * @data:       Pointer to interrupt specific data
 * @type:       IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 *
 * Conditional, as the underlying parent chip might not implement it.
 */
int irq_chip_set_type_parent(struct irq_data *data, unsigned int type)
{
        data = data->parent_data;

        if (data->chip->irq_set_type)
                return data->chip->irq_set_type(data, type);

        return -ENOSYS;
}
EXPORT_SYMBOL_GPL(irq_chip_set_type_parent);

/**
 * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware
 * @data:       Pointer to interrupt specific data
 *
 * Iterate through the domain hierarchy of the interrupt and check
 * whether a hw retrigger function exists. If yes, invoke it.
 */
int irq_chip_retrigger_hierarchy(struct irq_data *data)
{
        for (data = data->parent_data; data; data = data->parent_data)
                if (data->chip && data->chip->irq_retrigger)
                        return data->chip->irq_retrigger(data);

        return 0;
}
EXPORT_SYMBOL_GPL(irq_chip_retrigger_hierarchy);

/**
 * irq_chip_set_vcpu_affinity_parent - Set vcpu affinity on the parent interrupt
 * @data:       Pointer to interrupt specific data
 * @vcpu_info:  The vcpu affinity information
 */
int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, void *vcpu_info)
{
        data = data->parent_data;
        if (data->chip->irq_set_vcpu_affinity)
                return data->chip->irq_set_vcpu_affinity(data, vcpu_info);

        return -ENOSYS;
}
EXPORT_SYMBOL_GPL(irq_chip_set_vcpu_affinity_parent);

/**
 * irq_chip_set_wake_parent - Set/reset wake-up on the parent interrupt
 * @data:       Pointer to interrupt specific data
 * @on:         Whether to set or reset the wake-up capability of this irq
 *
 * Conditional, as the underlying parent chip might not implement it.
 */
int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on)
{
        data = data->parent_data;

        if (data->chip->flags & IRQCHIP_SKIP_SET_WAKE)
                return 0;

        if (data->chip->irq_set_wake)
                return data->chip->irq_set_wake(data, on);

        return -ENOSYS;
}
EXPORT_SYMBOL_GPL(irq_chip_set_wake_parent);

/**
 * irq_chip_request_resources_parent - Request resources on the parent interrupt
 * @data:       Pointer to interrupt specific data
 */
int irq_chip_request_resources_parent(struct irq_data *data)
{
        data = data->parent_data;

        if (data->chip->irq_request_resources)
                return data->chip->irq_request_resources(data);

        return -ENOSYS;
}
EXPORT_SYMBOL_GPL(irq_chip_request_resources_parent);

/**
 * irq_chip_release_resources_parent - Release resources on the parent interrupt
 * @data:       Pointer to interrupt specific data
 */
void irq_chip_release_resources_parent(struct irq_data *data)
{
        data = data->parent_data;
        if (data->chip->irq_release_resources)
                data->chip->irq_release_resources(data);
}
EXPORT_SYMBOL_GPL(irq_chip_release_resources_parent);
#endif

/**
 * irq_chip_compose_msi_msg - Compose msi message for an irq chip
 * @data:       Pointer to interrupt specific data
 * @msg:        Pointer to the MSI message
 *
 * For hierarchical domains we find the first chip in the hierarchy
 * which implements the irq_compose_msi_msg callback. For non-hierarchical
 * domains we use the top level chip.
 */
int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
        struct irq_data *pos;

        for (pos = NULL; !pos && data; data = irqd_get_parent_data(data)) {
                if (data->chip && data->chip->irq_compose_msi_msg)
                        pos = data;
        }

        if (!pos)
                return -ENOSYS;

        pos->chip->irq_compose_msi_msg(pos, msg);
        return 0;
}

/**
 * irq_chip_pm_get - Enable power for an IRQ chip
 * @data:       Pointer to interrupt specific data
 *
 * Enable the power to the IRQ chip referenced by the interrupt data
 * structure.
 */
int irq_chip_pm_get(struct irq_data *data)
{
        int retval;

        if (IS_ENABLED(CONFIG_PM) && data->chip->parent_device) {
                retval = pm_runtime_get_sync(data->chip->parent_device);
                if (retval < 0) {
                        pm_runtime_put_noidle(data->chip->parent_device);
                        return retval;
                }
        }

        return 0;
}

/**
 * irq_chip_pm_put - Disable power for an IRQ chip
 * @data:       Pointer to interrupt specific data
 *
 * Disable the power to the IRQ chip referenced by the interrupt data
 * structure. Note that power will only be disabled once this function
 * has been called for all IRQs that have called irq_chip_pm_get().
 */
int irq_chip_pm_put(struct irq_data *data)
{
        int retval = 0;

        if (IS_ENABLED(CONFIG_PM) && data->chip->parent_device)
                retval = pm_runtime_put(data->chip->parent_device);

        return (retval < 0) ? retval : 0;
}
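
/*
 * Note: irq_chip_pm_get() and irq_chip_pm_put() are called in balanced
 * pairs by the interrupt core around request/free of an interrupt.
 * Power to the chip's parent device is only dropped once every
 * successful irq_chip_pm_get() has been matched by an irq_chip_pm_put().
 */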