// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006 Thomas Gleixner
 *
 * This file contains driver APIs to the irq subsystem.
 */

#define pr_fmt(fmt) "genirq: " fmt

#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/sched/task.h>
#include <uapi/linux/sched/types.h>
#include <linux/task_work.h>

#include "internals.h"

#ifdef CONFIG_IRQ_FORCED_THREADING
__read_mostly bool force_irqthreads;
EXPORT_SYMBOL_GPL(force_irqthreads);

static int __init setup_forced_irqthreads(char *arg)
{
	force_irqthreads = true;
	return 0;
}
early_param("threadirqs", setup_forced_irqthreads);
#endif

static void __synchronize_hardirq(struct irq_desc *desc)
{
	bool inprogress;

	do {
		unsigned long flags;

		/*
		 * Wait until we're out of the critical section. This might
		 * give the wrong answer due to the lack of memory barriers.
		 */
		while (irqd_irq_inprogress(&desc->irq_data))
			cpu_relax();

		/* Ok, that indicated we're done: double-check carefully. */
		raw_spin_lock_irqsave(&desc->lock, flags);
		inprogress = irqd_irq_inprogress(&desc->irq_data);
		raw_spin_unlock_irqrestore(&desc->lock, flags);

		/* Oops, that failed? */
	} while (inprogress);
}

/**
 * synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
 * @irq: interrupt number to wait for
 *
 * This function waits for any pending hard IRQ handlers for this
 * interrupt to complete before returning. If you use this
 * function while holding a resource the IRQ handler may need you
 * will deadlock. It does not take associated threaded handlers
 * into account.
 *
 * Do not use this for shutdown scenarios where you must be sure
 * that all parts (hardirq and threaded handler) have completed.
 *
 * Returns: false if a threaded handler is active.
 *
 * This function may be called - with care - from IRQ context.
 */
bool synchronize_hardirq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		__synchronize_hardirq(desc);
		return !atomic_read(&desc->threads_active);
	}

	return true;
}
EXPORT_SYMBOL(synchronize_hardirq);

/**
 * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 * @irq: interrupt number to wait for
 *
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
void synchronize_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		__synchronize_hardirq(desc);
		/*
		 * We made sure that no hardirq handler is
		 * running. Now verify that no threaded handlers are
		 * active.
		 */
		wait_event(desc->wait_for_threads,
			   !atomic_read(&desc->threads_active));
	}
}
EXPORT_SYMBOL(synchronize_irq);

#ifdef CONFIG_SMP
cpumask_var_t irq_default_affinity;

static bool __irq_can_set_affinity(struct irq_desc *desc)
{
	if (!desc || !irqd_can_balance(&desc->irq_data) ||
	    !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
		return false;
	return true;
}

/**
 * irq_can_set_affinity - Check if the affinity of a given irq can be set
 * @irq:	Interrupt to check
 *
 */
int irq_can_set_affinity(unsigned int irq)
{
	return __irq_can_set_affinity(irq_to_desc(irq));
}

/**
 * irq_can_set_affinity_usr - Check if the affinity of an irq can be set from user space
 * @irq:	Interrupt to check
 *
 * Like irq_can_set_affinity() above, but additionally checks for the
 * AFFINITY_MANAGED flag.
 */
bool irq_can_set_affinity_usr(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return __irq_can_set_affinity(desc) &&
		!irqd_affinity_is_managed(&desc->irq_data);
}

/**
 * irq_set_thread_affinity - Notify irq threads to adjust affinity
 * @desc:	irq descriptor which has affinity changed
 *
 * We just set IRQTF_AFFINITY and delegate the affinity setting
 * to the interrupt thread itself. We can not call
 * set_cpus_allowed_ptr() here as we hold desc->lock and this
 * code can be called from hard interrupt context.
 */
void irq_set_thread_affinity(struct irq_desc *desc)
{
	struct irqaction *action;

	for_each_action_of_desc(desc, action)
		if (action->thread)
			set_bit(IRQTF_AFFINITY, &action->thread_flags);
}

static void irq_validate_effective_affinity(struct irq_data *data)
{
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	const struct cpumask *m = irq_data_get_effective_affinity_mask(data);
	struct irq_chip *chip = irq_data_get_irq_chip(data);

	if (!cpumask_empty(m))
		return;
	pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n",
		     chip->name, data->irq);
#endif
}

int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
			bool force)
{
	struct irq_desc *desc = irq_data_to_desc(data);
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	int ret;

	if (!chip || !chip->irq_set_affinity)
		return -EINVAL;

	ret = chip->irq_set_affinity(data, mask, force);
	switch (ret) {
	case IRQ_SET_MASK_OK:
	case IRQ_SET_MASK_OK_DONE:
		cpumask_copy(desc->irq_common_data.affinity, mask);
	case IRQ_SET_MASK_OK_NOCOPY:
		irq_validate_effective_affinity(data);
		irq_set_thread_affinity(desc);
		ret = 0;
	}

	return ret;
}

#ifdef CONFIG_GENERIC_PENDING_IRQ
static inline int irq_set_affinity_pending(struct irq_data *data,
					   const struct cpumask *dest)
{
	struct irq_desc *desc = irq_data_to_desc(data);

	irqd_set_move_pending(data);
	irq_copy_pending(desc, dest);
	return 0;
}
#else
static inline int irq_set_affinity_pending(struct irq_data *data,
					   const struct cpumask *dest)
{
	return -EBUSY;
}
#endif

static int irq_try_set_affinity(struct irq_data *data,
				const struct cpumask *dest, bool force)
{
	int ret = irq_do_set_affinity(data, dest, force);

	/*
	 * In case that the underlying vector management is busy and the
	 * architecture supports the generic pending mechanism then utilize
	 * this to avoid returning an error to user space.
	 */
	if (ret == -EBUSY && !force)
		ret = irq_set_affinity_pending(data, dest);
	return ret;
}

int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
			    bool force)
{
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	struct irq_desc *desc = irq_data_to_desc(data);
	int ret = 0;

	if (!chip || !chip->irq_set_affinity)
		return -EINVAL;

	if (irq_can_move_pcntxt(data) && !irqd_is_setaffinity_pending(data)) {
		ret = irq_try_set_affinity(data, mask, force);
	} else {
		irqd_set_move_pending(data);
		irq_copy_pending(desc, mask);
	}

	if (desc->affinity_notify) {
		kref_get(&desc->affinity_notify->kref);
		schedule_work(&desc->affinity_notify->work);
	}
	irqd_set(data, IRQD_AFFINITY_SET);

	return ret;
}

int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	if (!desc)
		return -EINVAL;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}

int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->affinity_hint = m;
	irq_put_desc_unlock(desc, flags);
	/* set the initial affinity to prevent every interrupt being on CPU0 */
	if (m)
		__irq_set_affinity(irq, m, false);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
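
/*
 * Illustrative sketch (not part of this file): a driver spreading its
 * per-queue vectors across CPUs could pair irq_set_affinity_hint() with
 * teardown. "nvec" and "vec_irq()" are hypothetical driver helpers used
 * only for this example.
 *
 *	for (i = 0; i < nvec; i++)
 *		irq_set_affinity_hint(vec_irq(i),
 *				      cpumask_of(i % num_online_cpus()));
 *
 * The hint must be cleared with irq_set_affinity_hint(irq, NULL) before
 * the interrupt is freed; __free_irq() warns if a stale hint is left
 * behind.
 */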
static void irq_affinity_notify(struct work_struct *work)
{
	struct irq_affinity_notify *notify =
		container_of(work, struct irq_affinity_notify, work);
	struct irq_desc *desc = irq_to_desc(notify->irq);
	cpumask_var_t cpumask;
	unsigned long flags;

	if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
		goto out;

	raw_spin_lock_irqsave(&desc->lock, flags);
	if (irq_move_pending(&desc->irq_data))
		irq_get_pending(cpumask, desc);
	else
		cpumask_copy(cpumask, desc->irq_common_data.affinity);
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	notify->notify(notify, cpumask);

	free_cpumask_var(cpumask);
out:
	kref_put(&notify->kref, notify->release);
}

/**
 * irq_set_affinity_notifier - control notification of IRQ affinity changes
 * @irq:	Interrupt for which to enable/disable notification
 * @notify:	Context for notification, or %NULL to disable
 *		notification. Function pointers must be initialised;
 *		the other fields will be initialised by this function.
 *
 * Must be called in process context. Notification may only be enabled
 * after the IRQ is allocated and must be disabled before the IRQ is
 * freed using free_irq().
 */
int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_affinity_notify *old_notify;
	unsigned long flags;

	/* The release function is promised process context */
	might_sleep();

	if (!desc || desc->istate & IRQS_NMI)
		return -EINVAL;

	/* Complete initialisation of *notify */
	if (notify) {
		notify->irq = irq;
		kref_init(&notify->kref);
		INIT_WORK(&notify->work, irq_affinity_notify);
	}

	raw_spin_lock_irqsave(&desc->lock, flags);
	old_notify = desc->affinity_notify;
	desc->affinity_notify = notify;
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	if (old_notify)
		kref_put(&old_notify->kref, old_notify->release);

	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
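
/*
 * Illustrative sketch (not part of this file): a minimal affinity
 * notifier. The callbacks below are hypothetical; only the .notify and
 * .release fields need to be set by the caller, the rest is filled in
 * by irq_set_affinity_notifier().
 *
 *	static void my_notify(struct irq_affinity_notify *notify,
 *			      const cpumask_t *mask)
 *	{
 *		// re-point per-CPU resources at the new affinity mask
 *	}
 *
 *	static void my_release(struct kref *ref)
 *	{
 *		// called once the last reference to the notifier is dropped
 *	}
 *
 *	static struct irq_affinity_notify my_affinity_notify = {
 *		.notify  = my_notify,
 *		.release = my_release,
 *	};
 *
 *	err = irq_set_affinity_notifier(irq, &my_affinity_notify);
 *	...
 *	irq_set_affinity_notifier(irq, NULL);	// before free_irq()
 */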

#ifndef CONFIG_AUTO_IRQ_AFFINITY
/*
 * Generic version of the affinity autoselector.
 */
int irq_setup_affinity(struct irq_desc *desc)
{
	struct cpumask *set = irq_default_affinity;
	int ret, node = irq_desc_get_node(desc);
	static DEFINE_RAW_SPINLOCK(mask_lock);
	static struct cpumask mask;

	/* Excludes PER_CPU and NO_BALANCE interrupts */
	if (!__irq_can_set_affinity(desc))
		return 0;

	raw_spin_lock(&mask_lock);
	/*
	 * Preserve the managed affinity setting and a userspace affinity
	 * setup, but make sure that one of the targets is online.
	 */
	if (irqd_affinity_is_managed(&desc->irq_data) ||
	    irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
		if (cpumask_intersects(desc->irq_common_data.affinity,
				       cpu_online_mask))
			set = desc->irq_common_data.affinity;
		else
			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
	}

	cpumask_and(&mask, cpu_online_mask, set);
	if (cpumask_empty(&mask))
		cpumask_copy(&mask, cpu_online_mask);

	if (node != NUMA_NO_NODE) {
		const struct cpumask *nodemask = cpumask_of_node(node);

		/* make sure at least one of the cpus in nodemask is online */
		if (cpumask_intersects(&mask, nodemask))
			cpumask_and(&mask, &mask, nodemask);
	}
	ret = irq_do_set_affinity(&desc->irq_data, &mask, false);
	raw_spin_unlock(&mask_lock);
	return ret;
}
#else
/* Wrapper for ALPHA specific affinity selector magic */
int irq_setup_affinity(struct irq_desc *desc)
{
	return irq_select_affinity(irq_desc_get_irq(desc));
}
#endif

/*
 * Called when a bogus affinity is set via /proc/irq
 */
int irq_select_affinity_usr(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = irq_setup_affinity(desc);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}
#endif

/**
 * irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
 * @irq: interrupt number to set affinity
 * @vcpu_info: vCPU specific data or pointer to a percpu array of vCPU
 *	       specific data for percpu_devid interrupts
 *
 * This function uses the vCPU specific data to set the vCPU
 * affinity for an irq. The vCPU specific data is passed from
 * outside, such as KVM. One example code path is as below:
 * KVM -> IOMMU -> irq_set_vcpu_affinity().
 */
int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
	struct irq_data *data;
	struct irq_chip *chip;
	int ret = -ENOSYS;

	if (!desc)
		return -EINVAL;

	data = irq_desc_get_irq_data(desc);
	do {
		chip = irq_data_get_irq_chip(data);
		if (chip && chip->irq_set_vcpu_affinity)
			break;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		data = data->parent_data;
#else
		data = NULL;
#endif
	} while (data);

	if (data)
		ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
	irq_put_desc_unlock(desc, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity);

void __disable_irq(struct irq_desc *desc)
{
	if (!desc->depth++)
		irq_disable(desc);
}

static int __disable_irq_nosync(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	__disable_irq(desc);
	irq_put_desc_busunlock(desc, flags);
	return 0;
}

/**
 * disable_irq_nosync - disable an irq without waiting
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Disables and Enables are
 * nested.
 * Unlike disable_irq(), this function does not ensure existing
 * instances of the IRQ handler have completed before returning.
 *
 * This function may be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
	__disable_irq_nosync(irq);
}
EXPORT_SYMBOL(disable_irq_nosync);

/**
 * disable_irq - disable an irq and wait for completion
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Enables and Disables are
 * nested.
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
	if (!__disable_irq_nosync(irq))
		synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);

/**
 * disable_hardirq - disables an irq and waits for hardirq completion
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Enables and Disables are
 * nested.
 * This function waits for any pending hard IRQ handlers for this
 * interrupt to complete before returning. If you use this function while
 * holding a resource the hard IRQ handler may need you will deadlock.
 *
 * When used to optimistically disable an interrupt from atomic context
 * the return value must be checked.
 *
 * Returns: false if a threaded handler is active.
 *
 * This function may be called - with care - from IRQ context.
 */
bool disable_hardirq(unsigned int irq)
{
	if (!__disable_irq_nosync(irq))
		return synchronize_hardirq(irq);

	return false;
}
EXPORT_SYMBOL_GPL(disable_hardirq);

/**
 * disable_nmi_nosync - disable an nmi without waiting
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Disables and enables are
 * nested.
 * The interrupt to disable must have been requested through request_nmi.
 * Unlike disable_nmi(), this function does not ensure existing
 * instances of the IRQ handler have completed before returning.
 */
void disable_nmi_nosync(unsigned int irq)
{
	disable_irq_nosync(irq);
}

void __enable_irq(struct irq_desc *desc)
{
	switch (desc->depth) {
	case 0:
 err_out:
		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n",
		     irq_desc_get_irq(desc));
		break;
	case 1: {
		if (desc->istate & IRQS_SUSPENDED)
			goto err_out;
		/* Prevent probing on this irq: */
		irq_settings_set_noprobe(desc);
		/*
		 * Call irq_startup() not irq_enable() here because the
		 * interrupt might be marked NOAUTOEN. So irq_startup()
		 * needs to be invoked when it gets enabled the first
		 * time. If it was already started up, then irq_startup()
		 * will invoke irq_enable() under the hood.
		 */
		irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE);
		break;
	}
	default:
		desc->depth--;
	}
}
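
/*
 * Illustrative sketch (not part of this file): disables and enables nest,
 * so every disable_irq() needs a matching enable_irq(). "my_irq" and
 * reprogram_device() are hypothetical and used only for this example.
 *
 *	disable_irq(my_irq);		// waits for running handlers
 *	reprogram_device();		// IRQ is quiesced here
 *	enable_irq(my_irq);		// matches the disable above
 *
 * From contexts where waiting is not possible, disable_irq_nosync()
 * skips the wait for handlers that are already running.
 */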

/**
 * enable_irq - enable handling of an irq
 * @irq: Interrupt to enable
 *
 * Undoes the effect of one call to disable_irq(). If this
 * matches the last disable, processing of interrupts on this
 * IRQ line is re-enabled.
 *
 * This function may be called from IRQ context only when
 * desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
 */
void enable_irq(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return;
	if (WARN(!desc->irq_data.chip,
		 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
		goto out;

	__enable_irq(desc);
out:
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL(enable_irq);

/**
 * enable_nmi - enable handling of an nmi
 * @irq: Interrupt to enable
 *
 * The interrupt to enable must have been requested through request_nmi.
 * Undoes the effect of one call to disable_nmi(). If this
 * matches the last disable, processing of interrupts on this
 * IRQ line is re-enabled.
 */
void enable_nmi(unsigned int irq)
{
	enable_irq(irq);
}

static int set_irq_wake_real(unsigned int irq, unsigned int on)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int ret = -ENXIO;

	if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
		return 0;

	if (desc->irq_data.chip->irq_set_wake)
		ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);

	return ret;
}

/**
 * irq_set_irq_wake - control irq power management wakeup
 * @irq: interrupt to control
 * @on: enable/disable power management wakeup
 *
 * Enable/disable power management wakeup mode, which is
 * disabled by default. Enables and disables must match,
 * just as they match for non-wakeup mode support.
 *
 * Wakeup mode lets this IRQ wake the system from sleep
 * states like "suspend to RAM".
 */
int irq_set_irq_wake(unsigned int irq, unsigned int on)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	/* Don't use NMIs as wake up interrupts please */
	if (desc->istate & IRQS_NMI) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* wakeup-capable irqs can be shared between drivers that
	 * don't need to have the same sleep mode behaviors.
	 */
	if (on) {
		if (desc->wake_depth++ == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 0;
			else
				irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	} else {
		if (desc->wake_depth == 0) {
			WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
		} else if (--desc->wake_depth == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 1;
			else
				irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	}

out_unlock:
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_wake);
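
/*
 * Illustrative sketch (not part of this file): the wake depth is
 * refcounted, so suspend/resume callbacks typically bracket it
 * symmetrically. "my_irq", my_suspend() and my_resume() are hypothetical.
 *
 *	static int my_suspend(struct device *dev)
 *	{
 *		if (device_may_wakeup(dev))
 *			irq_set_irq_wake(my_irq, 1);
 *		return 0;
 *	}
 *
 *	static int my_resume(struct device *dev)
 *	{
 *		if (device_may_wakeup(dev))
 *			irq_set_irq_wake(my_irq, 0);
 *		return 0;
 *	}
 */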

/*
 * Internal function that tells the architecture code whether a
 * particular irq has been exclusively allocated or is available
 * for driver use.
 */
int can_request_irq(unsigned int irq, unsigned long irqflags)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
	int canrequest = 0;

	if (!desc)
		return 0;

	if (irq_settings_can_request(desc)) {
		if (!desc->action ||
		    irqflags & desc->action->flags & IRQF_SHARED)
			canrequest = 1;
	}
	irq_put_desc_unlock(desc, flags);
	return canrequest;
}

int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
{
	struct irq_chip *chip = desc->irq_data.chip;
	int ret, unmask = 0;

	if (!chip || !chip->irq_set_type) {
		/*
		 * IRQF_TRIGGER_* but the PIC does not support multiple
		 * flow-types?
		 */
		pr_debug("No set_type function for IRQ %d (%s)\n",
			 irq_desc_get_irq(desc),
			 chip ? (chip->name ? : "unknown") : "unknown");
		return 0;
	}

	if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
		if (!irqd_irq_masked(&desc->irq_data))
			mask_irq(desc);
		if (!irqd_irq_disabled(&desc->irq_data))
			unmask = 1;
	}

	/* Mask all flags except trigger mode */
	flags &= IRQ_TYPE_SENSE_MASK;
	ret = chip->irq_set_type(&desc->irq_data, flags);

	switch (ret) {
	case IRQ_SET_MASK_OK:
	case IRQ_SET_MASK_OK_DONE:
		irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
		irqd_set(&desc->irq_data, flags);
		/* fall through */

	case IRQ_SET_MASK_OK_NOCOPY:
		flags = irqd_get_trigger_type(&desc->irq_data);
		irq_settings_set_trigger_mask(desc, flags);
		irqd_clear(&desc->irq_data, IRQD_LEVEL);
		irq_settings_clr_level(desc);
		if (flags & IRQ_TYPE_LEVEL_MASK) {
			irq_settings_set_level(desc);
			irqd_set(&desc->irq_data, IRQD_LEVEL);
		}

		ret = 0;
		break;
	default:
		pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n",
		       flags, irq_desc_get_irq(desc), chip->irq_set_type);
	}
	if (unmask)
		unmask_irq(desc);
	return ret;
}

#ifdef CONFIG_HARDIRQS_SW_RESEND
int irq_set_parent(int irq, int parent_irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	desc->parent_irq = parent_irq;

	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_parent);
#endif

/*
 * Default primary interrupt handler for threaded interrupts. Is
 * assigned as primary handler when request_threaded_irq is called
 * with handler == NULL. Useful for oneshot interrupts.
 */
static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
{
	return IRQ_WAKE_THREAD;
}

/*
 * Primary handler for nested threaded interrupts. Should never be
 * called.
 */
static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
{
	WARN(1, "Primary handler called for nested irq %d\n", irq);
	return IRQ_NONE;
}

static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
{
	WARN(1, "Secondary action handler called for irq %d\n", irq);
	return IRQ_NONE;
}

static int irq_wait_for_interrupt(struct irqaction *action)
{
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (kthread_should_stop()) {
			/* may need to run one last time */
			if (test_and_clear_bit(IRQTF_RUNTHREAD,
					       &action->thread_flags)) {
				__set_current_state(TASK_RUNNING);
				return 0;
			}
			__set_current_state(TASK_RUNNING);
			return -1;
		}

		if (test_and_clear_bit(IRQTF_RUNTHREAD,
				       &action->thread_flags)) {
			__set_current_state(TASK_RUNNING);
			return 0;
		}
		schedule();
	}
}

/*
 * Oneshot interrupts keep the irq line masked until the threaded
 * handler finished. unmask if the interrupt has not been disabled and
 * is marked MASKED.
 */
static void irq_finalize_oneshot(struct irq_desc *desc,
				 struct irqaction *action)
{
	if (!(desc->istate & IRQS_ONESHOT) ||
	    action->handler == irq_forced_secondary_handler)
		return;
again:
	chip_bus_lock(desc);
	raw_spin_lock_irq(&desc->lock);

	/*
	 * Implausible though it may be, we need to protect against
	 * the following scenario:
	 *
	 * The thread is done faster than the hard interrupt handler
	 * on the other CPU. If we unmask the irq line then the
	 * interrupt can come in again and masks the line, leaves due
	 * to IRQS_INPROGRESS and the irq line is masked forever.
	 *
	 * This also serializes the state of shared oneshot handlers
	 * versus "desc->threads_oneshot |= action->thread_mask;" in
	 * irq_wake_thread(). See the comment there which explains the
	 * serialization.
	 */
	if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
		raw_spin_unlock_irq(&desc->lock);
		chip_bus_sync_unlock(desc);
		cpu_relax();
		goto again;
	}

	/*
	 * Now check again, whether the thread should run. Otherwise
	 * we would clear the threads_oneshot bit of this thread which
	 * was just set.
	 */
	if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		goto out_unlock;

	desc->threads_oneshot &= ~action->thread_mask;

	if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data))
		unmask_threaded_irq(desc);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
	chip_bus_sync_unlock(desc);
}

#ifdef CONFIG_SMP
/*
 * Check whether we need to change the affinity of the interrupt thread.
 */
static void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
{
	cpumask_var_t mask;
	bool valid = true;

	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
		return;

	/*
	 * In case we are out of memory we set IRQTF_AFFINITY again and
	 * try again next time
	 */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		set_bit(IRQTF_AFFINITY, &action->thread_flags);
		return;
	}

	raw_spin_lock_irq(&desc->lock);
	/*
	 * This code is triggered unconditionally. Check the affinity
	 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
	 */
	if (cpumask_available(desc->irq_common_data.affinity)) {
		const struct cpumask *m;

		m = irq_data_get_effective_affinity_mask(&desc->irq_data);
		cpumask_copy(mask, m);
	} else {
		valid = false;
	}
	raw_spin_unlock_irq(&desc->lock);

	if (valid)
		set_cpus_allowed_ptr(current, mask);
	free_cpumask_var(mask);
}
#else
static inline void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
#endif

/*
 * Interrupts which are not explicitly requested as threaded
 * interrupts rely on the implicit bh/preempt disable of the hard irq
 * context. So we need to disable bh here to avoid deadlocks and other
 * side effects.
 */
static irqreturn_t
irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
{
	irqreturn_t ret;

	local_bh_disable();
	ret = action->thread_fn(action->irq, action->dev_id);
	if (ret == IRQ_HANDLED)
		atomic_inc(&desc->threads_handled);

	irq_finalize_oneshot(desc, action);
	local_bh_enable();
	return ret;
}

/*
 * Interrupts explicitly requested as threaded interrupts want to be
 * preemptible - many of them need to sleep and wait for slow busses to
 * complete.
 */
static irqreturn_t irq_thread_fn(struct irq_desc *desc,
				 struct irqaction *action)
{
	irqreturn_t ret;

	ret = action->thread_fn(action->irq, action->dev_id);
	if (ret == IRQ_HANDLED)
		atomic_inc(&desc->threads_handled);

	irq_finalize_oneshot(desc, action);
	return ret;
}

static void wake_threads_waitq(struct irq_desc *desc)
{
	if (atomic_dec_and_test(&desc->threads_active))
		wake_up(&desc->wait_for_threads);
}

static void irq_thread_dtor(struct callback_head *unused)
{
	struct task_struct *tsk = current;
	struct irq_desc *desc;
	struct irqaction *action;

	if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
		return;

	action = kthread_data(tsk);

	pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
	       tsk->comm, tsk->pid, action->irq);

	desc = irq_to_desc(action->irq);
	/*
	 * If IRQTF_RUNTHREAD is set, we need to decrement
	 * desc->threads_active and wake possible waiters.
	 */
	if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		wake_threads_waitq(desc);

	/* Prevent a stale desc->threads_oneshot */
	irq_finalize_oneshot(desc, action);
}

static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
{
	struct irqaction *secondary = action->secondary;

	if (WARN_ON_ONCE(!secondary))
		return;

	raw_spin_lock_irq(&desc->lock);
	__irq_wake_thread(desc, secondary);
	raw_spin_unlock_irq(&desc->lock);
}

/*
 * Interrupt handler thread
 */
static int irq_thread(void *data)
{
	struct callback_head on_exit_work;
	struct irqaction *action = data;
	struct irq_desc *desc = irq_to_desc(action->irq);
	irqreturn_t (*handler_fn)(struct irq_desc *desc,
				  struct irqaction *action);

	if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
					 &action->thread_flags))
		handler_fn = irq_forced_thread_fn;
	else
		handler_fn = irq_thread_fn;

	init_task_work(&on_exit_work, irq_thread_dtor);
	task_work_add(current, &on_exit_work, false);

	irq_thread_check_affinity(desc, action);

	while (!irq_wait_for_interrupt(action)) {
		irqreturn_t action_ret;

		irq_thread_check_affinity(desc, action);

		action_ret = handler_fn(desc, action);
		if (action_ret == IRQ_WAKE_THREAD)
			irq_wake_secondary(desc, action);

		wake_threads_waitq(desc);
	}

	/*
	 * This is the regular exit path. __free_irq() is stopping the
	 * thread via kthread_stop() after calling
	 * synchronize_hardirq(). So neither IRQTF_RUNTHREAD nor the
	 * oneshot mask bit can be set.
	 */
	task_work_cancel(current, irq_thread_dtor);
	return 0;
}

/**
 * irq_wake_thread - wake the irq thread for the action identified by dev_id
 * @irq:	Interrupt line
 * @dev_id:	Device identity for which the thread should be woken
 *
 */
void irq_wake_thread(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;

	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return;

	raw_spin_lock_irqsave(&desc->lock, flags);
	for_each_action_of_desc(desc, action) {
		if (action->dev_id == dev_id) {
			if (action->thread)
				__irq_wake_thread(desc, action);
			break;
		}
	}
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL_GPL(irq_wake_thread);

static int irq_setup_forced_threading(struct irqaction *new)
{
	if (!force_irqthreads)
		return 0;
	if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
		return 0;

	/*
	 * No further action required for interrupts which are requested as
	 * threaded interrupts already
	 */
	if (new->handler == irq_default_primary_handler)
		return 0;

	new->flags |= IRQF_ONESHOT;

	/*
	 * Handle the case where we have a real primary handler and a
	 * thread handler. We force thread them as well by creating a
	 * secondary action.
	 */
	if (new->handler && new->thread_fn) {
		/* Allocate the secondary action */
		new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
		if (!new->secondary)
			return -ENOMEM;
		new->secondary->handler = irq_forced_secondary_handler;
		new->secondary->thread_fn = new->thread_fn;
		new->secondary->dev_id = new->dev_id;
		new->secondary->irq = new->irq;
		new->secondary->name = new->name;
	}
	/* Deal with the primary handler */
	set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
	new->thread_fn = new->handler;
	new->handler = irq_default_primary_handler;
	return 0;
}

static int irq_request_resources(struct irq_desc *desc)
{
	struct irq_data *d = &desc->irq_data;
	struct irq_chip *c = d->chip;

	return c->irq_request_resources ? c->irq_request_resources(d) : 0;
}

static void irq_release_resources(struct irq_desc *desc)
{
	struct irq_data *d = &desc->irq_data;
	struct irq_chip *c = d->chip;

	if (c->irq_release_resources)
		c->irq_release_resources(d);
}

static bool irq_supports_nmi(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);

#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
	/* Only IRQs directly managed by the root irqchip can be set as NMI */
	if (d->parent_data)
		return false;
#endif
	/* Don't support NMIs for chips behind a slow bus */
	if (d->chip->irq_bus_lock || d->chip->irq_bus_sync_unlock)
		return false;

	return d->chip->flags & IRQCHIP_SUPPORTS_NMI;
}

static int irq_nmi_setup(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	struct irq_chip *c = d->chip;

	return c->irq_nmi_setup ? c->irq_nmi_setup(d) : -EINVAL;
}

static void irq_nmi_teardown(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	struct irq_chip *c = d->chip;

	if (c->irq_nmi_teardown)
		c->irq_nmi_teardown(d);
}

static int
setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
{
	struct task_struct *t;
	struct sched_param param = {
		.sched_priority = MAX_USER_RT_PRIO/2,
	};

	if (!secondary) {
		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
				   new->name);
	} else {
		t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq,
				   new->name);
		param.sched_priority -= 1;
	}

	if (IS_ERR(t))
		return PTR_ERR(t);

	sched_setscheduler_nocheck(t, SCHED_FIFO, &param);

	/*
	 * We keep the reference to the task struct even if
	 * the thread dies to avoid that the interrupt code
	 * references an already freed task_struct.
	 */
	get_task_struct(t);
	new->thread = t;
	/*
	 * Tell the thread to set its affinity. This is
	 * important for shared interrupt handlers as we do
	 * not invoke setup_affinity() for the secondary
	 * handlers as everything is already set up. Even for
	 * interrupts marked with IRQF_NO_BALANCE this is
	 * correct as we want the thread to move to the cpu(s)
	 * on which the requesting code placed the interrupt.
	 */
	set_bit(IRQTF_AFFINITY, &new->thread_flags);
	return 0;
}

/*
 * Internal function to register an irqaction - typically used to
 * allocate special interrupts that are part of the architecture.
 *
 * Locking rules:
 *
 * desc->request_mutex	Provides serialization against a concurrent free_irq()
 *   chip_bus_lock	Provides serialization for slow bus operations
 *     desc->lock	Provides serialization against hard interrupts
 *
 * chip_bus_lock and desc->lock are sufficient for all other management and
 * interrupt related functions. desc->request_mutex solely serializes
 * request/free_irq().
 */
static int
__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
{
	struct irqaction *old, **old_ptr;
	unsigned long flags, thread_mask = 0;
	int ret, nested, shared = 0;

	if (!desc)
		return -EINVAL;

	if (desc->irq_data.chip == &no_irq_chip)
		return -ENOSYS;
	if (!try_module_get(desc->owner))
		return -ENODEV;

	new->irq = irq;

	/*
	 * If the trigger type is not specified by the caller,
	 * then use the default for this interrupt.
	 */
	if (!(new->flags & IRQF_TRIGGER_MASK))
		new->flags |= irqd_get_trigger_type(&desc->irq_data);

	/*
	 * Check whether the interrupt nests into another interrupt
	 * thread.
	 */
	nested = irq_settings_is_nested_thread(desc);
	if (nested) {
		if (!new->thread_fn) {
			ret = -EINVAL;
			goto out_mput;
		}
		/*
		 * Replace the primary handler which was provided from
		 * the driver for non nested interrupt handling by the
		 * dummy function which warns when called.
		 */
		new->handler = irq_nested_primary_handler;
	} else {
		if (irq_settings_can_thread(desc)) {
			ret = irq_setup_forced_threading(new);
			if (ret)
				goto out_mput;
		}
	}

	/*
	 * Create a handler thread when a thread function is supplied
	 * and the interrupt does not nest into another interrupt
	 * thread.
	 */
	if (new->thread_fn && !nested) {
		ret = setup_irq_thread(new, irq, false);
		if (ret)
			goto out_mput;
		if (new->secondary) {
			ret = setup_irq_thread(new->secondary, irq, true);
			if (ret)
				goto out_thread;
		}
	}

	/*
	 * Drivers are often written to work w/o knowledge about the
	 * underlying irq chip implementation, so a request for a
	 * threaded irq without a primary hard irq context handler
	 * requires the ONESHOT flag to be set. Some irq chips like
	 * MSI based interrupts are per se one shot safe. Check the
	 * chip flags, so we can avoid the unmask dance at the end of
	 * the threaded handler for those.
	 */
	if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
		new->flags &= ~IRQF_ONESHOT;

	/*
	 * Protects against a concurrent __free_irq() call which might wait
	 * for synchronize_hardirq() to complete without holding the optional
	 * chip bus lock and desc->lock. Also protects against handing out
	 * a recycled oneshot thread_mask bit while it's still in use by
	 * its previous owner.
	 */
	mutex_lock(&desc->request_mutex);

	/*
	 * Acquire bus lock as the irq_request_resources() callback below
	 * might rely on the serialization or the magic power management
	 * functions which are abusing the irq_bus_lock() callback,
	 */
	chip_bus_lock(desc);

	/* First installed action requests resources. */
	if (!desc->action) {
		ret = irq_request_resources(desc);
		if (ret) {
			pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
			       new->name, irq, desc->irq_data.chip->name);
			goto out_bus_unlock;
		}
	}

	/*
	 * The following block of code has to be executed atomically
	 * protected against a concurrent interrupt and any of the other
	 * management calls which are not serialized via
	 * desc->request_mutex or the optional bus lock.
	 */
	raw_spin_lock_irqsave(&desc->lock, flags);
	old_ptr = &desc->action;
	old = *old_ptr;
	if (old) {
		/*
		 * Can't share interrupts unless both agree to and are
		 * the same type (level, edge, polarity). So both flag
		 * fields must have IRQF_SHARED set and the bits which
		 * set the trigger type must match. Also all must
		 * agree on ONESHOT.
		 * Interrupt lines used for NMIs cannot be shared.
		 */
		unsigned int oldtype;

		if (desc->istate & IRQS_NMI) {
			pr_err("Invalid attempt to share NMI for %s (irq %d) on irqchip %s.\n",
			       new->name, irq, desc->irq_data.chip->name);
			ret = -EINVAL;
			goto out_unlock;
		}

		/*
		 * If nobody did set the configuration before, inherit
		 * the one provided by the requester.
		 */
		if (irqd_trigger_type_was_set(&desc->irq_data)) {
			oldtype = irqd_get_trigger_type(&desc->irq_data);
		} else {
			oldtype = new->flags & IRQF_TRIGGER_MASK;
			irqd_set_trigger_type(&desc->irq_data, oldtype);
		}

		if (!((old->flags & new->flags) & IRQF_SHARED) ||
		    (oldtype != (new->flags & IRQF_TRIGGER_MASK)) ||
		    ((old->flags ^ new->flags) & IRQF_ONESHOT))
			goto mismatch;

		/* All handlers must agree on per-cpuness */
		if ((old->flags & IRQF_PERCPU) !=
		    (new->flags & IRQF_PERCPU))
			goto mismatch;

		/* add new interrupt at end of irq queue */
		do {
			/*
			 * Or all existing action->thread_mask bits,
			 * so we can find the next zero bit for this
			 * new action.
			 */
			thread_mask |= old->thread_mask;
			old_ptr = &old->next;
			old = *old_ptr;
		} while (old);
		shared = 1;
	}

	/*
	 * Setup the thread mask for this irqaction for ONESHOT. For
	 * !ONESHOT irqs the thread mask is 0 so we can avoid a
	 * conditional in irq_wake_thread().
	 */
	if (new->flags & IRQF_ONESHOT) {
		/*
		 * Unlikely to have 32 resp 64 irqs sharing one line,
		 * but who knows.
		 */
		if (thread_mask == ~0UL) {
			ret = -EBUSY;
			goto out_unlock;
		}
		/*
		 * The thread_mask for the action is or'ed to
		 * desc->thread_active to indicate that the
		 * IRQF_ONESHOT thread handler has been woken, but not
		 * yet finished. The bit is cleared when a thread
		 * completes. When all threads of a shared interrupt
		 * line have completed desc->threads_active becomes
		 * zero and the interrupt line is unmasked. See
		 * handle.c:irq_wake_thread() for further information.
		 *
		 * If no thread is woken by primary (hard irq context)
		 * interrupt handlers, then desc->threads_active is
		 * also checked for zero to unmask the irq line in the
		 * affected hard irq flow handlers
		 * (handle_[fasteoi|level]_irq).
		 *
		 * The new action gets the first zero bit of
		 * thread_mask assigned. See the loop above which or's
		 * all existing action->thread_mask bits.
		 */
		new->thread_mask = 1UL << ffz(thread_mask);

	} else if (new->handler == irq_default_primary_handler &&
		   !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
		/*
		 * The interrupt was requested with handler = NULL, so
		 * we use the default primary handler for it. But it
		 * does not have the oneshot flag set. In combination
		 * with level interrupts this is deadly, because the
		 * default primary handler just wakes the thread, then
		 * the irq line is reenabled, but the device still
		 * has the level irq asserted. Rinse and repeat....
		 *
		 * While this works for edge type interrupts, we play
		 * it safe and reject unconditionally because we can't
		 * say for sure which type this interrupt really
		 * has. The type flags are unreliable as the
		 * underlying chip implementation can override them.
		 */
		pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
		       irq);
		ret = -EINVAL;
		goto out_unlock;
	}

	if (!shared) {
		init_waitqueue_head(&desc->wait_for_threads);

		/* Setup the type (level, edge polarity) if configured: */
		if (new->flags & IRQF_TRIGGER_MASK) {
			ret = __irq_set_trigger(desc,
						new->flags & IRQF_TRIGGER_MASK);

			if (ret)
				goto out_unlock;
		}

		/*
		 * Activate the interrupt. That activation must happen
		 * independently of IRQ_NOAUTOEN. request_irq() can fail
		 * and the callers are supposed to handle
		 * that. enable_irq() of an interrupt requested with
		 * IRQ_NOAUTOEN is not supposed to fail. The activation
		 * keeps it in shutdown mode, it merely associates
		 * resources if necessary and if that's not possible it
		 * fails. Interrupts which are in managed shutdown mode
		 * will simply ignore that activation request.
		 */
		ret = irq_activate(desc);
		if (ret)
			goto out_unlock;

		desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
				  IRQS_ONESHOT | IRQS_WAITING);
		irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

		if (new->flags & IRQF_PERCPU) {
			irqd_set(&desc->irq_data, IRQD_PER_CPU);
			irq_settings_set_per_cpu(desc);
		}

		if (new->flags & IRQF_ONESHOT)
			desc->istate |= IRQS_ONESHOT;

		/* Exclude IRQ from balancing if requested */
		if (new->flags & IRQF_NOBALANCING) {
			irq_settings_set_no_balancing(desc);
			irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
		}

		if (irq_settings_can_autoenable(desc)) {
			irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
		} else {
			/*
			 * Shared interrupts do not go well with disabling
			 * auto enable. The sharing interrupt might request
			 * it while it's still disabled and then wait for
			 * interrupts forever.
			 */
			WARN_ON_ONCE(new->flags & IRQF_SHARED);
			/* Undo nested disables: */
			desc->depth = 1;
		}

	} else if (new->flags & IRQF_TRIGGER_MASK) {
		unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
		unsigned int omsk = irqd_get_trigger_type(&desc->irq_data);

		if (nmsk != omsk)
			/* hope the handler works with current trigger mode */
			pr_warn("irq %d uses trigger mode %u; requested %u\n",
				irq, omsk, nmsk);
	}

	*old_ptr = new;

	irq_pm_install_action(desc, new);

	/* Reset broken irq detection when installing new handler */
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;

	/*
	 * Check whether we disabled the irq via the spurious handler
	 * before. Reenable it and give it another chance.
	 */
	if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
		desc->istate &= ~IRQS_SPURIOUS_DISABLED;
		__enable_irq(desc);
	}

	raw_spin_unlock_irqrestore(&desc->lock, flags);
	chip_bus_sync_unlock(desc);
	mutex_unlock(&desc->request_mutex);

	irq_setup_timings(desc, new);

	/*
	 * Strictly no need to wake it up, but hung_task complains
	 * when no hard interrupt wakes the thread up.
	 */
	if (new->thread)
		wake_up_process(new->thread);
	if (new->secondary)
		wake_up_process(new->secondary->thread);

	register_irq_proc(irq, desc);
	new->dir = NULL;
	register_handler_proc(irq, new);
	return 0;

mismatch:
	if (!(new->flags & IRQF_PROBE_SHARED)) {
		pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
		       irq, new->flags, new->name, old->flags, old->name);
#ifdef CONFIG_DEBUG_SHIRQ
		dump_stack();
#endif
	}
	ret = -EBUSY;

out_unlock:
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	if (!desc->action)
		irq_release_resources(desc);
out_bus_unlock:
	chip_bus_sync_unlock(desc);
	mutex_unlock(&desc->request_mutex);

out_thread:
	if (new->thread) {
		struct task_struct *t = new->thread;

		new->thread = NULL;
		kthread_stop(t);
		put_task_struct(t);
	}
	if (new->secondary && new->secondary->thread) {
		struct task_struct *t = new->secondary->thread;

		new->secondary->thread = NULL;
		kthread_stop(t);
		put_task_struct(t);
	}
out_mput:
	module_put(desc->owner);
	return ret;
}

/**
 * setup_irq - setup an interrupt
 * @irq: Interrupt line to setup
 * @act: irqaction for the interrupt
 *
 * Used to statically setup interrupts in the early boot process.
 */
int setup_irq(unsigned int irq, struct irqaction *act)
{
	int retval;
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return -EINVAL;

	retval = irq_chip_pm_get(&desc->irq_data);
	if (retval < 0)
		return retval;

	retval = __setup_irq(irq, desc, act);

	if (retval)
		irq_chip_pm_put(&desc->irq_data);

	return retval;
}
EXPORT_SYMBOL_GPL(setup_irq);

/*
 * Internal function to unregister an irqaction - used to free
 * regular and special interrupts that are part of the architecture.
 */
static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
{
	unsigned irq = desc->irq_data.irq;
	struct irqaction *action, **action_ptr;
	unsigned long flags;

	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	mutex_lock(&desc->request_mutex);
	chip_bus_lock(desc);
	raw_spin_lock_irqsave(&desc->lock, flags);

	/*
	 * There can be multiple actions per IRQ descriptor, find the right
	 * one based on the dev_id:
	 */
	action_ptr = &desc->action;
	for (;;) {
		action = *action_ptr;

		if (!action) {
			WARN(1, "Trying to free already-free IRQ %d\n", irq);
			raw_spin_unlock_irqrestore(&desc->lock, flags);
			chip_bus_sync_unlock(desc);
			mutex_unlock(&desc->request_mutex);
			return NULL;
		}

		if (action->dev_id == dev_id)
			break;
		action_ptr = &action->next;
	}

	/* Found it - now remove it from the list of entries: */
	*action_ptr = action->next;

	irq_pm_remove_action(desc, action);

	/* If this was the last handler, shut down the IRQ line: */
	if (!desc->action) {
		irq_settings_clr_disable_unlazy(desc);
		irq_shutdown(desc);
	}

#ifdef CONFIG_SMP
	/* make sure affinity_hint is cleaned up */
	if (WARN_ON_ONCE(desc->affinity_hint))
		desc->affinity_hint = NULL;
#endif

	raw_spin_unlock_irqrestore(&desc->lock, flags);
	/*
	 * Drop bus_lock here so the changes which were done in the chip
	 * callbacks above are synced out to the irq chips which hang
	 * behind a slow bus (I2C, SPI) before calling synchronize_hardirq().
	 *
	 * Aside of that the bus_lock can also be taken from the threaded
	 * handler in irq_finalize_oneshot() which results in a deadlock
	 * because kthread_stop() would wait forever for the thread to
	 * complete, which is blocked on the bus lock.
	 *
	 * The still held desc->request_mutex() protects against a
	 * concurrent request_irq() of this irq so the release of resources
	 * and timing data is properly serialized.
	 */
	chip_bus_sync_unlock(desc);

	unregister_handler_proc(irq, action);

	/* Make sure it's not being used on another CPU: */
	synchronize_hardirq(irq);

#ifdef CONFIG_DEBUG_SHIRQ
	/*
	 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
	 * event to happen even now it's being freed, so let's make sure that
	 * is so by doing an extra call to the handler ....
	 *
	 * ( We do this after actually deregistering it, to make sure that a
	 *   'real' IRQ doesn't run in parallel with our fake. )
	 */
	if (action->flags & IRQF_SHARED) {
		local_irq_save(flags);
		action->handler(irq, dev_id);
		local_irq_restore(flags);
	}
#endif

	/*
	 * The action has already been removed above, but the thread writes
	 * its oneshot mask bit when it completes. Though request_mutex is
	 * held across this which prevents __setup_irq() from handing out
	 * the same bit to a newly requested action.
	 */
	if (action->thread) {
		kthread_stop(action->thread);
		put_task_struct(action->thread);
		if (action->secondary && action->secondary->thread) {
			kthread_stop(action->secondary->thread);
			put_task_struct(action->secondary->thread);
		}
	}

	/* Last action releases resources */
	if (!desc->action) {
		/*
		 * Reacquire bus lock as irq_release_resources() might
		 * require it to deallocate resources over the slow bus.
		 */
		chip_bus_lock(desc);
		irq_release_resources(desc);
		chip_bus_sync_unlock(desc);
		irq_remove_timings(desc);
	}

	mutex_unlock(&desc->request_mutex);

	irq_chip_pm_put(&desc->irq_data);
	module_put(desc->owner);
	kfree(action->secondary);
	return action;
}

/**
 * remove_irq - free an interrupt
 * @irq: Interrupt line to free
 * @act: irqaction for the interrupt
 *
 * Used to remove interrupts statically setup by the early boot process.
 */
void remove_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		__free_irq(desc, act->dev_id);
}
EXPORT_SYMBOL_GPL(remove_irq);

/**
 * free_irq - free an interrupt allocated with request_irq
 * @irq: Interrupt line to free
 * @dev_id: Device identity to free
 *
 * Remove an interrupt handler. The handler is removed and if the
 * interrupt line is no longer in use by any driver it is disabled.
 * On a shared IRQ the caller must ensure the interrupt is disabled
 * on the card it drives before calling this function. The function
 * does not return until any executing interrupts for this IRQ
 * have completed.
 *
 * This function must not be called from interrupt context.
 *
 * Returns the devname argument passed to request_irq.
 */
const void *free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	const char *devname;

	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return NULL;

#ifdef CONFIG_SMP
	if (WARN_ON(desc->affinity_notify))
		desc->affinity_notify = NULL;
#endif

	action = __free_irq(desc, dev_id);

	if (!action)
		return NULL;

	devname = action->name;
	kfree(action);
	return devname;
}
EXPORT_SYMBOL(free_irq);

/* This function must be called with desc->lock held */
static const void *__cleanup_nmi(unsigned int irq, struct irq_desc *desc)
{
	const char *devname = NULL;

	desc->istate &= ~IRQS_NMI;

	if (!WARN_ON(desc->action == NULL)) {
		irq_pm_remove_action(desc, desc->action);
		devname = desc->action->name;
		unregister_handler_proc(irq, desc->action);

		kfree(desc->action);
		desc->action = NULL;
	}

	irq_settings_clr_disable_unlazy(desc);
	irq_shutdown(desc);

	irq_release_resources(desc);

	irq_chip_pm_put(&desc->irq_data);
	module_put(desc->owner);

	return devname;
}

const void *free_nmi(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	const void *devname;

	if (!desc || WARN_ON(!(desc->istate & IRQS_NMI)))
		return NULL;

	if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return NULL;

	/* NMI still enabled */
	if (WARN_ON(desc->depth == 0))
		disable_nmi_nosync(irq);

	raw_spin_lock_irqsave(&desc->lock, flags);

	irq_nmi_teardown(desc);
	devname = __cleanup_nmi(irq, desc);

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	return devname;
}

/**
 * request_threaded_irq - allocate an interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 *	     Primary handler for threaded interrupts.
 *	     If NULL and thread_fn != NULL the default
 *	     primary handler is installed.
 * @thread_fn: Function called from the irq handler thread.
 *	       If NULL, no irq thread is created.
 * @irqflags: Interrupt type flags
 * @devname: An ascii name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling. From the point this
 * call is made your handler function may be invoked. Since
 * your handler function must clear any interrupt the board
 * raises, you must take care both to initialise your hardware
 * and to set up the interrupt handler in the right order.
 *
 * If you want to set up a threaded irq handler for your device
 * then you need to supply @handler and @thread_fn. @handler is
 * still called in hard interrupt context and has to check
 * whether the interrupt originates from the device. If yes it
 * needs to disable the interrupt on the device and return
 * IRQ_WAKE_THREAD which will wake up the handler thread and run
 * @thread_fn. This split handler design is necessary to support
 * shared interrupts.
 *
 * Dev_id must be globally unique. Normally the address of the
 * device data structure is used as the cookie. Since the handler
 * receives this value it makes sense to use it.
1923 * 1924 * If your interrupt is shared you must pass a non NULL dev_id 1925 * as this is required when freeing the interrupt. 1926 * 1927 * Flags: 1928 * 1929 * IRQF_SHARED Interrupt is shared 1930 * IRQF_TRIGGER_* Specify active edge(s) or level 1931 * 1932 */ 1933 int request_threaded_irq(unsigned int irq, irq_handler_t handler, 1934 irq_handler_t thread_fn, unsigned long irqflags, 1935 const char *devname, void *dev_id) 1936 { 1937 struct irqaction *action; 1938 struct irq_desc *desc; 1939 int retval; 1940 1941 if (irq == IRQ_NOTCONNECTED) 1942 return -ENOTCONN; 1943 1944 /* 1945 * Sanity-check: shared interrupts must pass in a real dev-ID, 1946 * otherwise we'll have trouble later trying to figure out 1947 * which interrupt is which (messes up the interrupt freeing 1948 * logic etc). 1949 * 1950 * Also IRQF_COND_SUSPEND only makes sense for shared interrupts and 1951 * it cannot be set along with IRQF_NO_SUSPEND. 1952 */ 1953 if (((irqflags & IRQF_SHARED) && !dev_id) || 1954 (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) || 1955 ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND))) 1956 return -EINVAL; 1957 1958 desc = irq_to_desc(irq); 1959 if (!desc) 1960 return -EINVAL; 1961 1962 if (!irq_settings_can_request(desc) || 1963 WARN_ON(irq_settings_is_per_cpu_devid(desc))) 1964 return -EINVAL; 1965 1966 if (!handler) { 1967 if (!thread_fn) 1968 return -EINVAL; 1969 handler = irq_default_primary_handler; 1970 } 1971 1972 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL); 1973 if (!action) 1974 return -ENOMEM; 1975 1976 action->handler = handler; 1977 action->thread_fn = thread_fn; 1978 action->flags = irqflags; 1979 action->name = devname; 1980 action->dev_id = dev_id; 1981 1982 retval = irq_chip_pm_get(&desc->irq_data); 1983 if (retval < 0) { 1984 kfree(action); 1985 return retval; 1986 } 1987 1988 retval = __setup_irq(irq, desc, action); 1989 1990 if (retval) { 1991 irq_chip_pm_put(&desc->irq_data); 1992 kfree(action->secondary); 1993 kfree(action); 1994 } 1995 1996 #ifdef CONFIG_DEBUG_SHIRQ_FIXME 1997 if (!retval && (irqflags & IRQF_SHARED)) { 1998 /* 1999 * It's a shared IRQ -- the driver ought to be prepared for it 2000 * to happen immediately, so let's make sure.... 2001 * We disable the irq to make sure that a 'real' IRQ doesn't 2002 * run in parallel with our fake. 2003 */ 2004 unsigned long flags; 2005 2006 disable_irq(irq); 2007 local_irq_save(flags); 2008 2009 handler(irq, dev_id); 2010 2011 local_irq_restore(flags); 2012 enable_irq(irq); 2013 } 2014 #endif 2015 return retval; 2016 } 2017 EXPORT_SYMBOL(request_threaded_irq); 2018 2019 /** 2020 * request_any_context_irq - allocate an interrupt line 2021 * @irq: Interrupt line to allocate 2022 * @handler: Function to be called when the IRQ occurs. 2023 * Threaded handler for threaded interrupts. 2024 * @flags: Interrupt type flags 2025 * @name: An ascii name for the claiming device 2026 * @dev_id: A cookie passed back to the handler function 2027 * 2028 * This call allocates interrupt resources and enables the 2029 * interrupt line and IRQ handling. It selects either a 2030 * hardirq or threaded handling method depending on the 2031 * context. 2032 * 2033 * On failure, it returns a negative value. On success, 2034 * it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED. 
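 *
 * An illustrative sketch only (the bar_* names are hypothetical):
 *
 *	ret = request_any_context_irq(bar->irq, bar_interrupt, 0, "bar", bar);
 *	if (ret < 0)
 *		return ret;
 *	bar->threaded = (ret == IRQC_IS_NESTED);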
2035 */ 2036 int request_any_context_irq(unsigned int irq, irq_handler_t handler, 2037 unsigned long flags, const char *name, void *dev_id) 2038 { 2039 struct irq_desc *desc; 2040 int ret; 2041 2042 if (irq == IRQ_NOTCONNECTED) 2043 return -ENOTCONN; 2044 2045 desc = irq_to_desc(irq); 2046 if (!desc) 2047 return -EINVAL; 2048 2049 if (irq_settings_is_nested_thread(desc)) { 2050 ret = request_threaded_irq(irq, NULL, handler, 2051 flags, name, dev_id); 2052 return !ret ? IRQC_IS_NESTED : ret; 2053 } 2054 2055 ret = request_irq(irq, handler, flags, name, dev_id); 2056 return !ret ? IRQC_IS_HARDIRQ : ret; 2057 } 2058 EXPORT_SYMBOL_GPL(request_any_context_irq); 2059 2060 /** 2061 * request_nmi - allocate an interrupt line for NMI delivery 2062 * @irq: Interrupt line to allocate 2063 * @handler: Function to be called when the NMI occurs. 2064 * Must not be NULL; NMI handling cannot be threaded. 2065 * @irqflags: Interrupt type flags 2066 * @name: An ascii name for the claiming device 2067 * @dev_id: A cookie passed back to the handler function 2068 * 2069 * This call allocates interrupt resources and enables the 2070 * interrupt line and IRQ handling. It sets up the IRQ line 2071 * to be handled as an NMI. 2072 * 2073 * An interrupt line delivering NMIs cannot be shared and IRQ handling 2074 * cannot be threaded. 2075 * 2076 * Interrupt lines requested for NMI delivery must produce per-CPU 2077 * interrupts and must have auto enabling disabled. 2078 * 2079 * Dev_id must be globally unique. Normally the address of the 2080 * device data structure is used as the cookie. Since the handler 2081 * receives this value it makes sense to use it. 2082 * 2083 * If the interrupt line cannot be used to deliver NMIs, the function 2084 * will fail and return a negative value.
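 *
 * An illustrative sketch only (apart from the genirq calls the names are
 * hypothetical; it assumes the irqchip supports NMI setup and that the
 * line is marked to not be automatically enabled on request):
 *
 *	irq_set_status_flags(irq, IRQ_NOAUTOEN);
 *	err = request_nmi(irq, foo_nmi_handler, IRQF_PERCPU, "foo-nmi", foo);
 *	if (err)
 *		return err;
 *	enable_nmi(irq);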
2085 */ 2086 int request_nmi(unsigned int irq, irq_handler_t handler, 2087 unsigned long irqflags, const char *name, void *dev_id) 2088 { 2089 struct irqaction *action; 2090 struct irq_desc *desc; 2091 unsigned long flags; 2092 int retval; 2093 2094 if (irq == IRQ_NOTCONNECTED) 2095 return -ENOTCONN; 2096 2097 /* NMI cannot be shared, used for Polling */ 2098 if (irqflags & (IRQF_SHARED | IRQF_COND_SUSPEND | IRQF_IRQPOLL)) 2099 return -EINVAL; 2100 2101 if (!(irqflags & IRQF_PERCPU)) 2102 return -EINVAL; 2103 2104 if (!handler) 2105 return -EINVAL; 2106 2107 desc = irq_to_desc(irq); 2108 2109 if (!desc || irq_settings_can_autoenable(desc) || 2110 !irq_settings_can_request(desc) || 2111 WARN_ON(irq_settings_is_per_cpu_devid(desc)) || 2112 !irq_supports_nmi(desc)) 2113 return -EINVAL; 2114 2115 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL); 2116 if (!action) 2117 return -ENOMEM; 2118 2119 action->handler = handler; 2120 action->flags = irqflags | IRQF_NO_THREAD | IRQF_NOBALANCING; 2121 action->name = name; 2122 action->dev_id = dev_id; 2123 2124 retval = irq_chip_pm_get(&desc->irq_data); 2125 if (retval < 0) 2126 goto err_out; 2127 2128 retval = __setup_irq(irq, desc, action); 2129 if (retval) 2130 goto err_irq_setup; 2131 2132 raw_spin_lock_irqsave(&desc->lock, flags); 2133 2134 /* Setup NMI state */ 2135 desc->istate |= IRQS_NMI; 2136 retval = irq_nmi_setup(desc); 2137 if (retval) { 2138 __cleanup_nmi(irq, desc); 2139 raw_spin_unlock_irqrestore(&desc->lock, flags); 2140 return -EINVAL; 2141 } 2142 2143 raw_spin_unlock_irqrestore(&desc->lock, flags); 2144 2145 return 0; 2146 2147 err_irq_setup: 2148 irq_chip_pm_put(&desc->irq_data); 2149 err_out: 2150 kfree(action); 2151 2152 return retval; 2153 } 2154 2155 void enable_percpu_irq(unsigned int irq, unsigned int type) 2156 { 2157 unsigned int cpu = smp_processor_id(); 2158 unsigned long flags; 2159 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU); 2160 2161 if (!desc) 2162 return; 2163 2164 /* 2165 * If the trigger type is not specified by the caller, then 2166 * use the default for this interrupt. 2167 */ 2168 type &= IRQ_TYPE_SENSE_MASK; 2169 if (type == IRQ_TYPE_NONE) 2170 type = irqd_get_trigger_type(&desc->irq_data); 2171 2172 if (type != IRQ_TYPE_NONE) { 2173 int ret; 2174 2175 ret = __irq_set_trigger(desc, type); 2176 2177 if (ret) { 2178 WARN(1, "failed to set type for IRQ%d\n", irq); 2179 goto out; 2180 } 2181 } 2182 2183 irq_percpu_enable(desc, cpu); 2184 out: 2185 irq_put_desc_unlock(desc, flags); 2186 } 2187 EXPORT_SYMBOL_GPL(enable_percpu_irq); 2188 2189 void enable_percpu_nmi(unsigned int irq, unsigned int type) 2190 { 2191 enable_percpu_irq(irq, type); 2192 } 2193 2194 /** 2195 * irq_percpu_is_enabled - Check whether the per cpu irq is enabled 2196 * @irq: Linux irq number to check for 2197 * 2198 * Must be called from a non migratable context. Returns the enable 2199 * state of a per cpu interrupt on the current cpu. 
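 *
 * An illustrative sketch only: enable_percpu_irq() must run on the CPU in
 * question, e.g. from a CPU hotplug callback running on that CPU, and the
 * check below is only meaningful while migration to another CPU is
 * prevented (foo_irq is a hypothetical Linux irq number):
 *
 *	static int foo_starting_cpu(unsigned int cpu)
 *	{
 *		enable_percpu_irq(foo_irq, IRQ_TYPE_NONE);
 *		return 0;
 *	}
 *
 *	cpu = get_cpu();
 *	WARN_ON(!irq_percpu_is_enabled(foo_irq));
 *	put_cpu();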
2200 */ 2201 bool irq_percpu_is_enabled(unsigned int irq) 2202 { 2203 unsigned int cpu = smp_processor_id(); 2204 struct irq_desc *desc; 2205 unsigned long flags; 2206 bool is_enabled; 2207 2208 desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU); 2209 if (!desc) 2210 return false; 2211 2212 is_enabled = cpumask_test_cpu(cpu, desc->percpu_enabled); 2213 irq_put_desc_unlock(desc, flags); 2214 2215 return is_enabled; 2216 } 2217 EXPORT_SYMBOL_GPL(irq_percpu_is_enabled); 2218 2219 void disable_percpu_irq(unsigned int irq) 2220 { 2221 unsigned int cpu = smp_processor_id(); 2222 unsigned long flags; 2223 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU); 2224 2225 if (!desc) 2226 return; 2227 2228 irq_percpu_disable(desc, cpu); 2229 irq_put_desc_unlock(desc, flags); 2230 } 2231 EXPORT_SYMBOL_GPL(disable_percpu_irq); 2232 2233 void disable_percpu_nmi(unsigned int irq) 2234 { 2235 disable_percpu_irq(irq); 2236 } 2237 2238 /* 2239 * Internal function to unregister a percpu irqaction. 2240 */ 2241 static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id) 2242 { 2243 struct irq_desc *desc = irq_to_desc(irq); 2244 struct irqaction *action; 2245 unsigned long flags; 2246 2247 WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq); 2248 2249 if (!desc) 2250 return NULL; 2251 2252 raw_spin_lock_irqsave(&desc->lock, flags); 2253 2254 action = desc->action; 2255 if (!action || action->percpu_dev_id != dev_id) { 2256 WARN(1, "Trying to free already-free IRQ %d\n", irq); 2257 goto bad; 2258 } 2259 2260 if (!cpumask_empty(desc->percpu_enabled)) { 2261 WARN(1, "percpu IRQ %d still enabled on CPU%d!\n", 2262 irq, cpumask_first(desc->percpu_enabled)); 2263 goto bad; 2264 } 2265 2266 /* Found it - now remove it from the list of entries: */ 2267 desc->action = NULL; 2268 2269 desc->istate &= ~IRQS_NMI; 2270 2271 raw_spin_unlock_irqrestore(&desc->lock, flags); 2272 2273 unregister_handler_proc(irq, action); 2274 2275 irq_chip_pm_put(&desc->irq_data); 2276 module_put(desc->owner); 2277 return action; 2278 2279 bad: 2280 raw_spin_unlock_irqrestore(&desc->lock, flags); 2281 return NULL; 2282 } 2283 2284 /** 2285 * remove_percpu_irq - free a per-cpu interrupt 2286 * @irq: Interrupt line to free 2287 * @act: irqaction for the interrupt 2288 * 2289 * Used to remove interrupts statically setup by the early boot process. 2290 */ 2291 void remove_percpu_irq(unsigned int irq, struct irqaction *act) 2292 { 2293 struct irq_desc *desc = irq_to_desc(irq); 2294 2295 if (desc && irq_settings_is_per_cpu_devid(desc)) 2296 __free_percpu_irq(irq, act->percpu_dev_id); 2297 } 2298 2299 /** 2300 * free_percpu_irq - free an interrupt allocated with request_percpu_irq 2301 * @irq: Interrupt line to free 2302 * @dev_id: Device identity to free 2303 * 2304 * Remove a percpu interrupt handler. The handler is removed, but 2305 * the interrupt line is not disabled. This must be done on each 2306 * CPU before calling this function. The function does not return 2307 * until any executing interrupts for this IRQ have completed. 2308 * 2309 * This function must not be called from interrupt context. 
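 *
 * An illustrative sketch only (the foo_* names are hypothetical): one
 * possible pattern is to disable the line on every CPU first, then remove
 * the handler.
 *
 *	static void foo_disable_on_this_cpu(void *info)
 *	{
 *		disable_percpu_irq(*(unsigned int *)info);
 *	}
 *
 *	on_each_cpu(foo_disable_on_this_cpu, &foo_irq, 1);
 *	free_percpu_irq(foo_irq, foo_pcpu_state);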
2310 */ 2311 void free_percpu_irq(unsigned int irq, void __percpu *dev_id) 2312 { 2313 struct irq_desc *desc = irq_to_desc(irq); 2314 2315 if (!desc || !irq_settings_is_per_cpu_devid(desc)) 2316 return; 2317 2318 chip_bus_lock(desc); 2319 kfree(__free_percpu_irq(irq, dev_id)); 2320 chip_bus_sync_unlock(desc); 2321 } 2322 EXPORT_SYMBOL_GPL(free_percpu_irq); 2323 2324 void free_percpu_nmi(unsigned int irq, void __percpu *dev_id) 2325 { 2326 struct irq_desc *desc = irq_to_desc(irq); 2327 2328 if (!desc || !irq_settings_is_per_cpu_devid(desc)) 2329 return; 2330 2331 if (WARN_ON(!(desc->istate & IRQS_NMI))) 2332 return; 2333 2334 kfree(__free_percpu_irq(irq, dev_id)); 2335 } 2336 2337 /** 2338 * setup_percpu_irq - setup a per-cpu interrupt 2339 * @irq: Interrupt line to setup 2340 * @act: irqaction for the interrupt 2341 * 2342 * Used to statically setup per-cpu interrupts in the early boot process. 2343 */ 2344 int setup_percpu_irq(unsigned int irq, struct irqaction *act) 2345 { 2346 struct irq_desc *desc = irq_to_desc(irq); 2347 int retval; 2348 2349 if (!desc || !irq_settings_is_per_cpu_devid(desc)) 2350 return -EINVAL; 2351 2352 retval = irq_chip_pm_get(&desc->irq_data); 2353 if (retval < 0) 2354 return retval; 2355 2356 retval = __setup_irq(irq, desc, act); 2357 2358 if (retval) 2359 irq_chip_pm_put(&desc->irq_data); 2360 2361 return retval; 2362 } 2363 2364 /** 2365 * __request_percpu_irq - allocate a percpu interrupt line 2366 * @irq: Interrupt line to allocate 2367 * @handler: Function to be called when the IRQ occurs. 2368 * @flags: Interrupt type flags (IRQF_TIMER only) 2369 * @devname: An ascii name for the claiming device 2370 * @dev_id: A percpu cookie passed back to the handler function 2371 * 2372 * This call allocates interrupt resources and enables the 2373 * interrupt on the local CPU. If the interrupt is supposed to be 2374 * enabled on other CPUs, it has to be done on each CPU using 2375 * enable_percpu_irq(). 2376 * 2377 * Dev_id must be globally unique. It is a per-cpu variable, and 2378 * the handler gets called with the interrupted CPU's instance of 2379 * that variable. 2380 */ 2381 int __request_percpu_irq(unsigned int irq, irq_handler_t handler, 2382 unsigned long flags, const char *devname, 2383 void __percpu *dev_id) 2384 { 2385 struct irqaction *action; 2386 struct irq_desc *desc; 2387 int retval; 2388 2389 if (!dev_id) 2390 return -EINVAL; 2391 2392 desc = irq_to_desc(irq); 2393 if (!desc || !irq_settings_can_request(desc) || 2394 !irq_settings_is_per_cpu_devid(desc)) 2395 return -EINVAL; 2396 2397 if (flags && flags != IRQF_TIMER) 2398 return -EINVAL; 2399 2400 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL); 2401 if (!action) 2402 return -ENOMEM; 2403 2404 action->handler = handler; 2405 action->flags = flags | IRQF_PERCPU | IRQF_NO_SUSPEND; 2406 action->name = devname; 2407 action->percpu_dev_id = dev_id; 2408 2409 retval = irq_chip_pm_get(&desc->irq_data); 2410 if (retval < 0) { 2411 kfree(action); 2412 return retval; 2413 } 2414 2415 retval = __setup_irq(irq, desc, action); 2416 2417 if (retval) { 2418 irq_chip_pm_put(&desc->irq_data); 2419 kfree(action); 2420 } 2421 2422 return retval; 2423 } 2424 EXPORT_SYMBOL_GPL(__request_percpu_irq); 2425 2426 /** 2427 * request_percpu_nmi - allocate a percpu interrupt line for NMI delivery 2428 * @irq: Interrupt line to allocate 2429 * @handler: Function to be called when the IRQ occurs. 
2430 * @name: An ascii name for the claiming device 2431 * @dev_id: A percpu cookie passed back to the handler function 2432 * 2433 * This call allocates interrupt resources for a per CPU NMI. Per CPU NMIs 2434 * have to be set up on each CPU by calling prepare_percpu_nmi() before 2435 * being enabled on the same CPU by using enable_percpu_nmi(). 2436 * 2437 * Dev_id must be globally unique. It is a per-cpu variable, and 2438 * the handler gets called with the interrupted CPU's instance of 2439 * that variable. 2440 * 2441 * Interrupt lines requested for NMI delivery should have auto enabling 2442 * disabled. 2443 * 2444 * If the interrupt line cannot be used to deliver NMIs, the function 2445 * will fail and return a negative value. 2446 */ 2447 int request_percpu_nmi(unsigned int irq, irq_handler_t handler, 2448 const char *name, void __percpu *dev_id) 2449 { 2450 struct irqaction *action; 2451 struct irq_desc *desc; 2452 unsigned long flags; 2453 int retval; 2454 2455 if (!handler) 2456 return -EINVAL; 2457 2458 desc = irq_to_desc(irq); 2459 2460 if (!desc || !irq_settings_can_request(desc) || 2461 !irq_settings_is_per_cpu_devid(desc) || 2462 irq_settings_can_autoenable(desc) || 2463 !irq_supports_nmi(desc)) 2464 return -EINVAL; 2465 2466 /* The line cannot already be NMI */ 2467 if (desc->istate & IRQS_NMI) 2468 return -EINVAL; 2469 2470 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL); 2471 if (!action) 2472 return -ENOMEM; 2473 2474 action->handler = handler; 2475 action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND | IRQF_NO_THREAD 2476 | IRQF_NOBALANCING; 2477 action->name = name; 2478 action->percpu_dev_id = dev_id; 2479 2480 retval = irq_chip_pm_get(&desc->irq_data); 2481 if (retval < 0) 2482 goto err_out; 2483 2484 retval = __setup_irq(irq, desc, action); 2485 if (retval) 2486 goto err_irq_setup; 2487 2488 raw_spin_lock_irqsave(&desc->lock, flags); 2489 desc->istate |= IRQS_NMI; 2490 raw_spin_unlock_irqrestore(&desc->lock, flags); 2491 2492 return 0; 2493 2494 err_irq_setup: 2495 irq_chip_pm_put(&desc->irq_data); 2496 err_out: 2497 kfree(action); 2498 2499 return retval; 2500 } 2501 2502 /** 2503 * prepare_percpu_nmi - performs CPU local setup for NMI delivery 2504 * @irq: Interrupt line to prepare for NMI delivery 2505 * 2506 * This call prepares an interrupt line to deliver NMIs on the current CPU, 2507 * before that interrupt line gets enabled with enable_percpu_nmi(). 2508 * 2509 * As a CPU local operation, this should be called from non-preemptible 2510 * context. 2511 * 2512 * If the interrupt line cannot be used to deliver NMIs, the function 2513 * will fail and return a negative value.
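 *
 * An illustrative sketch only (the foo_* names are hypothetical): after
 * request_percpu_nmi(), each CPU that should receive NMIs runs the
 * following from a context in which it cannot migrate, e.g. a CPU hotplug
 * "starting" callback:
 *
 *	static int foo_starting_cpu(unsigned int cpu)
 *	{
 *		int err = prepare_percpu_nmi(foo_nmi_irq);
 *
 *		if (err)
 *			return err;
 *
 *		enable_percpu_nmi(foo_nmi_irq, IRQ_TYPE_NONE);
 *		return 0;
 *	}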
2514 */ 2515 int prepare_percpu_nmi(unsigned int irq) 2516 { 2517 unsigned long flags; 2518 struct irq_desc *desc; 2519 int ret = 0; 2520 2521 WARN_ON(preemptible()); 2522 2523 desc = irq_get_desc_lock(irq, &flags, 2524 IRQ_GET_DESC_CHECK_PERCPU); 2525 if (!desc) 2526 return -EINVAL; 2527 2528 if (WARN(!(desc->istate & IRQS_NMI), 2529 KERN_ERR "prepare_percpu_nmi called for a non-NMI interrupt: irq %u\n", 2530 irq)) { 2531 ret = -EINVAL; 2532 goto out; 2533 } 2534 2535 ret = irq_nmi_setup(desc); 2536 if (ret) { 2537 pr_err("Failed to setup NMI delivery: irq %u\n", irq); 2538 goto out; 2539 } 2540 2541 out: 2542 irq_put_desc_unlock(desc, flags); 2543 return ret; 2544 } 2545 2546 /** 2547 * teardown_percpu_nmi - undoes NMI setup of IRQ line 2548 * @irq: Interrupt line from which CPU local NMI configuration should be 2549 * removed 2550 * 2551 * This call undoes the setup done by prepare_percpu_nmi(). 2552 * 2553 * IRQ line should not be enabled for the current CPU. 2554 * 2555 * As a CPU local operation, this should be called from non-preemptible 2556 * context. 2557 */ 2558 void teardown_percpu_nmi(unsigned int irq) 2559 { 2560 unsigned long flags; 2561 struct irq_desc *desc; 2562 2563 WARN_ON(preemptible()); 2564 2565 desc = irq_get_desc_lock(irq, &flags, 2566 IRQ_GET_DESC_CHECK_PERCPU); 2567 if (!desc) 2568 return; 2569 2570 if (WARN_ON(!(desc->istate & IRQS_NMI))) 2571 goto out; 2572 2573 irq_nmi_teardown(desc); 2574 out: 2575 irq_put_desc_unlock(desc, flags); 2576 } 2577 2578 /** 2579 * irq_get_irqchip_state - returns the irqchip state of an interrupt. 2580 * @irq: Interrupt line that is forwarded to a VM 2581 * @which: One of IRQCHIP_STATE_* the caller wants to know about 2582 * @state: a pointer to a boolean where the state is to be stored 2583 * 2584 * This call snapshots the internal irqchip state of an 2585 * interrupt, returning into @state the bit corresponding to 2586 * state @which. 2587 * 2588 * This function should be called with preemption disabled if the 2589 * interrupt controller has per-cpu registers. 2590 */ 2591 int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which, 2592 bool *state) 2593 { 2594 struct irq_desc *desc; 2595 struct irq_data *data; 2596 struct irq_chip *chip; 2597 unsigned long flags; 2598 int err = -EINVAL; 2599 2600 desc = irq_get_desc_buslock(irq, &flags, 0); 2601 if (!desc) 2602 return err; 2603 2604 data = irq_desc_get_irq_data(desc); 2605 2606 do { 2607 chip = irq_data_get_irq_chip(data); 2608 if (chip->irq_get_irqchip_state) 2609 break; 2610 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY 2611 data = data->parent_data; 2612 #else 2613 data = NULL; 2614 #endif 2615 } while (data); 2616 2617 if (data) 2618 err = chip->irq_get_irqchip_state(data, which, state); 2619 2620 irq_put_desc_busunlock(desc, flags); 2621 return err; 2622 } 2623 EXPORT_SYMBOL_GPL(irq_get_irqchip_state); 2624 2625 /** 2626 * irq_set_irqchip_state - set the state of a forwarded interrupt. 2627 * @irq: Interrupt line that is forwarded to a VM 2628 * @which: State to be restored (one of IRQCHIP_STATE_*) 2629 * @val: Value corresponding to @which 2630 * 2631 * This call sets the internal irqchip state of an interrupt, 2632 * depending on the value of @which. 2633 * 2634 * This function should be called with preemption disabled if the 2635 * interrupt controller has per-cpu registers.
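 *
 * An illustrative sketch only, pairing it with irq_get_irqchip_state() to
 * save and then clear the pending state of a forwarded line:
 *
 *	bool pending;
 *
 *	err = irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING, &pending);
 *	if (!err && pending)
 *		err = irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, false);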
2636 */ 2637 int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which, 2638 bool val) 2639 { 2640 struct irq_desc *desc; 2641 struct irq_data *data; 2642 struct irq_chip *chip; 2643 unsigned long flags; 2644 int err = -EINVAL; 2645 2646 desc = irq_get_desc_buslock(irq, &flags, 0); 2647 if (!desc) 2648 return err; 2649 2650 data = irq_desc_get_irq_data(desc); 2651 2652 do { 2653 chip = irq_data_get_irq_chip(data); 2654 if (chip->irq_set_irqchip_state) 2655 break; 2656 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY 2657 data = data->parent_data; 2658 #else 2659 data = NULL; 2660 #endif 2661 } while (data); 2662 2663 if (data) 2664 err = chip->irq_set_irqchip_state(data, which, val); 2665 2666 irq_put_desc_busunlock(desc, flags); 2667 return err; 2668 } 2669 EXPORT_SYMBOL_GPL(irq_set_irqchip_state); 2670