/*
 *  linux/kernel/irq/chip.c
 *
 *  Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 *  Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 *  This file contains the core interrupt handling code, for irq-chip
 *  based architectures.
 *
 *  Detailed information is available in Documentation/DocBook/genericirq
 */

#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>

#include <trace/events/irq.h>

#include "internals.h"

/**
 * irq_set_chip - set the irq chip for an irq
 * @irq:	irq number
 * @chip:	pointer to irq chip description structure
 */
int irq_set_chip(unsigned int irq, struct irq_chip *chip)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	if (!chip)
		chip = &no_irq_chip;

	desc->irq_data.chip = chip;
	irq_put_desc_unlock(desc, flags);
	/*
	 * For !CONFIG_SPARSE_IRQ make the irq show up in
	 * allocated_irqs. For the CONFIG_SPARSE_IRQ case, it is
	 * already marked, and this call is harmless.
	 */
	irq_reserve_irq(irq);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip);

/**
 * irq_set_irq_type - set the irq trigger type for an irq
 * @irq:	irq number
 * @type:	IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 */
int irq_set_irq_type(unsigned int irq, unsigned int type)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	type &= IRQ_TYPE_SENSE_MASK;
	ret = __irq_set_trigger(desc, irq, type);
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_type);

/**
 * irq_set_handler_data - set irq handler data for an irq
 * @irq:	Interrupt number
 * @data:	Pointer to interrupt specific data
 *
 * Set the hardware irq controller data for an irq
 */
int irq_set_handler_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_data.handler_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_handler_data);

/**
 * irq_set_msi_desc - set MSI descriptor data for an irq
 * @irq:	Interrupt number
 * @entry:	Pointer to MSI descriptor data
 *
 * Set the MSI descriptor entry for an irq
 */
int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->irq_data.msi_desc = entry;
	if (entry)
		entry->irq = irq;
	irq_put_desc_unlock(desc, flags);
	return 0;
}

/**
 * irq_set_chip_data - set irq chip data for an irq
 * @irq:	Interrupt number
 * @data:	Pointer to chip specific data
 *
 * Set the hardware irq chip data for an irq
 */
int irq_set_chip_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_data.chip_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip_data);
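
/*
 * Usage sketch (hypothetical driver code, not part of this file): an
 * interrupt controller driver typically installs its chip, per-irq
 * cookie and trigger type at mapping time. The names foo_irq_chip and
 * foo_priv are assumptions for illustration only:
 *
 *	static void foo_setup_irq(unsigned int irq, struct foo_priv *priv)
 *	{
 *		irq_set_chip(irq, &foo_irq_chip);
 *		irq_set_chip_data(irq, priv);
 *		irq_set_irq_type(irq, IRQ_TYPE_LEVEL_HIGH);
 *	}
 */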

struct irq_data *irq_get_irq_data(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc ? &desc->irq_data : NULL;
}
EXPORT_SYMBOL_GPL(irq_get_irq_data);

static void irq_state_clr_disabled(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_set_disabled(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_clr_masked(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
}

static void irq_state_set_masked(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
}

int irq_startup(struct irq_desc *desc, bool resend)
{
	int ret = 0;

	irq_state_clr_disabled(desc);
	desc->depth = 0;

	if (desc->irq_data.chip->irq_startup) {
		ret = desc->irq_data.chip->irq_startup(&desc->irq_data);
		irq_state_clr_masked(desc);
	} else {
		irq_enable(desc);
	}
	if (resend)
		check_irq_resend(desc, desc->irq_data.irq);
	return ret;
}

void irq_shutdown(struct irq_desc *desc)
{
	irq_state_set_disabled(desc);
	desc->depth = 1;
	if (desc->irq_data.chip->irq_shutdown)
		desc->irq_data.chip->irq_shutdown(&desc->irq_data);
	else if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	irq_state_set_masked(desc);
}

void irq_enable(struct irq_desc *desc)
{
	irq_state_clr_disabled(desc);
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	irq_state_clr_masked(desc);
}

void irq_disable(struct irq_desc *desc)
{
	irq_state_set_disabled(desc);
	if (desc->irq_data.chip->irq_disable) {
		desc->irq_data.chip->irq_disable(&desc->irq_data);
		irq_state_set_masked(desc);
	}
}

void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	cpumask_set_cpu(cpu, desc->percpu_enabled);
}

void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	cpumask_clear_cpu(cpu, desc->percpu_enabled);
}

static inline void mask_ack_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask_ack)
		desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
	else {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		if (desc->irq_data.chip->irq_ack)
			desc->irq_data.chip->irq_ack(&desc->irq_data);
	}
	irq_state_set_masked(desc);
}

void mask_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask) {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		irq_state_set_masked(desc);
	}
}

void unmask_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_unmask) {
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
		irq_state_clr_masked(desc);
	}
}
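
/*
 * Illustration (a sketch, hypothetical names): irq_enable() falls back
 * to ->irq_unmask(), irq_startup() to irq_enable(), and irq_shutdown()
 * to ->irq_disable() or ->irq_mask(). A minimal chip can therefore get
 * by with mask/unmask callbacks, plus ack where the flow handler
 * requires it:
 *
 *	static struct irq_chip foo_irq_chip = {
 *		.name		= "FOO",
 *		.irq_ack	= foo_irq_ack,
 *		.irq_mask	= foo_irq_mask,
 *		.irq_unmask	= foo_irq_unmask,
 *	};
 */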

/*
 * handle_nested_irq - Handle a nested irq from an irq thread
 * @irq:	the interrupt number
 *
 * Handle interrupts which are nested into a threaded interrupt
 * handler. The handler function is called inside the calling
 * thread's context.
 */
void handle_nested_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	irqreturn_t action_ret;

	might_sleep();

	raw_spin_lock_irq(&desc->lock);

	kstat_incr_irqs_this_cpu(irq, desc);

	action = desc->action;
	if (unlikely(!action || irqd_irq_disabled(&desc->irq_data)))
		goto out_unlock;

	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock_irq(&desc->lock);

	action_ret = action->thread_fn(action->irq, action->dev_id);
	if (!noirqdebug)
		note_interrupt(irq, desc, action_ret);

	raw_spin_lock_irq(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_nested_irq);

static bool irq_check_poll(struct irq_desc *desc)
{
	if (!(desc->istate & IRQS_POLL_INPROGRESS))
		return false;
	return irq_wait_for_poll(desc);
}

/**
 * handle_simple_irq - Simple and software-decoded IRQs.
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Simple interrupts are either sent from a demultiplexing interrupt
 * handler or come from hardware, where no interrupt hardware control
 * is necessary.
 *
 * Note: The caller is expected to handle the ack, clear, mask and
 * unmask issues if necessary.
 */
void
handle_simple_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
		if (!irq_check_poll(desc))
			goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data)))
		goto out_unlock;

	handle_irq_event(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_simple_irq);

/*
 * Called unconditionally from handle_level_irq() and only for oneshot
 * interrupts from handle_fasteoi_irq()
 */
static void cond_unmask_irq(struct irq_desc *desc)
{
	/*
	 * We need to unmask in the following cases:
	 * - Standard level irq (IRQF_ONESHOT is not set)
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
		unmask_irq(desc);
}
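
/*
 * Usage sketch (not part of this file): a demultiplexing driver which
 * decodes child interrupts purely in software typically installs
 * handle_simple_irq for them, since no per-child hardware control is
 * needed. child_irq is a hypothetical name:
 *
 *	irq_set_chip_and_handler(child_irq, &dummy_irq_chip,
 *				 handle_simple_irq);
 */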

/**
 * handle_level_irq - Level type irq handler
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Level type interrupts are active as long as the hardware line has
 * the active level. This may require masking the interrupt and
 * unmasking it after the associated handler has acknowledged the
 * device, so that the interrupt line is back to inactive.
 */
void
handle_level_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);
	mask_ack_irq(desc);

	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
		if (!irq_check_poll(desc))
			goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	/*
	 * If it's disabled or no action is available,
	 * keep it masked and get out of here.
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data)))
		goto out_unlock;

	handle_irq_event(desc);

	cond_unmask_irq(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_level_irq);

#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
static inline void preflow_handler(struct irq_desc *desc)
{
	if (desc->preflow_handler)
		desc->preflow_handler(&desc->irq_data);
}
#else
static inline void preflow_handler(struct irq_desc *desc) { }
#endif

/**
 * handle_fasteoi_irq - irq handler for transparent controllers
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Only a single callback will be issued to the chip: an ->eoi()
 * call when the interrupt has been serviced. This enables support
 * for modern forms of interrupt handlers, which handle the flow
 * details in hardware, transparently.
 */
void
handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
		if (!irq_check_poll(desc))
			goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	/*
	 * If it's disabled or no action is available,
	 * then mask it and get out of here:
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}

	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	preflow_handler(desc);
	handle_irq_event(desc);

	if (desc->istate & IRQS_ONESHOT)
		cond_unmask_irq(desc);

out_eoi:
	desc->irq_data.chip->irq_eoi(&desc->irq_data);
out_unlock:
	raw_spin_unlock(&desc->lock);
	return;
out:
	if (!(desc->irq_data.chip->flags & IRQCHIP_EOI_IF_HANDLED))
		goto out_eoi;
	goto out_unlock;
}
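
/*
 * Usage sketch (hypothetical chip names): a controller whose lines stay
 * asserted until the device is serviced would register the level flow,
 * while an EOI-driven controller would use the fasteoi flow:
 *
 *	irq_set_chip_and_handler(irq, &foo_pic_chip, handle_level_irq);
 *	irq_set_chip_and_handler(irq, &foo_eoi_chip, handle_fasteoi_irq);
 *
 * For the fasteoi case the chip must provide ->irq_eoi(), which is
 * called unconditionally on the exit paths above.
 */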

/**
 * handle_edge_irq - edge type IRQ handler
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Interrupts occur on the falling and/or rising edge of a hardware
 * signal. The occurrence is latched into the irq controller hardware
 * and must be acked in order to be re-enabled. After the ack another
 * interrupt can happen on the same source even before the first one
 * is handled by the associated event handler. If this happens it
 * might be necessary to disable (mask) the interrupt depending on the
 * controller hardware. This requires re-enabling the interrupt inside
 * the loop which handles the interrupts which have arrived while
 * the handler was running. If all pending interrupts are handled, the
 * loop is left.
 */
void
handle_edge_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	/*
	 * If we're currently running this IRQ, or it's disabled,
	 * we shouldn't process the IRQ. Mark it pending, handle
	 * the necessary masking and go out.
	 */
	if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
		     irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
		if (!irq_check_poll(desc)) {
			desc->istate |= IRQS_PENDING;
			mask_ack_irq(desc);
			goto out_unlock;
		}
	}
	kstat_incr_irqs_this_cpu(irq, desc);

	/* Start handling the irq */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	do {
		if (unlikely(!desc->action)) {
			mask_irq(desc);
			goto out_unlock;
		}

		/*
		 * When another irq arrived while we were handling
		 * one, we could have masked the irq.
		 * Re-enable it, if it was not disabled in the meantime.
		 */
		if (unlikely(desc->istate & IRQS_PENDING)) {
			if (!irqd_irq_disabled(&desc->irq_data) &&
			    irqd_irq_masked(&desc->irq_data))
				unmask_irq(desc);
		}

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL(handle_edge_irq);

#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
/**
 * handle_edge_eoi_irq - edge eoi type IRQ handler
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Similar to handle_edge_irq above, but using eoi and without the
 * mask/unmask logic.
 */
void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	/*
	 * If we're currently running this IRQ, or it's disabled,
	 * we shouldn't process the IRQ. Mark it pending, handle
	 * the necessary masking and go out.
	 */
	if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
		     irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
		if (!irq_check_poll(desc)) {
			desc->istate |= IRQS_PENDING;
			goto out_eoi;
		}
	}
	kstat_incr_irqs_this_cpu(irq, desc);

	do {
		if (unlikely(!desc->action))
			goto out_eoi;

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_eoi:
	chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
#endif

/**
 * handle_percpu_irq - Per CPU local irq handler
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements
 */
void
handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	kstat_incr_irqs_this_cpu(irq, desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	handle_irq_event_percpu(desc, desc->action);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}
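
/*
 * Note/sketch (hypothetical names): handle_edge_irq() invokes
 * chip->irq_ack() unconditionally, so an edge-type chip must provide
 * it alongside mask/unmask, e.g.:
 *
 *	static struct irq_chip foo_edge_chip = {
 *		.name		= "FOO-EDGE",
 *		.irq_ack	= foo_irq_ack,
 *		.irq_mask	= foo_irq_mask,
 *		.irq_unmask	= foo_irq_unmask,
 *	};
 *
 *	irq_set_chip_and_handler(irq, &foo_edge_chip, handle_edge_irq);
 */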

/**
 * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements.
 * Same as handle_percpu_irq() above but with the following extras:
 *
 * action->percpu_dev_id is a pointer to percpu variables which
 * contain the real device id for the cpu on which this handler is
 * called
 */
void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irqaction *action = desc->action;
	void *dev_id = __this_cpu_ptr(action->percpu_dev_id);
	irqreturn_t res;

	kstat_incr_irqs_this_cpu(irq, desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	trace_irq_handler_entry(irq, action);
	res = action->handler(irq, dev_id);
	trace_irq_handler_exit(irq, action, res);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}

void
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
		  const char *name)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);

	if (!desc)
		return;

	if (!handle) {
		handle = handle_bad_irq;
	} else {
		if (WARN_ON(desc->irq_data.chip == &no_irq_chip))
			goto out;
	}

	/* Uninstall? */
	if (handle == handle_bad_irq) {
		if (desc->irq_data.chip != &no_irq_chip)
			mask_ack_irq(desc);
		irq_state_set_disabled(desc);
		desc->depth = 1;
	}
	desc->handle_irq = handle;
	desc->name = name;

	if (handle != handle_bad_irq && is_chained) {
		irq_settings_set_noprobe(desc);
		irq_settings_set_norequest(desc);
		irq_settings_set_nothread(desc);
		irq_startup(desc, true);
	}
out:
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(__irq_set_handler);

void
irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
			      irq_flow_handler_t handle, const char *name)
{
	irq_set_chip(irq, chip);
	__irq_set_handler(irq, handle, 0, name);
}

void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return;
	irq_settings_clr_and_set(desc, clr, set);

	irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
		   IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
	if (irq_settings_has_no_balance_set(desc))
		irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
	if (irq_settings_is_per_cpu(desc))
		irqd_set(&desc->irq_data, IRQD_PER_CPU);
	if (irq_settings_can_move_pcntxt(desc))
		irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
	if (irq_settings_is_level(desc))
		irqd_set(&desc->irq_data, IRQD_LEVEL);

	irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc));

	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_modify_status);
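
/*
 * Usage sketch (hypothetical names): a cascaded controller installs a
 * chained flow handler on its parent line (is_chained = 1 also marks it
 * noprobe/norequest/nothread and starts it up), and can tweak per-irq
 * flags on the children with irq_modify_status():
 *
 *	irq_set_chained_handler(parent_irq, foo_gpio_demux_handler);
 *	irq_modify_status(child_irq, IRQ_NOREQUEST, IRQ_NOPROBE);
 */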

/**
 * irq_cpu_online - Invoke all irq_cpu_online functions.
 *
 * Iterate through all irqs and invoke the chip.irq_cpu_online()
 * for each.
 */
void irq_cpu_online(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_online &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_online(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}

/**
 * irq_cpu_offline - Invoke all irq_cpu_offline functions.
 *
 * Iterate through all irqs and invoke the chip.irq_cpu_offline()
 * for each.
 */
void irq_cpu_offline(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_offline &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_offline(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}
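
/*
 * Usage sketch (hypothetical, architecture-specific code): these two
 * helpers are typically invoked from the CPU hotplug paths of an
 * architecture whose irq chips implement ->irq_cpu_online() and
 * ->irq_cpu_offline(), e.g.:
 *
 *	void foo_arch_cpu_up(void)
 *	{
 *		irq_cpu_online();
 *	}
 *
 *	void foo_arch_cpu_down(void)
 *	{
 *		irq_cpu_offline();
 *	}
 */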