/*
 * linux/kernel/irq/chip.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code, for irq-chip
 * based architectures.
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 */

#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>

#include <trace/events/irq.h>

#include "internals.h"

/**
 * irq_set_chip - set the irq chip for an irq
 * @irq:	irq number
 * @chip:	pointer to irq chip description structure
 */
int irq_set_chip(unsigned int irq, struct irq_chip *chip)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	if (!chip)
		chip = &no_irq_chip;

	desc->irq_data.chip = chip;
	irq_put_desc_unlock(desc, flags);
	/*
	 * For !CONFIG_SPARSE_IRQ make the irq show up in
	 * allocated_irqs. For the CONFIG_SPARSE_IRQ case, it is
	 * already marked, and this call is harmless.
	 */
	irq_reserve_irq(irq);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip);

/**
 * irq_set_irq_type - set the irq trigger type for an irq
 * @irq:	irq number
 * @type:	IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 */
int irq_set_irq_type(unsigned int irq, unsigned int type)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	type &= IRQ_TYPE_SENSE_MASK;
	ret = __irq_set_trigger(desc, irq, type);
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_type);

/**
 * irq_set_handler_data - set irq handler data for an irq
 * @irq:	Interrupt number
 * @data:	Pointer to interrupt specific data
 *
 * Set the irq handler data for an irq
 */
int irq_set_handler_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_data.handler_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_handler_data);

/**
 * irq_set_msi_desc_off - set MSI descriptor data for an irq at offset
 * @irq_base:	Interrupt number base
 * @irq_offset:	Interrupt number offset
 * @entry:	Pointer to MSI descriptor data
 *
 * Set the MSI descriptor entry for an irq at offset
 */
int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
			 struct msi_desc *entry)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq_base + irq_offset, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->irq_data.msi_desc = entry;
	if (entry && !irq_offset)
		entry->irq = irq_base;
	irq_put_desc_unlock(desc, flags);
	return 0;
}

/**
 * irq_set_msi_desc - set MSI descriptor data for an irq
 * @irq:	Interrupt number
 * @entry:	Pointer to MSI descriptor data
 *
 * Set the MSI descriptor entry for an irq
 */
int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
{
	return irq_set_msi_desc_off(irq, 0, entry);
}
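/*
 * Illustrative sketch (not part of this file): a driver for a
 * hypothetical "demo" interrupt controller would typically combine the
 * setters above when wiring up one of its interrupt lines. demo_chip
 * and demo_priv are hypothetical:
 *
 *	irq_set_chip(irq, &demo_chip);
 *	irq_set_irq_type(irq, IRQ_TYPE_LEVEL_HIGH);
 *	irq_set_handler_data(irq, demo_priv);
 *
 * Each setter returns 0 on success and -EINVAL when no descriptor
 * exists for @irq.
 */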
/**
 * irq_set_chip_data - set irq chip data for an irq
 * @irq:	Interrupt number
 * @data:	Pointer to chip specific data
 *
 * Set the hardware irq chip data for an irq
 */
int irq_set_chip_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_data.chip_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip_data);

struct irq_data *irq_get_irq_data(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc ? &desc->irq_data : NULL;
}
EXPORT_SYMBOL_GPL(irq_get_irq_data);

static void irq_state_clr_disabled(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_set_disabled(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_clr_masked(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
}

static void irq_state_set_masked(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
}

int irq_startup(struct irq_desc *desc, bool resend)
{
	int ret = 0;

	irq_state_clr_disabled(desc);
	desc->depth = 0;

	if (desc->irq_data.chip->irq_startup) {
		ret = desc->irq_data.chip->irq_startup(&desc->irq_data);
		irq_state_clr_masked(desc);
	} else {
		irq_enable(desc);
	}
	if (resend)
		check_irq_resend(desc, desc->irq_data.irq);
	return ret;
}

void irq_shutdown(struct irq_desc *desc)
{
	irq_state_set_disabled(desc);
	desc->depth = 1;
	if (desc->irq_data.chip->irq_shutdown)
		desc->irq_data.chip->irq_shutdown(&desc->irq_data);
	else if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	irq_state_set_masked(desc);
}

void irq_enable(struct irq_desc *desc)
{
	irq_state_clr_disabled(desc);
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	irq_state_clr_masked(desc);
}

void irq_disable(struct irq_desc *desc)
{
	irq_state_set_disabled(desc);
	if (desc->irq_data.chip->irq_disable) {
		desc->irq_data.chip->irq_disable(&desc->irq_data);
		irq_state_set_masked(desc);
	}
}

void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	cpumask_set_cpu(cpu, desc->percpu_enabled);
}

void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	cpumask_clear_cpu(cpu, desc->percpu_enabled);
}

static inline void mask_ack_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask_ack)
		desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
	else {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		if (desc->irq_data.chip->irq_ack)
			desc->irq_data.chip->irq_ack(&desc->irq_data);
	}
	irq_state_set_masked(desc);
}

void mask_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask) {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		irq_state_set_masked(desc);
	}
}
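/*
 * Illustrative sketch (hypothetical controller, not part of this file):
 * irq_enable()/irq_shutdown() above fall back to ->irq_unmask()/->irq_mask()
 * when a chip provides no ->irq_enable()/->irq_disable()/->irq_shutdown()
 * callbacks, so a minimal memory-mapped chip can get away with mask and
 * unmask alone. demo_base and the DEMO_* registers are hypothetical:
 *
 *	static void demo_mask(struct irq_data *d)
 *	{
 *		writel(BIT(d->hwirq), demo_base + DEMO_MASK_SET);
 *	}
 *
 *	static void demo_unmask(struct irq_data *d)
 *	{
 *		writel(BIT(d->hwirq), demo_base + DEMO_MASK_CLR);
 *	}
 *
 *	static struct irq_chip demo_chip = {
 *		.name		= "demo",
 *		.irq_mask	= demo_mask,
 *		.irq_unmask	= demo_unmask,
 *	};
 */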
void unmask_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_unmask) {
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
		irq_state_clr_masked(desc);
	}
}

/*
 * handle_nested_irq - Handle a nested irq from an irq thread
 * @irq:	the interrupt number
 *
 * Handle interrupts which are nested into a threaded interrupt
 * handler. The handler function is called inside the calling
 * thread's context.
 */
void handle_nested_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	irqreturn_t action_ret;

	might_sleep();

	raw_spin_lock_irq(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	action = desc->action;
	if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock_irq(&desc->lock);

	action_ret = action->thread_fn(action->irq, action->dev_id);
	if (!noirqdebug)
		note_interrupt(irq, desc, action_ret);

	raw_spin_lock_irq(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_nested_irq);

static bool irq_check_poll(struct irq_desc *desc)
{
	if (!(desc->istate & IRQS_POLL_INPROGRESS))
		return false;
	return irq_wait_for_poll(desc);
}

/**
 * handle_simple_irq - Simple and software-decoded IRQs.
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Simple interrupts are either sent from a demultiplexing interrupt
 * handler or come from hardware, where no interrupt hardware control
 * is necessary.
 *
 * Note: The caller is expected to handle the ack, clear, mask and
 * unmask issues if necessary.
 */
void
handle_simple_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
		if (!irq_check_poll(desc))
			goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	handle_irq_event(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_simple_irq);

/*
 * Called unconditionally from handle_level_irq() and only for oneshot
 * interrupts from handle_fasteoi_irq()
 */
static void cond_unmask_irq(struct irq_desc *desc)
{
	/*
	 * We need to unmask in the following cases:
	 * - Standard level irq (IRQF_ONESHOT is not set)
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
		unmask_irq(desc);
}
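/*
 * Illustrative sketch (hypothetical expander driver, not part of this
 * file): handle_nested_irq() above is meant to be called from the
 * threaded handler of a demultiplexing parent, e.g. an I2C GPIO
 * expander whose status register can only be read in sleeping context.
 * demo_read_status(), DEMO_NR_PINS and struct demo_expander are
 * hypothetical:
 *
 *	static irqreturn_t demo_expander_thread(int irq, void *data)
 *	{
 *		struct demo_expander *exp = data;
 *		unsigned long status = demo_read_status(exp);	// sleeping bus read
 *		int bit;
 *
 *		for_each_set_bit(bit, &status, DEMO_NR_PINS)
 *			handle_nested_irq(exp->irq_base + bit);
 *		return IRQ_HANDLED;
 *	}
 */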
/**
 * handle_level_irq - Level type irq handler
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Level type interrupts are active as long as the hardware line has
 * the active level. This may require masking the interrupt and
 * unmasking it after the associated handler has acknowledged the
 * device, so the interrupt line is back to inactive.
 */
void
handle_level_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);
	mask_ack_irq(desc);

	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
		if (!irq_check_poll(desc))
			goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	/*
	 * If it is disabled or no action is available,
	 * keep it masked and get out of here.
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	handle_irq_event(desc);

	cond_unmask_irq(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_level_irq);

#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
static inline void preflow_handler(struct irq_desc *desc)
{
	if (desc->preflow_handler)
		desc->preflow_handler(&desc->irq_data);
}
#else
static inline void preflow_handler(struct irq_desc *desc) { }
#endif

/**
 * handle_fasteoi_irq - irq handler for transparent controllers
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Only a single callback will be issued to the chip: an ->eoi()
 * call when the interrupt has been serviced. This enables support
 * for modern forms of interrupt handlers, which handle the flow
 * details in hardware, transparently.
 */
void
handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
		if (!irq_check_poll(desc))
			goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	/*
	 * If it is disabled or no action is available,
	 * then mask it and get out of here:
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}

	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	preflow_handler(desc);
	handle_irq_event(desc);

	if (desc->istate & IRQS_ONESHOT)
		cond_unmask_irq(desc);

out_eoi:
	desc->irq_data.chip->irq_eoi(&desc->irq_data);
out_unlock:
	raw_spin_unlock(&desc->lock);
	return;
out:
	if (!(desc->irq_data.chip->flags & IRQCHIP_EOI_IF_HANDLED))
		goto out_eoi;
	goto out_unlock;
}
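/*
 * Illustrative sketch (not part of this file): handle_fasteoi_irq()
 * requires the chip to provide ->irq_eoi(), which is invoked on the
 * normal exit path (and on the early-out path unless the chip sets
 * IRQCHIP_EOI_IF_HANDLED). A GIC-style setup would look roughly like
 * the following; demo_eoi_chip and its callbacks are hypothetical:
 *
 *	static struct irq_chip demo_eoi_chip = {
 *		.name		= "demo-eoi",
 *		.irq_mask	= demo_mask,
 *		.irq_unmask	= demo_unmask,
 *		.irq_eoi	= demo_eoi,
 *	};
 *
 *	irq_set_chip_and_handler(irq, &demo_eoi_chip, handle_fasteoi_irq);
 */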
/**
 * handle_edge_irq - edge type IRQ handler
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Interrupt occurs on the falling and/or rising edge of a hardware
 * signal. The occurrence is latched into the irq controller hardware
 * and must be acked in order to be re-enabled. After the ack another
 * interrupt can happen on the same source even before the first one
 * is handled by the associated event handler. If this happens it
 * might be necessary to disable (mask) the interrupt depending on the
 * controller hardware. This requires re-enabling the interrupt inside
 * of the loop which handles the interrupts which have arrived while
 * the handler was running. If all pending interrupts are handled, the
 * loop is left.
 */
void
handle_edge_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	/*
	 * If we're currently running this IRQ, or it is disabled,
	 * we shouldn't process the IRQ. Mark it pending, handle
	 * the necessary masking and go out.
	 */
	if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
		     irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
		if (!irq_check_poll(desc)) {
			desc->istate |= IRQS_PENDING;
			mask_ack_irq(desc);
			goto out_unlock;
		}
	}
	kstat_incr_irqs_this_cpu(irq, desc);

	/* Start handling the irq */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	do {
		if (unlikely(!desc->action)) {
			mask_irq(desc);
			goto out_unlock;
		}

		/*
		 * When another irq arrived while we were handling
		 * one, we could have masked the irq.
		 * Re-enable it, if it was not disabled in the meantime.
		 */
		if (unlikely(desc->istate & IRQS_PENDING)) {
			if (!irqd_irq_disabled(&desc->irq_data) &&
			    irqd_irq_masked(&desc->irq_data))
				unmask_irq(desc);
		}

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL(handle_edge_irq);

#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
/**
 * handle_edge_eoi_irq - edge eoi type IRQ handler
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Similar to the above handle_edge_irq, but using eoi and w/o the
 * mask/unmask logic.
 */
void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	/*
	 * If we're currently running this IRQ, or it is disabled,
	 * we shouldn't process the IRQ. Mark it pending, handle
	 * the necessary masking and go out.
	 */
	if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
		     irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
		if (!irq_check_poll(desc)) {
			desc->istate |= IRQS_PENDING;
			goto out_eoi;
		}
	}
	kstat_incr_irqs_this_cpu(irq, desc);

	do {
		if (unlikely(!desc->action))
			goto out_eoi;

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_eoi:
	chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
#endif

/**
 * handle_percpu_irq - Per CPU local irq handler
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements
 */
void
handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	kstat_incr_irqs_this_cpu(irq, desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	handle_irq_event_percpu(desc, desc->action);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}
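/*
 * Illustrative sketch (hypothetical timer driver, not part of this
 * file): the per cpu dev id variant below pairs with
 * request_percpu_irq()/enable_percpu_irq(), where every CPU passes the
 * same percpu pointer and the handler receives that CPU's instance.
 * demo_evt and demo_timer_handler are hypothetical:
 *
 *	static DEFINE_PER_CPU(struct demo_clock_event, demo_evt);
 *
 *	err = request_percpu_irq(irq, demo_timer_handler,
 *				 "demo_timer", &demo_evt);
 *	...
 *	enable_percpu_irq(irq, IRQ_TYPE_NONE);	// on each CPU that uses it
 */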
/**
 * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements. Same as
 * handle_percpu_irq() above but with the following extras:
 *
 * action->percpu_dev_id is a pointer to percpu variables which
 * contain the real device id for the cpu on which this handler is
 * called
 */
void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irqaction *action = desc->action;
	void *dev_id = __this_cpu_ptr(action->percpu_dev_id);
	irqreturn_t res;

	kstat_incr_irqs_this_cpu(irq, desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	trace_irq_handler_entry(irq, action);
	res = action->handler(irq, dev_id);
	trace_irq_handler_exit(irq, action, res);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}

void
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
		  const char *name)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);

	if (!desc)
		return;

	if (!handle) {
		handle = handle_bad_irq;
	} else {
		if (WARN_ON(desc->irq_data.chip == &no_irq_chip))
			goto out;
	}

	/* Uninstall? */
	if (handle == handle_bad_irq) {
		if (desc->irq_data.chip != &no_irq_chip)
			mask_ack_irq(desc);
		irq_state_set_disabled(desc);
		desc->depth = 1;
	}
	desc->handle_irq = handle;
	desc->name = name;

	if (handle != handle_bad_irq && is_chained) {
		irq_settings_set_noprobe(desc);
		irq_settings_set_norequest(desc);
		irq_settings_set_nothread(desc);
		irq_startup(desc, true);
	}
out:
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(__irq_set_handler);

void
irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
			      irq_flow_handler_t handle, const char *name)
{
	irq_set_chip(irq, chip);
	__irq_set_handler(irq, handle, 0, name);
}
EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name);

void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return;
	irq_settings_clr_and_set(desc, clr, set);

	irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
		   IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
	if (irq_settings_has_no_balance_set(desc))
		irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
	if (irq_settings_is_per_cpu(desc))
		irqd_set(&desc->irq_data, IRQD_PER_CPU);
	if (irq_settings_can_move_pcntxt(desc))
		irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
	if (irq_settings_is_level(desc))
		irqd_set(&desc->irq_data, IRQD_LEVEL);

	irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc));

	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_modify_status);
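/*
 * Illustrative sketch (not part of this file): irqchip setup code
 * commonly installs the chip and flow handler in one go and then
 * adjusts the settings bits, e.g. to make a line probeable but not
 * requestable. demo_chip is hypothetical:
 *
 *	irq_set_chip_and_handler_name(irq, &demo_chip,
 *				      handle_edge_irq, "edge");
 *	irq_modify_status(irq, IRQ_NOPROBE, IRQ_NOREQUEST);
 */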
/**
 * irq_cpu_online - Invoke all irq_cpu_online functions.
 *
 * Iterate through all irqs and invoke the chip.irq_cpu_online()
 * for each.
 */
void irq_cpu_online(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_online &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_online(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}

/**
 * irq_cpu_offline - Invoke all irq_cpu_offline functions.
 *
 * Iterate through all irqs and invoke the chip.irq_cpu_offline()
 * for each.
 */
void irq_cpu_offline(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_offline &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_offline(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}
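/*
 * Illustrative sketch (not part of this file): irq_cpu_online() and
 * irq_cpu_offline() are expected to be called from architecture CPU
 * hotplug code, e.g. from a hypothetical __cpu_disable() style hook:
 *
 *	static int demo_cpu_disable(unsigned int cpu)	// hypothetical arch hook
 *	{
 *		...
 *		irq_cpu_offline();
 *		...
 *		return 0;
 *	}
 */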