/*
 * linux/kernel/irq/chip.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code, for irq-chip
 * based architectures.
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 */

#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>

#include <trace/events/irq.h>

#include "internals.h"

/**
 * irq_set_chip - set the irq chip for an irq
 * @irq:	irq number
 * @chip:	pointer to irq chip description structure
 */
int irq_set_chip(unsigned int irq, struct irq_chip *chip)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	if (!chip)
		chip = &no_irq_chip;

	desc->irq_data.chip = chip;
	irq_put_desc_unlock(desc, flags);
	/*
	 * For !CONFIG_SPARSE_IRQ make the irq show up in
	 * allocated_irqs. For the CONFIG_SPARSE_IRQ case, it is
	 * already marked, and this call is harmless.
	 */
	irq_reserve_irq(irq);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip);

/**
 * irq_set_irq_type - set the irq trigger type for an irq
 * @irq:	irq number
 * @type:	IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 */
int irq_set_irq_type(unsigned int irq, unsigned int type)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	type &= IRQ_TYPE_SENSE_MASK;
	ret = __irq_set_trigger(desc, irq, type);
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_type);

/**
 * irq_set_handler_data - set irq handler data for an irq
 * @irq:	Interrupt number
 * @data:	Pointer to interrupt specific data
 *
 * Set the hardware irq controller data for an irq
 */
int irq_set_handler_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_data.handler_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_handler_data);

/**
 * irq_set_msi_desc_off - set MSI descriptor data for an irq at offset
 * @irq_base:	Interrupt number base
 * @irq_offset:	Interrupt number offset
 * @entry:	Pointer to MSI descriptor data
 *
 * Set the MSI descriptor entry for an irq at offset
 */
int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
			 struct msi_desc *entry)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq_base + irq_offset, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->irq_data.msi_desc = entry;
	if (entry && !irq_offset)
		entry->irq = irq_base;
	irq_put_desc_unlock(desc, flags);
	return 0;
}

/**
 * irq_set_msi_desc - set MSI descriptor data for an irq
 * @irq:	Interrupt number
 * @entry:	Pointer to MSI descriptor data
 *
 * Set the MSI descriptor entry for an irq
 */
int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
{
	return irq_set_msi_desc_off(irq, 0, entry);
}

/**
 * irq_set_chip_data - set irq chip data for an irq
 * @irq:	Interrupt number
 * @data:	Pointer to chip specific data
 *
 * Set the hardware irq chip data for an irq
 */
int irq_set_chip_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_data.chip_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip_data);
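/*
 * Example: a minimal sketch of how a platform driver might use the
 * setters above to wire up an interrupt. All my_* names and MY_IRQ are
 * hypothetical, for illustration only; real mask/unmask callbacks would
 * poke the controller's registers, and my_chip_priv stands for some
 * driver-private object:
 *
 *	static void my_mask(struct irq_data *d) { }
 *	static void my_unmask(struct irq_data *d) { }
 *
 *	static struct irq_chip my_chip = {
 *		.name		= "MY-PIC",
 *		.irq_mask	= my_mask,
 *		.irq_unmask	= my_unmask,
 *	};
 *
 *	static int __init my_init(void)
 *	{
 *		int ret = irq_set_chip(MY_IRQ, &my_chip);
 *
 *		if (ret)
 *			return ret;
 *		irq_set_chip_data(MY_IRQ, &my_chip_priv);
 *		return irq_set_irq_type(MY_IRQ, IRQ_TYPE_LEVEL_HIGH);
 *	}
 */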
struct irq_data *irq_get_irq_data(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc ? &desc->irq_data : NULL;
}
EXPORT_SYMBOL_GPL(irq_get_irq_data);

static void irq_state_clr_disabled(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_set_disabled(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_clr_masked(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
}

static void irq_state_set_masked(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
}

int irq_startup(struct irq_desc *desc, bool resend)
{
	int ret = 0;

	irq_state_clr_disabled(desc);
	desc->depth = 0;

	if (desc->irq_data.chip->irq_startup) {
		ret = desc->irq_data.chip->irq_startup(&desc->irq_data);
		irq_state_clr_masked(desc);
	} else {
		irq_enable(desc);
	}
	if (resend)
		check_irq_resend(desc, desc->irq_data.irq);
	return ret;
}

void irq_shutdown(struct irq_desc *desc)
{
	irq_state_set_disabled(desc);
	desc->depth = 1;
	if (desc->irq_data.chip->irq_shutdown)
		desc->irq_data.chip->irq_shutdown(&desc->irq_data);
	else if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	irq_state_set_masked(desc);
}

void irq_enable(struct irq_desc *desc)
{
	irq_state_clr_disabled(desc);
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	irq_state_clr_masked(desc);
}

/**
 * irq_disable - Mark interrupt disabled
 * @desc:	irq descriptor which should be disabled
 *
 * If the chip does not implement the irq_disable callback, we
 * use a lazy disable approach. That means we mark the interrupt
 * disabled, but leave the hardware unmasked. That's an
 * optimization because we avoid the hardware access for the
 * common case where no interrupt happens after we marked it
 * disabled. If an interrupt happens, then the interrupt flow
 * handler masks the line at the hardware level and marks it
 * pending.
 */
void irq_disable(struct irq_desc *desc)
{
	irq_state_set_disabled(desc);
	if (desc->irq_data.chip->irq_disable) {
		desc->irq_data.chip->irq_disable(&desc->irq_data);
		irq_state_set_masked(desc);
	}
}
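/*
 * Example: a chip can opt out of the lazy disable optimization described
 * above by providing its own irq_disable callback, which irq_disable()
 * then invokes immediately. A hedged sketch, with hypothetical my_*
 * names standing in for real driver state and register accessors:
 *
 *	static void my_disable(struct irq_data *d)
 *	{
 *		struct my_pic *pic = irq_data_get_irq_chip_data(d);
 *
 *		pic->enable_mask &= ~BIT(d->hwirq);
 *		my_sync_enable_mask(pic);
 *	}
 *
 *	static struct irq_chip my_chip = {
 *		.name		= "MY-PIC",
 *		.irq_mask	= my_mask,
 *		.irq_unmask	= my_unmask,
 *		.irq_disable	= my_disable,
 *	};
 */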
void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	cpumask_set_cpu(cpu, desc->percpu_enabled);
}

void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	cpumask_clear_cpu(cpu, desc->percpu_enabled);
}

static inline void mask_ack_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask_ack)
		desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
	else {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		if (desc->irq_data.chip->irq_ack)
			desc->irq_data.chip->irq_ack(&desc->irq_data);
	}
	irq_state_set_masked(desc);
}

void mask_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask) {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		irq_state_set_masked(desc);
	}
}

void unmask_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_unmask) {
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
		irq_state_clr_masked(desc);
	}
}

/*
 * handle_nested_irq - Handle a nested irq from an irq thread
 * @irq:	the interrupt number
 *
 * Handle interrupts which are nested into a threaded interrupt
 * handler. The handler function is called inside the calling
 * thread's context.
 */
void handle_nested_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	irqreturn_t action_ret;

	might_sleep();

	raw_spin_lock_irq(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	action = desc->action;
	if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock_irq(&desc->lock);

	action_ret = action->thread_fn(action->irq, action->dev_id);
	if (!noirqdebug)
		note_interrupt(irq, desc, action_ret);

	raw_spin_lock_irq(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_nested_irq);
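/*
 * Example: a demultiplexing threaded handler, e.g. for an I2C irq
 * expander, calls handle_nested_irq() for each pending child, since the
 * child handlers may sleep just like the parent thread. A hedged sketch
 * with hypothetical my_* names:
 *
 *	static irqreturn_t my_demux_thread_fn(int irq, void *data)
 *	{
 *		struct my_expander *exp = data;
 *		unsigned long pending = my_read_status(exp);
 *		int bit;
 *
 *		for_each_set_bit(bit, &pending, exp->nr_irqs)
 *			handle_nested_irq(exp->irq_base + bit);
 *		return IRQ_HANDLED;
 *	}
 *
 * The children would have been marked with irq_set_nested_thread() and
 * the parent registered with request_threaded_irq().
 */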
static bool irq_check_poll(struct irq_desc *desc)
{
	if (!(desc->istate & IRQS_POLL_INPROGRESS))
		return false;
	return irq_wait_for_poll(desc);
}

/**
 * handle_simple_irq - Simple and software-decoded IRQs.
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Simple interrupts are either sent from a demultiplexing interrupt
 * handler or come from hardware, where no interrupt hardware control
 * is necessary.
 *
 * Note: The caller is expected to handle the ack, clear, mask and
 * unmask issues if necessary.
 */
void
handle_simple_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
		if (!irq_check_poll(desc))
			goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	handle_irq_event(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_simple_irq);
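/*
 * Example: handle_simple_irq() is a typical flow handler for children
 * of a hard-irq demultiplexer which does the hardware flow control
 * itself and forwards the decoded interrupts via generic_handle_irq().
 * A hedged sketch with hypothetical my_* names:
 *
 *	static void my_demux_handler(unsigned int irq, struct irq_desc *desc)
 *	{
 *		struct my_mux *mux = irq_desc_get_handler_data(desc);
 *		unsigned long pending = my_read_pending(mux);
 *		int bit;
 *
 *		for_each_set_bit(bit, &pending, mux->nr_irqs)
 *			generic_handle_irq(mux->irq_base + bit);
 *	}
 *
 * with each child registered via something like
 * irq_set_chip_and_handler(child_irq, &my_child_chip, handle_simple_irq).
 */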
/*
 * Called unconditionally from handle_level_irq() and only for oneshot
 * interrupts from handle_fasteoi_irq()
 */
static void cond_unmask_irq(struct irq_desc *desc)
{
	/*
	 * We need to unmask in the following cases:
	 * - Standard level irq (IRQF_ONESHOT is not set)
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
		unmask_irq(desc);
}

/**
 * handle_level_irq - Level type irq handler
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Level type interrupts are active as long as the hardware line has
 * the active level. This may require masking the interrupt and unmasking
 * it after the associated handler has acknowledged the device, so the
 * interrupt line is back to inactive.
 */
void
handle_level_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);
	mask_ack_irq(desc);

	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
		if (!irq_check_poll(desc))
			goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	/*
	 * If it's disabled or no action is available,
	 * keep it masked and get out of here
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	handle_irq_event(desc);

	cond_unmask_irq(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_level_irq);

#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
static inline void preflow_handler(struct irq_desc *desc)
{
	if (desc->preflow_handler)
		desc->preflow_handler(&desc->irq_data);
}
#else
static inline void preflow_handler(struct irq_desc *desc) { }
#endif

/**
 * handle_fasteoi_irq - irq handler for transparent controllers
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Only a single callback will be issued to the chip: an ->eoi()
 * call when the interrupt has been serviced. This enables support
 * for modern forms of interrupt handlers, which handle the flow
 * details in hardware, transparently.
 */
void
handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
		if (!irq_check_poll(desc))
			goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	kstat_incr_irqs_this_cpu(irq, desc);

	/*
	 * If it's disabled or no action is available,
	 * then mask it and get out of here:
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}

	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	preflow_handler(desc);
	handle_irq_event(desc);

	if (desc->istate & IRQS_ONESHOT)
		cond_unmask_irq(desc);

out_eoi:
	desc->irq_data.chip->irq_eoi(&desc->irq_data);
out_unlock:
	raw_spin_unlock(&desc->lock);
	return;
out:
	if (!(desc->irq_data.chip->flags & IRQCHIP_EOI_IF_HANDLED))
		goto out_eoi;
	goto out_unlock;
}
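/*
 * Example: handle_fasteoi_irq() pairs with a controller that only wants
 * an end-of-interrupt notification after servicing. Note that ->irq_eoi()
 * must be implemented, since the handler above calls it unconditionally
 * on its normal exit path. A hedged sketch with hypothetical my_* names:
 *
 *	static struct irq_chip my_eoi_chip = {
 *		.name		= "MY-EOI",
 *		.irq_mask	= my_mask,
 *		.irq_unmask	= my_unmask,
 *		.irq_eoi	= my_eoi,
 *	};
 *
 *	irq_set_chip_and_handler(irq, &my_eoi_chip, handle_fasteoi_irq);
 */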
/**
 * handle_edge_irq - edge type IRQ handler
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Interrupts occur on the falling and/or rising edge of a hardware
 * signal. The occurrence is latched into the irq controller hardware
 * and must be acked in order to be reenabled. After the ack another
 * interrupt can happen on the same source even before the first one
 * is handled by the associated event handler. If this happens it
 * might be necessary to disable (mask) the interrupt depending on the
 * controller hardware. This requires re-enabling the interrupt inside
 * the loop which handles the interrupts which have arrived while
 * the handler was running. If all pending interrupts are handled, the
 * loop is left.
 */
void
handle_edge_irq(unsigned int irq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	/*
	 * If we're currently running this IRQ, or it's disabled,
	 * we shouldn't process the IRQ. Mark it pending, handle
	 * the necessary masking and go out
	 */
	if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
		     irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
		if (!irq_check_poll(desc)) {
			desc->istate |= IRQS_PENDING;
			mask_ack_irq(desc);
			goto out_unlock;
		}
	}
	kstat_incr_irqs_this_cpu(irq, desc);

	/* Start handling the irq */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	do {
		if (unlikely(!desc->action)) {
			mask_irq(desc);
			goto out_unlock;
		}

		/*
		 * When another irq arrived while we were handling
		 * one, we could have masked the irq.
		 * Re-enable it, if it was not disabled in the meantime.
		 */
		if (unlikely(desc->istate & IRQS_PENDING)) {
			if (!irqd_irq_disabled(&desc->irq_data) &&
			    irqd_irq_masked(&desc->irq_data))
				unmask_irq(desc);
		}

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL(handle_edge_irq);

#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
/**
 * handle_edge_eoi_irq - edge eoi type IRQ handler
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Similar to handle_edge_irq above, but using eoi and without the
 * mask/unmask logic.
 */
void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
	/*
	 * If we're currently running this IRQ, or it's disabled,
	 * we shouldn't process the IRQ. Mark it pending, handle
	 * the necessary masking and go out
	 */
	if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
		     irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
		if (!irq_check_poll(desc)) {
			desc->istate |= IRQS_PENDING;
			goto out_eoi;
		}
	}
	kstat_incr_irqs_this_cpu(irq, desc);

	do {
		if (unlikely(!desc->action))
			goto out_eoi;

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_eoi:
	chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
#endif

/**
 * handle_percpu_irq - Per CPU local irq handler
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements
 */
void
handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	kstat_incr_irqs_this_cpu(irq, desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	handle_irq_event_percpu(desc, desc->action);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}

/**
 * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
 * @irq:	the interrupt number
 * @desc:	the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements. Same as
 * handle_percpu_irq() above but with the following extras:
 *
 * action->percpu_dev_id is a pointer to percpu variables which
 * contain the real device id for the cpu on which this handler is
 * called
 */
void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irqaction *action = desc->action;
	void *dev_id = __this_cpu_ptr(action->percpu_dev_id);
	irqreturn_t res;

	kstat_incr_irqs_this_cpu(irq, desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	trace_irq_handler_entry(irq, action);
	res = action->handler(irq, dev_id);
	trace_irq_handler_exit(irq, action, res);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}
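/*
 * Example: interrupts flowing through handle_percpu_devid_irq() are
 * typically requested with a percpu dev_id and then enabled on each CPU
 * that should receive them. A hedged sketch with hypothetical my_*
 * names:
 *
 *	static DEFINE_PER_CPU(struct my_timer, my_timers);
 *
 *	err = request_percpu_irq(irq, my_timer_handler, "my_timer",
 *				 &my_timers);
 *
 * and then, on each CPU:
 *
 *	enable_percpu_irq(irq, IRQ_TYPE_NONE);
 */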
void
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
		  const char *name)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);

	if (!desc)
		return;

	if (!handle) {
		handle = handle_bad_irq;
	} else {
		if (WARN_ON(desc->irq_data.chip == &no_irq_chip))
			goto out;
	}

	/* Uninstall? */
	if (handle == handle_bad_irq) {
		if (desc->irq_data.chip != &no_irq_chip)
			mask_ack_irq(desc);
		irq_state_set_disabled(desc);
		desc->depth = 1;
	}
	desc->handle_irq = handle;
	desc->name = name;

	if (handle != handle_bad_irq && is_chained) {
		irq_settings_set_noprobe(desc);
		irq_settings_set_norequest(desc);
		irq_settings_set_nothread(desc);
		irq_startup(desc, true);
	}
out:
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(__irq_set_handler);

void
irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
			      irq_flow_handler_t handle, const char *name)
{
	irq_set_chip(irq, chip);
	__irq_set_handler(irq, handle, 0, name);
}
EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name);

void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return;
	irq_settings_clr_and_set(desc, clr, set);

	irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
		   IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
	if (irq_settings_has_no_balance_set(desc))
		irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
	if (irq_settings_is_per_cpu(desc))
		irqd_set(&desc->irq_data, IRQD_PER_CPU);
	if (irq_settings_can_move_pcntxt(desc))
		irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
	if (irq_settings_is_level(desc))
		irqd_set(&desc->irq_data, IRQD_LEVEL);

	irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc));

	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_modify_status);

/**
 * irq_cpu_online - Invoke all irq_cpu_online functions.
 *
 * Iterate through all irqs and invoke the chip.irq_cpu_online()
 * for each.
 */
void irq_cpu_online(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_online &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_online(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}

/**
 * irq_cpu_offline - Invoke all irq_cpu_offline functions.
 *
 * Iterate through all irqs and invoke the chip.irq_cpu_offline()
 * for each.
 */
void irq_cpu_offline(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_offline &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_offline(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}
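/*
 * Example: a parent driver typically installs a chained flow handler
 * through the irq_set_chained_handler() wrapper in <linux/irq.h>, which
 * calls __irq_set_handler() above with is_chained set, marking the irq
 * noprobe/norequest/nothread and starting it up immediately. Pairing
 * with the hypothetical demux sketch after handle_simple_irq() above:
 *
 *	irq_set_handler_data(parent_irq, &my_mux);
 *	irq_set_chained_handler(parent_irq, my_demux_handler);
 */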