/*
 *  linux/arch/arm/kernel/irq.c
 *
 *  Copyright (C) 1992 Linus Torvalds
 *  Modifications for ARM processor Copyright (C) 1995-2000 Russell King.
 *
 *  Support for Dynamic Tick Timer Copyright (C) 2004-2005 Nokia Corporation.
 *  Dynamic Tick Timer written by Tony Lindgren <tony@atomide.com> and
 *  Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  This file contains the code used by various IRQ handling routines:
 *  asking for different IRQ's should be done through these routines
 *  instead of just grabbing them.  Thus setups with different IRQ numbers
 *  shouldn't result in any weird surprises, and installing new handlers
 *  should be easier.
 *
 *  IRQ's are in fact implemented a bit like signal handlers for the kernel.
 *  Naturally it's not a 1:1 relation, but there are similarities.
 */
#include <linux/config.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/kallsyms.h>
#include <linux/proc_fs.h>

#include <asm/irq.h>
#include <asm/system.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>

/*
 * Maximum IRQ count.  Currently, this is arbitrary.  However, it should
 * not be set too low to prevent false triggering.  Conversely, if it
 * is set too high, then you could miss a stuck IRQ.
 *
 * Maybe we ought to set a timer and re-enable the IRQ at a later time?
 */
#define MAX_IRQ_CNT	100000

static int noirqdebug;
static volatile unsigned long irq_err_count;
static DEFINE_SPINLOCK(irq_controller_lock);
static LIST_HEAD(irq_pending);

struct irqdesc irq_desc[NR_IRQS];
void (*init_arch_irq)(void) __initdata = NULL;

/*
 * No architecture-specific irq_finish function defined in asm/arch/irqs.h.
 */
#ifndef irq_finish
#define irq_finish(irq) do { } while (0)
#endif
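/*
 * Illustrative sketch (not part of this file): a machine's asm/arch/irqs.h
 * may supply its own irq_finish() to run per-IRQ epilogue work, such as
 * signalling end-of-interrupt to a system-level controller.  "EOI_REG"
 * below is a hypothetical register name:
 *
 *	#define irq_finish(irq)	do { writel(irq, EOI_REG); } while (0)
 *
 * asm_do_IRQ() invokes irq_finish() once the handlers have run.
 */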
/*
 * Dummy mask/unmask handler
 */
void dummy_mask_unmask_irq(unsigned int irq)
{
}

irqreturn_t no_action(int irq, void *dev_id, struct pt_regs *regs)
{
	return IRQ_NONE;
}

void do_bad_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
{
	irq_err_count += 1;
	printk(KERN_ERR "IRQ: spurious interrupt %d\n", irq);
}

static struct irqchip bad_chip = {
	.ack	= dummy_mask_unmask_irq,
	.mask	= dummy_mask_unmask_irq,
	.unmask	= dummy_mask_unmask_irq,
};

static struct irqdesc bad_irq_desc = {
	.chip		= &bad_chip,
	.handle		= do_bad_IRQ,
	.pend		= LIST_HEAD_INIT(bad_irq_desc.pend),
	.disable_depth	= 1,
};

#ifdef CONFIG_SMP
void synchronize_irq(unsigned int irq)
{
	struct irqdesc *desc = irq_desc + irq;

	while (desc->running)
		barrier();
}
EXPORT_SYMBOL(synchronize_irq);

#define smp_set_running(desc)	do { desc->running = 1; } while (0)
#define smp_clear_running(desc)	do { desc->running = 0; } while (0)
#else
#define smp_set_running(desc)	do { } while (0)
#define smp_clear_running(desc)	do { } while (0)
#endif

/**
 * disable_irq_nosync - disable an irq without waiting
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line.  Enables and disables
 * are nested.  We do this lazily.
 *
 * This function may be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
	struct irqdesc *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&irq_controller_lock, flags);
	desc->disable_depth++;
	list_del_init(&desc->pend);
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}
EXPORT_SYMBOL(disable_irq_nosync);

/**
 * disable_irq - disable an irq and wait for completion
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line.  Enables and disables
 * are nested.  This function waits for any pending IRQ
 * handlers for this interrupt to complete before returning.
 * If you use this function while holding a resource the IRQ
 * handler may need, you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
	struct irqdesc *desc = irq_desc + irq;

	disable_irq_nosync(irq);
	if (desc->action)
		synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);
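/*
 * Illustrative sketch: enables and disables nest, so each disable_irq()
 * must be balanced by exactly one enable_irq() (see below):
 *
 *	disable_irq(irq);	disable_depth 0 -> 1 (disabled lazily)
 *	disable_irq(irq);	disable_depth 1 -> 2
 *	enable_irq(irq);	disable_depth 2 -> 1 (still disabled)
 *	enable_irq(irq);	disable_depth 1 -> 0, unmasked; a pending
 *				IRQ recorded meanwhile is replayed
 */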
/**
 * enable_irq - enable interrupt handling on an irq
 * @irq: Interrupt to enable
 *
 * Re-enables the processing of interrupts on this IRQ line.
 * Note that this may call the interrupt handler, so you may
 * get unexpected results if you hold IRQs disabled.
 *
 * This function may be called from IRQ context.
 */
void enable_irq(unsigned int irq)
{
	struct irqdesc *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&irq_controller_lock, flags);
	if (unlikely(!desc->disable_depth)) {
		printk("enable_irq(%u) unbalanced from %p\n", irq,
			__builtin_return_address(0));
	} else if (!--desc->disable_depth) {
		desc->probing = 0;
		desc->chip->unmask(irq);

		/*
		 * If the interrupt is waiting to be processed,
		 * try to re-run it.  We can't directly run it
		 * from here since the caller might be in an
		 * interrupt-protected region.
		 */
		if (desc->pending && list_empty(&desc->pend)) {
			desc->pending = 0;
			if (!desc->chip->retrigger ||
			    desc->chip->retrigger(irq))
				list_add(&desc->pend, &irq_pending);
		}
	}
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}
EXPORT_SYMBOL(enable_irq);

/*
 * Enable wake on selected irq
 */
void enable_irq_wake(unsigned int irq)
{
	struct irqdesc *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&irq_controller_lock, flags);
	if (desc->chip->set_wake)
		desc->chip->set_wake(irq, 1);
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}
EXPORT_SYMBOL(enable_irq_wake);

void disable_irq_wake(unsigned int irq)
{
	struct irqdesc *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&irq_controller_lock, flags);
	if (desc->chip->set_wake)
		desc->chip->set_wake(irq, 0);
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}
EXPORT_SYMBOL(disable_irq_wake);

int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, cpu;
	struct irqaction * action;
	unsigned long flags;

	if (i == 0) {
		char cpuname[12];

		seq_printf(p, "    ");
		for_each_present_cpu(cpu) {
			sprintf(cpuname, "CPU%d", cpu);
			seq_printf(p, " %10s", cpuname);
		}
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		spin_lock_irqsave(&irq_controller_lock, flags);
		action = irq_desc[i].action;
		if (!action)
			goto unlock;

		seq_printf(p, "%3d: ", i);
		for_each_present_cpu(cpu)
			seq_printf(p, "%10u ", kstat_cpu(cpu).irqs[i]);
		seq_printf(p, "  %s", action->name);
		for (action = action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);

		seq_putc(p, '\n');
unlock:
		spin_unlock_irqrestore(&irq_controller_lock, flags);
	} else if (i == NR_IRQS) {
#ifdef CONFIG_ARCH_ACORN
		show_fiq_list(p, v);
#endif
#ifdef CONFIG_SMP
		show_ipi_list(p);
		show_local_irqs(p);
#endif
		seq_printf(p, "Err: %10lu\n", irq_err_count);
	}
	return 0;
}
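/*
 * Sample /proc/interrupts output produced by the above (illustrative;
 * the IRQ numbers, counts and device names are made up):
 *
 *	           CPU0
 *	 30:     125673   timer
 *	 44:       1842   serial
 *	Err:          0
 */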
/*
 * IRQ lock detection.
 *
 * Hopefully, this should get us out of a few locked situations.
 * However, it may take a while for this to happen, since we need
 * a large number of IRQs to appear in the same jiffy with the
 * same instruction pointer (or within 2 instructions).
 */
static int check_irq_lock(struct irqdesc *desc, int irq, struct pt_regs *regs)
{
	unsigned long instr_ptr = instruction_pointer(regs);

	if (desc->lck_jif == jiffies &&
	    desc->lck_pc >= instr_ptr && desc->lck_pc < instr_ptr + 8) {
		desc->lck_cnt += 1;

		if (desc->lck_cnt > MAX_IRQ_CNT) {
			printk(KERN_ERR "IRQ LOCK: IRQ%d is locking the system, disabled\n", irq);
			return 1;
		}
	} else {
		desc->lck_cnt = 0;
		desc->lck_pc  = instruction_pointer(regs);
		desc->lck_jif = jiffies;
	}
	return 0;
}

static void
report_bad_irq(unsigned int irq, struct pt_regs *regs, struct irqdesc *desc, int ret)
{
	static int count = 100;
	struct irqaction *action;

	if (!count || noirqdebug)
		return;

	count--;

	if (ret != IRQ_HANDLED && ret != IRQ_NONE) {
		printk("irq%u: bogus retval mask %x\n", irq, ret);
	} else {
		printk("irq%u: nobody cared\n", irq);
	}
	show_regs(regs);
	dump_stack();
	printk(KERN_ERR "handlers:");
	action = desc->action;
	do {
		printk("\n" KERN_ERR "[<%p>]", action->handler);
		print_symbol(" (%s)", (unsigned long)action->handler);
		action = action->next;
	} while (action);
	printk("\n");
}

static int
__do_irq(unsigned int irq, struct irqaction *action, struct pt_regs *regs)
{
	unsigned int status;
	int ret, retval = 0;

	spin_unlock(&irq_controller_lock);

#ifdef CONFIG_NO_IDLE_HZ
	if (!(action->flags & SA_TIMER) && system_timer->dyn_tick != NULL) {
		write_seqlock(&xtime_lock);
		if (system_timer->dyn_tick->state & DYN_TICK_ENABLED)
			system_timer->dyn_tick->handler(irq, 0, regs);
		write_sequnlock(&xtime_lock);
	}
#endif

	if (!(action->flags & SA_INTERRUPT))
		local_irq_enable();

	status = 0;
	do {
		ret = action->handler(irq, action->dev_id, regs);
		if (ret == IRQ_HANDLED)
			status |= action->flags;
		retval |= ret;
		action = action->next;
	} while (action);

	if (status & SA_SAMPLE_RANDOM)
		add_interrupt_randomness(irq);

	spin_lock_irq(&irq_controller_lock);

	return retval;
}

/*
 * This is for software-decoded IRQs.  The caller is expected to
 * handle the ack, clear, mask and unmask issues.
 */
void
do_simple_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
{
	struct irqaction *action;
	const unsigned int cpu = smp_processor_id();

	desc->triggered = 1;

	kstat_cpu(cpu).irqs[irq]++;

	smp_set_running(desc);

	action = desc->action;
	if (action) {
		int ret = __do_irq(irq, action, regs);
		if (ret != IRQ_HANDLED)
			report_bad_irq(irq, regs, desc, ret);
	}

	smp_clear_running(desc);
}
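/*
 * Illustrative note: __do_irq() above OR-combines the handler return
 * values, so with two handlers sharing one line:
 *
 *	handler A returns IRQ_NONE	(not my device)
 *	handler B returns IRQ_HANDLED	(serviced it)
 *	retval == IRQ_HANDLED		-> no bad-IRQ report
 *
 * Only if every handler returns IRQ_NONE does report_bad_irq() print
 * "nobody cared".
 */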
/*
 * Most edge-triggered IRQ implementations seem to take a broken
 * approach to this.  Hence the complexity.
 */
void
do_edge_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
{
	const unsigned int cpu = smp_processor_id();

	desc->triggered = 1;

	/*
	 * If we're currently running this IRQ, or it's disabled,
	 * we shouldn't process the IRQ.  Instead, turn on the
	 * hardware masks.
	 */
	if (unlikely(desc->running || desc->disable_depth))
		goto running;

	/*
	 * Acknowledge and clear the IRQ, but don't mask it.
	 */
	desc->chip->ack(irq);

	/*
	 * Mark the IRQ currently in progress.
	 */
	desc->running = 1;

	kstat_cpu(cpu).irqs[irq]++;

	do {
		struct irqaction *action;

		action = desc->action;
		if (!action)
			break;

		if (desc->pending && !desc->disable_depth) {
			desc->pending = 0;
			desc->chip->unmask(irq);
		}

		__do_irq(irq, action, regs);
	} while (desc->pending && !desc->disable_depth);

	desc->running = 0;

	/*
	 * If we were disabled or freed, shut down the handler.
	 */
	if (likely(desc->action && !check_irq_lock(desc, irq, regs)))
		return;

running:
	/*
	 * We got another IRQ while this one was masked or
	 * currently running.  Delay it.
	 */
	desc->pending = 1;
	desc->chip->mask(irq);
	desc->chip->ack(irq);
}

/*
 * Level-based IRQ handler.  Nice and simple.
 */
void
do_level_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
{
	struct irqaction *action;
	const unsigned int cpu = smp_processor_id();

	desc->triggered = 1;

	/*
	 * Acknowledge, clear _AND_ disable the interrupt.
	 */
	desc->chip->ack(irq);

	if (likely(!desc->disable_depth)) {
		kstat_cpu(cpu).irqs[irq]++;

		smp_set_running(desc);

		/*
		 * Return with this interrupt masked if no action
		 */
		action = desc->action;
		if (action) {
			int ret = __do_irq(irq, desc->action, regs);

			if (ret != IRQ_HANDLED)
				report_bad_irq(irq, regs, desc, ret);

			if (likely(!desc->disable_depth &&
				   !check_irq_lock(desc, irq, regs)))
				desc->chip->unmask(irq);
		}

		smp_clear_running(desc);
	}
}

static void do_pending_irqs(struct pt_regs *regs)
{
	struct list_head head, *l, *n;

	do {
		struct irqdesc *desc;

		/*
		 * First, take the pending interrupts off the list.
		 * The act of calling the handlers may add some IRQs
		 * back onto the list.
		 */
		head = irq_pending;
		INIT_LIST_HEAD(&irq_pending);
		head.next->prev = &head;
		head.prev->next = &head;

		/*
		 * Now run each entry.  We must delete it from our
		 * list before calling the handler.
		 */
		list_for_each_safe(l, n, &head) {
			desc = list_entry(l, struct irqdesc, pend);
			list_del_init(&desc->pend);
			desc_handle_irq(desc - irq_desc, desc, regs);
		}

		/*
		 * The list must be empty.
		 */
		BUG_ON(!list_empty(&head));
	} while (!list_empty(&irq_pending));
}
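/*
 * Illustrative note on the edge-IRQ replay path above: an edge IRQ that
 * arrives while its descriptor is disabled or already running is only
 * recorded (desc->pending = 1) and masked.  When enable_irq() later
 * drops disable_depth to zero, it either retriggers the line in
 * hardware or queues the descriptor on irq_pending, which asm_do_IRQ()
 * below drains via do_pending_irqs().
 */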
/*
 * do_IRQ handles all hardware IRQ's.  Decoded IRQs should not
 * come via this function.  Instead, they should provide their
 * own 'handler'.
 */
asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
{
	struct irqdesc *desc = irq_desc + irq;

	/*
	 * Some hardware gives randomly wrong interrupts.  Rather
	 * than crashing, do something sensible.
	 */
	if (irq >= NR_IRQS)
		desc = &bad_irq_desc;

	irq_enter();
	spin_lock(&irq_controller_lock);
	desc_handle_irq(irq, desc, regs);

	/*
	 * Now re-run any pending interrupts.
	 */
	if (!list_empty(&irq_pending))
		do_pending_irqs(regs);

	irq_finish(irq);

	spin_unlock(&irq_controller_lock);
	irq_exit();
}

void __set_irq_handler(unsigned int irq, irq_handler_t handle, int is_chained)
{
	struct irqdesc *desc;
	unsigned long flags;

	if (irq >= NR_IRQS) {
		printk(KERN_ERR "Trying to install handler for IRQ%d\n", irq);
		return;
	}

	if (handle == NULL)
		handle = do_bad_IRQ;

	desc = irq_desc + irq;

	if (is_chained && desc->chip == &bad_chip)
		printk(KERN_WARNING "Trying to install chained handler for IRQ%d\n", irq);

	spin_lock_irqsave(&irq_controller_lock, flags);
	if (handle == do_bad_IRQ) {
		desc->chip->mask(irq);
		desc->chip->ack(irq);
		desc->disable_depth = 1;
	}
	desc->handle = handle;
	if (handle != do_bad_IRQ && is_chained) {
		desc->valid = 0;
		desc->probe_ok = 0;
		desc->disable_depth = 0;
		desc->chip->unmask(irq);
	}
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}

void set_irq_chip(unsigned int irq, struct irqchip *chip)
{
	struct irqdesc *desc;
	unsigned long flags;

	if (irq >= NR_IRQS) {
		printk(KERN_ERR "Trying to install chip for IRQ%d\n", irq);
		return;
	}

	if (chip == NULL)
		chip = &bad_chip;

	desc = irq_desc + irq;
	spin_lock_irqsave(&irq_controller_lock, flags);
	desc->chip = chip;
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}

int set_irq_type(unsigned int irq, unsigned int type)
{
	struct irqdesc *desc;
	unsigned long flags;
	int ret = -ENXIO;

	if (irq >= NR_IRQS) {
		printk(KERN_ERR "Trying to set irq type for IRQ%d\n", irq);
		return -ENODEV;
	}

	desc = irq_desc + irq;
	if (desc->chip->set_type) {
		spin_lock_irqsave(&irq_controller_lock, flags);
		ret = desc->chip->set_type(irq, type);
		spin_unlock_irqrestore(&irq_controller_lock, flags);
	}

	return ret;
}
EXPORT_SYMBOL(set_irq_type);

void set_irq_flags(unsigned int irq, unsigned int iflags)
{
	struct irqdesc *desc;
	unsigned long flags;

	if (irq >= NR_IRQS) {
		printk(KERN_ERR "Trying to set irq flags for IRQ%d\n", irq);
		return;
	}

	desc = irq_desc + irq;
	spin_lock_irqsave(&irq_controller_lock, flags);
	desc->valid = (iflags & IRQF_VALID) != 0;
	desc->probe_ok = (iflags & IRQF_PROBE) != 0;
	desc->noautoenable = (iflags & IRQF_NOAUTOEN) != 0;
	spin_unlock_irqrestore(&irq_controller_lock, flags);
}
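/*
 * Typical use from a machine's IRQ initialisation code (illustrative
 * sketch; "my_chip" and the loop bound are hypothetical):
 *
 *	for (irq = 0; irq < 32; irq++) {
 *		set_irq_chip(irq, &my_chip);
 *		set_irq_handler(irq, do_level_IRQ);
 *		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
 *	}
 */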
int setup_irq(unsigned int irq, struct irqaction *new)
{
	int shared = 0;
	struct irqaction *old, **p;
	unsigned long flags;
	struct irqdesc *desc;

	/*
	 * Some drivers like serial.c use request_irq() heavily,
	 * so we have to be careful not to interfere with a
	 * running system.
	 */
	if (new->flags & SA_SAMPLE_RANDOM) {
		/*
		 * This function might sleep, we want to call it first,
		 * outside of the atomic block.
		 * Yes, this might clear the entropy pool if the wrong
		 * driver is loaded, without actually installing a new
		 * handler, but is this really a problem?  Only the
		 * sysadmin is able to do this.
		 */
		rand_initialize_irq(irq);
	}

	/*
	 * The following block of code has to be executed atomically
	 */
	desc = irq_desc + irq;
	spin_lock_irqsave(&irq_controller_lock, flags);
	p = &desc->action;
	if ((old = *p) != NULL) {
		/*
		 * Can't share interrupts unless both agree to and are
		 * the same type.
		 */
		if (!(old->flags & new->flags & SA_SHIRQ) ||
		    (~old->flags & new->flags) & SA_TRIGGER_MASK) {
			spin_unlock_irqrestore(&irq_controller_lock, flags);
			return -EBUSY;
		}

		/* add new interrupt at end of irq queue */
		do {
			p = &old->next;
			old = *p;
		} while (old);
		shared = 1;
	}

	*p = new;

	if (!shared) {
		desc->probing = 0;
		desc->running = 0;
		desc->pending = 0;
		desc->disable_depth = 1;

		if (new->flags & SA_TRIGGER_MASK &&
		    desc->chip->set_type) {
			unsigned int type = new->flags & SA_TRIGGER_MASK;
			desc->chip->set_type(irq, type);
		}

		if (!desc->noautoenable) {
			desc->disable_depth = 0;
			desc->chip->unmask(irq);
		}
	}

	spin_unlock_irqrestore(&irq_controller_lock, flags);
	return 0;
}

/**
 * request_irq - allocate an interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs
 * @irq_flags: Interrupt type flags
 * @devname: An ASCII name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling.  From the point this
 * call is made your handler function may be invoked.  Since
 * your handler function must clear any interrupt the board
 * raises, you must take care both to initialise your hardware
 * and to set up the interrupt handler in the right order.
 *
 * Dev_id must be globally unique.  Normally the address of the
 * device data structure is used as the cookie.  Since the handler
 * receives this value it makes sense to use it.
 *
 * If your interrupt is shared you must pass a non NULL dev_id
 * as this is required when freeing the interrupt.
 *
 * Flags:
 *
 *	SA_SHIRQ		Interrupt is shared
 *
 *	SA_INTERRUPT		Disable local interrupts while processing
 *
 *	SA_SAMPLE_RANDOM	The interrupt can be used for entropy
 */
int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *),
		unsigned long irq_flags, const char * devname, void *dev_id)
{
	int retval;
	struct irqaction *action;

	if (irq >= NR_IRQS || !irq_desc[irq].valid || !handler ||
	    (irq_flags & SA_SHIRQ && !dev_id))
		return -EINVAL;

	action = kmalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = irq_flags;
	cpus_clear(action->mask);
	action->name = devname;
	action->next = NULL;
	action->dev_id = dev_id;

	retval = setup_irq(irq, action);

	if (retval)
		kfree(action);
	return retval;
}

EXPORT_SYMBOL(request_irq);
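/*
 * Illustrative driver usage (a sketch only; "struct my_dev" and
 * my_interrupt() are hypothetical):
 *
 *	static irqreturn_t my_interrupt(int irq, void *dev_id,
 *					struct pt_regs *regs)
 *	{
 *		struct my_dev *dev = dev_id;
 *
 *		if (!device_raised_irq(dev))
 *			return IRQ_NONE;	(shared line, not us)
 *		... service and clear the interrupt ...
 *		return IRQ_HANDLED;
 *	}
 *
 *	err = request_irq(dev->irq, my_interrupt, SA_SHIRQ, "mydev", dev);
 *	...
 *	free_irq(dev->irq, dev);
 */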
/**
 * free_irq - free an interrupt
 * @irq: Interrupt line to free
 * @dev_id: Device identity to free
 *
 * Remove an interrupt handler.  The handler is removed and if the
 * interrupt line is no longer in use by any driver it is disabled.
 * On a shared IRQ the caller must ensure the interrupt is disabled
 * on the card it drives before calling this function.
 *
 * This function must not be called from interrupt context.
 */
void free_irq(unsigned int irq, void *dev_id)
{
	struct irqaction * action, **p;
	unsigned long flags;

	if (irq >= NR_IRQS || !irq_desc[irq].valid) {
		printk(KERN_ERR "Trying to free IRQ%d\n", irq);
		dump_stack();
		return;
	}

	spin_lock_irqsave(&irq_controller_lock, flags);
	for (p = &irq_desc[irq].action; (action = *p) != NULL; p = &action->next) {
		if (action->dev_id != dev_id)
			continue;

		/* Found it - now free it */
		*p = action->next;
		break;
	}
	spin_unlock_irqrestore(&irq_controller_lock, flags);

	if (!action) {
		printk(KERN_ERR "Trying to free already-free IRQ%d\n", irq);
		dump_stack();
	} else {
		synchronize_irq(irq);
		kfree(action);
	}
}

EXPORT_SYMBOL(free_irq);

static DECLARE_MUTEX(probe_sem);

/* Start the interrupt probing.  Unlike other architectures,
 * we don't return a mask of interrupts from probe_irq_on,
 * but return the number of interrupts enabled for the probe.
 * The interrupts which have been enabled for probing are
 * instead recorded in the irq_desc structure.
 */
unsigned long probe_irq_on(void)
{
	unsigned int i, irqs = 0;
	unsigned long delay;

	down(&probe_sem);

	/*
	 * first snaffle up any unassigned but
	 * probe-able interrupts
	 */
	spin_lock_irq(&irq_controller_lock);
	for (i = 0; i < NR_IRQS; i++) {
		if (!irq_desc[i].probe_ok || irq_desc[i].action)
			continue;

		irq_desc[i].probing = 1;
		irq_desc[i].triggered = 0;
		if (irq_desc[i].chip->set_type)
			irq_desc[i].chip->set_type(i, IRQT_PROBE);
		irq_desc[i].chip->unmask(i);
		irqs += 1;
	}
	spin_unlock_irq(&irq_controller_lock);

	/*
	 * wait for spurious interrupts to mask themselves out again
	 */
	for (delay = jiffies + HZ/10; time_before(jiffies, delay); )
		/* min 100ms delay */;

	/*
	 * now filter out any obviously spurious interrupts
	 */
	spin_lock_irq(&irq_controller_lock);
	for (i = 0; i < NR_IRQS; i++) {
		if (irq_desc[i].probing && irq_desc[i].triggered) {
			irq_desc[i].probing = 0;
			irqs -= 1;
		}
	}
	spin_unlock_irq(&irq_controller_lock);

	return irqs;
}

EXPORT_SYMBOL(probe_irq_on);

unsigned int probe_irq_mask(unsigned long irqs)
{
	unsigned int mask = 0, i;

	spin_lock_irq(&irq_controller_lock);
	for (i = 0; i < 16 && i < NR_IRQS; i++)
		if (irq_desc[i].probing && irq_desc[i].triggered)
			mask |= 1 << i;
	spin_unlock_irq(&irq_controller_lock);

	up(&probe_sem);

	return mask;
}
EXPORT_SYMBOL(probe_irq_mask);
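/*
 * Illustrative probe sequence from a driver (the device-poking step is
 * hypothetical):
 *
 *	unsigned long cookie = probe_irq_on();
 *	... program the device to assert its interrupt line ...
 *	irq = probe_irq_off(cookie);	(see below)
 *	if (irq > 0)
 *		... irq is the probed interrupt number ...
 */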
/*
 * Possible return values:
 *  >= 0 - interrupt number
 *    -1 - no interrupt/many interrupts
 */
int probe_irq_off(unsigned long irqs)
{
	unsigned int i;
	int irq_found = NO_IRQ;

	/*
	 * look at the interrupts, and find exactly one of
	 * those we were probing that has been triggered
	 */
	spin_lock_irq(&irq_controller_lock);
	for (i = 0; i < NR_IRQS; i++) {
		if (irq_desc[i].probing &&
		    irq_desc[i].triggered) {
			if (irq_found != NO_IRQ) {
				irq_found = NO_IRQ;
				goto out;
			}
			irq_found = i;
		}
	}

	if (irq_found == -1)
		irq_found = NO_IRQ;
out:
	spin_unlock_irq(&irq_controller_lock);

	up(&probe_sem);

	return irq_found;
}

EXPORT_SYMBOL(probe_irq_off);

#ifdef CONFIG_SMP
static void route_irq(struct irqdesc *desc, unsigned int irq, unsigned int cpu)
{
	pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->cpu, cpu);

	spin_lock_irq(&irq_controller_lock);
	desc->cpu = cpu;
	desc->chip->set_cpu(desc, irq, cpu);
	spin_unlock_irq(&irq_controller_lock);
}

#ifdef CONFIG_PROC_FS
static int
irq_affinity_read_proc(char *page, char **start, off_t off, int count,
			int *eof, void *data)
{
	struct irqdesc *desc = irq_desc + ((int)data);
	int len = cpumask_scnprintf(page, count, desc->affinity);

	if (count - len < 2)
		return -EINVAL;
	page[len++] = '\n';
	page[len] = '\0';

	return len;
}

static int
irq_affinity_write_proc(struct file *file, const char __user *buffer,
			unsigned long count, void *data)
{
	unsigned int irq = (unsigned int)data;
	struct irqdesc *desc = irq_desc + irq;
	cpumask_t affinity, tmp;
	int ret = -EIO;

	if (!desc->chip->set_cpu)
		goto out;

	ret = cpumask_parse(buffer, count, affinity);
	if (ret)
		goto out;

	cpus_and(tmp, affinity, cpu_online_map);
	if (cpus_empty(tmp)) {
		ret = -EINVAL;
		goto out;
	}

	desc->affinity = affinity;
	route_irq(desc, irq, first_cpu(tmp));
	ret = count;

out:
	return ret;
}
#endif
#endif

void __init init_irq_proc(void)
{
#if defined(CONFIG_SMP) && defined(CONFIG_PROC_FS)
	struct proc_dir_entry *dir;
	int irq;

	dir = proc_mkdir("irq", NULL);
	if (!dir)
		return;

	for (irq = 0; irq < NR_IRQS; irq++) {
		struct proc_dir_entry *entry;
		struct irqdesc *desc;
		char name[16];

		desc = irq_desc + irq;
		memset(name, 0, sizeof(name));
		snprintf(name, sizeof(name) - 1, "%u", irq);

		desc->procdir = proc_mkdir(name, dir);
		if (!desc->procdir)
			continue;

		entry = create_proc_entry("smp_affinity", 0600, desc->procdir);
		if (entry) {
			entry->nlink = 1;
			entry->data = (void *)irq;
			entry->read_proc = irq_affinity_read_proc;
			entry->write_proc = irq_affinity_write_proc;
		}
	}
#endif
}

void __init init_IRQ(void)
{
	struct irqdesc *desc;
	int irq;

#ifdef CONFIG_SMP
	bad_irq_desc.affinity = CPU_MASK_ALL;
	bad_irq_desc.cpu = smp_processor_id();
#endif

	for (irq = 0, desc = irq_desc; irq < NR_IRQS; irq++, desc++) {
		*desc = bad_irq_desc;
		INIT_LIST_HEAD(&desc->pend);
	}

	init_arch_irq();
}

static int __init noirqdebug_setup(char *str)
{
	noirqdebug = 1;
	return 1;
}

__setup("noirqdebug", noirqdebug_setup);
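/*
 * Illustrative: booting with "noirqdebug" on the kernel command line,
 * e.g.
 *
 *	console=ttyS0 root=/dev/nfs noirqdebug
 *
 * sets noirqdebug above and silences the report_bad_irq() diagnostics.
 */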
#ifdef CONFIG_HOTPLUG_CPU
/*
 * The CPU has been marked offline.  Migrate IRQs off this CPU.  If
 * the affinity settings do not allow other CPUs, force them onto any
 * available CPU.
 */
void migrate_irqs(void)
{
	unsigned int i, cpu = smp_processor_id();

	for (i = 0; i < NR_IRQS; i++) {
		struct irqdesc *desc = irq_desc + i;

		if (desc->cpu == cpu) {
			unsigned int newcpu = any_online_cpu(desc->affinity);

			if (newcpu == NR_CPUS) {
				if (printk_ratelimit())
					printk(KERN_INFO "IRQ%u no longer affine to CPU%u\n",
					       i, cpu);

				cpus_setall(desc->affinity);
				newcpu = any_online_cpu(desc->affinity);
			}

			route_irq(desc, i, newcpu);
		}
	}
}
#endif /* CONFIG_HOTPLUG_CPU */