/*
 * arch/sparc/kernel/irq.c:  Interrupt request handling routines. On the
 *                           Sparc the IRQs are basically 'cast in stone'
 *                           and you are supposed to probe the prom's device
 *                           node trees to find out who's got which IRQ.
 *
 *  Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 *  Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx)
 *  Copyright (C) 1995,2002 Pete A. Zaitcev (zaitcev@yahoo.com)
 *  Copyright (C) 1996 Dave Redman (djhr@tadpole.co.uk)
 *  Copyright (C) 1998-2000 Anton Blanchard (anton@samba.org)
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/linkage.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/threads.h>
#include <linux/spinlock.h>
#include <linux/seq_file.h>

#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/psr.h>
#include <asm/smp.h>
#include <asm/vaddrs.h>
#include <asm/timer.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/traps.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/pcic.h>
#include <asm/cacheflush.h>
#include <asm/irq_regs.h>
#include <asm/leon.h>

#include "kernel.h"
#include "irq.h"

#ifdef CONFIG_SMP
#define SMP_NOP2 "nop; nop;\n\t"
#define SMP_NOP3 "nop; nop; nop;\n\t"
#else
#define SMP_NOP2
#define SMP_NOP3
#endif /* SMP */

unsigned long __raw_local_irq_save(void)
{
        unsigned long retval;
        unsigned long tmp;

        __asm__ __volatile__(
                "rd %%psr, %0\n\t"
                SMP_NOP3        /* Sun4m + Cypress + SMP bug */
                "or %0, %2, %1\n\t"
                "wr %1, 0, %%psr\n\t"
                "nop; nop; nop\n"
                : "=&r" (retval), "=r" (tmp)
                : "i" (PSR_PIL)
                : "memory");

        return retval;
}

void raw_local_irq_enable(void)
{
        unsigned long tmp;

        __asm__ __volatile__(
                "rd %%psr, %0\n\t"
                SMP_NOP3        /* Sun4m + Cypress + SMP bug */
                "andn %0, %1, %0\n\t"
                "wr %0, 0, %%psr\n\t"
                "nop; nop; nop\n"
                : "=&r" (tmp)
                : "i" (PSR_PIL)
                : "memory");
}

void raw_local_irq_restore(unsigned long old_psr)
{
        unsigned long tmp;

        __asm__ __volatile__(
                "rd %%psr, %0\n\t"
                "and %2, %1, %2\n\t"
                SMP_NOP2        /* Sun4m + Cypress + SMP bug */
                "andn %0, %1, %0\n\t"
                "wr %0, %2, %%psr\n\t"
                "nop; nop; nop\n"
                : "=&r" (tmp)
                : "i" (PSR_PIL), "r" (old_psr)
                : "memory");
}

EXPORT_SYMBOL(__raw_local_irq_save);
EXPORT_SYMBOL(raw_local_irq_enable);
EXPORT_SYMBOL(raw_local_irq_restore);
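
/*
 * Usage sketch (editor's illustrative note, not code from this file):
 * these primitives back the generic local_irq_save()/local_irq_restore()
 * wrappers. A typical critical section over per-CPU state looks like:
 *
 *      unsigned long flags;
 *
 *      flags = __raw_local_irq_save();      (raise PIL, mask local IRQs)
 *      ... touch state that interrupt handlers also touch ...
 *      raw_local_irq_restore(flags);        (put saved PSR_PIL bits back)
 */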

/*
 * Dave Redman (djhr@tadpole.co.uk)
 *
 * IRQ numbers.. These are no longer restricted to 15..
 *
 * This is done to enable SBUS cards and onboard IO to be masked
 * correctly. Using the interrupt level alone isn't good enough.
 *
 * For example:
 *   A device interrupting at SBUS level 6 and the floppy both come in
 *   at IRQ11, but enabling and disabling them requires writing to
 *   different bits in the SLAVIO/SEC.
 *
 * As a result of these changes sun4m machines can now support
 * directed CPU interrupts using the existing enable/disable irq code
 * with tweaks.
 */

static void irq_panic(void)
{
        extern char *cputypval;

        prom_printf("machine: %s doesn't have irq handlers defined!\n",
                    cputypval);
        prom_halt();
}

void (*sparc_init_timers)(irq_handler_t) =
        (void (*)(irq_handler_t)) irq_panic;

/*
 * Dave Redman (djhr@tadpole.co.uk)
 *
 * There used to be extern calls and hard coded values here.. very sucky!
 * Instead, because some of the devices attach very early, I do something
 * equally sucky but at least we'll never try to free statically allocated
 * space or call kmalloc before kmalloc_init :(.
 *
 * In fact it's the timer10 that attaches first.. then timer14,
 * then kmalloc_init is called.. then the tty interrupts attach.
 * hmmm....
 */
#define MAX_STATIC_ALLOC        4
struct irqaction static_irqaction[MAX_STATIC_ALLOC];
int static_irq_count;

static struct {
        struct irqaction *action;
        int flags;
} sparc_irq[NR_IRQS];
#define SPARC_IRQ_INPROGRESS 1

/* Used to protect the IRQ action lists */
DEFINE_SPINLOCK(irq_action_lock);

int show_interrupts(struct seq_file *p, void *v)
{
        int i = *(loff_t *) v;
        struct irqaction *action;
        unsigned long flags;
#ifdef CONFIG_SMP
        int j;
#endif

        if (sparc_cpu_model == sun4d) {
                extern int show_sun4d_interrupts(struct seq_file *, void *);

                return show_sun4d_interrupts(p, v);
        }
        spin_lock_irqsave(&irq_action_lock, flags);
        if (i < NR_IRQS) {
                action = sparc_irq[i].action;
                if (!action)
                        goto out_unlock;
                seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
                seq_printf(p, "%10u ", kstat_irqs(i));
#else
                for_each_online_cpu(j) {
                        seq_printf(p, "%10u ",
                                   kstat_cpu(j).irqs[i]);
                }
#endif
                seq_printf(p, " %c %s",
                           (action->flags & IRQF_DISABLED) ? '+' : ' ',
                           action->name);
                for (action = action->next; action; action = action->next) {
                        seq_printf(p, ",%s %s",
                                   (action->flags & IRQF_DISABLED) ? " +" : "",
                                   action->name);
                }
                seq_putc(p, '\n');
        }
out_unlock:
        spin_unlock_irqrestore(&irq_action_lock, flags);
        return 0;
}
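
/*
 * For reference (editor's sketch, derived from the seq_printf() format
 * strings above), a /proc/interrupts line produced by this handler
 * looks roughly like:
 *
 *       11:      54321  + floppy, esp
 *
 * where '+' marks an IRQF_DISABLED action, one count column appears per
 * online CPU on SMP, and the comma-separated names are the handlers
 * sharing the line.
 */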

void free_irq(unsigned int irq, void *dev_id)
{
        struct irqaction *action;
        struct irqaction **actionp;
        unsigned long flags;
        unsigned int cpu_irq;

        if (sparc_cpu_model == sun4d) {
                extern void sun4d_free_irq(unsigned int, void *);

                sun4d_free_irq(irq, dev_id);
                return;
        }
        cpu_irq = irq & (NR_IRQS - 1);
        if (cpu_irq > 14) {     /* 14 irq levels on the sparc */
                printk("Trying to free bogus IRQ %d\n", irq);
                return;
        }

        spin_lock_irqsave(&irq_action_lock, flags);

        actionp = &sparc_irq[cpu_irq].action;
        action = *actionp;

        if (!action || !action->handler) {
                printk("Trying to free already-free IRQ%d\n", irq);
                goto out_unlock;
        }
        if (dev_id) {
                for (; action; action = action->next) {
                        if (action->dev_id == dev_id)
                                break;
                        actionp = &action->next;
                }
                if (!action) {
                        printk("Trying to free already-free shared IRQ%d\n",
                               irq);
                        goto out_unlock;
                }
        } else if (action->flags & IRQF_SHARED) {
                printk("Trying to free shared IRQ%d with NULL device ID\n",
                       irq);
                goto out_unlock;
        }
        if (action->flags & SA_STATIC_ALLOC) {
                /* This interrupt is marked as specially allocated
                 * so it is a bad idea to free it.
                 */
                printk("Attempt to free statically allocated IRQ%d (%s)\n",
                       irq, action->name);
                goto out_unlock;
        }

        *actionp = action->next;

        spin_unlock_irqrestore(&irq_action_lock, flags);

        synchronize_irq(irq);

        spin_lock_irqsave(&irq_action_lock, flags);

        kfree(action);

        if (!sparc_irq[cpu_irq].action)
                __disable_irq(irq);

out_unlock:
        spin_unlock_irqrestore(&irq_action_lock, flags);
}

EXPORT_SYMBOL(free_irq);

/*
 * This is called when we want to synchronize with
 * interrupts. We may for example tell a device to
 * stop sending interrupts: but to make sure there
 * are no interrupts that are executing on another
 * CPU we need to call this function.
 */
#ifdef CONFIG_SMP
void synchronize_irq(unsigned int irq)
{
        unsigned int cpu_irq;

        cpu_irq = irq & (NR_IRQS - 1);
        while (sparc_irq[cpu_irq].flags & SPARC_IRQ_INPROGRESS)
                cpu_relax();
}
EXPORT_SYMBOL(synchronize_irq);
#endif /* SMP */

void unexpected_irq(int irq, void *dev_id, struct pt_regs *regs)
{
        int i;
        struct irqaction *action;
        unsigned int cpu_irq;

        cpu_irq = irq & (NR_IRQS - 1);
        action = sparc_irq[cpu_irq].action;

        printk("IO device interrupt, irq = %d\n", irq);
        printk("PC = %08lx NPC = %08lx FP=%08lx\n", regs->pc,
               regs->npc, regs->u_regs[14]);
        if (action) {
                printk("Expecting: ");
                /* Walk the whole chain of handlers attached to this line. */
                for (i = 0; action; action = action->next, i++)
                        if (action->handler)
                                printk("[%s:%d:0x%x] ", action->name,
                                       i, (unsigned int) action->handler);
        }
        printk("AIEEE\n");
        panic("bogus interrupt received");
}

void handler_irq(int irq, struct pt_regs *regs)
{
        struct pt_regs *old_regs;
        struct irqaction *action;
        int cpu = smp_processor_id();
#ifdef CONFIG_SMP
        extern void smp4m_irq_rotate(int cpu);
#endif

        old_regs = set_irq_regs(regs);
        irq_enter();
        disable_pil_irq(irq);
#ifdef CONFIG_SMP
        /* Only rotate on lower priority IRQs (scsi, ethernet, etc.). */
        if ((sparc_cpu_model == sun4m) && (irq < 10))
                smp4m_irq_rotate(cpu);
#endif
        action = sparc_irq[irq].action;
        sparc_irq[irq].flags |= SPARC_IRQ_INPROGRESS;
        kstat_cpu(cpu).irqs[irq]++;
        do {
                if (!action || !action->handler)
                        unexpected_irq(irq, NULL, regs);
                action->handler(irq, action->dev_id);
                action = action->next;
        } while (action);
        sparc_irq[irq].flags &= ~SPARC_IRQ_INPROGRESS;
        enable_pil_irq(irq);
        irq_exit();
        set_irq_regs(old_regs);
}
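
/*
 * Illustrative sketch (editor's note; mydev_interrupt and struct mydev
 * are hypothetical): a handler invoked through the action chain that
 * handler_irq() walks above.
 *
 *      static irqreturn_t mydev_interrupt(int irq, void *dev_id)
 *      {
 *              struct mydev *dev = dev_id;
 *
 *              ... acknowledge the device, do the work ...
 *              return IRQ_HANDLED;
 *      }
 */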

#if defined(CONFIG_BLK_DEV_FD) || defined(CONFIG_BLK_DEV_FD_MODULE)

/* Fast IRQs on the Sparc can only have one routine attached to them,
 * thus no sharing possible.
 */
static int request_fast_irq(unsigned int irq,
                            void (*handler)(void),
                            unsigned long irqflags, const char *devname)
{
        struct irqaction *action;
        unsigned long flags;
        unsigned int cpu_irq;
        int ret;
#ifdef CONFIG_SMP
        struct tt_entry *trap_table;
        extern struct tt_entry trapbase_cpu1, trapbase_cpu2, trapbase_cpu3;
#endif

        cpu_irq = irq & (NR_IRQS - 1);
        if (cpu_irq > 14) {
                ret = -EINVAL;
                goto out;
        }
        if (!handler) {
                ret = -EINVAL;
                goto out;
        }

        spin_lock_irqsave(&irq_action_lock, flags);

        action = sparc_irq[cpu_irq].action;
        if (action) {
                if (action->flags & IRQF_SHARED)
                        panic("Trying to register fast irq when already shared.\n");
                if (irqflags & IRQF_SHARED)
                        panic("Trying to register fast irq as shared.\n");

                /* Anyway, someone already owns it so it cannot be made fast. */
                printk("request_fast_irq: IRQ%d is already owned.\n", irq);
                ret = -EBUSY;
                goto out_unlock;
        }

        /* If this is flagged as statically allocated then we use our
         * private struct which is never freed.
         */
        if (irqflags & SA_STATIC_ALLOC) {
                if (static_irq_count < MAX_STATIC_ALLOC)
                        action = &static_irqaction[static_irq_count++];
                else
                        printk("Fast IRQ%d (%s): SA_STATIC_ALLOC failed, using kmalloc\n",
                               irq, devname);
        }

        if (action == NULL)
                action = kmalloc(sizeof(struct irqaction), GFP_ATOMIC);

        if (!action) {
                ret = -ENOMEM;
                goto out_unlock;
        }

        /* Dork with trap table if we get this far. */
#define INSTANTIATE(table) \
        table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_one = SPARC_RD_PSR_L0; \
        table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_two = \
                SPARC_BRANCH((unsigned long) handler, \
                             (unsigned long) &table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_two); \
        table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_three = SPARC_RD_WIM_L3; \
        table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_four = SPARC_NOP;

        INSTANTIATE(sparc_ttable)
#ifdef CONFIG_SMP
        trap_table = &trapbase_cpu1; INSTANTIATE(trap_table)
        trap_table = &trapbase_cpu2; INSTANTIATE(trap_table)
        trap_table = &trapbase_cpu3; INSTANTIATE(trap_table)
#endif
#undef INSTANTIATE
        /*
         * XXX The correct thing would be to flush only the I- and D-cache
         * lines which contain the handler in question. But as of this
         * writing we have no CPU-neutral interface to fine-grained flushes.
         */
        flush_cache_all();

        action->flags = irqflags;
        action->name = devname;
        action->dev_id = NULL;
        action->next = NULL;

        sparc_irq[cpu_irq].action = action;

        __enable_irq(irq);

        ret = 0;
out_unlock:
        spin_unlock_irqrestore(&irq_action_lock, flags);
out:
        return ret;
}
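
/*
 * For reference (editor's reconstruction from the SPARC_* helper names
 * used by INSTANTIATE() above; illustrative, not disassembled output),
 * the patched four-instruction trap-table slot ends up looking roughly
 * like:
 *
 *      rd      %psr, %l0       ! SPARC_RD_PSR_L0
 *      b       handler         ! SPARC_BRANCH(handler, slot)
 *      rd      %wim, %l3       ! SPARC_RD_WIM_L3
 *      nop                     ! SPARC_NOP
 *
 * i.e. the fast handler is entered straight from the trap table, which
 * is why such an IRQ can never be shared.
 */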

/* These variables are used to access state from the assembler
 * interrupt handler, floppy_hardint, so we cannot put these in
 * the floppy driver image because that would not work in the
 * modular case.
 */
volatile unsigned char *fdc_status;
EXPORT_SYMBOL(fdc_status);

char *pdma_vaddr;
EXPORT_SYMBOL(pdma_vaddr);

unsigned long pdma_size;
EXPORT_SYMBOL(pdma_size);

volatile int doing_pdma;
EXPORT_SYMBOL(doing_pdma);

char *pdma_base;
EXPORT_SYMBOL(pdma_base);

unsigned long pdma_areasize;
EXPORT_SYMBOL(pdma_areasize);

extern void floppy_hardint(void);

static irq_handler_t floppy_irq_handler;

void sparc_floppy_irq(int irq, void *dev_id, struct pt_regs *regs)
{
        struct pt_regs *old_regs;
        int cpu = smp_processor_id();

        old_regs = set_irq_regs(regs);
        disable_pil_irq(irq);
        irq_enter();
        kstat_cpu(cpu).irqs[irq]++;
        floppy_irq_handler(irq, dev_id);
        irq_exit();
        enable_pil_irq(irq);
        set_irq_regs(old_regs);
        /*
         * XXX Eek, it's totally changed with preempt_count() and such
         * if (softirq_pending(cpu))
         *      do_softirq();
         */
}

int sparc_floppy_request_irq(int irq, unsigned long flags,
                             irq_handler_t irq_handler)
{
        floppy_irq_handler = irq_handler;
        return request_fast_irq(irq, floppy_hardint, flags, "floppy");
}
EXPORT_SYMBOL(sparc_floppy_request_irq);

#endif

int request_irq(unsigned int irq,
                irq_handler_t handler,
                unsigned long irqflags, const char *devname, void *dev_id)
{
        struct irqaction *action, **actionp;
        unsigned long flags;
        unsigned int cpu_irq;
        int ret;

        if (sparc_cpu_model == sun4d) {
                extern int sun4d_request_irq(unsigned int,
                                             irq_handler_t,
                                             unsigned long, const char *, void *);

                return sun4d_request_irq(irq, handler, irqflags, devname, dev_id);
        }
        cpu_irq = irq & (NR_IRQS - 1);
        if (cpu_irq > 14) {
                ret = -EINVAL;
                goto out;
        }
        if (!handler) {
                ret = -EINVAL;
                goto out;
        }

        spin_lock_irqsave(&irq_action_lock, flags);

        actionp = &sparc_irq[cpu_irq].action;
        action = *actionp;
        if (action) {
                if (!(action->flags & IRQF_SHARED) || !(irqflags & IRQF_SHARED)) {
                        ret = -EBUSY;
                        goto out_unlock;
                }
                if ((action->flags & IRQF_DISABLED) != (irqflags & IRQF_DISABLED)) {
                        printk("Attempt to mix fast and slow interrupts on IRQ%d denied\n",
                               irq);
                        ret = -EBUSY;
                        goto out_unlock;
                }
                /* Walk to the tail of the shared action list. */
                for (; action; action = *actionp)
                        actionp = &action->next;
        }

        /* If this is flagged as statically allocated then we use our
         * private struct which is never freed.
         */
        if (irqflags & SA_STATIC_ALLOC) {
                if (static_irq_count < MAX_STATIC_ALLOC)
                        action = &static_irqaction[static_irq_count++];
                else
                        printk("Request for IRQ%d (%s): SA_STATIC_ALLOC failed, using kmalloc\n",
                               irq, devname);
        }

        if (action == NULL)
                action = kmalloc(sizeof(struct irqaction), GFP_ATOMIC);

        if (!action) {
                ret = -ENOMEM;
                goto out_unlock;
        }

        action->handler = handler;
        action->flags = irqflags;
        action->name = devname;
        action->next = NULL;
        action->dev_id = dev_id;

        *actionp = action;

        __enable_irq(irq);

        ret = 0;
out_unlock:
        spin_unlock_irqrestore(&irq_action_lock, flags);
out:
        return ret;
}

EXPORT_SYMBOL(request_irq);
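
/*
 * Illustrative sketch (editor's note; the mydev names are hypothetical):
 * the usual register/unregister pairing against the request_irq() and
 * free_irq() implementations above.
 *
 *      err = request_irq(dev->irq, mydev_interrupt, IRQF_SHARED,
 *                        "mydev", dev);
 *      if (err)
 *              return err;
 *      ...
 *      free_irq(dev->irq, dev);
 */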

void disable_irq_nosync(unsigned int irq)
{
        __disable_irq(irq);
}
EXPORT_SYMBOL(disable_irq_nosync);

/* Note: identical to disable_irq_nosync() here; we do not wait for
 * handlers in flight on other CPUs.
 */
void disable_irq(unsigned int irq)
{
        __disable_irq(irq);
}
EXPORT_SYMBOL(disable_irq);

void enable_irq(unsigned int irq)
{
        __enable_irq(irq);
}

EXPORT_SYMBOL(enable_irq);

/* We really don't need these at all on the Sparc. We only have
 * stubs here because they are exported to modules.
 */
unsigned long probe_irq_on(void)
{
        return 0;
}

EXPORT_SYMBOL(probe_irq_on);

int probe_irq_off(unsigned long mask)
{
        return 0;
}

EXPORT_SYMBOL(probe_irq_off);

/* djhr
 * This could probably be made indirect too and assigned in the CPU
 * bits of the code. That would be much nicer I think and would also
 * fit in with the idea of being able to tune your kernel for your machine
 * by removing unneeded machine and device support.
 */

void __init init_IRQ(void)
{
        extern void sun4c_init_IRQ(void);
        extern void sun4m_init_IRQ(void);
        extern void sun4d_init_IRQ(void);

        switch (sparc_cpu_model) {
        case sun4c:
        case sun4:
                sun4c_init_IRQ();
                break;

        case sun4m:
#ifdef CONFIG_PCI
                pcic_probe();
                if (pcic_present()) {
                        sun4m_pci_init_IRQ();
                        break;
                }
#endif
                sun4m_init_IRQ();
                break;

        case sun4d:
                sun4d_init_IRQ();
                break;

        case sparc_leon:
                leon_init_IRQ();
                break;

        default:
                prom_printf("Cannot initialize IRQs on this Sun machine...");
                break;
        }
        btfixup();
}

#ifdef CONFIG_PROC_FS
void init_irq_proc(void)
{
        /* For now, nothing... */
}
#endif /* CONFIG_PROC_FS */
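
/*
 * For reference (editor's sketch of the generic autoprobe sequence that
 * the probe_irq_on()/probe_irq_off() stubs above short-circuit; since
 * both return 0 on sparc, autoprobing always reports "no IRQ found"):
 *
 *      unsigned long mask = probe_irq_on();
 *      ... provoke the device into raising its interrupt ...
 *      irq = probe_irq_off(mask);      (always 0 here)
 */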