1 /* 2 * Interrupt request handling routines. On the 3 * Sparc the IRQs are basically 'cast in stone' 4 * and you are supposed to probe the prom's device 5 * node trees to find out who's got which IRQ. 6 * 7 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) 8 * Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx) 9 * Copyright (C) 1995,2002 Pete A. Zaitcev (zaitcev@yahoo.com) 10 * Copyright (C) 1996 Dave Redman (djhr@tadpole.co.uk) 11 * Copyright (C) 1998-2000 Anton Blanchard (anton@samba.org) 12 */ 13 14 #include <linux/kernel_stat.h> 15 #include <linux/seq_file.h> 16 17 #include <asm/cacheflush.h> 18 #include <asm/pcic.h> 19 #include <asm/leon.h> 20 21 #include "kernel.h" 22 #include "irq.h" 23 24 #ifdef CONFIG_SMP 25 #define SMP_NOP2 "nop; nop;\n\t" 26 #define SMP_NOP3 "nop; nop; nop;\n\t" 27 #else 28 #define SMP_NOP2 29 #define SMP_NOP3 30 #endif /* SMP */ 31 32 /* platform specific irq setup */ 33 struct sparc_irq_config sparc_irq_config; 34 35 unsigned long arch_local_irq_save(void) 36 { 37 unsigned long retval; 38 unsigned long tmp; 39 40 __asm__ __volatile__( 41 "rd %%psr, %0\n\t" 42 SMP_NOP3 /* Sun4m + Cypress + SMP bug */ 43 "or %0, %2, %1\n\t" 44 "wr %1, 0, %%psr\n\t" 45 "nop; nop; nop\n" 46 : "=&r" (retval), "=r" (tmp) 47 : "i" (PSR_PIL) 48 : "memory"); 49 50 return retval; 51 } 52 EXPORT_SYMBOL(arch_local_irq_save); 53 54 void arch_local_irq_enable(void) 55 { 56 unsigned long tmp; 57 58 __asm__ __volatile__( 59 "rd %%psr, %0\n\t" 60 SMP_NOP3 /* Sun4m + Cypress + SMP bug */ 61 "andn %0, %1, %0\n\t" 62 "wr %0, 0, %%psr\n\t" 63 "nop; nop; nop\n" 64 : "=&r" (tmp) 65 : "i" (PSR_PIL) 66 : "memory"); 67 } 68 EXPORT_SYMBOL(arch_local_irq_enable); 69 70 void arch_local_irq_restore(unsigned long old_psr) 71 { 72 unsigned long tmp; 73 74 __asm__ __volatile__( 75 "rd %%psr, %0\n\t" 76 "and %2, %1, %2\n\t" 77 SMP_NOP2 /* Sun4m + Cypress + SMP bug */ 78 "andn %0, %1, %0\n\t" 79 "wr %0, %2, %%psr\n\t" 80 "nop; nop; nop\n" 81 : "=&r" (tmp) 82 : "i" (PSR_PIL), "r" (old_psr) 83 : "memory"); 84 } 85 EXPORT_SYMBOL(arch_local_irq_restore); 86 87 /* 88 * Dave Redman (djhr@tadpole.co.uk) 89 * 90 * IRQ numbers.. These are no longer restricted to 15.. 91 * 92 * this is done to enable SBUS cards and onboard IO to be masked 93 * correctly. using the interrupt level isn't good enough. 94 * 95 * For example: 96 * A device interrupting at sbus level6 and the Floppy both come in 97 * at IRQ11, but enabling and disabling them requires writing to 98 * different bits in the SLAVIO/SEC. 99 * 100 * As a result of these changes sun4m machines could now support 101 * directed CPU interrupts using the existing enable/disable irq code 102 * with tweaks. 103 * 104 */ 105 106 107 108 /* 109 * Dave Redman (djhr@tadpole.co.uk) 110 * 111 * There used to be extern calls and hard coded values here.. very sucky! 112 * instead, because some of the devices attach very early, I do something 113 * equally sucky but at least we'll never try to free statically allocated 114 * space or call kmalloc before kmalloc_init :(. 115 * 116 * In fact it's the timer10 that attaches first.. then timer14 117 * then kmalloc_init is called.. then the tty interrupts attach. 118 * hmmm.... 119 * 120 */ 121 #define MAX_STATIC_ALLOC 4 122 struct irqaction static_irqaction[MAX_STATIC_ALLOC]; 123 int static_irq_count; 124 125 static struct { 126 struct irqaction *action; 127 int flags; 128 } sparc_irq[NR_IRQS]; 129 #define SPARC_IRQ_INPROGRESS 1 130 131 /* Used to protect the IRQ action lists */ 132 DEFINE_SPINLOCK(irq_action_lock); 133 134 int show_interrupts(struct seq_file *p, void *v) 135 { 136 int i = *(loff_t *)v; 137 struct irqaction *action; 138 unsigned long flags; 139 #ifdef CONFIG_SMP 140 int j; 141 #endif 142 143 if (sparc_cpu_model == sun4d) 144 return show_sun4d_interrupts(p, v); 145 146 spin_lock_irqsave(&irq_action_lock, flags); 147 if (i < NR_IRQS) { 148 action = sparc_irq[i].action; 149 if (!action) 150 goto out_unlock; 151 seq_printf(p, "%3d: ", i); 152 #ifndef CONFIG_SMP 153 seq_printf(p, "%10u ", kstat_irqs(i)); 154 #else 155 for_each_online_cpu(j) { 156 seq_printf(p, "%10u ", 157 kstat_cpu(j).irqs[i]); 158 } 159 #endif 160 seq_printf(p, " %c %s", 161 (action->flags & IRQF_DISABLED) ? '+' : ' ', 162 action->name); 163 for (action = action->next; action; action = action->next) { 164 seq_printf(p, ",%s %s", 165 (action->flags & IRQF_DISABLED) ? " +" : "", 166 action->name); 167 } 168 seq_putc(p, '\n'); 169 } 170 out_unlock: 171 spin_unlock_irqrestore(&irq_action_lock, flags); 172 return 0; 173 } 174 175 void free_irq(unsigned int irq, void *dev_id) 176 { 177 struct irqaction *action; 178 struct irqaction **actionp; 179 unsigned long flags; 180 unsigned int cpu_irq; 181 182 if (sparc_cpu_model == sun4d) { 183 sun4d_free_irq(irq, dev_id); 184 return; 185 } 186 cpu_irq = irq & (NR_IRQS - 1); 187 if (cpu_irq > 14) { /* 14 irq levels on the sparc */ 188 printk(KERN_ERR "Trying to free bogus IRQ %d\n", irq); 189 return; 190 } 191 192 spin_lock_irqsave(&irq_action_lock, flags); 193 194 actionp = &sparc_irq[cpu_irq].action; 195 action = *actionp; 196 197 if (!action->handler) { 198 printk(KERN_ERR "Trying to free free IRQ%d\n", irq); 199 goto out_unlock; 200 } 201 if (dev_id) { 202 for (; action; action = action->next) { 203 if (action->dev_id == dev_id) 204 break; 205 actionp = &action->next; 206 } 207 if (!action) { 208 printk(KERN_ERR "Trying to free free shared IRQ%d\n", 209 irq); 210 goto out_unlock; 211 } 212 } else if (action->flags & IRQF_SHARED) { 213 printk(KERN_ERR "Trying to free shared IRQ%d with NULL device ID\n", 214 irq); 215 goto out_unlock; 216 } 217 if (action->flags & SA_STATIC_ALLOC) { 218 /* 219 * This interrupt is marked as specially allocated 220 * so it is a bad idea to free it. 221 */ 222 printk(KERN_ERR "Attempt to free statically allocated IRQ%d (%s)\n", 223 irq, action->name); 224 goto out_unlock; 225 } 226 227 *actionp = action->next; 228 229 spin_unlock_irqrestore(&irq_action_lock, flags); 230 231 synchronize_irq(irq); 232 233 spin_lock_irqsave(&irq_action_lock, flags); 234 235 kfree(action); 236 237 if (!sparc_irq[cpu_irq].action) 238 __disable_irq(irq); 239 240 out_unlock: 241 spin_unlock_irqrestore(&irq_action_lock, flags); 242 } 243 EXPORT_SYMBOL(free_irq); 244 245 /* 246 * This is called when we want to synchronize with 247 * interrupts. We may for example tell a device to 248 * stop sending interrupts: but to make sure there 249 * are no interrupts that are executing on another 250 * CPU we need to call this function. 251 */ 252 #ifdef CONFIG_SMP 253 void synchronize_irq(unsigned int irq) 254 { 255 unsigned int cpu_irq; 256 257 cpu_irq = irq & (NR_IRQS - 1); 258 while (sparc_irq[cpu_irq].flags & SPARC_IRQ_INPROGRESS) 259 cpu_relax(); 260 } 261 EXPORT_SYMBOL(synchronize_irq); 262 #endif /* SMP */ 263 264 void unexpected_irq(int irq, void *dev_id, struct pt_regs *regs) 265 { 266 int i; 267 struct irqaction *action; 268 unsigned int cpu_irq; 269 270 cpu_irq = irq & (NR_IRQS - 1); 271 action = sparc_irq[cpu_irq].action; 272 273 printk(KERN_ERR "IO device interrupt, irq = %d\n", irq); 274 printk(KERN_ERR "PC = %08lx NPC = %08lx FP=%08lx\n", regs->pc, 275 regs->npc, regs->u_regs[14]); 276 if (action) { 277 printk(KERN_ERR "Expecting: "); 278 for (i = 0; i < 16; i++) 279 if (action->handler) 280 printk(KERN_CONT "[%s:%d:0x%x] ", action->name, 281 i, (unsigned int)action->handler); 282 } 283 printk(KERN_ERR "AIEEE\n"); 284 panic("bogus interrupt received"); 285 } 286 287 void handler_irq(int pil, struct pt_regs *regs) 288 { 289 struct pt_regs *old_regs; 290 struct irqaction *action; 291 int cpu = smp_processor_id(); 292 293 old_regs = set_irq_regs(regs); 294 irq_enter(); 295 disable_pil_irq(pil); 296 #ifdef CONFIG_SMP 297 /* Only rotate on lower priority IRQs (scsi, ethernet, etc.). */ 298 if ((sparc_cpu_model==sun4m) && (pil < 10)) 299 smp4m_irq_rotate(cpu); 300 #endif 301 action = sparc_irq[pil].action; 302 sparc_irq[pil].flags |= SPARC_IRQ_INPROGRESS; 303 kstat_cpu(cpu).irqs[pil]++; 304 do { 305 if (!action || !action->handler) 306 unexpected_irq(pil, NULL, regs); 307 action->handler(pil, action->dev_id); 308 action = action->next; 309 } while (action); 310 sparc_irq[pil].flags &= ~SPARC_IRQ_INPROGRESS; 311 enable_pil_irq(pil); 312 irq_exit(); 313 set_irq_regs(old_regs); 314 } 315 316 #if defined(CONFIG_BLK_DEV_FD) || defined(CONFIG_BLK_DEV_FD_MODULE) 317 318 /* 319 * Fast IRQs on the Sparc can only have one routine attached to them, 320 * thus no sharing possible. 321 */ 322 static int request_fast_irq(unsigned int irq, 323 void (*handler)(void), 324 unsigned long irqflags, const char *devname) 325 { 326 struct irqaction *action; 327 unsigned long flags; 328 unsigned int cpu_irq; 329 int ret; 330 #if defined CONFIG_SMP && !defined CONFIG_SPARC_LEON 331 struct tt_entry *trap_table; 332 #endif 333 cpu_irq = irq & (NR_IRQS - 1); 334 if (cpu_irq > 14) { 335 ret = -EINVAL; 336 goto out; 337 } 338 if (!handler) { 339 ret = -EINVAL; 340 goto out; 341 } 342 343 spin_lock_irqsave(&irq_action_lock, flags); 344 345 action = sparc_irq[cpu_irq].action; 346 if (action) { 347 if (action->flags & IRQF_SHARED) 348 panic("Trying to register fast irq when already shared.\n"); 349 if (irqflags & IRQF_SHARED) 350 panic("Trying to register fast irq as shared.\n"); 351 352 /* Anyway, someone already owns it so cannot be made fast. */ 353 printk(KERN_ERR "request_fast_irq: Trying to register yet already owned.\n"); 354 ret = -EBUSY; 355 goto out_unlock; 356 } 357 358 /* 359 * If this is flagged as statically allocated then we use our 360 * private struct which is never freed. 361 */ 362 if (irqflags & SA_STATIC_ALLOC) { 363 if (static_irq_count < MAX_STATIC_ALLOC) 364 action = &static_irqaction[static_irq_count++]; 365 else 366 printk(KERN_ERR "Fast IRQ%d (%s) SA_STATIC_ALLOC failed using kmalloc\n", 367 irq, devname); 368 } 369 370 if (action == NULL) 371 action = kmalloc(sizeof(struct irqaction), GFP_ATOMIC); 372 if (!action) { 373 ret = -ENOMEM; 374 goto out_unlock; 375 } 376 377 /* Dork with trap table if we get this far. */ 378 #define INSTANTIATE(table) \ 379 table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_one = SPARC_RD_PSR_L0; \ 380 table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_two = \ 381 SPARC_BRANCH((unsigned long) handler, \ 382 (unsigned long) &table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_two);\ 383 table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_three = SPARC_RD_WIM_L3; \ 384 table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_four = SPARC_NOP; 385 386 INSTANTIATE(sparc_ttable) 387 #if defined CONFIG_SMP && !defined CONFIG_SPARC_LEON 388 trap_table = &trapbase_cpu1; 389 INSTANTIATE(trap_table) 390 trap_table = &trapbase_cpu2; 391 INSTANTIATE(trap_table) 392 trap_table = &trapbase_cpu3; 393 INSTANTIATE(trap_table) 394 #endif 395 #undef INSTANTIATE 396 /* 397 * XXX Correct thing whould be to flush only I- and D-cache lines 398 * which contain the handler in question. But as of time of the 399 * writing we have no CPU-neutral interface to fine-grained flushes. 400 */ 401 flush_cache_all(); 402 403 action->flags = irqflags; 404 action->name = devname; 405 action->dev_id = NULL; 406 action->next = NULL; 407 408 sparc_irq[cpu_irq].action = action; 409 410 __enable_irq(irq); 411 412 ret = 0; 413 out_unlock: 414 spin_unlock_irqrestore(&irq_action_lock, flags); 415 out: 416 return ret; 417 } 418 419 /* 420 * These variables are used to access state from the assembler 421 * interrupt handler, floppy_hardint, so we cannot put these in 422 * the floppy driver image because that would not work in the 423 * modular case. 424 */ 425 volatile unsigned char *fdc_status; 426 EXPORT_SYMBOL(fdc_status); 427 428 char *pdma_vaddr; 429 EXPORT_SYMBOL(pdma_vaddr); 430 431 unsigned long pdma_size; 432 EXPORT_SYMBOL(pdma_size); 433 434 volatile int doing_pdma; 435 EXPORT_SYMBOL(doing_pdma); 436 437 char *pdma_base; 438 EXPORT_SYMBOL(pdma_base); 439 440 unsigned long pdma_areasize; 441 EXPORT_SYMBOL(pdma_areasize); 442 443 static irq_handler_t floppy_irq_handler; 444 445 void sparc_floppy_irq(int irq, void *dev_id, struct pt_regs *regs) 446 { 447 struct pt_regs *old_regs; 448 int cpu = smp_processor_id(); 449 450 old_regs = set_irq_regs(regs); 451 disable_pil_irq(irq); 452 irq_enter(); 453 kstat_cpu(cpu).irqs[irq]++; 454 floppy_irq_handler(irq, dev_id); 455 irq_exit(); 456 enable_pil_irq(irq); 457 set_irq_regs(old_regs); 458 /* 459 * XXX Eek, it's totally changed with preempt_count() and such 460 * if (softirq_pending(cpu)) 461 * do_softirq(); 462 */ 463 } 464 465 int sparc_floppy_request_irq(int irq, unsigned long flags, 466 irq_handler_t irq_handler) 467 { 468 floppy_irq_handler = irq_handler; 469 return request_fast_irq(irq, floppy_hardint, flags, "floppy"); 470 } 471 EXPORT_SYMBOL(sparc_floppy_request_irq); 472 473 #endif 474 475 int request_irq(unsigned int irq, 476 irq_handler_t handler, 477 unsigned long irqflags, const char *devname, void *dev_id) 478 { 479 struct irqaction *action, **actionp; 480 unsigned long flags; 481 unsigned int cpu_irq; 482 int ret; 483 484 if (sparc_cpu_model == sun4d) 485 return sun4d_request_irq(irq, handler, irqflags, devname, dev_id); 486 487 cpu_irq = irq & (NR_IRQS - 1); 488 if (cpu_irq > 14) { 489 ret = -EINVAL; 490 goto out; 491 } 492 if (!handler) { 493 ret = -EINVAL; 494 goto out; 495 } 496 497 spin_lock_irqsave(&irq_action_lock, flags); 498 499 actionp = &sparc_irq[cpu_irq].action; 500 action = *actionp; 501 if (action) { 502 if (!(action->flags & IRQF_SHARED) || !(irqflags & IRQF_SHARED)) { 503 ret = -EBUSY; 504 goto out_unlock; 505 } 506 if ((action->flags & IRQF_DISABLED) != (irqflags & IRQF_DISABLED)) { 507 printk(KERN_ERR "Attempt to mix fast and slow interrupts on IRQ%d denied\n", 508 irq); 509 ret = -EBUSY; 510 goto out_unlock; 511 } 512 for ( ; action; action = *actionp) 513 actionp = &action->next; 514 } 515 516 /* If this is flagged as statically allocated then we use our 517 * private struct which is never freed. 518 */ 519 if (irqflags & SA_STATIC_ALLOC) { 520 if (static_irq_count < MAX_STATIC_ALLOC) 521 action = &static_irqaction[static_irq_count++]; 522 else 523 printk(KERN_ERR "Request for IRQ%d (%s) SA_STATIC_ALLOC failed using kmalloc\n", 524 irq, devname); 525 } 526 if (action == NULL) 527 action = kmalloc(sizeof(struct irqaction), GFP_ATOMIC); 528 if (!action) { 529 ret = -ENOMEM; 530 goto out_unlock; 531 } 532 533 action->handler = handler; 534 action->flags = irqflags; 535 action->name = devname; 536 action->next = NULL; 537 action->dev_id = dev_id; 538 539 *actionp = action; 540 541 __enable_irq(irq); 542 543 ret = 0; 544 out_unlock: 545 spin_unlock_irqrestore(&irq_action_lock, flags); 546 out: 547 return ret; 548 } 549 EXPORT_SYMBOL(request_irq); 550 551 void disable_irq_nosync(unsigned int irq) 552 { 553 __disable_irq(irq); 554 } 555 EXPORT_SYMBOL(disable_irq_nosync); 556 557 void disable_irq(unsigned int irq) 558 { 559 __disable_irq(irq); 560 } 561 EXPORT_SYMBOL(disable_irq); 562 563 void enable_irq(unsigned int irq) 564 { 565 __enable_irq(irq); 566 } 567 EXPORT_SYMBOL(enable_irq); 568 569 /* 570 * We really don't need these at all on the Sparc. We only have 571 * stubs here because they are exported to modules. 572 */ 573 unsigned long probe_irq_on(void) 574 { 575 return 0; 576 } 577 EXPORT_SYMBOL(probe_irq_on); 578 579 int probe_irq_off(unsigned long mask) 580 { 581 return 0; 582 } 583 EXPORT_SYMBOL(probe_irq_off); 584 585 static unsigned int build_device_irq(struct platform_device *op, 586 unsigned int real_irq) 587 { 588 return real_irq; 589 } 590 591 /* djhr 592 * This could probably be made indirect too and assigned in the CPU 593 * bits of the code. That would be much nicer I think and would also 594 * fit in with the idea of being able to tune your kernel for your machine 595 * by removing unrequired machine and device support. 596 * 597 */ 598 599 void __init init_IRQ(void) 600 { 601 sparc_irq_config.build_device_irq = build_device_irq; 602 603 switch (sparc_cpu_model) { 604 case sun4c: 605 case sun4: 606 sun4c_init_IRQ(); 607 break; 608 609 case sun4m: 610 #ifdef CONFIG_PCI 611 pcic_probe(); 612 if (pcic_present()) { 613 sun4m_pci_init_IRQ(); 614 break; 615 } 616 #endif 617 sun4m_init_IRQ(); 618 break; 619 620 case sun4d: 621 sun4d_init_IRQ(); 622 break; 623 624 case sparc_leon: 625 leon_init_IRQ(); 626 break; 627 628 default: 629 prom_printf("Cannot initialize IRQs on this Sun machine..."); 630 break; 631 } 632 btfixup(); 633 } 634 635 #ifdef CONFIG_PROC_FS 636 void init_irq_proc(void) 637 { 638 /* For now, nothing... */ 639 } 640 #endif /* CONFIG_PROC_FS */ 641