1 /* $Id: sun4d_irq.c,v 1.29 2001/12/11 04:55:51 davem Exp $ 2 * arch/sparc/kernel/sun4d_irq.c: 3 * SS1000/SC2000 interrupt handling. 4 * 5 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) 6 * Heavily based on arch/sparc/kernel/irq.c. 7 */ 8 9 #include <linux/errno.h> 10 #include <linux/linkage.h> 11 #include <linux/kernel_stat.h> 12 #include <linux/signal.h> 13 #include <linux/sched.h> 14 #include <linux/ptrace.h> 15 #include <linux/interrupt.h> 16 #include <linux/slab.h> 17 #include <linux/random.h> 18 #include <linux/init.h> 19 #include <linux/smp.h> 20 #include <linux/spinlock.h> 21 #include <linux/seq_file.h> 22 23 #include <asm/ptrace.h> 24 #include <asm/processor.h> 25 #include <asm/system.h> 26 #include <asm/psr.h> 27 #include <asm/smp.h> 28 #include <asm/vaddrs.h> 29 #include <asm/timer.h> 30 #include <asm/openprom.h> 31 #include <asm/oplib.h> 32 #include <asm/traps.h> 33 #include <asm/irq.h> 34 #include <asm/io.h> 35 #include <asm/pgalloc.h> 36 #include <asm/pgtable.h> 37 #include <asm/sbus.h> 38 #include <asm/sbi.h> 39 #include <asm/cacheflush.h> 40 #include <asm/irq_regs.h> 41 42 /* If you trust current SCSI layer to handle different SCSI IRQs, enable this. I don't trust it... -jj */ 43 /* #define DISTRIBUTE_IRQS */ 44 45 struct sun4d_timer_regs *sun4d_timers; 46 #define TIMER_IRQ 10 47 48 #define MAX_STATIC_ALLOC 4 49 extern struct irqaction static_irqaction[MAX_STATIC_ALLOC]; 50 extern int static_irq_count; 51 unsigned char cpu_leds[32]; 52 #ifdef CONFIG_SMP 53 unsigned char sbus_tid[32]; 54 #endif 55 56 static struct irqaction *irq_action[NR_IRQS]; 57 extern spinlock_t irq_action_lock; 58 59 struct sbus_action { 60 struct irqaction *action; 61 /* For SMP this needs to be extended */ 62 } *sbus_actions; 63 64 static int pil_to_sbus[] = { 65 0, 0, 1, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7, 0, 0, 66 }; 67 68 static int sbus_to_pil[] = { 69 0, 2, 3, 5, 7, 9, 11, 13, 70 }; 71 72 static int nsbi; 73 #ifdef CONFIG_SMP 74 DEFINE_SPINLOCK(sun4d_imsk_lock); 75 #endif 76 77 int show_sun4d_interrupts(struct seq_file *p, void *v) 78 { 79 int i = *(loff_t *) v, j = 0, k = 0, sbusl; 80 struct irqaction * action; 81 unsigned long flags; 82 #ifdef CONFIG_SMP 83 int x; 84 #endif 85 86 spin_lock_irqsave(&irq_action_lock, flags); 87 if (i < NR_IRQS) { 88 sbusl = pil_to_sbus[i]; 89 if (!sbusl) { 90 action = *(i + irq_action); 91 if (!action) 92 goto out_unlock; 93 } else { 94 for (j = 0; j < nsbi; j++) { 95 for (k = 0; k < 4; k++) 96 if ((action = sbus_actions [(j << 5) + (sbusl << 2) + k].action)) 97 goto found_it; 98 } 99 goto out_unlock; 100 } 101 found_it: seq_printf(p, "%3d: ", i); 102 #ifndef CONFIG_SMP 103 seq_printf(p, "%10u ", kstat_irqs(i)); 104 #else 105 for_each_online_cpu(x) 106 seq_printf(p, "%10u ", 107 kstat_cpu(cpu_logical_map(x)).irqs[i]); 108 #endif 109 seq_printf(p, "%c %s", 110 (action->flags & IRQF_DISABLED) ? '+' : ' ', 111 action->name); 112 action = action->next; 113 for (;;) { 114 for (; action; action = action->next) { 115 seq_printf(p, ",%s %s", 116 (action->flags & IRQF_DISABLED) ? " +" : "", 117 action->name); 118 } 119 if (!sbusl) break; 120 k++; 121 if (k < 4) 122 action = sbus_actions [(j << 5) + (sbusl << 2) + k].action; 123 else { 124 j++; 125 if (j == nsbi) break; 126 k = 0; 127 action = sbus_actions [(j << 5) + (sbusl << 2)].action; 128 } 129 } 130 seq_putc(p, '\n'); 131 } 132 out_unlock: 133 spin_unlock_irqrestore(&irq_action_lock, flags); 134 return 0; 135 } 136 137 void sun4d_free_irq(unsigned int irq, void *dev_id) 138 { 139 struct irqaction *action, **actionp; 140 struct irqaction *tmp = NULL; 141 unsigned long flags; 142 143 spin_lock_irqsave(&irq_action_lock, flags); 144 if (irq < 15) 145 actionp = irq + irq_action; 146 else 147 actionp = &(sbus_actions[irq - (1 << 5)].action); 148 action = *actionp; 149 if (!action) { 150 printk("Trying to free free IRQ%d\n",irq); 151 goto out_unlock; 152 } 153 if (dev_id) { 154 for (; action; action = action->next) { 155 if (action->dev_id == dev_id) 156 break; 157 tmp = action; 158 } 159 if (!action) { 160 printk("Trying to free free shared IRQ%d\n",irq); 161 goto out_unlock; 162 } 163 } else if (action->flags & IRQF_SHARED) { 164 printk("Trying to free shared IRQ%d with NULL device ID\n", irq); 165 goto out_unlock; 166 } 167 if (action->flags & SA_STATIC_ALLOC) 168 { 169 /* This interrupt is marked as specially allocated 170 * so it is a bad idea to free it. 171 */ 172 printk("Attempt to free statically allocated IRQ%d (%s)\n", 173 irq, action->name); 174 goto out_unlock; 175 } 176 177 if (action && tmp) 178 tmp->next = action->next; 179 else 180 *actionp = action->next; 181 182 spin_unlock_irqrestore(&irq_action_lock, flags); 183 184 synchronize_irq(irq); 185 186 spin_lock_irqsave(&irq_action_lock, flags); 187 188 kfree(action); 189 190 if (!(*actionp)) 191 disable_irq(irq); 192 193 out_unlock: 194 spin_unlock_irqrestore(&irq_action_lock, flags); 195 } 196 197 extern void unexpected_irq(int, void *, struct pt_regs *); 198 199 void sun4d_handler_irq(int irq, struct pt_regs * regs) 200 { 201 struct pt_regs *old_regs; 202 struct irqaction * action; 203 int cpu = smp_processor_id(); 204 /* SBUS IRQ level (1 - 7) */ 205 int sbusl = pil_to_sbus[irq]; 206 207 /* FIXME: Is this necessary?? */ 208 cc_get_ipen(); 209 210 cc_set_iclr(1 << irq); 211 212 old_regs = set_irq_regs(regs); 213 irq_enter(); 214 kstat_cpu(cpu).irqs[irq]++; 215 if (!sbusl) { 216 action = *(irq + irq_action); 217 if (!action) 218 unexpected_irq(irq, NULL, regs); 219 do { 220 action->handler(irq, action->dev_id); 221 action = action->next; 222 } while (action); 223 } else { 224 int bus_mask = bw_get_intr_mask(sbusl) & 0x3ffff; 225 int sbino; 226 struct sbus_action *actionp; 227 unsigned mask, slot; 228 int sbil = (sbusl << 2); 229 230 bw_clear_intr_mask(sbusl, bus_mask); 231 232 /* Loop for each pending SBI */ 233 for (sbino = 0; bus_mask; sbino++, bus_mask >>= 1) 234 if (bus_mask & 1) { 235 mask = acquire_sbi(SBI2DEVID(sbino), 0xf << sbil); 236 mask &= (0xf << sbil); 237 actionp = sbus_actions + (sbino << 5) + (sbil); 238 /* Loop for each pending SBI slot */ 239 for (slot = (1 << sbil); mask; slot <<= 1, actionp++) 240 if (mask & slot) { 241 mask &= ~slot; 242 action = actionp->action; 243 244 if (!action) 245 unexpected_irq(irq, NULL, regs); 246 do { 247 action->handler(irq, action->dev_id); 248 action = action->next; 249 } while (action); 250 release_sbi(SBI2DEVID(sbino), slot); 251 } 252 } 253 } 254 irq_exit(); 255 set_irq_regs(old_regs); 256 } 257 258 unsigned int sun4d_build_irq(struct sbus_dev *sdev, int irq) 259 { 260 int sbusl = pil_to_sbus[irq]; 261 262 if (sbusl) 263 return ((sdev->bus->board + 1) << 5) + (sbusl << 2) + sdev->slot; 264 else 265 return irq; 266 } 267 268 unsigned int sun4d_sbint_to_irq(struct sbus_dev *sdev, unsigned int sbint) 269 { 270 if (sbint >= sizeof(sbus_to_pil)) { 271 printk(KERN_ERR "%s: bogus SBINT %d\n", sdev->prom_name, sbint); 272 BUG(); 273 } 274 return sun4d_build_irq(sdev, sbus_to_pil[sbint]); 275 } 276 277 int sun4d_request_irq(unsigned int irq, 278 irq_handler_t handler, 279 unsigned long irqflags, const char * devname, void *dev_id) 280 { 281 struct irqaction *action, *tmp = NULL, **actionp; 282 unsigned long flags; 283 int ret; 284 285 if(irq > 14 && irq < (1 << 5)) { 286 ret = -EINVAL; 287 goto out; 288 } 289 290 if (!handler) { 291 ret = -EINVAL; 292 goto out; 293 } 294 295 spin_lock_irqsave(&irq_action_lock, flags); 296 297 if (irq >= (1 << 5)) 298 actionp = &(sbus_actions[irq - (1 << 5)].action); 299 else 300 actionp = irq + irq_action; 301 action = *actionp; 302 303 if (action) { 304 if ((action->flags & IRQF_SHARED) && (irqflags & IRQF_SHARED)) { 305 for (tmp = action; tmp->next; tmp = tmp->next); 306 } else { 307 ret = -EBUSY; 308 goto out_unlock; 309 } 310 if ((action->flags & IRQF_DISABLED) ^ (irqflags & IRQF_DISABLED)) { 311 printk("Attempt to mix fast and slow interrupts on IRQ%d denied\n", irq); 312 ret = -EBUSY; 313 goto out_unlock; 314 } 315 action = NULL; /* Or else! */ 316 } 317 318 /* If this is flagged as statically allocated then we use our 319 * private struct which is never freed. 320 */ 321 if (irqflags & SA_STATIC_ALLOC) { 322 if (static_irq_count < MAX_STATIC_ALLOC) 323 action = &static_irqaction[static_irq_count++]; 324 else 325 printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed using kmalloc\n", irq, devname); 326 } 327 328 if (action == NULL) 329 action = kmalloc(sizeof(struct irqaction), 330 GFP_ATOMIC); 331 332 if (!action) { 333 ret = -ENOMEM; 334 goto out_unlock; 335 } 336 337 action->handler = handler; 338 action->flags = irqflags; 339 cpus_clear(action->mask); 340 action->name = devname; 341 action->next = NULL; 342 action->dev_id = dev_id; 343 344 if (tmp) 345 tmp->next = action; 346 else 347 *actionp = action; 348 349 enable_irq(irq); 350 351 ret = 0; 352 out_unlock: 353 spin_unlock_irqrestore(&irq_action_lock, flags); 354 out: 355 return ret; 356 } 357 358 static void sun4d_disable_irq(unsigned int irq) 359 { 360 #ifdef CONFIG_SMP 361 int tid = sbus_tid[(irq >> 5) - 1]; 362 unsigned long flags; 363 #endif 364 365 if (irq < NR_IRQS) return; 366 #ifdef CONFIG_SMP 367 spin_lock_irqsave(&sun4d_imsk_lock, flags); 368 cc_set_imsk_other(tid, cc_get_imsk_other(tid) | (1 << sbus_to_pil[(irq >> 2) & 7])); 369 spin_unlock_irqrestore(&sun4d_imsk_lock, flags); 370 #else 371 cc_set_imsk(cc_get_imsk() | (1 << sbus_to_pil[(irq >> 2) & 7])); 372 #endif 373 } 374 375 static void sun4d_enable_irq(unsigned int irq) 376 { 377 #ifdef CONFIG_SMP 378 int tid = sbus_tid[(irq >> 5) - 1]; 379 unsigned long flags; 380 #endif 381 382 if (irq < NR_IRQS) return; 383 #ifdef CONFIG_SMP 384 spin_lock_irqsave(&sun4d_imsk_lock, flags); 385 cc_set_imsk_other(tid, cc_get_imsk_other(tid) & ~(1 << sbus_to_pil[(irq >> 2) & 7])); 386 spin_unlock_irqrestore(&sun4d_imsk_lock, flags); 387 #else 388 cc_set_imsk(cc_get_imsk() & ~(1 << sbus_to_pil[(irq >> 2) & 7])); 389 #endif 390 } 391 392 #ifdef CONFIG_SMP 393 static void sun4d_set_cpu_int(int cpu, int level) 394 { 395 sun4d_send_ipi(cpu, level); 396 } 397 398 static void sun4d_clear_ipi(int cpu, int level) 399 { 400 } 401 402 static void sun4d_set_udt(int cpu) 403 { 404 } 405 406 /* Setup IRQ distribution scheme. */ 407 void __init sun4d_distribute_irqs(void) 408 { 409 #ifdef DISTRIBUTE_IRQS 410 struct sbus_bus *sbus; 411 unsigned long sbus_serving_map; 412 413 sbus_serving_map = cpu_present_map; 414 for_each_sbus(sbus) { 415 if ((sbus->board * 2) == boot_cpu_id && (cpu_present_map & (1 << (sbus->board * 2 + 1)))) 416 sbus_tid[sbus->board] = (sbus->board * 2 + 1); 417 else if (cpu_present_map & (1 << (sbus->board * 2))) 418 sbus_tid[sbus->board] = (sbus->board * 2); 419 else if (cpu_present_map & (1 << (sbus->board * 2 + 1))) 420 sbus_tid[sbus->board] = (sbus->board * 2 + 1); 421 else 422 sbus_tid[sbus->board] = 0xff; 423 if (sbus_tid[sbus->board] != 0xff) 424 sbus_serving_map &= ~(1 << sbus_tid[sbus->board]); 425 } 426 for_each_sbus(sbus) 427 if (sbus_tid[sbus->board] == 0xff) { 428 int i = 31; 429 430 if (!sbus_serving_map) 431 sbus_serving_map = cpu_present_map; 432 while (!(sbus_serving_map & (1 << i))) 433 i--; 434 sbus_tid[sbus->board] = i; 435 sbus_serving_map &= ~(1 << i); 436 } 437 for_each_sbus(sbus) { 438 printk("sbus%d IRQs directed to CPU%d\n", sbus->board, sbus_tid[sbus->board]); 439 set_sbi_tid(sbus->devid, sbus_tid[sbus->board] << 3); 440 } 441 #else 442 struct sbus_bus *sbus; 443 int cpuid = cpu_logical_map(1); 444 445 if (cpuid == -1) 446 cpuid = cpu_logical_map(0); 447 for_each_sbus(sbus) { 448 sbus_tid[sbus->board] = cpuid; 449 set_sbi_tid(sbus->devid, cpuid << 3); 450 } 451 printk("All sbus IRQs directed to CPU%d\n", cpuid); 452 #endif 453 } 454 #endif 455 456 static void sun4d_clear_clock_irq(void) 457 { 458 volatile unsigned int clear_intr; 459 clear_intr = sun4d_timers->l10_timer_limit; 460 } 461 462 static void sun4d_clear_profile_irq(int cpu) 463 { 464 bw_get_prof_limit(cpu); 465 } 466 467 static void sun4d_load_profile_irq(int cpu, unsigned int limit) 468 { 469 bw_set_prof_limit(cpu, limit); 470 } 471 472 static void __init sun4d_init_timers(irq_handler_t counter_fn) 473 { 474 int irq; 475 int cpu; 476 struct resource r; 477 int mid; 478 479 /* Map the User Timer registers. */ 480 memset(&r, 0, sizeof(r)); 481 #ifdef CONFIG_SMP 482 r.start = CSR_BASE(boot_cpu_id)+BW_TIMER_LIMIT; 483 #else 484 r.start = CSR_BASE(0)+BW_TIMER_LIMIT; 485 #endif 486 r.flags = 0xf; 487 sun4d_timers = (struct sun4d_timer_regs *) sbus_ioremap(&r, 0, 488 PAGE_SIZE, "user timer"); 489 490 sun4d_timers->l10_timer_limit = (((1000000/HZ) + 1) << 10); 491 master_l10_counter = &sun4d_timers->l10_cur_count; 492 master_l10_limit = &sun4d_timers->l10_timer_limit; 493 494 irq = request_irq(TIMER_IRQ, 495 counter_fn, 496 (IRQF_DISABLED | SA_STATIC_ALLOC), 497 "timer", NULL); 498 if (irq) { 499 prom_printf("time_init: unable to attach IRQ%d\n",TIMER_IRQ); 500 prom_halt(); 501 } 502 503 /* Enable user timer free run for CPU 0 in BW */ 504 /* bw_set_ctrl(0, bw_get_ctrl(0) | BW_CTRL_USER_TIMER); */ 505 506 cpu = 0; 507 while (!cpu_find_by_instance(cpu, NULL, &mid)) { 508 sun4d_load_profile_irq(mid >> 3, 0); 509 cpu++; 510 } 511 512 #ifdef CONFIG_SMP 513 { 514 unsigned long flags; 515 extern unsigned long lvl14_save[4]; 516 struct tt_entry *trap_table = &sparc_ttable[SP_TRAP_IRQ1 + (14 - 1)]; 517 extern unsigned int real_irq_entry[], smp4d_ticker[]; 518 extern unsigned int patchme_maybe_smp_msg[]; 519 520 /* Adjust so that we jump directly to smp4d_ticker */ 521 lvl14_save[2] += smp4d_ticker - real_irq_entry; 522 523 /* For SMP we use the level 14 ticker, however the bootup code 524 * has copied the firmware's level 14 vector into the boot cpu's 525 * trap table, we must fix this now or we get squashed. 526 */ 527 local_irq_save(flags); 528 patchme_maybe_smp_msg[0] = 0x01000000; /* NOP out the branch */ 529 trap_table->inst_one = lvl14_save[0]; 530 trap_table->inst_two = lvl14_save[1]; 531 trap_table->inst_three = lvl14_save[2]; 532 trap_table->inst_four = lvl14_save[3]; 533 local_flush_cache_all(); 534 local_irq_restore(flags); 535 } 536 #endif 537 } 538 539 void __init sun4d_init_sbi_irq(void) 540 { 541 struct sbus_bus *sbus; 542 unsigned mask; 543 544 nsbi = 0; 545 for_each_sbus(sbus) 546 nsbi++; 547 sbus_actions = kzalloc (nsbi * 8 * 4 * sizeof(struct sbus_action), GFP_ATOMIC); 548 if (!sbus_actions) { 549 prom_printf("SUN4D: Cannot allocate sbus_actions, halting.\n"); 550 prom_halt(); 551 } 552 for_each_sbus(sbus) { 553 #ifdef CONFIG_SMP 554 extern unsigned char boot_cpu_id; 555 556 set_sbi_tid(sbus->devid, boot_cpu_id << 3); 557 sbus_tid[sbus->board] = boot_cpu_id; 558 #endif 559 /* Get rid of pending irqs from PROM */ 560 mask = acquire_sbi(sbus->devid, 0xffffffff); 561 if (mask) { 562 printk ("Clearing pending IRQs %08x on SBI %d\n", mask, sbus->board); 563 release_sbi(sbus->devid, mask); 564 } 565 } 566 } 567 568 void __init sun4d_init_IRQ(void) 569 { 570 local_irq_disable(); 571 572 BTFIXUPSET_CALL(sbint_to_irq, sun4d_sbint_to_irq, BTFIXUPCALL_NORM); 573 BTFIXUPSET_CALL(enable_irq, sun4d_enable_irq, BTFIXUPCALL_NORM); 574 BTFIXUPSET_CALL(disable_irq, sun4d_disable_irq, BTFIXUPCALL_NORM); 575 BTFIXUPSET_CALL(clear_clock_irq, sun4d_clear_clock_irq, BTFIXUPCALL_NORM); 576 BTFIXUPSET_CALL(clear_profile_irq, sun4d_clear_profile_irq, BTFIXUPCALL_NORM); 577 BTFIXUPSET_CALL(load_profile_irq, sun4d_load_profile_irq, BTFIXUPCALL_NORM); 578 sparc_init_timers = sun4d_init_timers; 579 #ifdef CONFIG_SMP 580 BTFIXUPSET_CALL(set_cpu_int, sun4d_set_cpu_int, BTFIXUPCALL_NORM); 581 BTFIXUPSET_CALL(clear_cpu_int, sun4d_clear_ipi, BTFIXUPCALL_NOP); 582 BTFIXUPSET_CALL(set_irq_udt, sun4d_set_udt, BTFIXUPCALL_NOP); 583 #endif 584 /* Cannot enable interrupts until OBP ticker is disabled. */ 585 } 586