1 /* 2 * arch/sparc/kernel/sun4d_irq.c: 3 * SS1000/SC2000 interrupt handling. 4 * 5 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) 6 * Heavily based on arch/sparc/kernel/irq.c. 7 */ 8 9 #include <linux/errno.h> 10 #include <linux/linkage.h> 11 #include <linux/kernel_stat.h> 12 #include <linux/signal.h> 13 #include <linux/sched.h> 14 #include <linux/ptrace.h> 15 #include <linux/interrupt.h> 16 #include <linux/slab.h> 17 #include <linux/random.h> 18 #include <linux/init.h> 19 #include <linux/smp.h> 20 #include <linux/spinlock.h> 21 #include <linux/seq_file.h> 22 23 #include <asm/ptrace.h> 24 #include <asm/processor.h> 25 #include <asm/system.h> 26 #include <asm/psr.h> 27 #include <asm/smp.h> 28 #include <asm/vaddrs.h> 29 #include <asm/timer.h> 30 #include <asm/openprom.h> 31 #include <asm/oplib.h> 32 #include <asm/traps.h> 33 #include <asm/irq.h> 34 #include <asm/io.h> 35 #include <asm/pgalloc.h> 36 #include <asm/pgtable.h> 37 #include <asm/sbus.h> 38 #include <asm/sbi.h> 39 #include <asm/cacheflush.h> 40 #include <asm/irq_regs.h> 41 42 #include "irq.h" 43 44 /* If you trust current SCSI layer to handle different SCSI IRQs, enable this. I don't trust it... -jj */ 45 /* #define DISTRIBUTE_IRQS */ 46 47 struct sun4d_timer_regs *sun4d_timers; 48 #define TIMER_IRQ 10 49 50 #define MAX_STATIC_ALLOC 4 51 extern struct irqaction static_irqaction[MAX_STATIC_ALLOC]; 52 extern int static_irq_count; 53 unsigned char cpu_leds[32]; 54 #ifdef CONFIG_SMP 55 unsigned char sbus_tid[32]; 56 #endif 57 58 static struct irqaction *irq_action[NR_IRQS]; 59 extern spinlock_t irq_action_lock; 60 61 struct sbus_action { 62 struct irqaction *action; 63 /* For SMP this needs to be extended */ 64 } *sbus_actions; 65 66 static int pil_to_sbus[] = { 67 0, 0, 1, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7, 0, 0, 68 }; 69 70 static int sbus_to_pil[] = { 71 0, 2, 3, 5, 7, 9, 11, 13, 72 }; 73 74 static int nsbi; 75 #ifdef CONFIG_SMP 76 DEFINE_SPINLOCK(sun4d_imsk_lock); 77 #endif 78 79 int show_sun4d_interrupts(struct seq_file *p, void *v) 80 { 81 int i = *(loff_t *) v, j = 0, k = 0, sbusl; 82 struct irqaction * action; 83 unsigned long flags; 84 #ifdef CONFIG_SMP 85 int x; 86 #endif 87 88 spin_lock_irqsave(&irq_action_lock, flags); 89 if (i < NR_IRQS) { 90 sbusl = pil_to_sbus[i]; 91 if (!sbusl) { 92 action = *(i + irq_action); 93 if (!action) 94 goto out_unlock; 95 } else { 96 for (j = 0; j < nsbi; j++) { 97 for (k = 0; k < 4; k++) 98 if ((action = sbus_actions [(j << 5) + (sbusl << 2) + k].action)) 99 goto found_it; 100 } 101 goto out_unlock; 102 } 103 found_it: seq_printf(p, "%3d: ", i); 104 #ifndef CONFIG_SMP 105 seq_printf(p, "%10u ", kstat_irqs(i)); 106 #else 107 for_each_online_cpu(x) 108 seq_printf(p, "%10u ", 109 kstat_cpu(cpu_logical_map(x)).irqs[i]); 110 #endif 111 seq_printf(p, "%c %s", 112 (action->flags & IRQF_DISABLED) ? '+' : ' ', 113 action->name); 114 action = action->next; 115 for (;;) { 116 for (; action; action = action->next) { 117 seq_printf(p, ",%s %s", 118 (action->flags & IRQF_DISABLED) ? " +" : "", 119 action->name); 120 } 121 if (!sbusl) break; 122 k++; 123 if (k < 4) 124 action = sbus_actions [(j << 5) + (sbusl << 2) + k].action; 125 else { 126 j++; 127 if (j == nsbi) break; 128 k = 0; 129 action = sbus_actions [(j << 5) + (sbusl << 2)].action; 130 } 131 } 132 seq_putc(p, '\n'); 133 } 134 out_unlock: 135 spin_unlock_irqrestore(&irq_action_lock, flags); 136 return 0; 137 } 138 139 void sun4d_free_irq(unsigned int irq, void *dev_id) 140 { 141 struct irqaction *action, **actionp; 142 struct irqaction *tmp = NULL; 143 unsigned long flags; 144 145 spin_lock_irqsave(&irq_action_lock, flags); 146 if (irq < 15) 147 actionp = irq + irq_action; 148 else 149 actionp = &(sbus_actions[irq - (1 << 5)].action); 150 action = *actionp; 151 if (!action) { 152 printk("Trying to free free IRQ%d\n",irq); 153 goto out_unlock; 154 } 155 if (dev_id) { 156 for (; action; action = action->next) { 157 if (action->dev_id == dev_id) 158 break; 159 tmp = action; 160 } 161 if (!action) { 162 printk("Trying to free free shared IRQ%d\n",irq); 163 goto out_unlock; 164 } 165 } else if (action->flags & IRQF_SHARED) { 166 printk("Trying to free shared IRQ%d with NULL device ID\n", irq); 167 goto out_unlock; 168 } 169 if (action->flags & SA_STATIC_ALLOC) 170 { 171 /* This interrupt is marked as specially allocated 172 * so it is a bad idea to free it. 173 */ 174 printk("Attempt to free statically allocated IRQ%d (%s)\n", 175 irq, action->name); 176 goto out_unlock; 177 } 178 179 if (action && tmp) 180 tmp->next = action->next; 181 else 182 *actionp = action->next; 183 184 spin_unlock_irqrestore(&irq_action_lock, flags); 185 186 synchronize_irq(irq); 187 188 spin_lock_irqsave(&irq_action_lock, flags); 189 190 kfree(action); 191 192 if (!(*actionp)) 193 __disable_irq(irq); 194 195 out_unlock: 196 spin_unlock_irqrestore(&irq_action_lock, flags); 197 } 198 199 extern void unexpected_irq(int, void *, struct pt_regs *); 200 201 void sun4d_handler_irq(int irq, struct pt_regs * regs) 202 { 203 struct pt_regs *old_regs; 204 struct irqaction * action; 205 int cpu = smp_processor_id(); 206 /* SBUS IRQ level (1 - 7) */ 207 int sbusl = pil_to_sbus[irq]; 208 209 /* FIXME: Is this necessary?? */ 210 cc_get_ipen(); 211 212 cc_set_iclr(1 << irq); 213 214 old_regs = set_irq_regs(regs); 215 irq_enter(); 216 kstat_cpu(cpu).irqs[irq]++; 217 if (!sbusl) { 218 action = *(irq + irq_action); 219 if (!action) 220 unexpected_irq(irq, NULL, regs); 221 do { 222 action->handler(irq, action->dev_id); 223 action = action->next; 224 } while (action); 225 } else { 226 int bus_mask = bw_get_intr_mask(sbusl) & 0x3ffff; 227 int sbino; 228 struct sbus_action *actionp; 229 unsigned mask, slot; 230 int sbil = (sbusl << 2); 231 232 bw_clear_intr_mask(sbusl, bus_mask); 233 234 /* Loop for each pending SBI */ 235 for (sbino = 0; bus_mask; sbino++, bus_mask >>= 1) 236 if (bus_mask & 1) { 237 mask = acquire_sbi(SBI2DEVID(sbino), 0xf << sbil); 238 mask &= (0xf << sbil); 239 actionp = sbus_actions + (sbino << 5) + (sbil); 240 /* Loop for each pending SBI slot */ 241 for (slot = (1 << sbil); mask; slot <<= 1, actionp++) 242 if (mask & slot) { 243 mask &= ~slot; 244 action = actionp->action; 245 246 if (!action) 247 unexpected_irq(irq, NULL, regs); 248 do { 249 action->handler(irq, action->dev_id); 250 action = action->next; 251 } while (action); 252 release_sbi(SBI2DEVID(sbino), slot); 253 } 254 } 255 } 256 irq_exit(); 257 set_irq_regs(old_regs); 258 } 259 260 unsigned int sun4d_build_irq(struct sbus_dev *sdev, int irq) 261 { 262 int sbusl = pil_to_sbus[irq]; 263 264 if (sbusl) 265 return ((sdev->bus->board + 1) << 5) + (sbusl << 2) + sdev->slot; 266 else 267 return irq; 268 } 269 270 unsigned int sun4d_sbint_to_irq(struct sbus_dev *sdev, unsigned int sbint) 271 { 272 if (sbint >= sizeof(sbus_to_pil)) { 273 printk(KERN_ERR "%s: bogus SBINT %d\n", sdev->prom_name, sbint); 274 BUG(); 275 } 276 return sun4d_build_irq(sdev, sbus_to_pil[sbint]); 277 } 278 279 int sun4d_request_irq(unsigned int irq, 280 irq_handler_t handler, 281 unsigned long irqflags, const char * devname, void *dev_id) 282 { 283 struct irqaction *action, *tmp = NULL, **actionp; 284 unsigned long flags; 285 int ret; 286 287 if(irq > 14 && irq < (1 << 5)) { 288 ret = -EINVAL; 289 goto out; 290 } 291 292 if (!handler) { 293 ret = -EINVAL; 294 goto out; 295 } 296 297 spin_lock_irqsave(&irq_action_lock, flags); 298 299 if (irq >= (1 << 5)) 300 actionp = &(sbus_actions[irq - (1 << 5)].action); 301 else 302 actionp = irq + irq_action; 303 action = *actionp; 304 305 if (action) { 306 if ((action->flags & IRQF_SHARED) && (irqflags & IRQF_SHARED)) { 307 for (tmp = action; tmp->next; tmp = tmp->next); 308 } else { 309 ret = -EBUSY; 310 goto out_unlock; 311 } 312 if ((action->flags & IRQF_DISABLED) ^ (irqflags & IRQF_DISABLED)) { 313 printk("Attempt to mix fast and slow interrupts on IRQ%d denied\n", irq); 314 ret = -EBUSY; 315 goto out_unlock; 316 } 317 action = NULL; /* Or else! */ 318 } 319 320 /* If this is flagged as statically allocated then we use our 321 * private struct which is never freed. 322 */ 323 if (irqflags & SA_STATIC_ALLOC) { 324 if (static_irq_count < MAX_STATIC_ALLOC) 325 action = &static_irqaction[static_irq_count++]; 326 else 327 printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed using kmalloc\n", irq, devname); 328 } 329 330 if (action == NULL) 331 action = kmalloc(sizeof(struct irqaction), 332 GFP_ATOMIC); 333 334 if (!action) { 335 ret = -ENOMEM; 336 goto out_unlock; 337 } 338 339 action->handler = handler; 340 action->flags = irqflags; 341 cpus_clear(action->mask); 342 action->name = devname; 343 action->next = NULL; 344 action->dev_id = dev_id; 345 346 if (tmp) 347 tmp->next = action; 348 else 349 *actionp = action; 350 351 __enable_irq(irq); 352 353 ret = 0; 354 out_unlock: 355 spin_unlock_irqrestore(&irq_action_lock, flags); 356 out: 357 return ret; 358 } 359 360 static void sun4d_disable_irq(unsigned int irq) 361 { 362 #ifdef CONFIG_SMP 363 int tid = sbus_tid[(irq >> 5) - 1]; 364 unsigned long flags; 365 #endif 366 367 if (irq < NR_IRQS) return; 368 #ifdef CONFIG_SMP 369 spin_lock_irqsave(&sun4d_imsk_lock, flags); 370 cc_set_imsk_other(tid, cc_get_imsk_other(tid) | (1 << sbus_to_pil[(irq >> 2) & 7])); 371 spin_unlock_irqrestore(&sun4d_imsk_lock, flags); 372 #else 373 cc_set_imsk(cc_get_imsk() | (1 << sbus_to_pil[(irq >> 2) & 7])); 374 #endif 375 } 376 377 static void sun4d_enable_irq(unsigned int irq) 378 { 379 #ifdef CONFIG_SMP 380 int tid = sbus_tid[(irq >> 5) - 1]; 381 unsigned long flags; 382 #endif 383 384 if (irq < NR_IRQS) return; 385 #ifdef CONFIG_SMP 386 spin_lock_irqsave(&sun4d_imsk_lock, flags); 387 cc_set_imsk_other(tid, cc_get_imsk_other(tid) & ~(1 << sbus_to_pil[(irq >> 2) & 7])); 388 spin_unlock_irqrestore(&sun4d_imsk_lock, flags); 389 #else 390 cc_set_imsk(cc_get_imsk() & ~(1 << sbus_to_pil[(irq >> 2) & 7])); 391 #endif 392 } 393 394 #ifdef CONFIG_SMP 395 static void sun4d_set_cpu_int(int cpu, int level) 396 { 397 sun4d_send_ipi(cpu, level); 398 } 399 400 static void sun4d_clear_ipi(int cpu, int level) 401 { 402 } 403 404 static void sun4d_set_udt(int cpu) 405 { 406 } 407 408 /* Setup IRQ distribution scheme. */ 409 void __init sun4d_distribute_irqs(void) 410 { 411 #ifdef DISTRIBUTE_IRQS 412 struct sbus_bus *sbus; 413 unsigned long sbus_serving_map; 414 415 sbus_serving_map = cpu_present_map; 416 for_each_sbus(sbus) { 417 if ((sbus->board * 2) == boot_cpu_id && (cpu_present_map & (1 << (sbus->board * 2 + 1)))) 418 sbus_tid[sbus->board] = (sbus->board * 2 + 1); 419 else if (cpu_present_map & (1 << (sbus->board * 2))) 420 sbus_tid[sbus->board] = (sbus->board * 2); 421 else if (cpu_present_map & (1 << (sbus->board * 2 + 1))) 422 sbus_tid[sbus->board] = (sbus->board * 2 + 1); 423 else 424 sbus_tid[sbus->board] = 0xff; 425 if (sbus_tid[sbus->board] != 0xff) 426 sbus_serving_map &= ~(1 << sbus_tid[sbus->board]); 427 } 428 for_each_sbus(sbus) 429 if (sbus_tid[sbus->board] == 0xff) { 430 int i = 31; 431 432 if (!sbus_serving_map) 433 sbus_serving_map = cpu_present_map; 434 while (!(sbus_serving_map & (1 << i))) 435 i--; 436 sbus_tid[sbus->board] = i; 437 sbus_serving_map &= ~(1 << i); 438 } 439 for_each_sbus(sbus) { 440 printk("sbus%d IRQs directed to CPU%d\n", sbus->board, sbus_tid[sbus->board]); 441 set_sbi_tid(sbus->devid, sbus_tid[sbus->board] << 3); 442 } 443 #else 444 struct sbus_bus *sbus; 445 int cpuid = cpu_logical_map(1); 446 447 if (cpuid == -1) 448 cpuid = cpu_logical_map(0); 449 for_each_sbus(sbus) { 450 sbus_tid[sbus->board] = cpuid; 451 set_sbi_tid(sbus->devid, cpuid << 3); 452 } 453 printk("All sbus IRQs directed to CPU%d\n", cpuid); 454 #endif 455 } 456 #endif 457 458 static void sun4d_clear_clock_irq(void) 459 { 460 volatile unsigned int clear_intr; 461 clear_intr = sun4d_timers->l10_timer_limit; 462 } 463 464 static void sun4d_clear_profile_irq(int cpu) 465 { 466 bw_get_prof_limit(cpu); 467 } 468 469 static void sun4d_load_profile_irq(int cpu, unsigned int limit) 470 { 471 bw_set_prof_limit(cpu, limit); 472 } 473 474 static void __init sun4d_init_timers(irq_handler_t counter_fn) 475 { 476 int irq; 477 int cpu; 478 struct resource r; 479 int mid; 480 481 /* Map the User Timer registers. */ 482 memset(&r, 0, sizeof(r)); 483 #ifdef CONFIG_SMP 484 r.start = CSR_BASE(boot_cpu_id)+BW_TIMER_LIMIT; 485 #else 486 r.start = CSR_BASE(0)+BW_TIMER_LIMIT; 487 #endif 488 r.flags = 0xf; 489 sun4d_timers = (struct sun4d_timer_regs *) sbus_ioremap(&r, 0, 490 PAGE_SIZE, "user timer"); 491 492 sun4d_timers->l10_timer_limit = (((1000000/HZ) + 1) << 10); 493 master_l10_counter = &sun4d_timers->l10_cur_count; 494 master_l10_limit = &sun4d_timers->l10_timer_limit; 495 496 irq = request_irq(TIMER_IRQ, 497 counter_fn, 498 (IRQF_DISABLED | SA_STATIC_ALLOC), 499 "timer", NULL); 500 if (irq) { 501 prom_printf("time_init: unable to attach IRQ%d\n",TIMER_IRQ); 502 prom_halt(); 503 } 504 505 /* Enable user timer free run for CPU 0 in BW */ 506 /* bw_set_ctrl(0, bw_get_ctrl(0) | BW_CTRL_USER_TIMER); */ 507 508 cpu = 0; 509 while (!cpu_find_by_instance(cpu, NULL, &mid)) { 510 sun4d_load_profile_irq(mid >> 3, 0); 511 cpu++; 512 } 513 514 #ifdef CONFIG_SMP 515 { 516 unsigned long flags; 517 extern unsigned long lvl14_save[4]; 518 struct tt_entry *trap_table = &sparc_ttable[SP_TRAP_IRQ1 + (14 - 1)]; 519 extern unsigned int real_irq_entry[], smp4d_ticker[]; 520 extern unsigned int patchme_maybe_smp_msg[]; 521 522 /* Adjust so that we jump directly to smp4d_ticker */ 523 lvl14_save[2] += smp4d_ticker - real_irq_entry; 524 525 /* For SMP we use the level 14 ticker, however the bootup code 526 * has copied the firmware's level 14 vector into the boot cpu's 527 * trap table, we must fix this now or we get squashed. 528 */ 529 local_irq_save(flags); 530 patchme_maybe_smp_msg[0] = 0x01000000; /* NOP out the branch */ 531 trap_table->inst_one = lvl14_save[0]; 532 trap_table->inst_two = lvl14_save[1]; 533 trap_table->inst_three = lvl14_save[2]; 534 trap_table->inst_four = lvl14_save[3]; 535 local_flush_cache_all(); 536 local_irq_restore(flags); 537 } 538 #endif 539 } 540 541 void __init sun4d_init_sbi_irq(void) 542 { 543 struct sbus_bus *sbus; 544 unsigned mask; 545 546 nsbi = 0; 547 for_each_sbus(sbus) 548 nsbi++; 549 sbus_actions = kzalloc (nsbi * 8 * 4 * sizeof(struct sbus_action), GFP_ATOMIC); 550 if (!sbus_actions) { 551 prom_printf("SUN4D: Cannot allocate sbus_actions, halting.\n"); 552 prom_halt(); 553 } 554 for_each_sbus(sbus) { 555 #ifdef CONFIG_SMP 556 extern unsigned char boot_cpu_id; 557 558 set_sbi_tid(sbus->devid, boot_cpu_id << 3); 559 sbus_tid[sbus->board] = boot_cpu_id; 560 #endif 561 /* Get rid of pending irqs from PROM */ 562 mask = acquire_sbi(sbus->devid, 0xffffffff); 563 if (mask) { 564 printk ("Clearing pending IRQs %08x on SBI %d\n", mask, sbus->board); 565 release_sbi(sbus->devid, mask); 566 } 567 } 568 } 569 570 void __init sun4d_init_IRQ(void) 571 { 572 local_irq_disable(); 573 574 BTFIXUPSET_CALL(sbint_to_irq, sun4d_sbint_to_irq, BTFIXUPCALL_NORM); 575 BTFIXUPSET_CALL(enable_irq, sun4d_enable_irq, BTFIXUPCALL_NORM); 576 BTFIXUPSET_CALL(disable_irq, sun4d_disable_irq, BTFIXUPCALL_NORM); 577 BTFIXUPSET_CALL(clear_clock_irq, sun4d_clear_clock_irq, BTFIXUPCALL_NORM); 578 BTFIXUPSET_CALL(clear_profile_irq, sun4d_clear_profile_irq, BTFIXUPCALL_NORM); 579 BTFIXUPSET_CALL(load_profile_irq, sun4d_load_profile_irq, BTFIXUPCALL_NORM); 580 sparc_init_timers = sun4d_init_timers; 581 #ifdef CONFIG_SMP 582 BTFIXUPSET_CALL(set_cpu_int, sun4d_set_cpu_int, BTFIXUPCALL_NORM); 583 BTFIXUPSET_CALL(clear_cpu_int, sun4d_clear_ipi, BTFIXUPCALL_NOP); 584 BTFIXUPSET_CALL(set_irq_udt, sun4d_set_udt, BTFIXUPCALL_NOP); 585 #endif 586 /* Cannot enable interrupts until OBP ticker is disabled. */ 587 } 588