/*
 * ip27-irq.c: High-level interrupt handling for the IP27 architecture.
 *
 * Copyright (C) 1999, 2000 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 1999 - 2001 Kanoj Sarcar
 */

#undef DEBUG

#include <linux/config.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/timex.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/smp_lock.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/bitops.h>

#include <asm/bootinfo.h>
#include <asm/io.h>
#include <asm/mipsregs.h>
#include <asm/system.h>

#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/pci/bridge.h>
#include <asm/sn/addrs.h>
#include <asm/sn/agent.h>
#include <asm/sn/arch.h>
#include <asm/sn/hub.h>
#include <asm/sn/intr.h>

/*
 * Linux has a controller-independent interrupt architecture: every
 * controller has a 'controller-template' that is used by the main code
 * to do the right thing.  Each driver-visible interrupt source is
 * transparently wired to the appropriate controller, so drivers need
 * not be aware of the interrupt controller at all.
 *
 * The scheme was originally designed for the x86 world and its zoo of
 * 8259 PICs and (IO-)APICs; the controller handled in this file is the
 * IP27's HUB together with the Bridge ASICs that route PCI interrupts
 * to it.
 *
 * The code is designed to be easily extended with new/different
 * interrupt controllers, without having to do assembly magic.
 */

extern asmlinkage void ip27_irq(void);

extern struct bridge_controller *irq_to_bridge[];
extern int irq_to_slot[];

/*
 * Use these macros to get the bridge controller and the slot (pin)
 * number back from the irq value.
 */
#define IRQ_TO_BRIDGE(i)        irq_to_bridge[(i)]
#define SLOT_FROM_PCI_IRQ(i)    irq_to_slot[(i)]

static inline int alloc_level(int cpu, int irq)
{
        struct hub_data *hub = hub_data(cpu_to_node(cpu));
        struct slice_data *si = cpu_data[cpu].data;
        int level;

        level = find_first_zero_bit(hub->irq_alloc_mask, LEVELS_PER_SLICE);
        if (level >= LEVELS_PER_SLICE)
                panic("Cpu %d flooded with devices\n", cpu);

        __set_bit(level, hub->irq_alloc_mask);
        si->level_to_irq[level] = irq;

        return level;
}

static inline int find_level(cpuid_t *cpunum, int irq)
{
        int cpu, i;

        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                struct slice_data *si;

                if (!cpu_online(cpu))
                        continue;

                si = cpu_data[cpu].data;

                for (i = BASE_PCI_IRQ; i < LEVELS_PER_SLICE; i++)
                        if (si->level_to_irq[i] == irq) {
                                *cpunum = cpu;

                                return i;
                        }
        }

        panic("Could not identify cpu/level for irq %d\n", irq);
}

/*
 * Find the most significant bit that is set.
 */
static int ms1bit(unsigned long x)
{
        int b = 0, s;

        s = 32; if (x >> 32 == 0) s = 0; b += s; x >>= s;
        s = 16; if (x >> 16 == 0) s = 0; b += s; x >>= s;
        s =  8; if (x >>  8 == 0) s = 0; b += s; x >>= s;
        s =  4; if (x >>  4 == 0) s = 0; b += s; x >>= s;
        s =  2; if (x >>  2 == 0) s = 0; b += s; x >>= s;
        s =  1; if (x >>  1 == 0) s = 0; b += s;

        return b;
}
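
/*
 * Illustrative sketch, not used by the driver: ms1bit() above is a
 * branch-reduced "find last set" and, for arguments that fit in 32 bits,
 * should agree with the generic fls() helper minus one (fls() is
 * 1-based), e.g. ms1bit(0x41) == 6 and ms1bit(1) == 0.  The helper name
 * below is made up for illustration.
 */
static inline int ms1bit_by_fls(unsigned int x)
{
        return fls(x) - 1;      /* -1 for x == 0, otherwise the top bit index */
}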
/*
 * This code is unnecessarily complex because we do SA_INTERRUPT-style
 * interrupt enabling.  Basically, once we grab the set of interrupts we
 * need to service, we must mask _all_ of them; firstly, to make sure the
 * same interrupt does not fire again and cause recursion that can lead
 * to a stack overflow.  Secondly, we cannot just mask the one interrupt
 * we are do_IRQing, because the non-masked interrupts in the first set
 * might fire again, causing multiple servicings of the same interrupt.
 * This effect is mostly seen for inter-cpu interrupts.
 * Kanoj 05.13.00
 */

void ip27_do_irq_mask0(struct pt_regs *regs)
{
        int irq, swlevel;
        hubreg_t pend0, mask0;
        cpuid_t cpu = smp_processor_id();
        int pi_int_mask0 =
                (cputoslice(cpu) == 0) ? PI_INT_MASK0_A : PI_INT_MASK0_B;

        /* copied from Irix intpend0() */
        pend0 = LOCAL_HUB_L(PI_INT_PEND0);
        mask0 = LOCAL_HUB_L(pi_int_mask0);

        pend0 &= mask0;         /* Pick intrs we should look at */
        if (!pend0)
                return;

        swlevel = ms1bit(pend0);
#ifdef CONFIG_SMP
        if (pend0 & (1UL << CPU_RESCHED_A_IRQ)) {
                LOCAL_HUB_CLR_INTR(CPU_RESCHED_A_IRQ);
        } else if (pend0 & (1UL << CPU_RESCHED_B_IRQ)) {
                LOCAL_HUB_CLR_INTR(CPU_RESCHED_B_IRQ);
        } else if (pend0 & (1UL << CPU_CALL_A_IRQ)) {
                LOCAL_HUB_CLR_INTR(CPU_CALL_A_IRQ);
                smp_call_function_interrupt();
        } else if (pend0 & (1UL << CPU_CALL_B_IRQ)) {
                LOCAL_HUB_CLR_INTR(CPU_CALL_B_IRQ);
                smp_call_function_interrupt();
        } else
#endif
        {
                /* "map" swlevel to irq */
                struct slice_data *si = cpu_data[cpu].data;

                irq = si->level_to_irq[swlevel];
                do_IRQ(irq, regs);
        }

        LOCAL_HUB_L(PI_INT_PEND0);
}

void ip27_do_irq_mask1(struct pt_regs *regs)
{
        int irq, swlevel;
        hubreg_t pend1, mask1;
        cpuid_t cpu = smp_processor_id();
        int pi_int_mask1 = (cputoslice(cpu) == 0) ?
                                PI_INT_MASK1_A : PI_INT_MASK1_B;
        struct slice_data *si = cpu_data[cpu].data;

        /* copied from Irix intpend0() */
        pend1 = LOCAL_HUB_L(PI_INT_PEND1);
        mask1 = LOCAL_HUB_L(pi_int_mask1);

        pend1 &= mask1;         /* Pick intrs we should look at */
        if (!pend1)
                return;

        swlevel = ms1bit(pend1);
        /* "map" swlevel to irq */
        irq = si->level_to_irq[swlevel];
        LOCAL_HUB_CLR_INTR(swlevel);
        do_IRQ(irq, regs);

        LOCAL_HUB_L(PI_INT_PEND1);
}
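
/*
 * Illustrative sketch, not used above: the two handlers follow the same
 * pattern -- read the pending and mask registers for this slice, AND
 * them together, and service the most significant level that is both
 * pending and enabled.  The core of that selection is just the helper
 * below (made up for illustration); the handlers bail out first when
 * nothing is pending, since ms1bit(0) is 0.
 */
static inline int pending_swlevel(hubreg_t pend, hubreg_t mask)
{
        return ms1bit(pend & mask);     /* highest pending, enabled level */
}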
void ip27_prof_timer(struct pt_regs *regs)
{
        panic("CPU %d got a profiling interrupt", smp_processor_id());
}

void ip27_hub_error(struct pt_regs *regs)
{
        panic("CPU %d got a hub error interrupt", smp_processor_id());
}

static int intr_connect_level(int cpu, int bit)
{
        nasid_t nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu));
        struct slice_data *si = cpu_data[cpu].data;
        unsigned long flags;

        set_bit(bit, si->irq_enable_mask);

        local_irq_save(flags);
        if (!cputoslice(cpu)) {
                REMOTE_HUB_S(nasid, PI_INT_MASK0_A, si->irq_enable_mask[0]);
                REMOTE_HUB_S(nasid, PI_INT_MASK1_A, si->irq_enable_mask[1]);
        } else {
                REMOTE_HUB_S(nasid, PI_INT_MASK0_B, si->irq_enable_mask[0]);
                REMOTE_HUB_S(nasid, PI_INT_MASK1_B, si->irq_enable_mask[1]);
        }
        local_irq_restore(flags);

        return 0;
}

static int intr_disconnect_level(int cpu, int bit)
{
        nasid_t nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu));
        struct slice_data *si = cpu_data[cpu].data;

        clear_bit(bit, si->irq_enable_mask);

        if (!cputoslice(cpu)) {
                REMOTE_HUB_S(nasid, PI_INT_MASK0_A, si->irq_enable_mask[0]);
                REMOTE_HUB_S(nasid, PI_INT_MASK1_A, si->irq_enable_mask[1]);
        } else {
                REMOTE_HUB_S(nasid, PI_INT_MASK0_B, si->irq_enable_mask[0]);
                REMOTE_HUB_S(nasid, PI_INT_MASK1_B, si->irq_enable_mask[1]);
        }

        return 0;
}

/* Startup one of the (PCI ...) IRQs routed over a bridge. */
static unsigned int startup_bridge_irq(unsigned int irq)
{
        struct bridge_controller *bc;
        bridgereg_t device;
        bridge_t *bridge;
        int pin, swlevel;
        cpuid_t cpu;

        pin = SLOT_FROM_PCI_IRQ(irq);
        bc = IRQ_TO_BRIDGE(irq);
        bridge = bc->base;

        pr_debug("bridge_startup(): irq=0x%x pin=%d\n", irq, pin);
        /*
         * "map" irq to a swlevel greater than 6 since the first 6 bits
         * of INT_PEND0 are taken.
         */
        swlevel = find_level(&cpu, irq);
        bridge->b_int_addr[pin].addr = (0x20000 | swlevel | (bc->nasid << 8));
        bridge->b_int_enable |= (1 << pin);
        bridge->b_int_enable |= 0x7ffffe00;     /* more stuff in int_enable */

        /*
         * Enable sending of an interrupt clear packet to the hub on a
         * high-to-low transition of the interrupt pin.
         *
         * IRIX sets additional bits in the address which are documented as
         * reserved in the bridge docs.
         */
        bridge->b_int_mode |= (1UL << pin);

        /*
         * We assume the bridge to have a 1:1 mapping between devices
         * (slots) and intr pins.
         */
        device = bridge->b_int_device;
        device &= ~(7 << (pin * 3));
        device |= (pin << (pin * 3));
        bridge->b_int_device = device;

        bridge->b_wid_tflush;           /* read back to flush the writes */

        return 0;       /* Never anything pending.  */
}
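
/*
 * Illustrative sketch of the b_int_device encoding used in
 * startup_bridge_irq() above: the register holds a 3-bit device number
 * per interrupt pin, so programming pin n is a read-modify-write of bit
 * field [3n+2:3n].  With the 1:1 slot-to-pin assumption the device
 * number stored is simply the pin number itself.  The helper name is
 * made up for illustration and is not used by the driver.
 */
static inline bridgereg_t bridge_pack_int_device(bridgereg_t device,
                                                 int pin, int dev)
{
        device &= ~(7 << (pin * 3));    /* clear this pin's 3-bit field */
        device |= dev << (pin * 3);     /* install the new device number */

        return device;
}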
/* Shutdown one of the (PCI ...) IRQs routed over a bridge. */
static void shutdown_bridge_irq(unsigned int irq)
{
        struct bridge_controller *bc = IRQ_TO_BRIDGE(irq);
        struct hub_data *hub = hub_data(cpu_to_node(bc->irq_cpu));
        bridge_t *bridge = bc->base;
        struct slice_data *si = cpu_data[bc->irq_cpu].data;
        int pin, swlevel;
        cpuid_t cpu;

        pr_debug("bridge_shutdown: irq 0x%x\n", irq);
        pin = SLOT_FROM_PCI_IRQ(irq);

        /*
         * Map irq to a swlevel greater than 6 since the first 6 bits
         * of INT_PEND0 are taken.
         */
        swlevel = find_level(&cpu, irq);
        intr_disconnect_level(cpu, swlevel);

        __clear_bit(swlevel, hub->irq_alloc_mask);
        si->level_to_irq[swlevel] = -1;

        bridge->b_int_enable &= ~(1 << pin);
        bridge->b_wid_tflush;           /* read back to flush the write */
}

static inline void enable_bridge_irq(unsigned int irq)
{
        cpuid_t cpu;
        int swlevel;

        swlevel = find_level(&cpu, irq);        /* Criminal offence */
        intr_connect_level(cpu, swlevel);
}

static inline void disable_bridge_irq(unsigned int irq)
{
        cpuid_t cpu;
        int swlevel;

        swlevel = find_level(&cpu, irq);        /* Criminal offence */
        intr_disconnect_level(cpu, swlevel);
}

static void mask_and_ack_bridge_irq(unsigned int irq)
{
        disable_bridge_irq(irq);
}

static void end_bridge_irq(unsigned int irq)
{
        if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)) &&
            irq_desc[irq].action)
                enable_bridge_irq(irq);
}

static struct hw_interrupt_type bridge_irq_type = {
        .typename       = "bridge",
        .startup        = startup_bridge_irq,
        .shutdown       = shutdown_bridge_irq,
        .enable         = enable_bridge_irq,
        .disable        = disable_bridge_irq,
        .ack            = mask_and_ack_bridge_irq,
        .end            = end_bridge_irq,
};

static unsigned long irq_map[NR_IRQS / BITS_PER_LONG];

static int allocate_irqno(void)
{
        int irq;

again:
        irq = find_first_zero_bit(irq_map, NR_IRQS);

        if (irq >= NR_IRQS)
                return -ENOSPC;

        if (test_and_set_bit(irq, irq_map))
                goto again;

        return irq;
}

void free_irqno(unsigned int irq)
{
        clear_bit(irq, irq_map);
}

void __devinit register_bridge_irq(unsigned int irq)
{
        irq_desc[irq].status = IRQ_DISABLED;
        irq_desc[irq].action = NULL;
        irq_desc[irq].depth = 1;
        irq_desc[irq].handler = &bridge_irq_type;
}

int __devinit request_bridge_irq(struct bridge_controller *bc)
{
        int irq = allocate_irqno();
        int swlevel, cpu;
        nasid_t nasid;

        if (irq < 0)
                return irq;

        /*
         * "map" irq to a swlevel greater than 6 since the first 6 bits
         * of INT_PEND0 are taken.
         */
        cpu = bc->irq_cpu;
        swlevel = alloc_level(cpu, irq);
        if (unlikely(swlevel < 0)) {
                free_irqno(irq);

                return -EAGAIN;
        }

        /* Make sure it's not already pending when we connect it. */
        nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu));
        REMOTE_HUB_CLR_INTR(nasid, swlevel);

        intr_connect_level(cpu, swlevel);

        register_bridge_irq(irq);

        return irq;
}
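
#if 0
/*
 * Usage sketch only -- hypothetical driver code, never compiled here.
 * A driver for a device behind a bridge ends up with one of the IRQ
 * numbers produced by request_bridge_irq() above and registers a
 * handler in the usual way.  The names my_handler, my_attach and
 * "mydev" are made up for illustration.
 */
static irqreturn_t my_handler(int irq, void *dev_id, struct pt_regs *regs)
{
        /* ... service the device ... */
        return IRQ_HANDLED;
}

static int my_attach(struct bridge_controller *bc, void *dev_id)
{
        int irq = request_bridge_irq(bc);

        if (irq < 0)
                return irq;

        return request_irq(irq, my_handler, 0, "mydev", dev_id);
}
#endif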
void __init arch_init_irq(void)
{
        set_except_vector(0, ip27_irq);
}

void install_ipi(void)
{
        int slice = LOCAL_HUB_L(PI_CPU_NUM);
        int cpu = smp_processor_id();
        struct slice_data *si = cpu_data[cpu].data;
        struct hub_data *hub = hub_data(cpu_to_node(cpu));
        int resched, call;

        resched = CPU_RESCHED_A_IRQ + slice;
        __set_bit(resched, hub->irq_alloc_mask);
        __set_bit(resched, si->irq_enable_mask);
        LOCAL_HUB_CLR_INTR(resched);

        call = CPU_CALL_A_IRQ + slice;
        __set_bit(call, hub->irq_alloc_mask);
        __set_bit(call, si->irq_enable_mask);
        LOCAL_HUB_CLR_INTR(call);

        if (slice == 0) {
                LOCAL_HUB_S(PI_INT_MASK0_A, si->irq_enable_mask[0]);
                LOCAL_HUB_S(PI_INT_MASK1_A, si->irq_enable_mask[1]);
        } else {
                LOCAL_HUB_S(PI_INT_MASK0_B, si->irq_enable_mask[0]);
                LOCAL_HUB_S(PI_INT_MASK1_B, si->irq_enable_mask[1]);
        }
}
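
#if 0
/*
 * Sketch of the sending side of the IPIs whose receive path is set up in
 * install_ipi() above; the real sender lives in the IP27 SMP code, not
 * here, and this is shown only for illustration.  It assumes the
 * REMOTE_HUB_SEND_INTR() helper from the SN headers, which is expected
 * to latch the given level in the target hub's interrupt pending
 * register; the function name is made up for illustration.
 */
static void example_send_resched_ipi(int destid)
{
        int irq = CPU_RESCHED_A_IRQ + cputoslice(destid);

        REMOTE_HUB_SEND_INTR(COMPACT_TO_NASID_NODEID(cpu_to_node(destid)), irq);
}
#endif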