/*
 * ip27-irq.c: High-level interrupt handling for IP27 architecture.
 *
 * Copyright (C) 1999, 2000 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 1999 - 2001 Kanoj Sarcar
 */

#undef DEBUG

#include <linux/init.h>
#include <linux/irq.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/timex.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/bitops.h>

#include <asm/bootinfo.h>
#include <asm/io.h>
#include <asm/mipsregs.h>
#include <asm/system.h>

#include <asm/processor.h>
#include <asm/pci/bridge.h>
#include <asm/sn/addrs.h>
#include <asm/sn/agent.h>
#include <asm/sn/arch.h>
#include <asm/sn/hub.h>
#include <asm/sn/intr.h>

/*
 * Linux has a controller-independent interrupt architecture.
 * Every controller has a 'controller-template' that is used
 * by the main code to do the right thing.  Each driver-visible
 * interrupt source is transparently wired to the appropriate
 * controller, so drivers need not be aware of the
 * interrupt-controller.
 *
 * The controller handled here is the Bridge ASIC, which routes
 * PCI interrupts to interrupt levels on the local Hub's processor
 * interface (PI).
 *
 * The code is designed to be easily extended with new/different
 * interrupt controllers, without having to do assembly magic.
 */

extern asmlinkage void ip27_irq(void);

extern struct bridge_controller *irq_to_bridge[];
extern int irq_to_slot[];

/*
 * Use these macros to get the bridge controller and the PCI slot
 * associated with an irq value.
 */
#define IRQ_TO_BRIDGE(i)		irq_to_bridge[(i)]
#define SLOT_FROM_PCI_IRQ(i)		irq_to_slot[i]

/* Allocate a free hub interrupt level for @cpu and record that it maps to @irq. */
static inline int alloc_level(int cpu, int irq)
{
        struct hub_data *hub = hub_data(cpu_to_node(cpu));
        struct slice_data *si = cpu_data[cpu].data;
        int level;

        level = find_first_zero_bit(hub->irq_alloc_mask, LEVELS_PER_SLICE);
        if (level >= LEVELS_PER_SLICE)
                panic("Cpu %d flooded with devices\n", cpu);

        __set_bit(level, hub->irq_alloc_mask);
        si->level_to_irq[level] = irq;

        return level;
}

/* Reverse lookup: find the cpu and interrupt level that @irq was assigned to. */
static inline int find_level(cpuid_t *cpunum, int irq)
{
        int cpu, i;

        for_each_online_cpu(cpu) {
                struct slice_data *si = cpu_data[cpu].data;

                for (i = BASE_PCI_IRQ; i < LEVELS_PER_SLICE; i++)
                        if (si->level_to_irq[i] == irq) {
                                *cpunum = cpu;

                                return i;
                        }
        }

        panic("Could not identify cpu/level for irq %d\n", irq);
}

/*
 * Find the most significant set bit.
 */
static int ms1bit(unsigned long x)
{
        int b = 0, s;

        s = 16; if (x >> 16 == 0) s = 0; b += s; x >>= s;
        s =  8; if (x >>  8 == 0) s = 0; b += s; x >>= s;
        s =  4; if (x >>  4 == 0) s = 0; b += s; x >>= s;
        s =  2; if (x >>  2 == 0) s = 0; b += s; x >>= s;
        s =  1; if (x >>  1 == 0) s = 0; b += s;

        return b;
}
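/*
 * ms1bit() examples: ms1bit(0x44) == 6, ms1bit(1) == 0 and ms1bit(0) == 0.
 * Because the binary search above starts at bit 16, only bit positions
 * 0..31 are ever returned.
 */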
/*
 * This code is unnecessarily complex, because we do IRQF_DISABLED
 * intr enabling.  Basically, once we grab the set of intrs we need
 * to service, we must mask _all_ these interrupts; firstly, to make
 * sure the same intr does not intr again, causing recursion that
 * can lead to stack overflow.  Secondly, we cannot just mask the
 * one intr we are do_IRQing, because the non-masked intrs in the
 * first set might intr again, causing multiple servicings of the
 * same intr.  This effect is mostly seen for intercpu intrs.
 * Kanoj 05.13.00
 */

static void ip27_do_irq_mask0(void)
{
        int irq, swlevel;
        hubreg_t pend0, mask0;
        cpuid_t cpu = smp_processor_id();
        int pi_int_mask0 =
                (cputoslice(cpu) == 0) ? PI_INT_MASK0_A : PI_INT_MASK0_B;

        /* copied from Irix intpend0() */
        pend0 = LOCAL_HUB_L(PI_INT_PEND0);
        mask0 = LOCAL_HUB_L(pi_int_mask0);

        pend0 &= mask0;		/* Pick intrs we should look at */
        if (!pend0)
                return;

        swlevel = ms1bit(pend0);
#ifdef CONFIG_SMP
        if (pend0 & (1UL << CPU_RESCHED_A_IRQ)) {
                LOCAL_HUB_CLR_INTR(CPU_RESCHED_A_IRQ);
        } else if (pend0 & (1UL << CPU_RESCHED_B_IRQ)) {
                LOCAL_HUB_CLR_INTR(CPU_RESCHED_B_IRQ);
        } else if (pend0 & (1UL << CPU_CALL_A_IRQ)) {
                LOCAL_HUB_CLR_INTR(CPU_CALL_A_IRQ);
                smp_call_function_interrupt();
        } else if (pend0 & (1UL << CPU_CALL_B_IRQ)) {
                LOCAL_HUB_CLR_INTR(CPU_CALL_B_IRQ);
                smp_call_function_interrupt();
        } else
#endif
        {
                /* "map" swlevel to irq */
                struct slice_data *si = cpu_data[cpu].data;

                irq = si->level_to_irq[swlevel];
                do_IRQ(irq);
        }

        LOCAL_HUB_L(PI_INT_PEND0);
}

static void ip27_do_irq_mask1(void)
{
        int irq, swlevel;
        hubreg_t pend1, mask1;
        cpuid_t cpu = smp_processor_id();
        int pi_int_mask1 = (cputoslice(cpu) == 0) ? PI_INT_MASK1_A : PI_INT_MASK1_B;
        struct slice_data *si = cpu_data[cpu].data;

        /* copied from Irix intpend0() */
        pend1 = LOCAL_HUB_L(PI_INT_PEND1);
        mask1 = LOCAL_HUB_L(pi_int_mask1);

        pend1 &= mask1;		/* Pick intrs we should look at */
        if (!pend1)
                return;

        swlevel = ms1bit(pend1);
        /* "map" swlevel to irq */
        irq = si->level_to_irq[swlevel];
        LOCAL_HUB_CLR_INTR(swlevel);
        do_IRQ(irq);

        LOCAL_HUB_L(PI_INT_PEND1);
}

static void ip27_prof_timer(void)
{
        panic("CPU %d got a profiling interrupt", smp_processor_id());
}

static void ip27_hub_error(void)
{
        panic("CPU %d got a hub error interrupt", smp_processor_id());
}

/* Enable interrupt level @bit in @cpu's hub PI interrupt mask. */
static int intr_connect_level(int cpu, int bit)
{
        nasid_t nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu));
        struct slice_data *si = cpu_data[cpu].data;

        set_bit(bit, si->irq_enable_mask);

        if (!cputoslice(cpu)) {
                REMOTE_HUB_S(nasid, PI_INT_MASK0_A, si->irq_enable_mask[0]);
                REMOTE_HUB_S(nasid, PI_INT_MASK1_A, si->irq_enable_mask[1]);
        } else {
                REMOTE_HUB_S(nasid, PI_INT_MASK0_B, si->irq_enable_mask[0]);
                REMOTE_HUB_S(nasid, PI_INT_MASK1_B, si->irq_enable_mask[1]);
        }

        return 0;
}

/* Disable interrupt level @bit in @cpu's hub PI interrupt mask. */
static int intr_disconnect_level(int cpu, int bit)
{
        nasid_t nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu));
        struct slice_data *si = cpu_data[cpu].data;

        clear_bit(bit, si->irq_enable_mask);

        if (!cputoslice(cpu)) {
                REMOTE_HUB_S(nasid, PI_INT_MASK0_A, si->irq_enable_mask[0]);
                REMOTE_HUB_S(nasid, PI_INT_MASK1_A, si->irq_enable_mask[1]);
        } else {
                REMOTE_HUB_S(nasid, PI_INT_MASK0_B, si->irq_enable_mask[0]);
                REMOTE_HUB_S(nasid, PI_INT_MASK1_B, si->irq_enable_mask[1]);
        }

        return 0;
}
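/*
 * The functions below implement the irq_chip operations for PCI interrupts
 * routed through a Bridge ASIC: startup_bridge_irq() programs the bridge to
 * send its interrupt packets to the chosen hub/level, while mask/unmask only
 * toggle the corresponding level in the hub's PI interrupt mask.
 */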
/* Startup one of the (PCI ...) IRQs routed over a bridge.  */
static unsigned int startup_bridge_irq(unsigned int irq)
{
        struct bridge_controller *bc;
        bridgereg_t device;
        bridge_t *bridge;
        int pin, swlevel;
        cpuid_t cpu;

        pin = SLOT_FROM_PCI_IRQ(irq);
        bc = IRQ_TO_BRIDGE(irq);
        bridge = bc->base;

        pr_debug("bridge_startup(): irq= 0x%x pin=%d\n", irq, pin);
        /*
         * "map" irq to a swlevel greater than 6 since the first 6 bits
         * of INT_PEND0 are taken
         */
        swlevel = find_level(&cpu, irq);
        bridge->b_int_addr[pin].addr = (0x20000 | swlevel | (bc->nasid << 8));
        bridge->b_int_enable |= (1 << pin);
        bridge->b_int_enable |= 0x7ffffe00;	/* more stuff in int_enable */

        /*
         * Enable sending of an interrupt clear packet to the hub on a high to
         * low transition of the interrupt pin.
         *
         * IRIX sets additional bits in the address which are documented as
         * reserved in the bridge docs.
         */
        bridge->b_int_mode |= (1UL << pin);

        /*
         * We assume the bridge to have a 1:1 mapping between devices
         * (slots) and intr pins.
         */
        device = bridge->b_int_device;
        device &= ~(7 << (pin*3));
        device |= (pin << (pin*3));
        bridge->b_int_device = device;

        bridge->b_wid_tflush;		/* read to flush the preceding writes */

        intr_connect_level(cpu, swlevel);

        return 0;	/* Never anything pending.  */
}

/* Shutdown one of the (PCI ...) IRQs routed over a bridge. */
static void shutdown_bridge_irq(unsigned int irq)
{
        struct bridge_controller *bc = IRQ_TO_BRIDGE(irq);
        bridge_t *bridge = bc->base;
        int pin, swlevel;
        cpuid_t cpu;

        pr_debug("bridge_shutdown: irq 0x%x\n", irq);
        pin = SLOT_FROM_PCI_IRQ(irq);

        /*
         * map irq to a swlevel greater than 6 since the first 6 bits
         * of INT_PEND0 are taken
         */
        swlevel = find_level(&cpu, irq);
        intr_disconnect_level(cpu, swlevel);

        bridge->b_int_enable &= ~(1 << pin);
        bridge->b_wid_tflush;		/* read to flush the preceding writes */
}

static inline void enable_bridge_irq(unsigned int irq)
{
        cpuid_t cpu;
        int swlevel;

        swlevel = find_level(&cpu, irq);	/* Criminal offence */
        intr_connect_level(cpu, swlevel);
}

static inline void disable_bridge_irq(unsigned int irq)
{
        cpuid_t cpu;
        int swlevel;

        swlevel = find_level(&cpu, irq);	/* Criminal offence */
        intr_disconnect_level(cpu, swlevel);
}

static struct irq_chip bridge_irq_type = {
        .name		= "bridge",
        .startup	= startup_bridge_irq,
        .shutdown	= shutdown_bridge_irq,
        .ack		= disable_bridge_irq,
        .mask		= disable_bridge_irq,
        .mask_ack	= disable_bridge_irq,
        .unmask		= enable_bridge_irq,
};

void __devinit register_bridge_irq(unsigned int irq)
{
        set_irq_chip_and_handler(irq, &bridge_irq_type, handle_level_irq);
}
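/*
 * Allocate a Linux irq number and a hub interrupt level for a bridge,
 * wire them up on the bridge's designated CPU and register the bridge
 * irq_chip for the new irq.  Returns the irq number or a negative errno.
 */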
int __devinit request_bridge_irq(struct bridge_controller *bc)
{
        int irq = allocate_irqno();
        int swlevel, cpu;
        nasid_t nasid;

        if (irq < 0)
                return irq;

        /*
         * "map" irq to a swlevel greater than 6 since the first 6 bits
         * of INT_PEND0 are taken
         */
        cpu = bc->irq_cpu;
        swlevel = alloc_level(cpu, irq);
        if (unlikely(swlevel < 0)) {
                free_irqno(irq);

                return -EAGAIN;
        }

        /* Make sure it's not already pending when we connect it. */
        nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu));
        REMOTE_HUB_CLR_INTR(nasid, swlevel);

        intr_connect_level(cpu, swlevel);

        register_bridge_irq(irq);

        return irq;
}

asmlinkage void plat_irq_dispatch(void)
{
        unsigned long pending = read_c0_cause() & read_c0_status();
        extern unsigned int rt_timer_irq;

        if (pending & CAUSEF_IP4)
                do_IRQ(rt_timer_irq);
        else if (pending & CAUSEF_IP2)	/* PI_INT_PEND_0 or CC_PEND_{A|B} */
                ip27_do_irq_mask0();
        else if (pending & CAUSEF_IP3)	/* PI_INT_PEND_1 */
                ip27_do_irq_mask1();
        else if (pending & CAUSEF_IP5)
                ip27_prof_timer();
        else if (pending & CAUSEF_IP6)
                ip27_hub_error();
}

void __init arch_init_irq(void)
{
}

void install_ipi(void)
{
        int slice = LOCAL_HUB_L(PI_CPU_NUM);
        int cpu = smp_processor_id();
        struct slice_data *si = cpu_data[cpu].data;
        struct hub_data *hub = hub_data(cpu_to_node(cpu));
        int resched, call;

        resched = CPU_RESCHED_A_IRQ + slice;
        __set_bit(resched, hub->irq_alloc_mask);
        __set_bit(resched, si->irq_enable_mask);
        LOCAL_HUB_CLR_INTR(resched);

        call = CPU_CALL_A_IRQ + slice;
        __set_bit(call, hub->irq_alloc_mask);
        __set_bit(call, si->irq_enable_mask);
        LOCAL_HUB_CLR_INTR(call);

        if (slice == 0) {
                LOCAL_HUB_S(PI_INT_MASK0_A, si->irq_enable_mask[0]);
                LOCAL_HUB_S(PI_INT_MASK1_A, si->irq_enable_mask[1]);
        } else {
                LOCAL_HUB_S(PI_INT_MASK0_B, si->irq_enable_mask[0]);
                LOCAL_HUB_S(PI_INT_MASK1_B, si->irq_enable_mask[1]);
        }
}