/*
 * Cell Internal Interrupt Controller
 *
 * Copyright (C) 2006 Benjamin Herrenschmidt (benh@kernel.crashing.org)
 *                    IBM, Corp.
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * TODO:
 * - Fix various assumptions related to HW CPU numbers vs. linux CPU numbers
 *   vs node numbers in the setup code
 * - Implement proper handling of maxcpus=1/2 (that is, routing of irqs from
 *   a non-active node to the active node)
 */

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/ioport.h>
#include <linux/kernel_stat.h>

#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
#include <asm/cell-regs.h>

#include "interrupt.h"

struct iic {
	struct cbe_iic_thread_regs __iomem *regs;
	u8 target_id;
	u8 eoi_stack[16];
	int eoi_ptr;
	struct device_node *node;
};

static DEFINE_PER_CPU(struct iic, iic);
#define IIC_NODE_COUNT	2
static struct irq_host *iic_host;

/* Convert between "pending" bits and hw irq number */
static irq_hw_number_t iic_pending_to_hwnum(struct cbe_iic_pending_bits bits)
{
	unsigned char unit = bits.source & 0xf;
	unsigned char node = bits.source >> 4;
	unsigned char class = bits.class & 3;

	/* Decode IPIs */
	if (bits.flags & CBE_IIC_IRQ_IPI)
		return IIC_IRQ_TYPE_IPI | (bits.prio >> 4);
	else
		return (node << IIC_IRQ_NODE_SHIFT) | (class << 4) | unit;
}
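
/*
 * The irq_chip mask/unmask callbacks below are intentionally empty:
 * this driver controls interrupt delivery through the per-thread
 * priority register instead, pushing the pending priority in
 * iic_get_irq() and restoring it from the EOI stack in iic_eoi().
 */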
static void iic_mask(unsigned int irq)
{
}

static void iic_unmask(unsigned int irq)
{
}

static void iic_eoi(unsigned int irq)
{
	struct iic *iic = &__get_cpu_var(iic);
	out_be64(&iic->regs->prio, iic->eoi_stack[--iic->eoi_ptr]);
	BUG_ON(iic->eoi_ptr < 0);
}

static struct irq_chip iic_chip = {
	.typename = " CELL-IIC ",
	.mask = iic_mask,
	.unmask = iic_unmask,
	.eoi = iic_eoi,
};

static void iic_ioexc_eoi(unsigned int irq)
{
}

static void iic_ioexc_cascade(unsigned int irq, struct irq_desc *desc)
{
	struct cbe_iic_regs __iomem *node_iic = (void __iomem *)desc->handler_data;
	unsigned int base = (irq & 0xffffff00) | IIC_IRQ_TYPE_IOEXC;
	unsigned long bits, ack;
	int cascade;

	for (;;) {
		bits = in_be64(&node_iic->iic_is);
		if (bits == 0)
			break;
		/* pre-ack edge interrupts */
		ack = bits & IIC_ISR_EDGE_MASK;
		if (ack)
			out_be64(&node_iic->iic_is, ack);
		/* handle them */
		for (cascade = 63; cascade >= 0; cascade--)
			if (bits & (0x8000000000000000UL >> cascade)) {
				unsigned int cirq =
					irq_linear_revmap(iic_host,
							  base | cascade);
				if (cirq != NO_IRQ)
					generic_handle_irq(cirq);
			}
		/* post-ack level interrupts */
		ack = bits & ~IIC_ISR_EDGE_MASK;
		if (ack)
			out_be64(&node_iic->iic_is, ack);
	}
	desc->chip->eoi(irq);
}

static struct irq_chip iic_ioexc_chip = {
	.typename = " CELL-IOEX",
	.mask = iic_mask,
	.unmask = iic_unmask,
	.eoi = iic_ioexc_eoi,
};

/* Get an IRQ number from the pending state register of the IIC */
static unsigned int iic_get_irq(void)
{
	struct cbe_iic_pending_bits pending;
	struct iic *iic;
	unsigned int virq;

	iic = &__get_cpu_var(iic);
	*(unsigned long *) &pending =
		in_be64((unsigned long __iomem *) &iic->regs->pending_destr);
	if (!(pending.flags & CBE_IIC_IRQ_VALID))
		return NO_IRQ;
	virq = irq_linear_revmap(iic_host, iic_pending_to_hwnum(pending));
	if (virq == NO_IRQ)
		return NO_IRQ;
	iic->eoi_stack[++iic->eoi_ptr] = pending.prio;
	BUG_ON(iic->eoi_ptr > 15);
	return virq;
}

void iic_setup_cpu(void)
{
	out_be64(&__get_cpu_var(iic).regs->prio, 0xff);
}

u8 iic_get_target_id(int cpu)
{
	return per_cpu(iic, cpu).target_id;
}

EXPORT_SYMBOL_GPL(iic_get_target_id);

#ifdef CONFIG_SMP

/* Use the highest interrupt priorities for IPI */
static inline int iic_ipi_to_irq(int ipi)
{
	return IIC_IRQ_TYPE_IPI + 0xf - ipi;
}

void iic_cause_IPI(int cpu, int mesg)
{
	out_be64(&per_cpu(iic, cpu).regs->generate, (0xf - mesg) << 4);
}

struct irq_host *iic_get_irq_host(int node)
{
	return iic_host;
}
EXPORT_SYMBOL_GPL(iic_get_irq_host);

static irqreturn_t iic_ipi_action(int irq, void *dev_id)
{
	int ipi = (int)(long)dev_id;

	smp_message_recv(ipi);

	return IRQ_HANDLED;
}

static void iic_request_ipi(int ipi, const char *name)
{
	int virq;

	virq = irq_create_mapping(iic_host, iic_ipi_to_irq(ipi));
	if (virq == NO_IRQ) {
		printk(KERN_ERR
		       "iic: failed to map IPI %s\n", name);
		return;
	}
	if (request_irq(virq, iic_ipi_action, IRQF_DISABLED, name,
			(void *)(long)ipi))
		printk(KERN_ERR
		       "iic: failed to request IPI %s\n", name);
}

void iic_request_IPIs(void)
{
	iic_request_ipi(PPC_MSG_CALL_FUNCTION, "IPI-call");
	iic_request_ipi(PPC_MSG_RESCHEDULE, "IPI-resched");
#ifdef CONFIG_DEBUGGER
	iic_request_ipi(PPC_MSG_DEBUGGER_BREAK, "IPI-debug");
#endif /* CONFIG_DEBUGGER */
}

#endif /* CONFIG_SMP */

static int iic_host_match(struct irq_host *h, struct device_node *node)
{
	return of_device_is_compatible(node,
				    "IBM,CBEA-Internal-Interrupt-Controller");
}

extern int noirqdebug;
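
/*
 * Flow handler used for external (non-IPI) IIC interrupts: like
 * handle_fasteoi_irq it always sends an EOI on the way out, but it
 * also keeps the IRQ_PENDING replay loop of handle_edge_irq so that
 * interrupts flagged while the action runs are not lost.
 */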
static void handle_iic_irq(unsigned int irq, struct irq_desc *desc)
{
	const unsigned int cpu = smp_processor_id();

	spin_lock(&desc->lock);

	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);

	/*
	 * If we're currently running this IRQ, or it's disabled,
	 * we shouldn't process the IRQ. Mark it pending, handle
	 * the necessary masking and go out.
	 */
	if (unlikely((desc->status & (IRQ_INPROGRESS | IRQ_DISABLED)) ||
		    !desc->action)) {
		desc->status |= IRQ_PENDING;
		goto out_eoi;
	}

	kstat_cpu(cpu).irqs[irq]++;

	/* Mark the IRQ currently in progress. */
	desc->status |= IRQ_INPROGRESS;

	do {
		struct irqaction *action = desc->action;
		irqreturn_t action_ret;

		if (unlikely(!action))
			goto out_eoi;

		desc->status &= ~IRQ_PENDING;
		spin_unlock(&desc->lock);
		action_ret = handle_IRQ_event(irq, action);
		if (!noirqdebug)
			note_interrupt(irq, desc, action_ret);
		spin_lock(&desc->lock);

	} while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING);

	desc->status &= ~IRQ_INPROGRESS;
out_eoi:
	desc->chip->eoi(irq);
	spin_unlock(&desc->lock);
}

static int iic_host_map(struct irq_host *h, unsigned int virq,
			irq_hw_number_t hw)
{
	switch (hw & IIC_IRQ_TYPE_MASK) {
	case IIC_IRQ_TYPE_IPI:
		set_irq_chip_and_handler(virq, &iic_chip, handle_percpu_irq);
		break;
	case IIC_IRQ_TYPE_IOEXC:
		set_irq_chip_and_handler(virq, &iic_ioexc_chip,
					 handle_iic_irq);
		break;
	default:
		set_irq_chip_and_handler(virq, &iic_chip, handle_iic_irq);
	}
	return 0;
}

static int iic_host_xlate(struct irq_host *h, struct device_node *ct,
			  u32 *intspec, unsigned int intsize,
			  irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
	unsigned int node, ext, unit, class;
	const u32 *val;

	if (!of_device_is_compatible(ct,
				    "IBM,CBEA-Internal-Interrupt-Controller"))
		return -ENODEV;
	if (intsize != 1)
		return -ENODEV;
	val = of_get_property(ct, "#interrupt-cells", NULL);
	if (val == NULL || *val != 1)
		return -ENODEV;

	node = intspec[0] >> 24;
	ext = (intspec[0] >> 16) & 0xff;
	class = (intspec[0] >> 8) & 0xff;
	unit = intspec[0] & 0xff;

	/* Check if node is in supported range */
	if (node > 1)
		return -EINVAL;

	/* Build up interrupt number, special case for IO exceptions */
	*out_hwirq = (node << IIC_IRQ_NODE_SHIFT);
	if (unit == IIC_UNIT_IIC && class == 1)
		*out_hwirq |= IIC_IRQ_TYPE_IOEXC | ext;
	else
		*out_hwirq |= IIC_IRQ_TYPE_NORMAL |
			(class << IIC_IRQ_CLASS_SHIFT) | unit;

	/* Dummy flags, ignored by iic code */
	*out_flags = IRQ_TYPE_EDGE_RISING;

	return 0;
}

static struct irq_host_ops iic_host_ops = {
	.match = iic_host_match,
	.map = iic_host_map,
	.xlate = iic_host_xlate,
};
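
/*
 * Set up the IIC state for one hardware thread: map its register
 * block and compute its interrupt target ID. In the encoding below,
 * bit 4 of the target ID selects the node (BE) and the low nibble is
 * 0xe for hardware thread 0 and 0xf for hardware thread 1.
 */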
static void __init init_one_iic(unsigned int hw_cpu, unsigned long addr,
				struct device_node *node)
{
	/* XXX FIXME: should locate the linux CPU number from the HW cpu
	 * number properly. We are lucky for now
	 */
	struct iic *iic = &per_cpu(iic, hw_cpu);

	iic->regs = ioremap(addr, sizeof(struct cbe_iic_thread_regs));
	BUG_ON(iic->regs == NULL);

	iic->target_id = ((hw_cpu & 2) << 3) | ((hw_cpu & 1) ? 0xf : 0xe);
	iic->eoi_stack[0] = 0xff;
	iic->node = of_node_get(node);
	out_be64(&iic->regs->prio, 0);

	printk(KERN_INFO "IIC for CPU %d target id 0x%x : %s\n",
	       hw_cpu, iic->target_id, node->full_name);
}

static int __init setup_iic(void)
{
	struct device_node *dn;
	struct resource r0, r1;
	unsigned int node, cascade, found = 0;
	struct cbe_iic_regs __iomem *node_iic;
	const u32 *np;

	for (dn = NULL;
	     (dn = of_find_node_by_name(dn, "interrupt-controller")) != NULL;) {
		if (!of_device_is_compatible(dn,
				     "IBM,CBEA-Internal-Interrupt-Controller"))
			continue;
		np = of_get_property(dn, "ibm,interrupt-server-ranges", NULL);
		if (np == NULL) {
			printk(KERN_WARNING "IIC: CPU association not found\n");
			of_node_put(dn);
			return -ENODEV;
		}
		if (of_address_to_resource(dn, 0, &r0) ||
		    of_address_to_resource(dn, 1, &r1)) {
			printk(KERN_WARNING "IIC: Can't resolve addresses\n");
			of_node_put(dn);
			return -ENODEV;
		}
		found++;
		init_one_iic(np[0], r0.start, dn);
		init_one_iic(np[1], r1.start, dn);

		/* Setup cascade for IO exceptions. XXX cleanup tricks to get
		 * node vs CPU etc...
		 * Note that we configure the IIC_IRR here with a hard coded
		 * priority of 1. We might want to improve that later.
		 */
		node = np[0] >> 1;
		node_iic = cbe_get_cpu_iic_regs(np[0]);
		cascade = node << IIC_IRQ_NODE_SHIFT;
		cascade |= 1 << IIC_IRQ_CLASS_SHIFT;
		cascade |= IIC_UNIT_IIC;
		cascade = irq_create_mapping(iic_host, cascade);
		if (cascade == NO_IRQ)
			continue;
		/*
		 * irq_data is a generic pointer that gets passed back
		 * to us later, so the forced cast is fine.
		 */
		set_irq_data(cascade, (void __force *)node_iic);
		set_irq_chained_handler(cascade, iic_ioexc_cascade);
		out_be64(&node_iic->iic_ir,
			 (1 << 12)		/* priority */ |
			 (node << 4)		/* dest node */ |
			 IIC_UNIT_THREAD_0	/* route them to thread 0 */);
		/* Flush pending (make sure it triggers if there is
		 * anything pending)
		 */
		out_be64(&node_iic->iic_is, 0xfffffffffffffffful);
	}

	if (found)
		return 0;
	else
		return -ENODEV;
}

void __init iic_init_IRQ(void)
{
	/* Setup an irq host data structure */
	iic_host = irq_alloc_host(NULL, IRQ_HOST_MAP_LINEAR, IIC_SOURCE_COUNT,
				  &iic_host_ops, IIC_IRQ_INVALID);
	BUG_ON(iic_host == NULL);
	irq_set_default_host(iic_host);

	/* Discover and initialize iics */
	if (setup_iic() < 0)
		panic("IIC: Failed to initialize!\n");

	/* Set master interrupt handling function */
	ppc_md.get_irq = iic_get_irq;

	/* Enable on current CPU */
	iic_setup_cpu();
}

void iic_set_interrupt_routing(int cpu, int thread, int priority)
{
	struct cbe_iic_regs __iomem *iic_regs = cbe_get_cpu_iic_regs(cpu);
	u64 iic_ir = 0;
	int node = cpu >> 1;

	/* Set which node and thread will handle the next interrupt */
	iic_ir |= CBE_IIC_IR_PRIO(priority) |
		  CBE_IIC_IR_DEST_NODE(node);
	if (thread == 0)
		iic_ir |= CBE_IIC_IR_DEST_UNIT(CBE_IIC_IR_PT_0);
	else
		iic_ir |= CBE_IIC_IR_DEST_UNIT(CBE_IIC_IR_PT_1);
	out_be64(&iic_regs->iic_ir, iic_ir);
}