1 /* 2 * arch/powerpc/kernel/mpic.c 3 * 4 * Driver for interrupt controllers following the OpenPIC standard, the 5 * common implementation beeing IBM's MPIC. This driver also can deal 6 * with various broken implementations of this HW. 7 * 8 * Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp. 9 * 10 * This file is subject to the terms and conditions of the GNU General Public 11 * License. See the file COPYING in the main directory of this archive 12 * for more details. 13 */ 14 15 #undef DEBUG 16 #undef DEBUG_IPI 17 #undef DEBUG_IRQ 18 #undef DEBUG_LOW 19 20 #include <linux/config.h> 21 #include <linux/types.h> 22 #include <linux/kernel.h> 23 #include <linux/init.h> 24 #include <linux/irq.h> 25 #include <linux/smp.h> 26 #include <linux/interrupt.h> 27 #include <linux/bootmem.h> 28 #include <linux/spinlock.h> 29 #include <linux/pci.h> 30 31 #include <asm/ptrace.h> 32 #include <asm/signal.h> 33 #include <asm/io.h> 34 #include <asm/pgtable.h> 35 #include <asm/irq.h> 36 #include <asm/machdep.h> 37 #include <asm/mpic.h> 38 #include <asm/smp.h> 39 40 #ifdef DEBUG 41 #define DBG(fmt...) printk(fmt) 42 #else 43 #define DBG(fmt...) 
44 #endif 45 46 static struct mpic *mpics; 47 static struct mpic *mpic_primary; 48 static DEFINE_SPINLOCK(mpic_lock); 49 50 #ifdef CONFIG_PPC32 /* XXX for now */ 51 #ifdef CONFIG_IRQ_ALL_CPUS 52 #define distribute_irqs (1) 53 #else 54 #define distribute_irqs (0) 55 #endif 56 #endif 57 58 /* 59 * Register accessor functions 60 */ 61 62 63 static inline u32 _mpic_read(unsigned int be, volatile u32 __iomem *base, 64 unsigned int reg) 65 { 66 if (be) 67 return in_be32(base + (reg >> 2)); 68 else 69 return in_le32(base + (reg >> 2)); 70 } 71 72 static inline void _mpic_write(unsigned int be, volatile u32 __iomem *base, 73 unsigned int reg, u32 value) 74 { 75 if (be) 76 out_be32(base + (reg >> 2), value); 77 else 78 out_le32(base + (reg >> 2), value); 79 } 80 81 static inline u32 _mpic_ipi_read(struct mpic *mpic, unsigned int ipi) 82 { 83 unsigned int be = (mpic->flags & MPIC_BIG_ENDIAN) != 0; 84 unsigned int offset = MPIC_GREG_IPI_VECTOR_PRI_0 + (ipi * 0x10); 85 86 if (mpic->flags & MPIC_BROKEN_IPI) 87 be = !be; 88 return _mpic_read(be, mpic->gregs, offset); 89 } 90 91 static inline void _mpic_ipi_write(struct mpic *mpic, unsigned int ipi, u32 value) 92 { 93 unsigned int offset = MPIC_GREG_IPI_VECTOR_PRI_0 + (ipi * 0x10); 94 95 _mpic_write(mpic->flags & MPIC_BIG_ENDIAN, mpic->gregs, offset, value); 96 } 97 98 static inline u32 _mpic_cpu_read(struct mpic *mpic, unsigned int reg) 99 { 100 unsigned int cpu = 0; 101 102 if (mpic->flags & MPIC_PRIMARY) 103 cpu = hard_smp_processor_id(); 104 105 return _mpic_read(mpic->flags & MPIC_BIG_ENDIAN, mpic->cpuregs[cpu], reg); 106 } 107 108 static inline void _mpic_cpu_write(struct mpic *mpic, unsigned int reg, u32 value) 109 { 110 unsigned int cpu = 0; 111 112 if (mpic->flags & MPIC_PRIMARY) 113 cpu = hard_smp_processor_id(); 114 115 _mpic_write(mpic->flags & MPIC_BIG_ENDIAN, mpic->cpuregs[cpu], reg, value); 116 } 117 118 static inline u32 _mpic_irq_read(struct mpic *mpic, unsigned int src_no, unsigned int reg) 119 { 120 unsigned 
int isu = src_no >> mpic->isu_shift; 121 unsigned int idx = src_no & mpic->isu_mask; 122 123 return _mpic_read(mpic->flags & MPIC_BIG_ENDIAN, mpic->isus[isu], 124 reg + (idx * MPIC_IRQ_STRIDE)); 125 } 126 127 static inline void _mpic_irq_write(struct mpic *mpic, unsigned int src_no, 128 unsigned int reg, u32 value) 129 { 130 unsigned int isu = src_no >> mpic->isu_shift; 131 unsigned int idx = src_no & mpic->isu_mask; 132 133 _mpic_write(mpic->flags & MPIC_BIG_ENDIAN, mpic->isus[isu], 134 reg + (idx * MPIC_IRQ_STRIDE), value); 135 } 136 137 #define mpic_read(b,r) _mpic_read(mpic->flags & MPIC_BIG_ENDIAN,(b),(r)) 138 #define mpic_write(b,r,v) _mpic_write(mpic->flags & MPIC_BIG_ENDIAN,(b),(r),(v)) 139 #define mpic_ipi_read(i) _mpic_ipi_read(mpic,(i)) 140 #define mpic_ipi_write(i,v) _mpic_ipi_write(mpic,(i),(v)) 141 #define mpic_cpu_read(i) _mpic_cpu_read(mpic,(i)) 142 #define mpic_cpu_write(i,v) _mpic_cpu_write(mpic,(i),(v)) 143 #define mpic_irq_read(s,r) _mpic_irq_read(mpic,(s),(r)) 144 #define mpic_irq_write(s,r,v) _mpic_irq_write(mpic,(s),(r),(v)) 145 146 147 /* 148 * Low level utility functions 149 */ 150 151 152 153 /* Check if we have one of those nice broken MPICs with a flipped endian on 154 * reads from IPI registers 155 */ 156 static void __init mpic_test_broken_ipi(struct mpic *mpic) 157 { 158 u32 r; 159 160 mpic_write(mpic->gregs, MPIC_GREG_IPI_VECTOR_PRI_0, MPIC_VECPRI_MASK); 161 r = mpic_read(mpic->gregs, MPIC_GREG_IPI_VECTOR_PRI_0); 162 163 if (r == le32_to_cpu(MPIC_VECPRI_MASK)) { 164 printk(KERN_INFO "mpic: Detected reversed IPI registers\n"); 165 mpic->flags |= MPIC_BROKEN_IPI; 166 } 167 } 168 169 #ifdef CONFIG_MPIC_BROKEN_U3 170 171 /* Test if an interrupt is sourced from HyperTransport (used on broken U3s) 172 * to force the edge setting on the MPIC and do the ack workaround. 
173 */ 174 static inline int mpic_is_ht_interrupt(struct mpic *mpic, unsigned int source) 175 { 176 if (source >= 128 || !mpic->fixups) 177 return 0; 178 return mpic->fixups[source].base != NULL; 179 } 180 181 182 static inline void mpic_ht_end_irq(struct mpic *mpic, unsigned int source) 183 { 184 struct mpic_irq_fixup *fixup = &mpic->fixups[source]; 185 186 if (fixup->applebase) { 187 unsigned int soff = (fixup->index >> 3) & ~3; 188 unsigned int mask = 1U << (fixup->index & 0x1f); 189 writel(mask, fixup->applebase + soff); 190 } else { 191 spin_lock(&mpic->fixup_lock); 192 writeb(0x11 + 2 * fixup->index, fixup->base + 2); 193 writel(fixup->data, fixup->base + 4); 194 spin_unlock(&mpic->fixup_lock); 195 } 196 } 197 198 static void mpic_startup_ht_interrupt(struct mpic *mpic, unsigned int source, 199 unsigned int irqflags) 200 { 201 struct mpic_irq_fixup *fixup = &mpic->fixups[source]; 202 unsigned long flags; 203 u32 tmp; 204 205 if (fixup->base == NULL) 206 return; 207 208 DBG("startup_ht_interrupt(%u, %u) index: %d\n", 209 source, irqflags, fixup->index); 210 spin_lock_irqsave(&mpic->fixup_lock, flags); 211 /* Enable and configure */ 212 writeb(0x10 + 2 * fixup->index, fixup->base + 2); 213 tmp = readl(fixup->base + 4); 214 tmp &= ~(0x23U); 215 if (irqflags & IRQ_LEVEL) 216 tmp |= 0x22; 217 writel(tmp, fixup->base + 4); 218 spin_unlock_irqrestore(&mpic->fixup_lock, flags); 219 } 220 221 static void mpic_shutdown_ht_interrupt(struct mpic *mpic, unsigned int source, 222 unsigned int irqflags) 223 { 224 struct mpic_irq_fixup *fixup = &mpic->fixups[source]; 225 unsigned long flags; 226 u32 tmp; 227 228 if (fixup->base == NULL) 229 return; 230 231 DBG("shutdown_ht_interrupt(%u, %u)\n", source, irqflags); 232 233 /* Disable */ 234 spin_lock_irqsave(&mpic->fixup_lock, flags); 235 writeb(0x10 + 2 * fixup->index, fixup->base + 2); 236 tmp = readl(fixup->base + 4); 237 tmp &= ~1U; 238 writel(tmp, fixup->base + 4); 239 spin_unlock_irqrestore(&mpic->fixup_lock, flags); 240 
}

/* Scan one device function for the HyperTransport interrupt-definition
 * capability (subtype 0x80 of PCI_CAP_ID_HT_IRQCONF) and record a fixup
 * entry for every interrupt index the device sources.
 */
static void __init mpic_scan_ht_pic(struct mpic *mpic, u8 __iomem *devbase,
				    unsigned int devfn, u32 vdid)
{
	int i, irq, n;
	u8 __iomem *base;
	u32 tmp;
	u8 pos;

	/* Walk the PCI capability list looking for the HT IRQ block */
	for (pos = readb(devbase + PCI_CAPABILITY_LIST); pos != 0;
	     pos = readb(devbase + pos + PCI_CAP_LIST_NEXT)) {
		u8 id = readb(devbase + pos + PCI_CAP_LIST_ID);
		if (id == PCI_CAP_ID_HT_IRQCONF) {
			id = readb(devbase + pos + 3);
			if (id == 0x80)
				break;
		}
	}
	if (pos == 0)
		return;

	base = devbase + pos;

	/* Index 0x01 selects the capability header; bits 23:16 of the data
	 * word give the highest interrupt index this device sources.
	 */
	writeb(0x01, base + 2);
	n = (readl(base + 4) >> 16) & 0xff;

	printk(KERN_INFO "mpic: - HT:%02x.%x [0x%02x] vendor %04x device %04x"
	       " has %d irqs\n",
	       devfn >> 3, devfn & 0x7, pos, vdid & 0xffff, vdid >> 16, n + 1);

	for (i = 0; i <= n; i++) {
		/* Select interrupt definition i (low word at 0x10 + 2*i) */
		writeb(0x10 + 2 * i, base + 2);
		tmp = readl(base + 4);
		irq = (tmp >> 16) & 0xff;
		DBG("HT PIC index 0x%x, irq 0x%x, tmp: %08x\n", i, irq, tmp);
		/* mask it , will be unmasked later */
		tmp |= 0x1;
		writel(tmp, base + 4);
		mpic->fixups[irq].index = i;
		mpic->fixups[irq].base = base;
		/* Apple HT PIC has a non-standard way of doing EOIs */
		if ((vdid & 0xffff) == 0x106b)
			mpic->fixups[irq].applebase = devbase + 0x60;
		else
			mpic->fixups[irq].applebase = NULL;
		/* Remember EOI data: the high definition word with bit 31 set */
		writeb(0x11 + 2 * i, base + 2);
		mpic->fixups[irq].data = readl(base + 4) | 0x80000000;
	}
}


/* Scan every slot on the primary HT bus for PICs that need the U3/U4
 * ack workaround, populating mpic->fixups.
 */
static void __init mpic_scan_ht_pics(struct mpic *mpic)
{
	unsigned int devfn;
	u8 __iomem *cfgspace;

	printk(KERN_INFO "mpic: Setting up HT PICs workarounds for U3/U4\n");

	/* Allocate fixups array */
	mpic->fixups = alloc_bootmem(128 * sizeof(struct mpic_irq_fixup));
	BUG_ON(mpic->fixups == NULL);
	memset(mpic->fixups, 0, 128 * sizeof(struct mpic_irq_fixup));

	/* Init spinlock */
	spin_lock_init(&mpic->fixup_lock);

	/* Map U3 config space. We assume all IO-APICs are on the primary bus
	 * so we only need to map 64kB.
	 */
	cfgspace = ioremap(0xf2000000, 0x10000);
	BUG_ON(cfgspace == NULL);

	/* Now we scan all slots. We do a very quick scan, we read the header
	 * type, vendor ID and device ID only, that's plenty enough
	 */
	for (devfn = 0; devfn < 0x100; devfn++) {
		u8 __iomem *devbase = cfgspace + (devfn << 8);
		u8 hdr_type = readb(devbase + PCI_HEADER_TYPE);
		u32 l = readl(devbase + PCI_VENDOR_ID);
		u16 s;

		DBG("devfn %x, l: %x\n", devfn, l);

		/* If no device, skip */
		if (l == 0xffffffff || l == 0x00000000 ||
		    l == 0x0000ffff || l == 0xffff0000)
			goto next;
		/* Check if it supports capability lists */
		s = readw(devbase + PCI_STATUS);
		if (!(s & PCI_STATUS_CAP_LIST))
			goto next;

		mpic_scan_ht_pic(mpic, devbase, devfn, l);

	next:
		/* next device, if function 0 */
		if (PCI_FUNC(devfn) == 0 && (hdr_type & 0x80) == 0)
			devfn += 7;
	}
}

#endif /* CONFIG_MPIC_BROKEN_U3 */


/* Find an mpic associated with a given linux interrupt.
 * Returns NULL (and leaves *is_ipi untouched) if no controller owns irq.
 */
static struct mpic *mpic_find(unsigned int irq, unsigned int *is_ipi)
{
	struct mpic *mpic = mpics;

	while (mpic) {
		/* search IPIs first since they may override the main interrupts */
		if (irq >= mpic->ipi_offset && irq < (mpic->ipi_offset + 4)) {
			if (is_ipi)
				*is_ipi = 1;
			return mpic;
		}
		if (irq >= mpic->irq_offset &&
		    irq < (mpic->irq_offset + mpic->irq_count)) {
			if (is_ipi)
				*is_ipi = 0;
			return mpic;
		}
		mpic = mpic->next;
	}
	return NULL;
}

/* Convert a cpu mask from logical to physical cpu numbers.
*/ 368 static inline u32 mpic_physmask(u32 cpumask) 369 { 370 int i; 371 u32 mask = 0; 372 373 for (i = 0; i < NR_CPUS; ++i, cpumask >>= 1) 374 mask |= (cpumask & 1) << get_hard_smp_processor_id(i); 375 return mask; 376 } 377 378 #ifdef CONFIG_SMP 379 /* Get the mpic structure from the IPI number */ 380 static inline struct mpic * mpic_from_ipi(unsigned int ipi) 381 { 382 return container_of(irq_desc[ipi].handler, struct mpic, hc_ipi); 383 } 384 #endif 385 386 /* Get the mpic structure from the irq number */ 387 static inline struct mpic * mpic_from_irq(unsigned int irq) 388 { 389 return container_of(irq_desc[irq].handler, struct mpic, hc_irq); 390 } 391 392 /* Send an EOI */ 393 static inline void mpic_eoi(struct mpic *mpic) 394 { 395 mpic_cpu_write(MPIC_CPU_EOI, 0); 396 (void)mpic_cpu_read(MPIC_CPU_WHOAMI); 397 } 398 399 #ifdef CONFIG_SMP 400 static irqreturn_t mpic_ipi_action(int irq, void *dev_id, struct pt_regs *regs) 401 { 402 struct mpic *mpic = dev_id; 403 404 smp_message_recv(irq - mpic->ipi_offset, regs); 405 return IRQ_HANDLED; 406 } 407 #endif /* CONFIG_SMP */ 408 409 /* 410 * Linux descriptor level callbacks 411 */ 412 413 414 static void mpic_enable_irq(unsigned int irq) 415 { 416 unsigned int loops = 100000; 417 struct mpic *mpic = mpic_from_irq(irq); 418 unsigned int src = irq - mpic->irq_offset; 419 420 DBG("%p: %s: enable_irq: %d (src %d)\n", mpic, mpic->name, irq, src); 421 422 mpic_irq_write(src, MPIC_IRQ_VECTOR_PRI, 423 mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) & 424 ~MPIC_VECPRI_MASK); 425 426 /* make sure mask gets to controller before we return to user */ 427 do { 428 if (!loops--) { 429 printk(KERN_ERR "mpic_enable_irq timeout\n"); 430 break; 431 } 432 } while(mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) & MPIC_VECPRI_MASK); 433 434 #ifdef CONFIG_MPIC_BROKEN_U3 435 if (mpic->flags & MPIC_BROKEN_U3) { 436 unsigned int src = irq - mpic->irq_offset; 437 if (mpic_is_ht_interrupt(mpic, src) && 438 (irq_desc[irq].status & IRQ_LEVEL)) 439 
mpic_ht_end_irq(mpic, src); 440 } 441 #endif /* CONFIG_MPIC_BROKEN_U3 */ 442 } 443 444 static unsigned int mpic_startup_irq(unsigned int irq) 445 { 446 #ifdef CONFIG_MPIC_BROKEN_U3 447 struct mpic *mpic = mpic_from_irq(irq); 448 unsigned int src = irq - mpic->irq_offset; 449 450 if (mpic_is_ht_interrupt(mpic, src)) 451 mpic_startup_ht_interrupt(mpic, src, irq_desc[irq].status); 452 453 #endif /* CONFIG_MPIC_BROKEN_U3 */ 454 455 mpic_enable_irq(irq); 456 457 return 0; 458 } 459 460 static void mpic_disable_irq(unsigned int irq) 461 { 462 unsigned int loops = 100000; 463 struct mpic *mpic = mpic_from_irq(irq); 464 unsigned int src = irq - mpic->irq_offset; 465 466 DBG("%s: disable_irq: %d (src %d)\n", mpic->name, irq, src); 467 468 mpic_irq_write(src, MPIC_IRQ_VECTOR_PRI, 469 mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) | 470 MPIC_VECPRI_MASK); 471 472 /* make sure mask gets to controller before we return to user */ 473 do { 474 if (!loops--) { 475 printk(KERN_ERR "mpic_enable_irq timeout\n"); 476 break; 477 } 478 } while(!(mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) & MPIC_VECPRI_MASK)); 479 } 480 481 static void mpic_shutdown_irq(unsigned int irq) 482 { 483 #ifdef CONFIG_MPIC_BROKEN_U3 484 struct mpic *mpic = mpic_from_irq(irq); 485 unsigned int src = irq - mpic->irq_offset; 486 487 if (mpic_is_ht_interrupt(mpic, src)) 488 mpic_shutdown_ht_interrupt(mpic, src, irq_desc[irq].status); 489 490 #endif /* CONFIG_MPIC_BROKEN_U3 */ 491 492 mpic_disable_irq(irq); 493 } 494 495 static void mpic_end_irq(unsigned int irq) 496 { 497 struct mpic *mpic = mpic_from_irq(irq); 498 499 #ifdef DEBUG_IRQ 500 DBG("%s: end_irq: %d\n", mpic->name, irq); 501 #endif 502 /* We always EOI on end_irq() even for edge interrupts since that 503 * should only lower the priority, the MPIC should have properly 504 * latched another edge interrupt coming in anyway 505 */ 506 507 #ifdef CONFIG_MPIC_BROKEN_U3 508 if (mpic->flags & MPIC_BROKEN_U3) { 509 unsigned int src = irq - mpic->irq_offset; 510 if 
(mpic_is_ht_interrupt(mpic, src) && 511 (irq_desc[irq].status & IRQ_LEVEL)) 512 mpic_ht_end_irq(mpic, src); 513 } 514 #endif /* CONFIG_MPIC_BROKEN_U3 */ 515 516 mpic_eoi(mpic); 517 } 518 519 #ifdef CONFIG_SMP 520 521 static void mpic_enable_ipi(unsigned int irq) 522 { 523 struct mpic *mpic = mpic_from_ipi(irq); 524 unsigned int src = irq - mpic->ipi_offset; 525 526 DBG("%s: enable_ipi: %d (ipi %d)\n", mpic->name, irq, src); 527 mpic_ipi_write(src, mpic_ipi_read(src) & ~MPIC_VECPRI_MASK); 528 } 529 530 static void mpic_disable_ipi(unsigned int irq) 531 { 532 /* NEVER disable an IPI... that's just plain wrong! */ 533 } 534 535 static void mpic_end_ipi(unsigned int irq) 536 { 537 struct mpic *mpic = mpic_from_ipi(irq); 538 539 /* 540 * IPIs are marked IRQ_PER_CPU. This has the side effect of 541 * preventing the IRQ_PENDING/IRQ_INPROGRESS logic from 542 * applying to them. We EOI them late to avoid re-entering. 543 * We mark IPI's with SA_INTERRUPT as they must run with 544 * irqs disabled. 
	 */
	mpic_eoi(mpic);
}

#endif /* CONFIG_SMP */

/* Retarget an interrupt source to the given set of CPUs (intersected
 * with the online map, converted to physical CPU numbers).
 */
static void mpic_set_affinity(unsigned int irq, cpumask_t cpumask)
{
	struct mpic *mpic = mpic_from_irq(irq);

	cpumask_t tmp;

	cpus_and(tmp, cpumask, cpu_online_map);

	mpic_irq_write(irq - mpic->irq_offset, MPIC_IRQ_DESTINATION,
		       mpic_physmask(cpus_addr(tmp)[0]));
}


/*
 * Exported functions
 */


/* Allocate an mpic instance from bootmem, fill in its irq/ipi chip
 * callbacks and layout parameters, map its global and per-CPU register
 * blocks, optionally reset the controller, and link it into the global
 * list (recording it as primary when MPIC_PRIMARY is set).
 * Returns NULL on allocation failure.
 */
struct mpic * __init mpic_alloc(unsigned long phys_addr,
				unsigned int flags,
				unsigned int isu_size,
				unsigned int irq_offset,
				unsigned int irq_count,
				unsigned int ipi_offset,
				unsigned char *senses,
				unsigned int senses_count,
				const char *name)
{
	struct mpic *mpic;
	u32 reg;
	const char *vers;
	int i;

	mpic = alloc_bootmem(sizeof(struct mpic));
	if (mpic == NULL)
		return NULL;


	memset(mpic, 0, sizeof(struct mpic));
	mpic->name = name;

	mpic->hc_irq.typename = name;
	mpic->hc_irq.startup = mpic_startup_irq;
	mpic->hc_irq.shutdown = mpic_shutdown_irq;
	mpic->hc_irq.enable = mpic_enable_irq;
	mpic->hc_irq.disable = mpic_disable_irq;
	mpic->hc_irq.end = mpic_end_irq;
	/* only the primary controller can retarget interrupts */
	if (flags & MPIC_PRIMARY)
		mpic->hc_irq.set_affinity = mpic_set_affinity;
#ifdef CONFIG_SMP
	mpic->hc_ipi.typename = name;
	mpic->hc_ipi.enable = mpic_enable_ipi;
	mpic->hc_ipi.disable = mpic_disable_ipi;
	mpic->hc_ipi.end = mpic_end_ipi;
#endif /* CONFIG_SMP */

	mpic->flags = flags;
	mpic->isu_size = isu_size;
	mpic->irq_offset = irq_offset;
	mpic->irq_count = irq_count;
	mpic->ipi_offset = ipi_offset;
	mpic->num_sources = 0; /* so far */
	mpic->senses = senses;
	mpic->senses_count = senses_count;

	/* Map the global registers */
	mpic->gregs = ioremap(phys_addr + MPIC_GREG_BASE, 0x1000);
	mpic->tmregs = mpic->gregs + ((MPIC_TIMER_BASE - MPIC_GREG_BASE) >> 2);
	BUG_ON(mpic->gregs == NULL);

	/* Reset */
	if (flags & MPIC_WANTS_RESET) {
		mpic_write(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0,
			   mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0)
			   | MPIC_GREG_GCONF_RESET);
		/* spin until the controller clears the reset bit */
		while( mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0)
		       & MPIC_GREG_GCONF_RESET)
			mb();
	}

	/* Read feature register, calculate num CPUs and, for non-ISU
	 * MPICs, num sources as well. On ISU MPICs, sources are counted
	 * as ISUs are added
	 */
	reg = mpic_read(mpic->gregs, MPIC_GREG_FEATURE_0);
	mpic->num_cpus = ((reg & MPIC_GREG_FEATURE_LAST_CPU_MASK)
			  >> MPIC_GREG_FEATURE_LAST_CPU_SHIFT) + 1;
	if (isu_size == 0)
		mpic->num_sources = ((reg & MPIC_GREG_FEATURE_LAST_SRC_MASK)
				     >> MPIC_GREG_FEATURE_LAST_SRC_SHIFT) + 1;

	/* Map the per-CPU registers */
	for (i = 0; i < mpic->num_cpus; i++) {
		mpic->cpuregs[i] = ioremap(phys_addr + MPIC_CPU_BASE +
					   i * MPIC_CPU_STRIDE, 0x1000);
		BUG_ON(mpic->cpuregs[i] == NULL);
	}

	/* Initialize main ISU if none provided */
	if (mpic->isu_size == 0) {
		mpic->isu_size = mpic->num_sources;
		mpic->isus[0] = ioremap(phys_addr + MPIC_IRQ_BASE,
					MPIC_IRQ_STRIDE * mpic->isu_size);
		BUG_ON(mpic->isus[0] == NULL);
	}
	/* isu_shift/isu_mask split a source number into (ISU, index) */
	mpic->isu_shift = 1 + __ilog2(mpic->isu_size - 1);
	mpic->isu_mask = (1 << mpic->isu_shift) - 1;

	/* Display version */
	switch (reg & MPIC_GREG_FEATURE_VERSION_MASK) {
	case 1:
		vers = "1.0";
		break;
	case 2:
		vers = "1.2";
		break;
	case 3:
		vers = "1.3";
		break;
	default:
		vers = "<unknown>";
		break;
	}
	printk(KERN_INFO "mpic: Setting up MPIC \"%s\" version %s at %lx, max %d CPUs\n",
	       name, vers, phys_addr, mpic->num_cpus);
	printk(KERN_INFO "mpic: ISU size: %d, shift: %d, mask: %x\n", mpic->isu_size,
	       mpic->isu_shift, mpic->isu_mask);

	/* Link into the global list; remember the primary controller */
	mpic->next = mpics;
	mpics = mpic;

	if (flags & MPIC_PRIMARY)
		mpic_primary = mpic;

	return mpic;
}

/* Map an additional ISU's register block and grow num_sources to cover
 * the sources it contributes.
 */
void __init mpic_assign_isu(struct mpic *mpic, unsigned int isu_num,
			    unsigned long phys_addr)
{
	unsigned int isu_first = isu_num * mpic->isu_size;

	BUG_ON(isu_num >= MPIC_MAX_ISU);

	mpic->isus[isu_num] = ioremap(phys_addr, MPIC_IRQ_STRIDE * mpic->isu_size);
	if ((isu_first + mpic->isu_size) > mpic->num_sources)
		mpic->num_sources = isu_first + mpic->isu_size;
}

/* Install (or replace) the cascade decode handler attached to irq.
 * NOTE(review): mpic_find() may return NULL for an irq no controller
 * owns, which would oops below — confirm callers only pass valid irqs.
 */
void __init mpic_setup_cascade(unsigned int irq, mpic_cascade_t handler,
			       void *data)
{
	struct mpic *mpic = mpic_find(irq, NULL);
	unsigned long flags;

	/* Synchronization here is a bit dodgy, so don't try to replace cascade
	 * interrupts on the fly too often ... but normally it's set up at boot.
	 */
	spin_lock_irqsave(&mpic_lock, flags);
	if (mpic->cascade)
		mpic_disable_irq(mpic->cascade_vec + mpic->irq_offset);
	/* clear the old handler before publishing the new vec/data */
	mpic->cascade = NULL;
	wmb();
	mpic->cascade_vec = irq - mpic->irq_offset;
	mpic->cascade_data = data;
	wmb();
	mpic->cascade = handler;
	mpic_enable_irq(irq);
	spin_unlock_irqrestore(&mpic_lock, flags);
}

/* Bring the controller to a known state: mask everything (timers, IPIs,
 * all sources), program vectors, then drop this CPU's priority to 0.
 */
void __init mpic_init(struct mpic *mpic)
{
	int i;

	BUG_ON(mpic->num_sources == 0);

	printk(KERN_INFO "mpic: Initializing for %d sources\n", mpic->num_sources);

	/* Set current processor priority to max */
	mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, 0xf);

	/* Initialize timers: just disable them all */
	for (i = 0; i < 4; i++) {
		mpic_write(mpic->tmregs,
			   i * MPIC_TIMER_STRIDE + MPIC_TIMER_DESTINATION, 0);
		mpic_write(mpic->tmregs,
			   i * MPIC_TIMER_STRIDE + MPIC_TIMER_VECTOR_PRI,
			   MPIC_VECPRI_MASK |
			   (MPIC_VEC_TIMER_0 + i));
	}

	/* Initialize IPIs to our reserved vectors and mark them disabled for now */
	mpic_test_broken_ipi(mpic);
	for (i = 0; i < 4; i++) {
		mpic_ipi_write(i,
			       MPIC_VECPRI_MASK |
			       (10 << MPIC_VECPRI_PRIORITY_SHIFT) |
			       (MPIC_VEC_IPI_0 + i));
#ifdef CONFIG_SMP
		/* only the primary controller handles IPIs */
		if (!(mpic->flags & MPIC_PRIMARY))
			continue;
		/* IPIs are per-CPU and routed to this mpic's IPI chip */
		irq_desc[mpic->ipi_offset+i].status |= IRQ_PER_CPU;
		irq_desc[mpic->ipi_offset+i].handler = &mpic->hc_ipi;
#endif /* CONFIG_SMP */
	}

	/* Initialize interrupt sources */
	if (mpic->irq_count == 0)
		mpic->irq_count = mpic->num_sources;

#ifdef CONFIG_MPIC_BROKEN_U3
	/* Do the HT PIC fixups on U3 broken mpic */
	DBG("MPIC flags: %x\n", mpic->flags);
	if ((mpic->flags & MPIC_BROKEN_U3) && (mpic->flags & MPIC_PRIMARY))
		mpic_scan_ht_pics(mpic);
#endif /* CONFIG_MPIC_BROKEN_U3 */

	for (i = 0; i < mpic->num_sources; i++) {
		/* start with vector = source number, and masked */
		u32 vecpri = MPIC_VECPRI_MASK | i | (8 << MPIC_VECPRI_PRIORITY_SHIFT);
		int level = 0;

		/* if it's an IPI, we skip it.
		 * NOTE(review): both sides of the comparison add i, so this
		 * reduces to checking irq_offset against [ipi_offset,
		 * ipi_offset + 4) independently of i — verify the intent was
		 * not "(irq_offset + i) in [ipi_offset, ipi_offset + 4)".
		 */
		if ((mpic->irq_offset + i) >= (mpic->ipi_offset + i) &&
		    (mpic->irq_offset + i) < (mpic->ipi_offset + i + 4))
			continue;

		/* do senses munging */
		if (mpic->senses && i < mpic->senses_count) {
			if (mpic->senses[i] & IRQ_SENSE_LEVEL)
				vecpri |= MPIC_VECPRI_SENSE_LEVEL;
			if (mpic->senses[i] & IRQ_POLARITY_POSITIVE)
				vecpri |= MPIC_VECPRI_POLARITY_POSITIVE;
		} else
			vecpri |= MPIC_VECPRI_SENSE_LEVEL;

		/* remember if it was a level interrupts */
		level = (vecpri & MPIC_VECPRI_SENSE_LEVEL);

		/* deal with broken U3: HT interrupts are forced to
		 * positive-polarity edge at the MPIC */
		if (mpic->flags & MPIC_BROKEN_U3) {
#ifdef CONFIG_MPIC_BROKEN_U3
			if (mpic_is_ht_interrupt(mpic, i)) {
				vecpri &= ~(MPIC_VECPRI_SENSE_MASK |
					    MPIC_VECPRI_POLARITY_MASK);
				vecpri |= MPIC_VECPRI_POLARITY_POSITIVE;
			}
#else
			printk(KERN_ERR "mpic: BROKEN_U3 set, but CONFIG doesn't match\n");
#endif
		}

		DBG("setup source %d, vecpri: %08x, level: %d\n", i, vecpri,
		    (level != 0));

		/* init hw */
		mpic_irq_write(i, MPIC_IRQ_VECTOR_PRI, vecpri);
		mpic_irq_write(i, MPIC_IRQ_DESTINATION,
			       1 << hard_smp_processor_id());

		/* init linux descriptors */
		if (i < mpic->irq_count) {
			irq_desc[mpic->irq_offset+i].status = level ? IRQ_LEVEL : 0;
			irq_desc[mpic->irq_offset+i].handler = &mpic->hc_irq;
		}
	}

	/* Init spurrious vector */
	mpic_write(mpic->gregs, MPIC_GREG_SPURIOUS, MPIC_VEC_SPURRIOUS);

	/* Disable 8259 passthrough */
	mpic_write(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0,
		   mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0)
		   | MPIC_GREG_GCONF_8259_PTHROU_DIS);

	/* Set current processor priority to 0 */
	mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, 0);
}



/* Set the priority field of one interrupt source (or IPI).
 * NOTE(review): is_ipi is declared int but mpic_find() takes
 * unsigned int * — a sign-mismatch warning; also mpic_find() may
 * return NULL for an unknown irq — confirm callers.
 */
void mpic_irq_set_priority(unsigned int irq, unsigned int pri)
{
	int is_ipi;
	struct mpic *mpic = mpic_find(irq, &is_ipi);
	unsigned long flags;
	u32 reg;

	spin_lock_irqsave(&mpic_lock, flags);
	if (is_ipi) {
		reg = mpic_ipi_read(irq - mpic->ipi_offset) &
			~MPIC_VECPRI_PRIORITY_MASK;
		mpic_ipi_write(irq - mpic->ipi_offset,
			       reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT));
	} else {
		reg = mpic_irq_read(irq - mpic->irq_offset, MPIC_IRQ_VECTOR_PRI)
			& ~MPIC_VECPRI_PRIORITY_MASK;
		mpic_irq_write(irq - mpic->irq_offset, MPIC_IRQ_VECTOR_PRI,
			       reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT));
	}
	spin_unlock_irqrestore(&mpic_lock, flags);
}

/* Read back the priority field of one interrupt source (or IPI). */
unsigned int mpic_irq_get_priority(unsigned int irq)
{
	int is_ipi;
	struct mpic *mpic = mpic_find(irq, &is_ipi);
	unsigned long flags;
	u32 reg;

	spin_lock_irqsave(&mpic_lock, flags);
	if (is_ipi)
		reg = mpic_ipi_read(irq - mpic->ipi_offset);
	else
		reg = mpic_irq_read(irq - mpic->irq_offset, MPIC_IRQ_VECTOR_PRI);
	spin_unlock_irqrestore(&mpic_lock, flags);
	return (reg & MPIC_VECPRI_PRIORITY_MASK) >> MPIC_VECPRI_PRIORITY_SHIFT;
}

/* Per-CPU bring-up on the primary MPIC: optionally add this CPU to every
 * source's destination mask, then lower this CPU's task priority to 0.
 */
void mpic_setup_this_cpu(void)
{
#ifdef CONFIG_SMP
	struct mpic *mpic = mpic_primary;
	unsigned long flags;
	u32 msk = 1 << hard_smp_processor_id();
	unsigned int i;

	BUG_ON(mpic == NULL);

	DBG("%s: setup_this_cpu(%d)\n", mpic->name, hard_smp_processor_id());

	spin_lock_irqsave(&mpic_lock, flags);

	/* let the mpic know we want intrs. default affinity is 0xffffffff
	 * until changed via /proc. That's how it's done on x86. If we want
	 * it differently, then we should make sure we also change the default
	 * values of irq_affinity in irq.c.
	 */
	if (distribute_irqs) {
		for (i = 0; i < mpic->num_sources ; i++)
			mpic_irq_write(i, MPIC_IRQ_DESTINATION,
				       mpic_irq_read(i, MPIC_IRQ_DESTINATION) | msk);
	}

	/* Set current processor priority to 0 */
	mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, 0);

	spin_unlock_irqrestore(&mpic_lock, flags);
#endif /* CONFIG_SMP */
}

/* Return this CPU's current task priority on the primary MPIC. */
int mpic_cpu_get_priority(void)
{
	struct mpic *mpic = mpic_primary;

	return mpic_cpu_read(MPIC_CPU_CURRENT_TASK_PRI);
}

/* Set this CPU's current task priority on the primary MPIC. */
void mpic_cpu_set_priority(int prio)
{
	struct mpic *mpic = mpic_primary;

	prio &= MPIC_CPU_TASKPRI_MASK;
	mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, prio);
}

/*
 * XXX: someone who knows mpic should check this.
 * do we need to eoi the ipi including for kexec cpu here (see xics comments)?
 * or can we reset the mpic in the new kernel?
 */
void mpic_teardown_this_cpu(int secondary)
{
	struct mpic *mpic = mpic_primary;
	unsigned long flags;
	u32 msk = 1 << hard_smp_processor_id();
	unsigned int i;

	BUG_ON(mpic == NULL);

	DBG("%s: teardown_this_cpu(%d)\n", mpic->name, hard_smp_processor_id());
	spin_lock_irqsave(&mpic_lock, flags);

	/* let the mpic know we don't want intrs.
	 */
	for (i = 0; i < mpic->num_sources ; i++)
		mpic_irq_write(i, MPIC_IRQ_DESTINATION,
			       mpic_irq_read(i, MPIC_IRQ_DESTINATION) & ~msk);

	/* Set current processor priority to max */
	mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, 0xf);

	spin_unlock_irqrestore(&mpic_lock, flags);
}


/* Trigger IPI ipi_no on every CPU in cpu_mask (a logical mask, converted
 * to physical CPU numbers before hitting the dispatch register).
 */
void mpic_send_ipi(unsigned int ipi_no, unsigned int cpu_mask)
{
	struct mpic *mpic = mpic_primary;

	BUG_ON(mpic == NULL);

#ifdef DEBUG_IPI
	DBG("%s: send_ipi(ipi_no: %d)\n", mpic->name, ipi_no);
#endif

	mpic_cpu_write(MPIC_CPU_IPI_DISPATCH_0 + ipi_no * 0x10,
		       mpic_physmask(cpu_mask & cpus_addr(cpu_online_map)[0]));
}

/* Acknowledge the next pending interrupt on this controller and map its
 * vector to a linux irq number.  Returns -1 on spurious, or the result
 * of the cascade handler when the cascade vector fires.
 */
int mpic_get_one_irq(struct mpic *mpic, struct pt_regs *regs)
{
	u32 irq;

	irq = mpic_cpu_read(MPIC_CPU_INTACK) & MPIC_VECPRI_VECTOR_MASK;
#ifdef DEBUG_LOW
	DBG("%s: get_one_irq(): %d\n", mpic->name, irq);
#endif
	/* Cascaded controller? Let its handler decode; EOI the cascade here. */
	if (mpic->cascade && irq == mpic->cascade_vec) {
#ifdef DEBUG_LOW
		DBG("%s: cascading ...\n", mpic->name);
#endif
		irq = mpic->cascade(regs, mpic->cascade_data);
		mpic_eoi(mpic);
		return irq;
	}
	if (unlikely(irq == MPIC_VEC_SPURRIOUS))
		return -1;
	if (irq < MPIC_VEC_IPI_0) {
#ifdef DEBUG_IRQ
		DBG("%s: irq %d\n", mpic->name, irq + mpic->irq_offset);
#endif
		return irq + mpic->irq_offset;
	}
#ifdef DEBUG_IPI
	DBG("%s: ipi %d !\n", mpic->name, irq - MPIC_VEC_IPI_0);
#endif
	/* Vector >= MPIC_VEC_IPI_0: it's one of the four IPIs */
	return irq - MPIC_VEC_IPI_0 + mpic->ipi_offset;
}

/* Fetch the next pending interrupt from the primary controller. */
int mpic_get_irq(struct pt_regs *regs)
{
	struct mpic *mpic = mpic_primary;

	BUG_ON(mpic == NULL);

	return mpic_get_one_irq(mpic, regs);
}


#ifdef CONFIG_SMP
/* Register the four IPI handlers on the primary MPIC.
 * NOTE(review): request_irq() return values are ignored here — confirm
 * failure is impossible this early in boot, or add error reporting.
 */
void mpic_request_ipis(void)
{
	struct mpic *mpic = mpic_primary;

	BUG_ON(mpic == NULL);

	printk("requesting IPIs ... \n");

	/* IPIs are marked SA_INTERRUPT as they must run with irqs disabled */
	request_irq(mpic->ipi_offset+0, mpic_ipi_action, SA_INTERRUPT,
		    "IPI0 (call function)", mpic);
	request_irq(mpic->ipi_offset+1, mpic_ipi_action, SA_INTERRUPT,
		    "IPI1 (reschedule)", mpic);
	request_irq(mpic->ipi_offset+2, mpic_ipi_action, SA_INTERRUPT,
		    "IPI2 (unused)", mpic);
	request_irq(mpic->ipi_offset+3, mpic_ipi_action, SA_INTERRUPT,
		    "IPI3 (debugger break)", mpic);

	printk("IPIs requested... \n");
}

/* Translate an SMP message target (all / all-but-self / one CPU) into
 * an IPI dispatch with the matching destination mask.
 */
void smp_mpic_message_pass(int target, int msg)
{
	/* make sure we're sending something that translates to an IPI */
	if ((unsigned int)msg > 3) {
		printk("SMP %d: smp_message_pass: unknown msg %d\n",
		       smp_processor_id(), msg);
		return;
	}
	switch (target) {
	case MSG_ALL:
		mpic_send_ipi(msg, 0xffffffff);
		break;
	case MSG_ALL_BUT_SELF:
		mpic_send_ipi(msg, 0xffffffff & ~(1 << smp_processor_id()));
		break;
	default:
		mpic_send_ipi(msg, 1 << target);
		break;
	}
}
#endif /* CONFIG_SMP */