/*
 *  arch/powerpc/kernel/mpic.c
 *
 *  Driver for interrupt controllers following the OpenPIC standard, the
 *  common implementation being IBM's MPIC. This driver also can deal
 *  with various broken implementations of this HW.
 *
 *  Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp.
 *
 *  This file is subject to the terms and conditions of the GNU General Public
 *  License.  See the file COPYING in the main directory of this archive
 *  for more details.
 */

#undef DEBUG

#include <linux/config.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/bootmem.h>
#include <linux/spinlock.h>
#include <linux/pci.h>

#include <asm/ptrace.h>
#include <asm/signal.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/machdep.h>
#include <asm/mpic.h>
#include <asm/smp.h>

#ifdef DEBUG
#define DBG(fmt...) printk(fmt)
#else
#define DBG(fmt...)
41 #endif 42 43 static struct mpic *mpics; 44 static struct mpic *mpic_primary; 45 static DEFINE_SPINLOCK(mpic_lock); 46 47 #ifdef CONFIG_PPC32 /* XXX for now */ 48 #define distribute_irqs CONFIG_IRQ_ALL_CPUS 49 #endif 50 51 /* 52 * Register accessor functions 53 */ 54 55 56 static inline u32 _mpic_read(unsigned int be, volatile u32 __iomem *base, 57 unsigned int reg) 58 { 59 if (be) 60 return in_be32(base + (reg >> 2)); 61 else 62 return in_le32(base + (reg >> 2)); 63 } 64 65 static inline void _mpic_write(unsigned int be, volatile u32 __iomem *base, 66 unsigned int reg, u32 value) 67 { 68 if (be) 69 out_be32(base + (reg >> 2), value); 70 else 71 out_le32(base + (reg >> 2), value); 72 } 73 74 static inline u32 _mpic_ipi_read(struct mpic *mpic, unsigned int ipi) 75 { 76 unsigned int be = (mpic->flags & MPIC_BIG_ENDIAN) != 0; 77 unsigned int offset = MPIC_GREG_IPI_VECTOR_PRI_0 + (ipi * 0x10); 78 79 if (mpic->flags & MPIC_BROKEN_IPI) 80 be = !be; 81 return _mpic_read(be, mpic->gregs, offset); 82 } 83 84 static inline void _mpic_ipi_write(struct mpic *mpic, unsigned int ipi, u32 value) 85 { 86 unsigned int offset = MPIC_GREG_IPI_VECTOR_PRI_0 + (ipi * 0x10); 87 88 _mpic_write(mpic->flags & MPIC_BIG_ENDIAN, mpic->gregs, offset, value); 89 } 90 91 static inline u32 _mpic_cpu_read(struct mpic *mpic, unsigned int reg) 92 { 93 unsigned int cpu = 0; 94 95 if (mpic->flags & MPIC_PRIMARY) 96 cpu = hard_smp_processor_id(); 97 98 return _mpic_read(mpic->flags & MPIC_BIG_ENDIAN, mpic->cpuregs[cpu], reg); 99 } 100 101 static inline void _mpic_cpu_write(struct mpic *mpic, unsigned int reg, u32 value) 102 { 103 unsigned int cpu = 0; 104 105 if (mpic->flags & MPIC_PRIMARY) 106 cpu = hard_smp_processor_id(); 107 108 _mpic_write(mpic->flags & MPIC_BIG_ENDIAN, mpic->cpuregs[cpu], reg, value); 109 } 110 111 static inline u32 _mpic_irq_read(struct mpic *mpic, unsigned int src_no, unsigned int reg) 112 { 113 unsigned int isu = src_no >> mpic->isu_shift; 114 unsigned int idx = src_no & 
mpic->isu_mask; 115 116 return _mpic_read(mpic->flags & MPIC_BIG_ENDIAN, mpic->isus[isu], 117 reg + (idx * MPIC_IRQ_STRIDE)); 118 } 119 120 static inline void _mpic_irq_write(struct mpic *mpic, unsigned int src_no, 121 unsigned int reg, u32 value) 122 { 123 unsigned int isu = src_no >> mpic->isu_shift; 124 unsigned int idx = src_no & mpic->isu_mask; 125 126 _mpic_write(mpic->flags & MPIC_BIG_ENDIAN, mpic->isus[isu], 127 reg + (idx * MPIC_IRQ_STRIDE), value); 128 } 129 130 #define mpic_read(b,r) _mpic_read(mpic->flags & MPIC_BIG_ENDIAN,(b),(r)) 131 #define mpic_write(b,r,v) _mpic_write(mpic->flags & MPIC_BIG_ENDIAN,(b),(r),(v)) 132 #define mpic_ipi_read(i) _mpic_ipi_read(mpic,(i)) 133 #define mpic_ipi_write(i,v) _mpic_ipi_write(mpic,(i),(v)) 134 #define mpic_cpu_read(i) _mpic_cpu_read(mpic,(i)) 135 #define mpic_cpu_write(i,v) _mpic_cpu_write(mpic,(i),(v)) 136 #define mpic_irq_read(s,r) _mpic_irq_read(mpic,(s),(r)) 137 #define mpic_irq_write(s,r,v) _mpic_irq_write(mpic,(s),(r),(v)) 138 139 140 /* 141 * Low level utility functions 142 */ 143 144 145 146 /* Check if we have one of those nice broken MPICs with a flipped endian on 147 * reads from IPI registers 148 */ 149 static void __init mpic_test_broken_ipi(struct mpic *mpic) 150 { 151 u32 r; 152 153 mpic_write(mpic->gregs, MPIC_GREG_IPI_VECTOR_PRI_0, MPIC_VECPRI_MASK); 154 r = mpic_read(mpic->gregs, MPIC_GREG_IPI_VECTOR_PRI_0); 155 156 if (r == le32_to_cpu(MPIC_VECPRI_MASK)) { 157 printk(KERN_INFO "mpic: Detected reversed IPI registers\n"); 158 mpic->flags |= MPIC_BROKEN_IPI; 159 } 160 } 161 162 #ifdef CONFIG_MPIC_BROKEN_U3 163 164 /* Test if an interrupt is sourced from HyperTransport (used on broken U3s) 165 * to force the edge setting on the MPIC and do the ack workaround. 
166 */ 167 static inline int mpic_is_ht_interrupt(struct mpic *mpic, unsigned int source_no) 168 { 169 if (source_no >= 128 || !mpic->fixups) 170 return 0; 171 return mpic->fixups[source_no].base != NULL; 172 } 173 174 static inline void mpic_apic_end_irq(struct mpic *mpic, unsigned int source_no) 175 { 176 struct mpic_irq_fixup *fixup = &mpic->fixups[source_no]; 177 u32 tmp; 178 179 spin_lock(&mpic->fixup_lock); 180 writeb(0x11 + 2 * fixup->irq, fixup->base); 181 tmp = readl(fixup->base + 2); 182 writel(tmp | 0x80000000ul, fixup->base + 2); 183 /* config writes shouldn't be posted but let's be safe ... */ 184 (void)readl(fixup->base + 2); 185 spin_unlock(&mpic->fixup_lock); 186 } 187 188 189 static void __init mpic_amd8111_read_irq(struct mpic *mpic, u8 __iomem *devbase) 190 { 191 int i, irq; 192 u32 tmp; 193 194 printk(KERN_INFO "mpic: - Workarounds on AMD 8111 @ %p\n", devbase); 195 196 for (i=0; i < 24; i++) { 197 writeb(0x10 + 2*i, devbase + 0xf2); 198 tmp = readl(devbase + 0xf4); 199 if ((tmp & 0x1) || !(tmp & 0x20)) 200 continue; 201 irq = (tmp >> 16) & 0xff; 202 mpic->fixups[irq].irq = i; 203 mpic->fixups[irq].base = devbase + 0xf2; 204 } 205 } 206 207 static void __init mpic_amd8131_read_irq(struct mpic *mpic, u8 __iomem *devbase) 208 { 209 int i, irq; 210 u32 tmp; 211 212 printk(KERN_INFO "mpic: - Workarounds on AMD 8131 @ %p\n", devbase); 213 214 for (i=0; i < 4; i++) { 215 writeb(0x10 + 2*i, devbase + 0xba); 216 tmp = readl(devbase + 0xbc); 217 if ((tmp & 0x1) || !(tmp & 0x20)) 218 continue; 219 irq = (tmp >> 16) & 0xff; 220 mpic->fixups[irq].irq = i; 221 mpic->fixups[irq].base = devbase + 0xba; 222 } 223 } 224 225 static void __init mpic_scan_ioapics(struct mpic *mpic) 226 { 227 unsigned int devfn; 228 u8 __iomem *cfgspace; 229 230 printk(KERN_INFO "mpic: Setting up IO-APICs workarounds for U3\n"); 231 232 /* Allocate fixups array */ 233 mpic->fixups = alloc_bootmem(128 * sizeof(struct mpic_irq_fixup)); 234 BUG_ON(mpic->fixups == NULL); 235 
memset(mpic->fixups, 0, 128 * sizeof(struct mpic_irq_fixup)); 236 237 /* Init spinlock */ 238 spin_lock_init(&mpic->fixup_lock); 239 240 /* Map u3 config space. We assume all IO-APICs are on the primary bus 241 * and slot will never be above "0xf" so we only need to map 32k 242 */ 243 cfgspace = (unsigned char __iomem *)ioremap(0xf2000000, 0x8000); 244 BUG_ON(cfgspace == NULL); 245 246 /* Now we scan all slots. We do a very quick scan, we read the header type, 247 * vendor ID and device ID only, that's plenty enough 248 */ 249 for (devfn = 0; devfn < PCI_DEVFN(0x10,0); devfn ++) { 250 u8 __iomem *devbase = cfgspace + (devfn << 8); 251 u8 hdr_type = readb(devbase + PCI_HEADER_TYPE); 252 u32 l = readl(devbase + PCI_VENDOR_ID); 253 u16 vendor_id, device_id; 254 int multifunc = 0; 255 256 DBG("devfn %x, l: %x\n", devfn, l); 257 258 /* If no device, skip */ 259 if (l == 0xffffffff || l == 0x00000000 || 260 l == 0x0000ffff || l == 0xffff0000) 261 goto next; 262 263 /* Check if it's a multifunction device (only really used 264 * to function 0 though 265 */ 266 multifunc = !!(hdr_type & 0x80); 267 vendor_id = l & 0xffff; 268 device_id = (l >> 16) & 0xffff; 269 270 /* If a known device, go to fixup setup code */ 271 if (vendor_id == PCI_VENDOR_ID_AMD && device_id == 0x7460) 272 mpic_amd8111_read_irq(mpic, devbase); 273 if (vendor_id == PCI_VENDOR_ID_AMD && device_id == 0x7450) 274 mpic_amd8131_read_irq(mpic, devbase); 275 next: 276 /* next device, if function 0 */ 277 if ((PCI_FUNC(devfn) == 0) && !multifunc) 278 devfn += 7; 279 } 280 } 281 282 #endif /* CONFIG_MPIC_BROKEN_U3 */ 283 284 285 /* Find an mpic associated with a given linux interrupt */ 286 static struct mpic *mpic_find(unsigned int irq, unsigned int *is_ipi) 287 { 288 struct mpic *mpic = mpics; 289 290 while(mpic) { 291 /* search IPIs first since they may override the main interrupts */ 292 if (irq >= mpic->ipi_offset && irq < (mpic->ipi_offset + 4)) { 293 if (is_ipi) 294 *is_ipi = 1; 295 return mpic; 296 } 297 
if (irq >= mpic->irq_offset && 298 irq < (mpic->irq_offset + mpic->irq_count)) { 299 if (is_ipi) 300 *is_ipi = 0; 301 return mpic; 302 } 303 mpic = mpic -> next; 304 } 305 return NULL; 306 } 307 308 /* Convert a cpu mask from logical to physical cpu numbers. */ 309 static inline u32 mpic_physmask(u32 cpumask) 310 { 311 int i; 312 u32 mask = 0; 313 314 for (i = 0; i < NR_CPUS; ++i, cpumask >>= 1) 315 mask |= (cpumask & 1) << get_hard_smp_processor_id(i); 316 return mask; 317 } 318 319 #ifdef CONFIG_SMP 320 /* Get the mpic structure from the IPI number */ 321 static inline struct mpic * mpic_from_ipi(unsigned int ipi) 322 { 323 return container_of(irq_desc[ipi].handler, struct mpic, hc_ipi); 324 } 325 #endif 326 327 /* Get the mpic structure from the irq number */ 328 static inline struct mpic * mpic_from_irq(unsigned int irq) 329 { 330 return container_of(irq_desc[irq].handler, struct mpic, hc_irq); 331 } 332 333 /* Send an EOI */ 334 static inline void mpic_eoi(struct mpic *mpic) 335 { 336 mpic_cpu_write(MPIC_CPU_EOI, 0); 337 (void)mpic_cpu_read(MPIC_CPU_WHOAMI); 338 } 339 340 #ifdef CONFIG_SMP 341 static irqreturn_t mpic_ipi_action(int irq, void *dev_id, struct pt_regs *regs) 342 { 343 struct mpic *mpic = dev_id; 344 345 smp_message_recv(irq - mpic->ipi_offset, regs); 346 return IRQ_HANDLED; 347 } 348 #endif /* CONFIG_SMP */ 349 350 /* 351 * Linux descriptor level callbacks 352 */ 353 354 355 static void mpic_enable_irq(unsigned int irq) 356 { 357 unsigned int loops = 100000; 358 struct mpic *mpic = mpic_from_irq(irq); 359 unsigned int src = irq - mpic->irq_offset; 360 361 DBG("%p: %s: enable_irq: %d (src %d)\n", mpic, mpic->name, irq, src); 362 363 mpic_irq_write(src, MPIC_IRQ_VECTOR_PRI, 364 mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) & 365 ~MPIC_VECPRI_MASK); 366 367 /* make sure mask gets to controller before we return to user */ 368 do { 369 if (!loops--) { 370 printk(KERN_ERR "mpic_enable_irq timeout\n"); 371 break; 372 } 373 } while(mpic_irq_read(src, 
MPIC_IRQ_VECTOR_PRI) & MPIC_VECPRI_MASK); 374 } 375 376 static void mpic_disable_irq(unsigned int irq) 377 { 378 unsigned int loops = 100000; 379 struct mpic *mpic = mpic_from_irq(irq); 380 unsigned int src = irq - mpic->irq_offset; 381 382 DBG("%s: disable_irq: %d (src %d)\n", mpic->name, irq, src); 383 384 mpic_irq_write(src, MPIC_IRQ_VECTOR_PRI, 385 mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) | 386 MPIC_VECPRI_MASK); 387 388 /* make sure mask gets to controller before we return to user */ 389 do { 390 if (!loops--) { 391 printk(KERN_ERR "mpic_enable_irq timeout\n"); 392 break; 393 } 394 } while(!(mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) & MPIC_VECPRI_MASK)); 395 } 396 397 static void mpic_end_irq(unsigned int irq) 398 { 399 struct mpic *mpic = mpic_from_irq(irq); 400 401 DBG("%s: end_irq: %d\n", mpic->name, irq); 402 403 /* We always EOI on end_irq() even for edge interrupts since that 404 * should only lower the priority, the MPIC should have properly 405 * latched another edge interrupt coming in anyway 406 */ 407 408 #ifdef CONFIG_MPIC_BROKEN_U3 409 if (mpic->flags & MPIC_BROKEN_U3) { 410 unsigned int src = irq - mpic->irq_offset; 411 if (mpic_is_ht_interrupt(mpic, src)) 412 mpic_apic_end_irq(mpic, src); 413 } 414 #endif /* CONFIG_MPIC_BROKEN_U3 */ 415 416 mpic_eoi(mpic); 417 } 418 419 #ifdef CONFIG_SMP 420 421 static void mpic_enable_ipi(unsigned int irq) 422 { 423 struct mpic *mpic = mpic_from_ipi(irq); 424 unsigned int src = irq - mpic->ipi_offset; 425 426 DBG("%s: enable_ipi: %d (ipi %d)\n", mpic->name, irq, src); 427 mpic_ipi_write(src, mpic_ipi_read(src) & ~MPIC_VECPRI_MASK); 428 } 429 430 static void mpic_disable_ipi(unsigned int irq) 431 { 432 /* NEVER disable an IPI... that's just plain wrong! */ 433 } 434 435 static void mpic_end_ipi(unsigned int irq) 436 { 437 struct mpic *mpic = mpic_from_ipi(irq); 438 439 /* 440 * IPIs are marked IRQ_PER_CPU. This has the side effect of 441 * preventing the IRQ_PENDING/IRQ_INPROGRESS logic from 442 * applying to them. 
We EOI them late to avoid re-entering. 443 * We mark IPI's with SA_INTERRUPT as they must run with 444 * irqs disabled. 445 */ 446 mpic_eoi(mpic); 447 } 448 449 #endif /* CONFIG_SMP */ 450 451 static void mpic_set_affinity(unsigned int irq, cpumask_t cpumask) 452 { 453 struct mpic *mpic = mpic_from_irq(irq); 454 455 cpumask_t tmp; 456 457 cpus_and(tmp, cpumask, cpu_online_map); 458 459 mpic_irq_write(irq - mpic->irq_offset, MPIC_IRQ_DESTINATION, 460 mpic_physmask(cpus_addr(tmp)[0])); 461 } 462 463 464 /* 465 * Exported functions 466 */ 467 468 469 struct mpic * __init mpic_alloc(unsigned long phys_addr, 470 unsigned int flags, 471 unsigned int isu_size, 472 unsigned int irq_offset, 473 unsigned int irq_count, 474 unsigned int ipi_offset, 475 unsigned char *senses, 476 unsigned int senses_count, 477 const char *name) 478 { 479 struct mpic *mpic; 480 u32 reg; 481 const char *vers; 482 int i; 483 484 mpic = alloc_bootmem(sizeof(struct mpic)); 485 if (mpic == NULL) 486 return NULL; 487 488 489 memset(mpic, 0, sizeof(struct mpic)); 490 mpic->name = name; 491 492 mpic->hc_irq.typename = name; 493 mpic->hc_irq.enable = mpic_enable_irq; 494 mpic->hc_irq.disable = mpic_disable_irq; 495 mpic->hc_irq.end = mpic_end_irq; 496 if (flags & MPIC_PRIMARY) 497 mpic->hc_irq.set_affinity = mpic_set_affinity; 498 #ifdef CONFIG_SMP 499 mpic->hc_ipi.typename = name; 500 mpic->hc_ipi.enable = mpic_enable_ipi; 501 mpic->hc_ipi.disable = mpic_disable_ipi; 502 mpic->hc_ipi.end = mpic_end_ipi; 503 #endif /* CONFIG_SMP */ 504 505 mpic->flags = flags; 506 mpic->isu_size = isu_size; 507 mpic->irq_offset = irq_offset; 508 mpic->irq_count = irq_count; 509 mpic->ipi_offset = ipi_offset; 510 mpic->num_sources = 0; /* so far */ 511 mpic->senses = senses; 512 mpic->senses_count = senses_count; 513 514 /* Map the global registers */ 515 mpic->gregs = ioremap(phys_addr + MPIC_GREG_BASE, 0x1000); 516 mpic->tmregs = mpic->gregs + ((MPIC_TIMER_BASE - MPIC_GREG_BASE) >> 2); 517 BUG_ON(mpic->gregs == NULL); 
518 519 /* Reset */ 520 if (flags & MPIC_WANTS_RESET) { 521 mpic_write(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0, 522 mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0) 523 | MPIC_GREG_GCONF_RESET); 524 while( mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0) 525 & MPIC_GREG_GCONF_RESET) 526 mb(); 527 } 528 529 /* Read feature register, calculate num CPUs and, for non-ISU 530 * MPICs, num sources as well. On ISU MPICs, sources are counted 531 * as ISUs are added 532 */ 533 reg = mpic_read(mpic->gregs, MPIC_GREG_FEATURE_0); 534 mpic->num_cpus = ((reg & MPIC_GREG_FEATURE_LAST_CPU_MASK) 535 >> MPIC_GREG_FEATURE_LAST_CPU_SHIFT) + 1; 536 if (isu_size == 0) 537 mpic->num_sources = ((reg & MPIC_GREG_FEATURE_LAST_SRC_MASK) 538 >> MPIC_GREG_FEATURE_LAST_SRC_SHIFT) + 1; 539 540 /* Map the per-CPU registers */ 541 for (i = 0; i < mpic->num_cpus; i++) { 542 mpic->cpuregs[i] = ioremap(phys_addr + MPIC_CPU_BASE + 543 i * MPIC_CPU_STRIDE, 0x1000); 544 BUG_ON(mpic->cpuregs[i] == NULL); 545 } 546 547 /* Initialize main ISU if none provided */ 548 if (mpic->isu_size == 0) { 549 mpic->isu_size = mpic->num_sources; 550 mpic->isus[0] = ioremap(phys_addr + MPIC_IRQ_BASE, 551 MPIC_IRQ_STRIDE * mpic->isu_size); 552 BUG_ON(mpic->isus[0] == NULL); 553 } 554 mpic->isu_shift = 1 + __ilog2(mpic->isu_size - 1); 555 mpic->isu_mask = (1 << mpic->isu_shift) - 1; 556 557 /* Display version */ 558 switch (reg & MPIC_GREG_FEATURE_VERSION_MASK) { 559 case 1: 560 vers = "1.0"; 561 break; 562 case 2: 563 vers = "1.2"; 564 break; 565 case 3: 566 vers = "1.3"; 567 break; 568 default: 569 vers = "<unknown>"; 570 break; 571 } 572 printk(KERN_INFO "mpic: Setting up MPIC \"%s\" version %s at %lx, max %d CPUs\n", 573 name, vers, phys_addr, mpic->num_cpus); 574 printk(KERN_INFO "mpic: ISU size: %d, shift: %d, mask: %x\n", mpic->isu_size, 575 mpic->isu_shift, mpic->isu_mask); 576 577 mpic->next = mpics; 578 mpics = mpic; 579 580 if (flags & MPIC_PRIMARY) 581 mpic_primary = mpic; 582 583 return mpic; 584 } 585 586 void 
__init mpic_assign_isu(struct mpic *mpic, unsigned int isu_num, 587 unsigned long phys_addr) 588 { 589 unsigned int isu_first = isu_num * mpic->isu_size; 590 591 BUG_ON(isu_num >= MPIC_MAX_ISU); 592 593 mpic->isus[isu_num] = ioremap(phys_addr, MPIC_IRQ_STRIDE * mpic->isu_size); 594 if ((isu_first + mpic->isu_size) > mpic->num_sources) 595 mpic->num_sources = isu_first + mpic->isu_size; 596 } 597 598 void __init mpic_setup_cascade(unsigned int irq, mpic_cascade_t handler, 599 void *data) 600 { 601 struct mpic *mpic = mpic_find(irq, NULL); 602 unsigned long flags; 603 604 /* Synchronization here is a bit dodgy, so don't try to replace cascade 605 * interrupts on the fly too often ... but normally it's set up at boot. 606 */ 607 spin_lock_irqsave(&mpic_lock, flags); 608 if (mpic->cascade) 609 mpic_disable_irq(mpic->cascade_vec + mpic->irq_offset); 610 mpic->cascade = NULL; 611 wmb(); 612 mpic->cascade_vec = irq - mpic->irq_offset; 613 mpic->cascade_data = data; 614 wmb(); 615 mpic->cascade = handler; 616 mpic_enable_irq(irq); 617 spin_unlock_irqrestore(&mpic_lock, flags); 618 } 619 620 void __init mpic_init(struct mpic *mpic) 621 { 622 int i; 623 624 BUG_ON(mpic->num_sources == 0); 625 626 printk(KERN_INFO "mpic: Initializing for %d sources\n", mpic->num_sources); 627 628 /* Set current processor priority to max */ 629 mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, 0xf); 630 631 /* Initialize timers: just disable them all */ 632 for (i = 0; i < 4; i++) { 633 mpic_write(mpic->tmregs, 634 i * MPIC_TIMER_STRIDE + MPIC_TIMER_DESTINATION, 0); 635 mpic_write(mpic->tmregs, 636 i * MPIC_TIMER_STRIDE + MPIC_TIMER_VECTOR_PRI, 637 MPIC_VECPRI_MASK | 638 (MPIC_VEC_TIMER_0 + i)); 639 } 640 641 /* Initialize IPIs to our reserved vectors and mark them disabled for now */ 642 mpic_test_broken_ipi(mpic); 643 for (i = 0; i < 4; i++) { 644 mpic_ipi_write(i, 645 MPIC_VECPRI_MASK | 646 (10 << MPIC_VECPRI_PRIORITY_SHIFT) | 647 (MPIC_VEC_IPI_0 + i)); 648 #ifdef CONFIG_SMP 649 if (!(mpic->flags & 
MPIC_PRIMARY)) 650 continue; 651 irq_desc[mpic->ipi_offset+i].status |= IRQ_PER_CPU; 652 irq_desc[mpic->ipi_offset+i].handler = &mpic->hc_ipi; 653 #endif /* CONFIG_SMP */ 654 } 655 656 /* Initialize interrupt sources */ 657 if (mpic->irq_count == 0) 658 mpic->irq_count = mpic->num_sources; 659 660 #ifdef CONFIG_MPIC_BROKEN_U3 661 /* Do the ioapic fixups on U3 broken mpic */ 662 DBG("MPIC flags: %x\n", mpic->flags); 663 if ((mpic->flags & MPIC_BROKEN_U3) && (mpic->flags & MPIC_PRIMARY)) 664 mpic_scan_ioapics(mpic); 665 #endif /* CONFIG_MPIC_BROKEN_U3 */ 666 667 for (i = 0; i < mpic->num_sources; i++) { 668 /* start with vector = source number, and masked */ 669 u32 vecpri = MPIC_VECPRI_MASK | i | (8 << MPIC_VECPRI_PRIORITY_SHIFT); 670 int level = 0; 671 672 /* if it's an IPI, we skip it */ 673 if ((mpic->irq_offset + i) >= (mpic->ipi_offset + i) && 674 (mpic->irq_offset + i) < (mpic->ipi_offset + i + 4)) 675 continue; 676 677 /* do senses munging */ 678 if (mpic->senses && i < mpic->senses_count) { 679 if (mpic->senses[i] & IRQ_SENSE_LEVEL) 680 vecpri |= MPIC_VECPRI_SENSE_LEVEL; 681 if (mpic->senses[i] & IRQ_POLARITY_POSITIVE) 682 vecpri |= MPIC_VECPRI_POLARITY_POSITIVE; 683 } else 684 vecpri |= MPIC_VECPRI_SENSE_LEVEL; 685 686 /* remember if it was a level interrupts */ 687 level = (vecpri & MPIC_VECPRI_SENSE_LEVEL); 688 689 /* deal with broken U3 */ 690 if (mpic->flags & MPIC_BROKEN_U3) { 691 #ifdef CONFIG_MPIC_BROKEN_U3 692 if (mpic_is_ht_interrupt(mpic, i)) { 693 vecpri &= ~(MPIC_VECPRI_SENSE_MASK | 694 MPIC_VECPRI_POLARITY_MASK); 695 vecpri |= MPIC_VECPRI_POLARITY_POSITIVE; 696 } 697 #else 698 printk(KERN_ERR "mpic: BROKEN_U3 set, but CONFIG doesn't match\n"); 699 #endif 700 } 701 702 DBG("setup source %d, vecpri: %08x, level: %d\n", i, vecpri, 703 (level != 0)); 704 705 /* init hw */ 706 mpic_irq_write(i, MPIC_IRQ_VECTOR_PRI, vecpri); 707 mpic_irq_write(i, MPIC_IRQ_DESTINATION, 708 1 << hard_smp_processor_id()); 709 710 /* init linux descriptors */ 711 if (i < 
mpic->irq_count) { 712 irq_desc[mpic->irq_offset+i].status = level ? IRQ_LEVEL : 0; 713 irq_desc[mpic->irq_offset+i].handler = &mpic->hc_irq; 714 } 715 } 716 717 /* Init spurrious vector */ 718 mpic_write(mpic->gregs, MPIC_GREG_SPURIOUS, MPIC_VEC_SPURRIOUS); 719 720 /* Disable 8259 passthrough */ 721 mpic_write(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0, 722 mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_0) 723 | MPIC_GREG_GCONF_8259_PTHROU_DIS); 724 725 /* Set current processor priority to 0 */ 726 mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, 0); 727 } 728 729 730 731 void mpic_irq_set_priority(unsigned int irq, unsigned int pri) 732 { 733 int is_ipi; 734 struct mpic *mpic = mpic_find(irq, &is_ipi); 735 unsigned long flags; 736 u32 reg; 737 738 spin_lock_irqsave(&mpic_lock, flags); 739 if (is_ipi) { 740 reg = mpic_ipi_read(irq - mpic->ipi_offset) & 741 ~MPIC_VECPRI_PRIORITY_MASK; 742 mpic_ipi_write(irq - mpic->ipi_offset, 743 reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT)); 744 } else { 745 reg = mpic_irq_read(irq - mpic->irq_offset,MPIC_IRQ_VECTOR_PRI) 746 & ~MPIC_VECPRI_PRIORITY_MASK; 747 mpic_irq_write(irq - mpic->irq_offset, MPIC_IRQ_VECTOR_PRI, 748 reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT)); 749 } 750 spin_unlock_irqrestore(&mpic_lock, flags); 751 } 752 753 unsigned int mpic_irq_get_priority(unsigned int irq) 754 { 755 int is_ipi; 756 struct mpic *mpic = mpic_find(irq, &is_ipi); 757 unsigned long flags; 758 u32 reg; 759 760 spin_lock_irqsave(&mpic_lock, flags); 761 if (is_ipi) 762 reg = mpic_ipi_read(irq - mpic->ipi_offset); 763 else 764 reg = mpic_irq_read(irq - mpic->irq_offset, MPIC_IRQ_VECTOR_PRI); 765 spin_unlock_irqrestore(&mpic_lock, flags); 766 return (reg & MPIC_VECPRI_PRIORITY_MASK) >> MPIC_VECPRI_PRIORITY_SHIFT; 767 } 768 769 void mpic_setup_this_cpu(void) 770 { 771 #ifdef CONFIG_SMP 772 struct mpic *mpic = mpic_primary; 773 unsigned long flags; 774 u32 msk = 1 << hard_smp_processor_id(); 775 unsigned int i; 776 777 BUG_ON(mpic == NULL); 778 779 DBG("%s: 
setup_this_cpu(%d)\n", mpic->name, hard_smp_processor_id()); 780 781 spin_lock_irqsave(&mpic_lock, flags); 782 783 /* let the mpic know we want intrs. default affinity is 0xffffffff 784 * until changed via /proc. That's how it's done on x86. If we want 785 * it differently, then we should make sure we also change the default 786 * values of irq_affinity in irq.c. 787 */ 788 if (distribute_irqs) { 789 for (i = 0; i < mpic->num_sources ; i++) 790 mpic_irq_write(i, MPIC_IRQ_DESTINATION, 791 mpic_irq_read(i, MPIC_IRQ_DESTINATION) | msk); 792 } 793 794 /* Set current processor priority to 0 */ 795 mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, 0); 796 797 spin_unlock_irqrestore(&mpic_lock, flags); 798 #endif /* CONFIG_SMP */ 799 } 800 801 int mpic_cpu_get_priority(void) 802 { 803 struct mpic *mpic = mpic_primary; 804 805 return mpic_cpu_read(MPIC_CPU_CURRENT_TASK_PRI); 806 } 807 808 void mpic_cpu_set_priority(int prio) 809 { 810 struct mpic *mpic = mpic_primary; 811 812 prio &= MPIC_CPU_TASKPRI_MASK; 813 mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, prio); 814 } 815 816 /* 817 * XXX: someone who knows mpic should check this. 818 * do we need to eoi the ipi including for kexec cpu here (see xics comments)? 819 * or can we reset the mpic in the new kernel? 820 */ 821 void mpic_teardown_this_cpu(int secondary) 822 { 823 struct mpic *mpic = mpic_primary; 824 unsigned long flags; 825 u32 msk = 1 << hard_smp_processor_id(); 826 unsigned int i; 827 828 BUG_ON(mpic == NULL); 829 830 DBG("%s: teardown_this_cpu(%d)\n", mpic->name, hard_smp_processor_id()); 831 spin_lock_irqsave(&mpic_lock, flags); 832 833 /* let the mpic know we don't want intrs. 
*/ 834 for (i = 0; i < mpic->num_sources ; i++) 835 mpic_irq_write(i, MPIC_IRQ_DESTINATION, 836 mpic_irq_read(i, MPIC_IRQ_DESTINATION) & ~msk); 837 838 /* Set current processor priority to max */ 839 mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, 0xf); 840 841 spin_unlock_irqrestore(&mpic_lock, flags); 842 } 843 844 845 void mpic_send_ipi(unsigned int ipi_no, unsigned int cpu_mask) 846 { 847 struct mpic *mpic = mpic_primary; 848 849 BUG_ON(mpic == NULL); 850 851 DBG("%s: send_ipi(ipi_no: %d)\n", mpic->name, ipi_no); 852 853 mpic_cpu_write(MPIC_CPU_IPI_DISPATCH_0 + ipi_no * 0x10, 854 mpic_physmask(cpu_mask & cpus_addr(cpu_online_map)[0])); 855 } 856 857 int mpic_get_one_irq(struct mpic *mpic, struct pt_regs *regs) 858 { 859 u32 irq; 860 861 irq = mpic_cpu_read(MPIC_CPU_INTACK) & MPIC_VECPRI_VECTOR_MASK; 862 DBG("%s: get_one_irq(): %d\n", mpic->name, irq); 863 864 if (mpic->cascade && irq == mpic->cascade_vec) { 865 DBG("%s: cascading ...\n", mpic->name); 866 irq = mpic->cascade(regs, mpic->cascade_data); 867 mpic_eoi(mpic); 868 return irq; 869 } 870 if (unlikely(irq == MPIC_VEC_SPURRIOUS)) 871 return -1; 872 if (irq < MPIC_VEC_IPI_0) 873 return irq + mpic->irq_offset; 874 DBG("%s: ipi %d !\n", mpic->name, irq - MPIC_VEC_IPI_0); 875 return irq - MPIC_VEC_IPI_0 + mpic->ipi_offset; 876 } 877 878 int mpic_get_irq(struct pt_regs *regs) 879 { 880 struct mpic *mpic = mpic_primary; 881 882 BUG_ON(mpic == NULL); 883 884 return mpic_get_one_irq(mpic, regs); 885 } 886 887 888 #ifdef CONFIG_SMP 889 void mpic_request_ipis(void) 890 { 891 struct mpic *mpic = mpic_primary; 892 893 BUG_ON(mpic == NULL); 894 895 printk("requesting IPIs ... 
\n"); 896 897 /* IPIs are marked SA_INTERRUPT as they must run with irqs disabled */ 898 request_irq(mpic->ipi_offset+0, mpic_ipi_action, SA_INTERRUPT, 899 "IPI0 (call function)", mpic); 900 request_irq(mpic->ipi_offset+1, mpic_ipi_action, SA_INTERRUPT, 901 "IPI1 (reschedule)", mpic); 902 request_irq(mpic->ipi_offset+2, mpic_ipi_action, SA_INTERRUPT, 903 "IPI2 (unused)", mpic); 904 request_irq(mpic->ipi_offset+3, mpic_ipi_action, SA_INTERRUPT, 905 "IPI3 (debugger break)", mpic); 906 907 printk("IPIs requested... \n"); 908 } 909 910 void smp_mpic_message_pass(int target, int msg) 911 { 912 /* make sure we're sending something that translates to an IPI */ 913 if ((unsigned int)msg > 3) { 914 printk("SMP %d: smp_message_pass: unknown msg %d\n", 915 smp_processor_id(), msg); 916 return; 917 } 918 switch (target) { 919 case MSG_ALL: 920 mpic_send_ipi(msg, 0xffffffff); 921 break; 922 case MSG_ALL_BUT_SELF: 923 mpic_send_ipi(msg, 0xffffffff & ~(1 << smp_processor_id())); 924 break; 925 default: 926 mpic_send_ipi(msg, 1 << target); 927 break; 928 } 929 } 930 #endif /* CONFIG_SMP */ 931