1 /* 2 * arch/powerpc/kernel/mpic.c 3 * 4 * Driver for interrupt controllers following the OpenPIC standard, the 5 * common implementation beeing IBM's MPIC. This driver also can deal 6 * with various broken implementations of this HW. 7 * 8 * Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp. 9 * Copyright 2010-2011 Freescale Semiconductor, Inc. 10 * 11 * This file is subject to the terms and conditions of the GNU General Public 12 * License. See the file COPYING in the main directory of this archive 13 * for more details. 14 */ 15 16 #undef DEBUG 17 #undef DEBUG_IPI 18 #undef DEBUG_IRQ 19 #undef DEBUG_LOW 20 21 #include <linux/types.h> 22 #include <linux/kernel.h> 23 #include <linux/init.h> 24 #include <linux/irq.h> 25 #include <linux/smp.h> 26 #include <linux/interrupt.h> 27 #include <linux/bootmem.h> 28 #include <linux/spinlock.h> 29 #include <linux/pci.h> 30 #include <linux/slab.h> 31 #include <linux/syscore_ops.h> 32 33 #include <asm/ptrace.h> 34 #include <asm/signal.h> 35 #include <asm/io.h> 36 #include <asm/pgtable.h> 37 #include <asm/irq.h> 38 #include <asm/machdep.h> 39 #include <asm/mpic.h> 40 #include <asm/smp.h> 41 42 #include "mpic.h" 43 44 #ifdef DEBUG 45 #define DBG(fmt...) printk(fmt) 46 #else 47 #define DBG(fmt...) 
48 #endif 49 50 static struct mpic *mpics; 51 static struct mpic *mpic_primary; 52 static DEFINE_RAW_SPINLOCK(mpic_lock); 53 54 #ifdef CONFIG_PPC32 /* XXX for now */ 55 #ifdef CONFIG_IRQ_ALL_CPUS 56 #define distribute_irqs (1) 57 #else 58 #define distribute_irqs (0) 59 #endif 60 #endif 61 62 #ifdef CONFIG_MPIC_WEIRD 63 static u32 mpic_infos[][MPIC_IDX_END] = { 64 [0] = { /* Original OpenPIC compatible MPIC */ 65 MPIC_GREG_BASE, 66 MPIC_GREG_FEATURE_0, 67 MPIC_GREG_GLOBAL_CONF_0, 68 MPIC_GREG_VENDOR_ID, 69 MPIC_GREG_IPI_VECTOR_PRI_0, 70 MPIC_GREG_IPI_STRIDE, 71 MPIC_GREG_SPURIOUS, 72 MPIC_GREG_TIMER_FREQ, 73 74 MPIC_TIMER_BASE, 75 MPIC_TIMER_STRIDE, 76 MPIC_TIMER_CURRENT_CNT, 77 MPIC_TIMER_BASE_CNT, 78 MPIC_TIMER_VECTOR_PRI, 79 MPIC_TIMER_DESTINATION, 80 81 MPIC_CPU_BASE, 82 MPIC_CPU_STRIDE, 83 MPIC_CPU_IPI_DISPATCH_0, 84 MPIC_CPU_IPI_DISPATCH_STRIDE, 85 MPIC_CPU_CURRENT_TASK_PRI, 86 MPIC_CPU_WHOAMI, 87 MPIC_CPU_INTACK, 88 MPIC_CPU_EOI, 89 MPIC_CPU_MCACK, 90 91 MPIC_IRQ_BASE, 92 MPIC_IRQ_STRIDE, 93 MPIC_IRQ_VECTOR_PRI, 94 MPIC_VECPRI_VECTOR_MASK, 95 MPIC_VECPRI_POLARITY_POSITIVE, 96 MPIC_VECPRI_POLARITY_NEGATIVE, 97 MPIC_VECPRI_SENSE_LEVEL, 98 MPIC_VECPRI_SENSE_EDGE, 99 MPIC_VECPRI_POLARITY_MASK, 100 MPIC_VECPRI_SENSE_MASK, 101 MPIC_IRQ_DESTINATION 102 }, 103 [1] = { /* Tsi108/109 PIC */ 104 TSI108_GREG_BASE, 105 TSI108_GREG_FEATURE_0, 106 TSI108_GREG_GLOBAL_CONF_0, 107 TSI108_GREG_VENDOR_ID, 108 TSI108_GREG_IPI_VECTOR_PRI_0, 109 TSI108_GREG_IPI_STRIDE, 110 TSI108_GREG_SPURIOUS, 111 TSI108_GREG_TIMER_FREQ, 112 113 TSI108_TIMER_BASE, 114 TSI108_TIMER_STRIDE, 115 TSI108_TIMER_CURRENT_CNT, 116 TSI108_TIMER_BASE_CNT, 117 TSI108_TIMER_VECTOR_PRI, 118 TSI108_TIMER_DESTINATION, 119 120 TSI108_CPU_BASE, 121 TSI108_CPU_STRIDE, 122 TSI108_CPU_IPI_DISPATCH_0, 123 TSI108_CPU_IPI_DISPATCH_STRIDE, 124 TSI108_CPU_CURRENT_TASK_PRI, 125 TSI108_CPU_WHOAMI, 126 TSI108_CPU_INTACK, 127 TSI108_CPU_EOI, 128 TSI108_CPU_MCACK, 129 130 TSI108_IRQ_BASE, 131 TSI108_IRQ_STRIDE, 132 
TSI108_IRQ_VECTOR_PRI, 133 TSI108_VECPRI_VECTOR_MASK, 134 TSI108_VECPRI_POLARITY_POSITIVE, 135 TSI108_VECPRI_POLARITY_NEGATIVE, 136 TSI108_VECPRI_SENSE_LEVEL, 137 TSI108_VECPRI_SENSE_EDGE, 138 TSI108_VECPRI_POLARITY_MASK, 139 TSI108_VECPRI_SENSE_MASK, 140 TSI108_IRQ_DESTINATION 141 }, 142 }; 143 144 #define MPIC_INFO(name) mpic->hw_set[MPIC_IDX_##name] 145 146 #else /* CONFIG_MPIC_WEIRD */ 147 148 #define MPIC_INFO(name) MPIC_##name 149 150 #endif /* CONFIG_MPIC_WEIRD */ 151 152 static inline unsigned int mpic_processor_id(struct mpic *mpic) 153 { 154 unsigned int cpu = 0; 155 156 if (mpic->flags & MPIC_PRIMARY) 157 cpu = hard_smp_processor_id(); 158 159 return cpu; 160 } 161 162 /* 163 * Register accessor functions 164 */ 165 166 167 static inline u32 _mpic_read(enum mpic_reg_type type, 168 struct mpic_reg_bank *rb, 169 unsigned int reg) 170 { 171 switch(type) { 172 #ifdef CONFIG_PPC_DCR 173 case mpic_access_dcr: 174 return dcr_read(rb->dhost, reg); 175 #endif 176 case mpic_access_mmio_be: 177 return in_be32(rb->base + (reg >> 2)); 178 case mpic_access_mmio_le: 179 default: 180 return in_le32(rb->base + (reg >> 2)); 181 } 182 } 183 184 static inline void _mpic_write(enum mpic_reg_type type, 185 struct mpic_reg_bank *rb, 186 unsigned int reg, u32 value) 187 { 188 switch(type) { 189 #ifdef CONFIG_PPC_DCR 190 case mpic_access_dcr: 191 dcr_write(rb->dhost, reg, value); 192 break; 193 #endif 194 case mpic_access_mmio_be: 195 out_be32(rb->base + (reg >> 2), value); 196 break; 197 case mpic_access_mmio_le: 198 default: 199 out_le32(rb->base + (reg >> 2), value); 200 break; 201 } 202 } 203 204 static inline u32 _mpic_ipi_read(struct mpic *mpic, unsigned int ipi) 205 { 206 enum mpic_reg_type type = mpic->reg_type; 207 unsigned int offset = MPIC_INFO(GREG_IPI_VECTOR_PRI_0) + 208 (ipi * MPIC_INFO(GREG_IPI_STRIDE)); 209 210 if ((mpic->flags & MPIC_BROKEN_IPI) && type == mpic_access_mmio_le) 211 type = mpic_access_mmio_be; 212 return _mpic_read(type, &mpic->gregs, offset); 213 
} 214 215 static inline void _mpic_ipi_write(struct mpic *mpic, unsigned int ipi, u32 value) 216 { 217 unsigned int offset = MPIC_INFO(GREG_IPI_VECTOR_PRI_0) + 218 (ipi * MPIC_INFO(GREG_IPI_STRIDE)); 219 220 _mpic_write(mpic->reg_type, &mpic->gregs, offset, value); 221 } 222 223 static inline u32 _mpic_tm_read(struct mpic *mpic, unsigned int tm) 224 { 225 unsigned int offset = MPIC_INFO(TIMER_VECTOR_PRI) + 226 ((tm & 3) * MPIC_INFO(TIMER_STRIDE)); 227 228 if (tm >= 4) 229 offset += 0x1000 / 4; 230 231 return _mpic_read(mpic->reg_type, &mpic->tmregs, offset); 232 } 233 234 static inline void _mpic_tm_write(struct mpic *mpic, unsigned int tm, u32 value) 235 { 236 unsigned int offset = MPIC_INFO(TIMER_VECTOR_PRI) + 237 ((tm & 3) * MPIC_INFO(TIMER_STRIDE)); 238 239 if (tm >= 4) 240 offset += 0x1000 / 4; 241 242 _mpic_write(mpic->reg_type, &mpic->tmregs, offset, value); 243 } 244 245 static inline u32 _mpic_cpu_read(struct mpic *mpic, unsigned int reg) 246 { 247 unsigned int cpu = mpic_processor_id(mpic); 248 249 return _mpic_read(mpic->reg_type, &mpic->cpuregs[cpu], reg); 250 } 251 252 static inline void _mpic_cpu_write(struct mpic *mpic, unsigned int reg, u32 value) 253 { 254 unsigned int cpu = mpic_processor_id(mpic); 255 256 _mpic_write(mpic->reg_type, &mpic->cpuregs[cpu], reg, value); 257 } 258 259 static inline u32 _mpic_irq_read(struct mpic *mpic, unsigned int src_no, unsigned int reg) 260 { 261 unsigned int isu = src_no >> mpic->isu_shift; 262 unsigned int idx = src_no & mpic->isu_mask; 263 unsigned int val; 264 265 val = _mpic_read(mpic->reg_type, &mpic->isus[isu], 266 reg + (idx * MPIC_INFO(IRQ_STRIDE))); 267 #ifdef CONFIG_MPIC_BROKEN_REGREAD 268 if (reg == 0) 269 val = (val & (MPIC_VECPRI_MASK | MPIC_VECPRI_ACTIVITY)) | 270 mpic->isu_reg0_shadow[src_no]; 271 #endif 272 return val; 273 } 274 275 static inline void _mpic_irq_write(struct mpic *mpic, unsigned int src_no, 276 unsigned int reg, u32 value) 277 { 278 unsigned int isu = src_no >> mpic->isu_shift; 279 
unsigned int idx = src_no & mpic->isu_mask; 280 281 _mpic_write(mpic->reg_type, &mpic->isus[isu], 282 reg + (idx * MPIC_INFO(IRQ_STRIDE)), value); 283 284 #ifdef CONFIG_MPIC_BROKEN_REGREAD 285 if (reg == 0) 286 mpic->isu_reg0_shadow[src_no] = 287 value & ~(MPIC_VECPRI_MASK | MPIC_VECPRI_ACTIVITY); 288 #endif 289 } 290 291 #define mpic_read(b,r) _mpic_read(mpic->reg_type,&(b),(r)) 292 #define mpic_write(b,r,v) _mpic_write(mpic->reg_type,&(b),(r),(v)) 293 #define mpic_ipi_read(i) _mpic_ipi_read(mpic,(i)) 294 #define mpic_ipi_write(i,v) _mpic_ipi_write(mpic,(i),(v)) 295 #define mpic_tm_read(i) _mpic_tm_read(mpic,(i)) 296 #define mpic_tm_write(i,v) _mpic_tm_write(mpic,(i),(v)) 297 #define mpic_cpu_read(i) _mpic_cpu_read(mpic,(i)) 298 #define mpic_cpu_write(i,v) _mpic_cpu_write(mpic,(i),(v)) 299 #define mpic_irq_read(s,r) _mpic_irq_read(mpic,(s),(r)) 300 #define mpic_irq_write(s,r,v) _mpic_irq_write(mpic,(s),(r),(v)) 301 302 303 /* 304 * Low level utility functions 305 */ 306 307 308 static void _mpic_map_mmio(struct mpic *mpic, phys_addr_t phys_addr, 309 struct mpic_reg_bank *rb, unsigned int offset, 310 unsigned int size) 311 { 312 rb->base = ioremap(phys_addr + offset, size); 313 BUG_ON(rb->base == NULL); 314 } 315 316 #ifdef CONFIG_PPC_DCR 317 static void _mpic_map_dcr(struct mpic *mpic, struct device_node *node, 318 struct mpic_reg_bank *rb, 319 unsigned int offset, unsigned int size) 320 { 321 const u32 *dbasep; 322 323 dbasep = of_get_property(node, "dcr-reg", NULL); 324 325 rb->dhost = dcr_map(node, *dbasep + offset, size); 326 BUG_ON(!DCR_MAP_OK(rb->dhost)); 327 } 328 329 static inline void mpic_map(struct mpic *mpic, struct device_node *node, 330 phys_addr_t phys_addr, struct mpic_reg_bank *rb, 331 unsigned int offset, unsigned int size) 332 { 333 if (mpic->flags & MPIC_USES_DCR) 334 _mpic_map_dcr(mpic, node, rb, offset, size); 335 else 336 _mpic_map_mmio(mpic, phys_addr, rb, offset, size); 337 } 338 #else /* CONFIG_PPC_DCR */ 339 #define mpic_map(m,n,p,b,o,s) 
_mpic_map_mmio(m,p,b,o,s) 340 #endif /* !CONFIG_PPC_DCR */ 341 342 343 344 /* Check if we have one of those nice broken MPICs with a flipped endian on 345 * reads from IPI registers 346 */ 347 static void __init mpic_test_broken_ipi(struct mpic *mpic) 348 { 349 u32 r; 350 351 mpic_write(mpic->gregs, MPIC_INFO(GREG_IPI_VECTOR_PRI_0), MPIC_VECPRI_MASK); 352 r = mpic_read(mpic->gregs, MPIC_INFO(GREG_IPI_VECTOR_PRI_0)); 353 354 if (r == le32_to_cpu(MPIC_VECPRI_MASK)) { 355 printk(KERN_INFO "mpic: Detected reversed IPI registers\n"); 356 mpic->flags |= MPIC_BROKEN_IPI; 357 } 358 } 359 360 #ifdef CONFIG_MPIC_U3_HT_IRQS 361 362 /* Test if an interrupt is sourced from HyperTransport (used on broken U3s) 363 * to force the edge setting on the MPIC and do the ack workaround. 364 */ 365 static inline int mpic_is_ht_interrupt(struct mpic *mpic, unsigned int source) 366 { 367 if (source >= 128 || !mpic->fixups) 368 return 0; 369 return mpic->fixups[source].base != NULL; 370 } 371 372 373 static inline void mpic_ht_end_irq(struct mpic *mpic, unsigned int source) 374 { 375 struct mpic_irq_fixup *fixup = &mpic->fixups[source]; 376 377 if (fixup->applebase) { 378 unsigned int soff = (fixup->index >> 3) & ~3; 379 unsigned int mask = 1U << (fixup->index & 0x1f); 380 writel(mask, fixup->applebase + soff); 381 } else { 382 raw_spin_lock(&mpic->fixup_lock); 383 writeb(0x11 + 2 * fixup->index, fixup->base + 2); 384 writel(fixup->data, fixup->base + 4); 385 raw_spin_unlock(&mpic->fixup_lock); 386 } 387 } 388 389 static void mpic_startup_ht_interrupt(struct mpic *mpic, unsigned int source, 390 bool level) 391 { 392 struct mpic_irq_fixup *fixup = &mpic->fixups[source]; 393 unsigned long flags; 394 u32 tmp; 395 396 if (fixup->base == NULL) 397 return; 398 399 DBG("startup_ht_interrupt(0x%x) index: %d\n", 400 source, fixup->index); 401 raw_spin_lock_irqsave(&mpic->fixup_lock, flags); 402 /* Enable and configure */ 403 writeb(0x10 + 2 * fixup->index, fixup->base + 2); 404 tmp = 
readl(fixup->base + 4); 405 tmp &= ~(0x23U); 406 if (level) 407 tmp |= 0x22; 408 writel(tmp, fixup->base + 4); 409 raw_spin_unlock_irqrestore(&mpic->fixup_lock, flags); 410 411 #ifdef CONFIG_PM 412 /* use the lowest bit inverted to the actual HW, 413 * set if this fixup was enabled, clear otherwise */ 414 mpic->save_data[source].fixup_data = tmp | 1; 415 #endif 416 } 417 418 static void mpic_shutdown_ht_interrupt(struct mpic *mpic, unsigned int source) 419 { 420 struct mpic_irq_fixup *fixup = &mpic->fixups[source]; 421 unsigned long flags; 422 u32 tmp; 423 424 if (fixup->base == NULL) 425 return; 426 427 DBG("shutdown_ht_interrupt(0x%x)\n", source); 428 429 /* Disable */ 430 raw_spin_lock_irqsave(&mpic->fixup_lock, flags); 431 writeb(0x10 + 2 * fixup->index, fixup->base + 2); 432 tmp = readl(fixup->base + 4); 433 tmp |= 1; 434 writel(tmp, fixup->base + 4); 435 raw_spin_unlock_irqrestore(&mpic->fixup_lock, flags); 436 437 #ifdef CONFIG_PM 438 /* use the lowest bit inverted to the actual HW, 439 * set if this fixup was enabled, clear otherwise */ 440 mpic->save_data[source].fixup_data = tmp & ~1; 441 #endif 442 } 443 444 #ifdef CONFIG_PCI_MSI 445 static void __init mpic_scan_ht_msi(struct mpic *mpic, u8 __iomem *devbase, 446 unsigned int devfn) 447 { 448 u8 __iomem *base; 449 u8 pos, flags; 450 u64 addr = 0; 451 452 for (pos = readb(devbase + PCI_CAPABILITY_LIST); pos != 0; 453 pos = readb(devbase + pos + PCI_CAP_LIST_NEXT)) { 454 u8 id = readb(devbase + pos + PCI_CAP_LIST_ID); 455 if (id == PCI_CAP_ID_HT) { 456 id = readb(devbase + pos + 3); 457 if ((id & HT_5BIT_CAP_MASK) == HT_CAPTYPE_MSI_MAPPING) 458 break; 459 } 460 } 461 462 if (pos == 0) 463 return; 464 465 base = devbase + pos; 466 467 flags = readb(base + HT_MSI_FLAGS); 468 if (!(flags & HT_MSI_FLAGS_FIXED)) { 469 addr = readl(base + HT_MSI_ADDR_LO) & HT_MSI_ADDR_LO_MASK; 470 addr = addr | ((u64)readl(base + HT_MSI_ADDR_HI) << 32); 471 } 472 473 printk(KERN_DEBUG "mpic: - HT:%02x.%x %s MSI mapping found @ 
0x%llx\n", 474 PCI_SLOT(devfn), PCI_FUNC(devfn), 475 flags & HT_MSI_FLAGS_ENABLE ? "enabled" : "disabled", addr); 476 477 if (!(flags & HT_MSI_FLAGS_ENABLE)) 478 writeb(flags | HT_MSI_FLAGS_ENABLE, base + HT_MSI_FLAGS); 479 } 480 #else 481 static void __init mpic_scan_ht_msi(struct mpic *mpic, u8 __iomem *devbase, 482 unsigned int devfn) 483 { 484 return; 485 } 486 #endif 487 488 static void __init mpic_scan_ht_pic(struct mpic *mpic, u8 __iomem *devbase, 489 unsigned int devfn, u32 vdid) 490 { 491 int i, irq, n; 492 u8 __iomem *base; 493 u32 tmp; 494 u8 pos; 495 496 for (pos = readb(devbase + PCI_CAPABILITY_LIST); pos != 0; 497 pos = readb(devbase + pos + PCI_CAP_LIST_NEXT)) { 498 u8 id = readb(devbase + pos + PCI_CAP_LIST_ID); 499 if (id == PCI_CAP_ID_HT) { 500 id = readb(devbase + pos + 3); 501 if ((id & HT_5BIT_CAP_MASK) == HT_CAPTYPE_IRQ) 502 break; 503 } 504 } 505 if (pos == 0) 506 return; 507 508 base = devbase + pos; 509 writeb(0x01, base + 2); 510 n = (readl(base + 4) >> 16) & 0xff; 511 512 printk(KERN_INFO "mpic: - HT:%02x.%x [0x%02x] vendor %04x device %04x" 513 " has %d irqs\n", 514 devfn >> 3, devfn & 0x7, pos, vdid & 0xffff, vdid >> 16, n + 1); 515 516 for (i = 0; i <= n; i++) { 517 writeb(0x10 + 2 * i, base + 2); 518 tmp = readl(base + 4); 519 irq = (tmp >> 16) & 0xff; 520 DBG("HT PIC index 0x%x, irq 0x%x, tmp: %08x\n", i, irq, tmp); 521 /* mask it , will be unmasked later */ 522 tmp |= 0x1; 523 writel(tmp, base + 4); 524 mpic->fixups[irq].index = i; 525 mpic->fixups[irq].base = base; 526 /* Apple HT PIC has a non-standard way of doing EOIs */ 527 if ((vdid & 0xffff) == 0x106b) 528 mpic->fixups[irq].applebase = devbase + 0x60; 529 else 530 mpic->fixups[irq].applebase = NULL; 531 writeb(0x11 + 2 * i, base + 2); 532 mpic->fixups[irq].data = readl(base + 4) | 0x80000000; 533 } 534 } 535 536 537 static void __init mpic_scan_ht_pics(struct mpic *mpic) 538 { 539 unsigned int devfn; 540 u8 __iomem *cfgspace; 541 542 printk(KERN_INFO "mpic: Setting up HT PICs 
workarounds for U3/U4\n"); 543 544 /* Allocate fixups array */ 545 mpic->fixups = kzalloc(128 * sizeof(*mpic->fixups), GFP_KERNEL); 546 BUG_ON(mpic->fixups == NULL); 547 548 /* Init spinlock */ 549 raw_spin_lock_init(&mpic->fixup_lock); 550 551 /* Map U3 config space. We assume all IO-APICs are on the primary bus 552 * so we only need to map 64kB. 553 */ 554 cfgspace = ioremap(0xf2000000, 0x10000); 555 BUG_ON(cfgspace == NULL); 556 557 /* Now we scan all slots. We do a very quick scan, we read the header 558 * type, vendor ID and device ID only, that's plenty enough 559 */ 560 for (devfn = 0; devfn < 0x100; devfn++) { 561 u8 __iomem *devbase = cfgspace + (devfn << 8); 562 u8 hdr_type = readb(devbase + PCI_HEADER_TYPE); 563 u32 l = readl(devbase + PCI_VENDOR_ID); 564 u16 s; 565 566 DBG("devfn %x, l: %x\n", devfn, l); 567 568 /* If no device, skip */ 569 if (l == 0xffffffff || l == 0x00000000 || 570 l == 0x0000ffff || l == 0xffff0000) 571 goto next; 572 /* Check if is supports capability lists */ 573 s = readw(devbase + PCI_STATUS); 574 if (!(s & PCI_STATUS_CAP_LIST)) 575 goto next; 576 577 mpic_scan_ht_pic(mpic, devbase, devfn, l); 578 mpic_scan_ht_msi(mpic, devbase, devfn); 579 580 next: 581 /* next device, if function 0 */ 582 if (PCI_FUNC(devfn) == 0 && (hdr_type & 0x80) == 0) 583 devfn += 7; 584 } 585 } 586 587 #else /* CONFIG_MPIC_U3_HT_IRQS */ 588 589 static inline int mpic_is_ht_interrupt(struct mpic *mpic, unsigned int source) 590 { 591 return 0; 592 } 593 594 static void __init mpic_scan_ht_pics(struct mpic *mpic) 595 { 596 } 597 598 #endif /* CONFIG_MPIC_U3_HT_IRQS */ 599 600 #ifdef CONFIG_SMP 601 static int irq_choose_cpu(const struct cpumask *mask) 602 { 603 int cpuid; 604 605 if (cpumask_equal(mask, cpu_all_mask)) { 606 static int irq_rover = 0; 607 static DEFINE_RAW_SPINLOCK(irq_rover_lock); 608 unsigned long flags; 609 610 /* Round-robin distribution... 
*/ 611 do_round_robin: 612 raw_spin_lock_irqsave(&irq_rover_lock, flags); 613 614 irq_rover = cpumask_next(irq_rover, cpu_online_mask); 615 if (irq_rover >= nr_cpu_ids) 616 irq_rover = cpumask_first(cpu_online_mask); 617 618 cpuid = irq_rover; 619 620 raw_spin_unlock_irqrestore(&irq_rover_lock, flags); 621 } else { 622 cpuid = cpumask_first_and(mask, cpu_online_mask); 623 if (cpuid >= nr_cpu_ids) 624 goto do_round_robin; 625 } 626 627 return get_hard_smp_processor_id(cpuid); 628 } 629 #else 630 static int irq_choose_cpu(const struct cpumask *mask) 631 { 632 return hard_smp_processor_id(); 633 } 634 #endif 635 636 /* Find an mpic associated with a given linux interrupt */ 637 static struct mpic *mpic_find(unsigned int irq) 638 { 639 if (irq < NUM_ISA_INTERRUPTS) 640 return NULL; 641 642 return irq_get_chip_data(irq); 643 } 644 645 /* Determine if the linux irq is an IPI */ 646 static unsigned int mpic_is_ipi(struct mpic *mpic, unsigned int irq) 647 { 648 unsigned int src = virq_to_hw(irq); 649 650 return (src >= mpic->ipi_vecs[0] && src <= mpic->ipi_vecs[3]); 651 } 652 653 /* Determine if the linux irq is a timer */ 654 static unsigned int mpic_is_tm(struct mpic *mpic, unsigned int irq) 655 { 656 unsigned int src = virq_to_hw(irq); 657 658 return (src >= mpic->timer_vecs[0] && src <= mpic->timer_vecs[7]); 659 } 660 661 /* Convert a cpu mask from logical to physical cpu numbers. 
*/ 662 static inline u32 mpic_physmask(u32 cpumask) 663 { 664 int i; 665 u32 mask = 0; 666 667 for (i = 0; i < min(32, NR_CPUS); ++i, cpumask >>= 1) 668 mask |= (cpumask & 1) << get_hard_smp_processor_id(i); 669 return mask; 670 } 671 672 #ifdef CONFIG_SMP 673 /* Get the mpic structure from the IPI number */ 674 static inline struct mpic * mpic_from_ipi(struct irq_data *d) 675 { 676 return irq_data_get_irq_chip_data(d); 677 } 678 #endif 679 680 /* Get the mpic structure from the irq number */ 681 static inline struct mpic * mpic_from_irq(unsigned int irq) 682 { 683 return irq_get_chip_data(irq); 684 } 685 686 /* Get the mpic structure from the irq data */ 687 static inline struct mpic * mpic_from_irq_data(struct irq_data *d) 688 { 689 return irq_data_get_irq_chip_data(d); 690 } 691 692 /* Send an EOI */ 693 static inline void mpic_eoi(struct mpic *mpic) 694 { 695 mpic_cpu_write(MPIC_INFO(CPU_EOI), 0); 696 (void)mpic_cpu_read(MPIC_INFO(CPU_WHOAMI)); 697 } 698 699 /* 700 * Linux descriptor level callbacks 701 */ 702 703 704 void mpic_unmask_irq(struct irq_data *d) 705 { 706 unsigned int loops = 100000; 707 struct mpic *mpic = mpic_from_irq_data(d); 708 unsigned int src = irqd_to_hwirq(d); 709 710 DBG("%p: %s: enable_irq: %d (src %d)\n", mpic, mpic->name, d->irq, src); 711 712 mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI), 713 mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) & 714 ~MPIC_VECPRI_MASK); 715 /* make sure mask gets to controller before we return to user */ 716 do { 717 if (!loops--) { 718 printk(KERN_ERR "%s: timeout on hwirq %u\n", 719 __func__, src); 720 break; 721 } 722 } while(mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) & MPIC_VECPRI_MASK); 723 } 724 725 void mpic_mask_irq(struct irq_data *d) 726 { 727 unsigned int loops = 100000; 728 struct mpic *mpic = mpic_from_irq_data(d); 729 unsigned int src = irqd_to_hwirq(d); 730 731 DBG("%s: disable_irq: %d (src %d)\n", mpic->name, d->irq, src); 732 733 mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI), 734 
mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) | 735 MPIC_VECPRI_MASK); 736 737 /* make sure mask gets to controller before we return to user */ 738 do { 739 if (!loops--) { 740 printk(KERN_ERR "%s: timeout on hwirq %u\n", 741 __func__, src); 742 break; 743 } 744 } while(!(mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) & MPIC_VECPRI_MASK)); 745 } 746 747 void mpic_end_irq(struct irq_data *d) 748 { 749 struct mpic *mpic = mpic_from_irq_data(d); 750 751 #ifdef DEBUG_IRQ 752 DBG("%s: end_irq: %d\n", mpic->name, d->irq); 753 #endif 754 /* We always EOI on end_irq() even for edge interrupts since that 755 * should only lower the priority, the MPIC should have properly 756 * latched another edge interrupt coming in anyway 757 */ 758 759 mpic_eoi(mpic); 760 } 761 762 #ifdef CONFIG_MPIC_U3_HT_IRQS 763 764 static void mpic_unmask_ht_irq(struct irq_data *d) 765 { 766 struct mpic *mpic = mpic_from_irq_data(d); 767 unsigned int src = irqd_to_hwirq(d); 768 769 mpic_unmask_irq(d); 770 771 if (irqd_is_level_type(d)) 772 mpic_ht_end_irq(mpic, src); 773 } 774 775 static unsigned int mpic_startup_ht_irq(struct irq_data *d) 776 { 777 struct mpic *mpic = mpic_from_irq_data(d); 778 unsigned int src = irqd_to_hwirq(d); 779 780 mpic_unmask_irq(d); 781 mpic_startup_ht_interrupt(mpic, src, irqd_is_level_type(d)); 782 783 return 0; 784 } 785 786 static void mpic_shutdown_ht_irq(struct irq_data *d) 787 { 788 struct mpic *mpic = mpic_from_irq_data(d); 789 unsigned int src = irqd_to_hwirq(d); 790 791 mpic_shutdown_ht_interrupt(mpic, src); 792 mpic_mask_irq(d); 793 } 794 795 static void mpic_end_ht_irq(struct irq_data *d) 796 { 797 struct mpic *mpic = mpic_from_irq_data(d); 798 unsigned int src = irqd_to_hwirq(d); 799 800 #ifdef DEBUG_IRQ 801 DBG("%s: end_irq: %d\n", mpic->name, d->irq); 802 #endif 803 /* We always EOI on end_irq() even for edge interrupts since that 804 * should only lower the priority, the MPIC should have properly 805 * latched another edge interrupt coming in anyway 806 */ 
807 808 if (irqd_is_level_type(d)) 809 mpic_ht_end_irq(mpic, src); 810 mpic_eoi(mpic); 811 } 812 #endif /* !CONFIG_MPIC_U3_HT_IRQS */ 813 814 #ifdef CONFIG_SMP 815 816 static void mpic_unmask_ipi(struct irq_data *d) 817 { 818 struct mpic *mpic = mpic_from_ipi(d); 819 unsigned int src = virq_to_hw(d->irq) - mpic->ipi_vecs[0]; 820 821 DBG("%s: enable_ipi: %d (ipi %d)\n", mpic->name, d->irq, src); 822 mpic_ipi_write(src, mpic_ipi_read(src) & ~MPIC_VECPRI_MASK); 823 } 824 825 static void mpic_mask_ipi(struct irq_data *d) 826 { 827 /* NEVER disable an IPI... that's just plain wrong! */ 828 } 829 830 static void mpic_end_ipi(struct irq_data *d) 831 { 832 struct mpic *mpic = mpic_from_ipi(d); 833 834 /* 835 * IPIs are marked IRQ_PER_CPU. This has the side effect of 836 * preventing the IRQ_PENDING/IRQ_INPROGRESS logic from 837 * applying to them. We EOI them late to avoid re-entering. 838 * We mark IPI's with IRQF_DISABLED as they must run with 839 * irqs disabled. 840 */ 841 mpic_eoi(mpic); 842 } 843 844 #endif /* CONFIG_SMP */ 845 846 static void mpic_unmask_tm(struct irq_data *d) 847 { 848 struct mpic *mpic = mpic_from_irq_data(d); 849 unsigned int src = virq_to_hw(d->irq) - mpic->timer_vecs[0]; 850 851 DBG("%s: enable_tm: %d (tm %d)\n", mpic->name, d->irq, src); 852 mpic_tm_write(src, mpic_tm_read(src) & ~MPIC_VECPRI_MASK); 853 mpic_tm_read(src); 854 } 855 856 static void mpic_mask_tm(struct irq_data *d) 857 { 858 struct mpic *mpic = mpic_from_irq_data(d); 859 unsigned int src = virq_to_hw(d->irq) - mpic->timer_vecs[0]; 860 861 mpic_tm_write(src, mpic_tm_read(src) | MPIC_VECPRI_MASK); 862 mpic_tm_read(src); 863 } 864 865 int mpic_set_affinity(struct irq_data *d, const struct cpumask *cpumask, 866 bool force) 867 { 868 struct mpic *mpic = mpic_from_irq_data(d); 869 unsigned int src = irqd_to_hwirq(d); 870 871 if (mpic->flags & MPIC_SINGLE_DEST_CPU) { 872 int cpuid = irq_choose_cpu(cpumask); 873 874 mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION), 1 << cpuid); 875 } else 
{ 876 u32 mask = cpumask_bits(cpumask)[0]; 877 878 mask &= cpumask_bits(cpu_online_mask)[0]; 879 880 mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION), 881 mpic_physmask(mask)); 882 } 883 884 return 0; 885 } 886 887 static unsigned int mpic_type_to_vecpri(struct mpic *mpic, unsigned int type) 888 { 889 /* Now convert sense value */ 890 switch(type & IRQ_TYPE_SENSE_MASK) { 891 case IRQ_TYPE_EDGE_RISING: 892 return MPIC_INFO(VECPRI_SENSE_EDGE) | 893 MPIC_INFO(VECPRI_POLARITY_POSITIVE); 894 case IRQ_TYPE_EDGE_FALLING: 895 case IRQ_TYPE_EDGE_BOTH: 896 return MPIC_INFO(VECPRI_SENSE_EDGE) | 897 MPIC_INFO(VECPRI_POLARITY_NEGATIVE); 898 case IRQ_TYPE_LEVEL_HIGH: 899 return MPIC_INFO(VECPRI_SENSE_LEVEL) | 900 MPIC_INFO(VECPRI_POLARITY_POSITIVE); 901 case IRQ_TYPE_LEVEL_LOW: 902 default: 903 return MPIC_INFO(VECPRI_SENSE_LEVEL) | 904 MPIC_INFO(VECPRI_POLARITY_NEGATIVE); 905 } 906 } 907 908 int mpic_set_irq_type(struct irq_data *d, unsigned int flow_type) 909 { 910 struct mpic *mpic = mpic_from_irq_data(d); 911 unsigned int src = irqd_to_hwirq(d); 912 unsigned int vecpri, vold, vnew; 913 914 DBG("mpic: set_irq_type(mpic:@%p,virq:%d,src:0x%x,type:0x%x)\n", 915 mpic, d->irq, src, flow_type); 916 917 if (src >= mpic->irq_count) 918 return -EINVAL; 919 920 if (flow_type == IRQ_TYPE_NONE) 921 if (mpic->senses && src < mpic->senses_count) 922 flow_type = mpic->senses[src]; 923 if (flow_type == IRQ_TYPE_NONE) 924 flow_type = IRQ_TYPE_LEVEL_LOW; 925 926 irqd_set_trigger_type(d, flow_type); 927 928 if (mpic_is_ht_interrupt(mpic, src)) 929 vecpri = MPIC_VECPRI_POLARITY_POSITIVE | 930 MPIC_VECPRI_SENSE_EDGE; 931 else 932 vecpri = mpic_type_to_vecpri(mpic, flow_type); 933 934 vold = mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)); 935 vnew = vold & ~(MPIC_INFO(VECPRI_POLARITY_MASK) | 936 MPIC_INFO(VECPRI_SENSE_MASK)); 937 vnew |= vecpri; 938 if (vold != vnew) 939 mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI), vnew); 940 941 return IRQ_SET_MASK_OK_NOCOPY;; 942 } 943 944 void 
mpic_set_vector(unsigned int virq, unsigned int vector) 945 { 946 struct mpic *mpic = mpic_from_irq(virq); 947 unsigned int src = virq_to_hw(virq); 948 unsigned int vecpri; 949 950 DBG("mpic: set_vector(mpic:@%p,virq:%d,src:%d,vector:0x%x)\n", 951 mpic, virq, src, vector); 952 953 if (src >= mpic->irq_count) 954 return; 955 956 vecpri = mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)); 957 vecpri = vecpri & ~MPIC_INFO(VECPRI_VECTOR_MASK); 958 vecpri |= vector; 959 mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI), vecpri); 960 } 961 962 void mpic_set_destination(unsigned int virq, unsigned int cpuid) 963 { 964 struct mpic *mpic = mpic_from_irq(virq); 965 unsigned int src = virq_to_hw(virq); 966 967 DBG("mpic: set_destination(mpic:@%p,virq:%d,src:%d,cpuid:0x%x)\n", 968 mpic, virq, src, cpuid); 969 970 if (src >= mpic->irq_count) 971 return; 972 973 mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION), 1 << cpuid); 974 } 975 976 static struct irq_chip mpic_irq_chip = { 977 .irq_mask = mpic_mask_irq, 978 .irq_unmask = mpic_unmask_irq, 979 .irq_eoi = mpic_end_irq, 980 .irq_set_type = mpic_set_irq_type, 981 }; 982 983 #ifdef CONFIG_SMP 984 static struct irq_chip mpic_ipi_chip = { 985 .irq_mask = mpic_mask_ipi, 986 .irq_unmask = mpic_unmask_ipi, 987 .irq_eoi = mpic_end_ipi, 988 }; 989 #endif /* CONFIG_SMP */ 990 991 static struct irq_chip mpic_tm_chip = { 992 .irq_mask = mpic_mask_tm, 993 .irq_unmask = mpic_unmask_tm, 994 .irq_eoi = mpic_end_irq, 995 }; 996 997 #ifdef CONFIG_MPIC_U3_HT_IRQS 998 static struct irq_chip mpic_irq_ht_chip = { 999 .irq_startup = mpic_startup_ht_irq, 1000 .irq_shutdown = mpic_shutdown_ht_irq, 1001 .irq_mask = mpic_mask_irq, 1002 .irq_unmask = mpic_unmask_ht_irq, 1003 .irq_eoi = mpic_end_ht_irq, 1004 .irq_set_type = mpic_set_irq_type, 1005 }; 1006 #endif /* CONFIG_MPIC_U3_HT_IRQS */ 1007 1008 1009 static int mpic_host_match(struct irq_host *h, struct device_node *node) 1010 { 1011 /* Exact match, unless mpic node is NULL */ 1012 return h->of_node == NULL || 
	/* (continuation of mpic_host_match(); its opening lines are above this chunk) */
	h->of_node == node;
}

/*
 * irq_host .map callback: bind hardware vector @hw to Linux virq @virq,
 * selecting the right irq_chip/flow handler for IPIs, timers, HT and
 * normal sources.  Returns 0 on success or -EINVAL for vectors we refuse
 * to map (the spurious vector and protected sources).
 */
static int mpic_host_map(struct irq_host *h, unsigned int virq,
			 irq_hw_number_t hw)
{
	struct mpic *mpic = h->host_data;
	struct irq_chip *chip;

	DBG("mpic: map virq %d, hwirq 0x%lx\n", virq, hw);

	/* Never map the spurious vector or a protected source */
	if (hw == mpic->spurious_vec)
		return -EINVAL;
	if (mpic->protected && test_bit(hw, mpic->protected))
		return -EINVAL;

#ifdef CONFIG_SMP
	/* IPI vectors occupy the range starting at ipi_vecs[0]; they get the
	 * per-CPU flow handler and are only valid on the primary MPIC.
	 */
	else if (hw >= mpic->ipi_vecs[0]) {
		WARN_ON(!(mpic->flags & MPIC_PRIMARY));

		DBG("mpic: mapping as IPI\n");
		irq_set_chip_data(virq, mpic);
		irq_set_chip_and_handler(virq, &mpic->hc_ipi,
					 handle_percpu_irq);
		return 0;
	}
#endif /* CONFIG_SMP */

	/* Timer vectors get the timer chip; also primary-MPIC only */
	if (hw >= mpic->timer_vecs[0] && hw <= mpic->timer_vecs[7]) {
		WARN_ON(!(mpic->flags & MPIC_PRIMARY));

		DBG("mpic: mapping as timer\n");
		irq_set_chip_data(virq, mpic);
		irq_set_chip_and_handler(virq, &mpic->hc_tm,
					 handle_fasteoi_irq);
		return 0;
	}

	if (hw >= mpic->irq_count)
		return -EINVAL;

	mpic_msi_reserve_hwirq(mpic, hw);

	/* Default chip */
	chip = &mpic->hc_irq;

#ifdef CONFIG_MPIC_U3_HT_IRQS
	/* Check for HT interrupts, override vecpri */
	if (mpic_is_ht_interrupt(mpic, hw))
		chip = &mpic->hc_ht_irq;
#endif /* CONFIG_MPIC_U3_HT_IRQS */

	DBG("mpic: mapping to irq chip @%p\n", chip);

	irq_set_chip_data(virq, mpic);
	irq_set_chip_and_handler(virq, chip, handle_fasteoi_irq);

	/* Set default irq type */
	irq_set_irq_type(virq, IRQ_TYPE_NONE);

	/* If the MPIC was reset, then all vectors have already been
	 * initialized.  Otherwise, a per source lazy initialization
	 * is done here.
	 */
	if (!mpic_is_ipi(mpic, hw) && (mpic->flags & MPIC_NO_RESET)) {
		mpic_set_vector(virq, hw);
		mpic_set_destination(virq, mpic_processor_id(mpic));
		mpic_irq_set_priority(virq, 8);
	}

	return 0;
}

/*
 * irq_host .xlate callback: decode a device-tree interrupt specifier into a
 * hardware vector (*out_hwirq) and IRQ trigger flags (*out_flags).  Handles
 * the 4-cell Freescale extended encoding (type cell selects shared IRQ, IPI
 * or timer) as well as the classic 2-cell OpenPIC/Apple encodings.
 */
static int mpic_host_xlate(struct irq_host *h, struct device_node *ct,
			   const u32 *intspec, unsigned int intsize,
			   irq_hw_number_t *out_hwirq, unsigned int *out_flags)

{
	struct mpic *mpic = h->host_data;
	/* Maps the 2-bit device-tree sense value to Linux IRQ_TYPE_* flags */
	static unsigned char map_mpic_senses[4] = {
		IRQ_TYPE_EDGE_RISING,
		IRQ_TYPE_LEVEL_LOW,
		IRQ_TYPE_LEVEL_HIGH,
		IRQ_TYPE_EDGE_FALLING,
	};

	*out_hwirq = intspec[0];
	if (intsize >= 4 && (mpic->flags & MPIC_FSL)) {
		/*
		 * Freescale MPIC with extended intspec:
		 * First two cells are as usual.  Third specifies
		 * an "interrupt type".  Fourth is type-specific data.
		 *
		 * See Documentation/devicetree/bindings/powerpc/fsl/mpic.txt
		 */
		switch (intspec[2]) {
		case 0:
		case 1: /* no EISR/EIMR support for now, treat as shared IRQ */
			break;
		case 2:
			/* IPI: first cell is the IPI number, remap to reserved vector */
			if (intspec[0] >= ARRAY_SIZE(mpic->ipi_vecs))
				return -EINVAL;

			*out_hwirq = mpic->ipi_vecs[intspec[0]];
			break;
		case 3:
			/* Timer: first cell is the timer number */
			if (intspec[0] >= ARRAY_SIZE(mpic->timer_vecs))
				return -EINVAL;

			*out_hwirq = mpic->timer_vecs[intspec[0]];
			break;
		default:
			pr_debug("%s: unknown irq type %u\n",
				 __func__, intspec[2]);
			return -EINVAL;
		}

		*out_flags = map_mpic_senses[intspec[1] & 3];
	} else if (intsize > 1) {
		u32 mask = 0x3;

		/* Apple invented a new race of encoding on machines with
		 * an HT APIC.  They encode, among others, the index within
		 * the HT APIC.  We don't care about it here since thankfully,
		 * it appears that they have the APIC already properly
		 * configured, and thus our current fixup code that reads the
		 * APIC config works fine.  However, we still need to mask out
		 * bits in the specifier to make sure we only get bit 0 which
		 * is the level/edge bit (the only sense bit exposed by Apple),
		 * as their bit 1 means something else.
		 */
		if (machine_is(powermac))
			mask = 0x1;
		*out_flags = map_mpic_senses[intspec[1] & mask];
	} else
		*out_flags = IRQ_TYPE_NONE;

	DBG("mpic: xlate (%d cells: 0x%08x 0x%08x) to line 0x%lx sense 0x%x\n",
	    intsize, intspec[0], intspec[1], *out_hwirq, *out_flags);

	return 0;
}

/* irq_host callbacks for all MPIC instances */
static struct irq_host_ops mpic_host_ops = {
	.match = mpic_host_match,
	.map = mpic_host_map,
	.xlate = mpic_host_xlate,
};

/*
 * Returns non-zero when the device-tree node carries the "pic-no-reset"
 * property, i.e. firmware forbids us from resetting this PIC.
 */
static int mpic_reset_prohibited(struct device_node *node)
{
	return node && of_get_property(node, "pic-no-reset", NULL);
}

/*
 * Exported functions
 */

/*
 * Allocate and probe an MPIC instance.
 *
 * @node:      device-tree node of the controller (mandatory if @phys_addr
 *             is 0; also consulted for "big-endian", "fsl,mpic",
 *             "protected-sources", "dcr-reg", "reg" and "pic-no-reset")
 * @phys_addr: physical base of the register block, or 0 to derive it from
 *             the device tree (MMIO "reg" or DCR)
 * @flags:     MPIC_* behaviour flags
 * @isu_size:  sources per external ISU, or 0 to use the internal ISU and
 *             read the source count from the feature register
 * @irq_count: number of sources (used with MPIC_BROKEN_FRR_NIRQS, or 0)
 * @name:      name used for the irq chips and boot messages
 *
 * Returns the new mpic (also linked into the global 'mpics' list, and made
 * the primary/default host if MPIC_PRIMARY), or NULL on allocation failure.
 * mpic_init() must still be called before the controller is used.
 */
struct mpic * __init mpic_alloc(struct device_node *node,
				phys_addr_t phys_addr,
				unsigned int flags,
				unsigned int isu_size,
				unsigned int irq_count,
				const char *name)
{
	struct mpic *mpic;
	u32 greg_feature;
	const char *vers;
	int i;
	int intvec_top;
	u64 paddr = phys_addr;

	mpic = kzalloc(sizeof(struct mpic), GFP_KERNEL);
	if (mpic == NULL)
		return NULL;

	mpic->name = name;

	/* Clone the template irq chips and give them our name; affinity
	 * setting is only wired up on the primary controller.
	 */
	mpic->hc_irq = mpic_irq_chip;
	mpic->hc_irq.name = name;
	if (flags & MPIC_PRIMARY)
		mpic->hc_irq.irq_set_affinity = mpic_set_affinity;
#ifdef CONFIG_MPIC_U3_HT_IRQS
	mpic->hc_ht_irq = mpic_irq_ht_chip;
	mpic->hc_ht_irq.name = name;
	if (flags & MPIC_PRIMARY)
		mpic->hc_ht_irq.irq_set_affinity = mpic_set_affinity;
#endif /* CONFIG_MPIC_U3_HT_IRQS */

#ifdef CONFIG_SMP
	mpic->hc_ipi = mpic_ipi_chip;
	mpic->hc_ipi.name = name;
#endif /* CONFIG_SMP */

	mpic->hc_tm = mpic_tm_chip;
	mpic->hc_tm.name = name;

	mpic->flags = flags;
	mpic->isu_size = isu_size;
	mpic->irq_count = irq_count;
	mpic->num_sources = 0; /* so far */

	if (flags & MPIC_LARGE_VECTORS)
		intvec_top = 2047;
	else
		intvec_top = 255;

	/* Reserve the top 13 vectors: 8 timers, 4 IPIs, then spurious */
	mpic->timer_vecs[0] = intvec_top - 12;
	mpic->timer_vecs[1] = intvec_top - 11;
	mpic->timer_vecs[2] = intvec_top - 10;
	mpic->timer_vecs[3] = intvec_top - 9;
	mpic->timer_vecs[4] = intvec_top - 8;
	mpic->timer_vecs[5] = intvec_top - 7;
	mpic->timer_vecs[6] = intvec_top - 6;
	mpic->timer_vecs[7] = intvec_top - 5;
	mpic->ipi_vecs[0] = intvec_top - 4;
	mpic->ipi_vecs[1] = intvec_top - 3;
	mpic->ipi_vecs[2] = intvec_top - 2;
	mpic->ipi_vecs[3] = intvec_top - 1;
	mpic->spurious_vec = intvec_top;

	/* Check for "big-endian" in device-tree */
	if (node && of_get_property(node, "big-endian", NULL) != NULL)
		mpic->flags |= MPIC_BIG_ENDIAN;
	if (node && of_device_is_compatible(node, "fsl,mpic"))
		mpic->flags |= MPIC_FSL;

	/* Look for protected sources */
	if (node) {
		int psize;
		unsigned int bits, mapsize;
		const u32 *psrc =
			of_get_property(node, "protected-sources", &psize);
		if (psrc) {
			/* Build a bitmap over all possible vectors and mark
			 * each listed source; psize is bytes -> u32 count.
			 */
			psize /= 4;
			bits = intvec_top + 1;
			mapsize = BITS_TO_LONGS(bits) * sizeof(unsigned long);
			mpic->protected = kzalloc(mapsize, GFP_KERNEL);
			BUG_ON(mpic->protected == NULL);
			for (i = 0; i < psize; i++) {
				if (psrc[i] > intvec_top)
					continue;
				__set_bit(psrc[i], mpic->protected);
			}
		}
	}

#ifdef CONFIG_MPIC_WEIRD
	mpic->hw_set = mpic_infos[MPIC_GET_REGSET(flags)];
#endif

	/* default register type */
	mpic->reg_type = (flags & MPIC_BIG_ENDIAN) ?
		mpic_access_mmio_be : mpic_access_mmio_le;

	/* If no physical address is passed in, a device-node is mandatory */
	BUG_ON(paddr == 0 && node == NULL);

	/* If no physical address passed in, check if it's dcr based */
	if (paddr == 0 && of_get_property(node, "dcr-reg", NULL) != NULL) {
#ifdef CONFIG_PPC_DCR
		mpic->flags |= MPIC_USES_DCR;
		mpic->reg_type = mpic_access_dcr;
#else
		BUG();
#endif /* CONFIG_PPC_DCR */
	}

	/* If the MPIC is not DCR based, and no physical address was passed
	 * in, try to obtain one
	 */
	if (paddr == 0 && !(mpic->flags & MPIC_USES_DCR)) {
		const u32 *reg = of_get_property(node, "reg", NULL);
		BUG_ON(reg == NULL);
		paddr = of_translate_address(node, reg);
		BUG_ON(paddr == OF_BAD_ADDR);
	}

	/* Map the global registers */
	mpic_map(mpic, node, paddr, &mpic->gregs, MPIC_INFO(GREG_BASE), 0x1000);
	mpic_map(mpic, node, paddr, &mpic->tmregs, MPIC_INFO(TIMER_BASE), 0x1000);

	/* Reset */

	/* When using a device-node, reset requests are only honored if the MPIC
	 * is allowed to reset.
	 */
	if (mpic_reset_prohibited(node))
		mpic->flags |= MPIC_NO_RESET;

	if ((flags & MPIC_WANTS_RESET) && !(mpic->flags & MPIC_NO_RESET)) {
		printk(KERN_DEBUG "mpic: Resetting\n");
		/* Set the reset bit, then spin until the controller clears it */
		mpic_write(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0),
			   mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0))
			   | MPIC_GREG_GCONF_RESET);
		while( mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0))
		       & MPIC_GREG_GCONF_RESET)
			mb();
	}

	/* CoreInt */
	if (flags & MPIC_ENABLE_COREINT)
		mpic_write(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0),
			   mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0))
			   | MPIC_GREG_GCONF_COREINT);

	if (flags & MPIC_ENABLE_MCK)
		mpic_write(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0),
			   mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0))
			   | MPIC_GREG_GCONF_MCK);

	/* Read feature register, calculate num CPUs and, for non-ISU
	 * MPICs, num sources as well.  On ISU MPICs, sources are counted
	 * as ISUs are added
	 */
	greg_feature = mpic_read(mpic->gregs, MPIC_INFO(GREG_FEATURE_0));
	mpic->num_cpus = ((greg_feature & MPIC_GREG_FEATURE_LAST_CPU_MASK)
			  >> MPIC_GREG_FEATURE_LAST_CPU_SHIFT) + 1;
	if (isu_size == 0) {
		if (flags & MPIC_BROKEN_FRR_NIRQS)
			/* feature register lies about the source count */
			mpic->num_sources = mpic->irq_count;
		else
			mpic->num_sources =
				((greg_feature & MPIC_GREG_FEATURE_LAST_SRC_MASK)
				 >> MPIC_GREG_FEATURE_LAST_SRC_SHIFT) + 1;
	}

	/* Map the per-CPU registers */
	for (i = 0; i < mpic->num_cpus; i++) {
		mpic_map(mpic, node, paddr, &mpic->cpuregs[i],
			 MPIC_INFO(CPU_BASE) + i * MPIC_INFO(CPU_STRIDE),
			 0x1000);
	}

	/* Initialize main ISU if none provided */
	if (mpic->isu_size == 0) {
		mpic->isu_size = mpic->num_sources;
		mpic_map(mpic, node, paddr, &mpic->isus[0],
			 MPIC_INFO(IRQ_BASE), MPIC_INFO(IRQ_STRIDE) * mpic->isu_size);
	}
	/* shift/mask split a source number into (ISU index, offset in ISU) */
	mpic->isu_shift = 1 + __ilog2(mpic->isu_size - 1);
	mpic->isu_mask = (1 << mpic->isu_shift) - 1;

	mpic->irqhost = irq_alloc_host(node, IRQ_HOST_MAP_LINEAR,
				       isu_size ? isu_size : mpic->num_sources,
				       &mpic_host_ops,
				       flags & MPIC_LARGE_VECTORS ? 2048 : 256);
	/* NOTE(review): on this failure path 'mpic' (and mpic->protected) is
	 * leaked; callers treat NULL as fatal at boot, so this was presumably
	 * deemed acceptable — confirm before reusing this pattern.
	 */
	if (mpic->irqhost == NULL)
		return NULL;

	mpic->irqhost->host_data = mpic;

	/* Display version */
	switch (greg_feature & MPIC_GREG_FEATURE_VERSION_MASK) {
	case 1:
		vers = "1.0";
		break;
	case 2:
		vers = "1.2";
		break;
	case 3:
		vers = "1.3";
		break;
	default:
		vers = "<unknown>";
		break;
	}
	printk(KERN_INFO "mpic: Setting up MPIC \"%s\" version %s at %llx,"
	       " max %d CPUs\n",
	       name, vers, (unsigned long long)paddr, mpic->num_cpus);
	printk(KERN_INFO "mpic: ISU size: %d, shift: %d, mask: %x\n",
	       mpic->isu_size, mpic->isu_shift, mpic->isu_mask);

	/* Link into the global list; make this the default host if primary */
	mpic->next = mpics;
	mpics = mpic;

	if (flags & MPIC_PRIMARY) {
		mpic_primary = mpic;
		irq_set_default_host(mpic->irqhost);
	}

	return mpic;
}

/*
 * Map external ISU number @isu_num at physical address @paddr and grow
 * num_sources to cover it.  Only used when mpic_alloc() was given a
 * non-zero isu_size.
 */
void __init mpic_assign_isu(struct mpic *mpic, unsigned int isu_num,
			    phys_addr_t paddr)
{
	unsigned int isu_first = isu_num * mpic->isu_size;

	BUG_ON(isu_num >= MPIC_MAX_ISU);

	mpic_map(mpic, mpic->irqhost->of_node,
		 paddr, &mpic->isus[isu_num], 0,
		 MPIC_INFO(IRQ_STRIDE) * mpic->isu_size);

	if ((isu_first + mpic->isu_size) > mpic->num_sources)
		mpic->num_sources = isu_first + mpic->isu_size;
}

/* Install a platform-provided table of default sense values per source */
void __init mpic_set_default_senses(struct mpic *mpic, u8 *senses, int count)
{
	mpic->senses = senses;
	mpic->senses_count = count;
}

/*
 * Program the controller after mpic_alloc(): set up timer and IPI vectors
 * (masked), run HT/MSI fixups, initialize every source masked and routed
 * to the boot CPU (unless MPIC_NO_RESET), program the spurious vector and
 * global config bits, and finally drop the CPU task priority to 0 so
 * interrupts can be delivered.
 */
void __init mpic_init(struct mpic *mpic)
{
	int i;
	int cpu;

	BUG_ON(mpic->num_sources == 0);

	printk(KERN_INFO "mpic: Initializing for %d sources\n", mpic->num_sources);

	/* Set current processor priority to max */
	mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), 0xf);

	/* Initialize timers to our reserved vectors and mask them for now */
	for (i = 0; i < 4; i++) {
		mpic_write(mpic->tmregs,
			   i * MPIC_INFO(TIMER_STRIDE) +
			   MPIC_INFO(TIMER_DESTINATION),
			   1 << hard_smp_processor_id());
		mpic_write(mpic->tmregs,
			   i * MPIC_INFO(TIMER_STRIDE) +
			   MPIC_INFO(TIMER_VECTOR_PRI),
			   MPIC_VECPRI_MASK |
			   (9 << MPIC_VECPRI_PRIORITY_SHIFT) |
			   (mpic->timer_vecs[0] + i));
	}

	/* Initialize IPIs to our reserved vectors and mark them disabled for now */
	mpic_test_broken_ipi(mpic);
	for (i = 0; i < 4; i++) {
		mpic_ipi_write(i,
			       MPIC_VECPRI_MASK |
			       (10 << MPIC_VECPRI_PRIORITY_SHIFT) |
			       (mpic->ipi_vecs[0] + i));
	}

	/* Initialize interrupt sources */
	if (mpic->irq_count == 0)
		mpic->irq_count = mpic->num_sources;

	/* Do the HT PIC fixups on U3 broken mpic */
	DBG("MPIC flags: %x\n", mpic->flags);
	if ((mpic->flags & MPIC_U3_HT_IRQS) && (mpic->flags & MPIC_PRIMARY)) {
		mpic_scan_ht_pics(mpic);
		mpic_u3msi_init(mpic);
	}

	mpic_pasemi_msi_init(mpic);

	cpu = mpic_processor_id(mpic);

	if (!(mpic->flags & MPIC_NO_RESET)) {
		for (i = 0; i < mpic->num_sources; i++) {
			/* start with vector = source number, and masked */
			u32 vecpri = MPIC_VECPRI_MASK | i |
				(8 << MPIC_VECPRI_PRIORITY_SHIFT);

			/* check if protected */
			if (mpic->protected && test_bit(i, mpic->protected))
				continue;
			/* init hw */
			mpic_irq_write(i, MPIC_INFO(IRQ_VECTOR_PRI), vecpri);
			mpic_irq_write(i, MPIC_INFO(IRQ_DESTINATION), 1 << cpu);
		}
	}

	/* Init spurious vector */
	mpic_write(mpic->gregs, MPIC_INFO(GREG_SPURIOUS), mpic->spurious_vec);

	/* Disable 8259 passthrough, if supported */
	if (!(mpic->flags & MPIC_NO_PTHROU_DIS))
		mpic_write(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0),
			   mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0))
			   | MPIC_GREG_GCONF_8259_PTHROU_DIS);

	if (mpic->flags & MPIC_NO_BIAS)
		mpic_write(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0),
			   mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0))
			   | MPIC_GREG_GCONF_NO_BIAS);

	/* Set current processor priority to 0 */
	mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), 0);

#ifdef CONFIG_PM
	/* allocate memory to save mpic state */
	mpic->save_data = kmalloc(mpic->num_sources * sizeof(*mpic->save_data),
				  GFP_KERNEL);
	BUG_ON(mpic->save_data == NULL);
#endif
}

/* Program the clock-ratio field of global config register 1 */
void __init mpic_set_clk_ratio(struct mpic *mpic, u32 clock_ratio)
{
	u32 v;

	v = mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_1);
	v &= ~MPIC_GREG_GLOBAL_CONF_1_CLK_RATIO_MASK;
	v |= MPIC_GREG_GLOBAL_CONF_1_CLK_RATIO(clock_ratio);
	mpic_write(mpic->gregs, MPIC_GREG_GLOBAL_CONF_1, v);
}

/* Enable or disable the serial-interrupt mode bit (SIE) under mpic_lock */
void __init mpic_set_serial_int(struct mpic *mpic, int enable)
{
	unsigned long flags;
	u32 v;

	raw_spin_lock_irqsave(&mpic_lock, flags);
	v = mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_1);
	if (enable)
		v |= MPIC_GREG_GLOBAL_CONF_1_SIE;
	else
		v &= ~MPIC_GREG_GLOBAL_CONF_1_SIE;
	mpic_write(mpic->gregs, MPIC_GREG_GLOBAL_CONF_1, v);
	raw_spin_unlock_irqrestore(&mpic_lock, flags);
}

/*
 * Rewrite the priority field of the vector/priority register for virq @irq,
 * dispatching to the IPI, timer or regular-source register bank.
 */
void mpic_irq_set_priority(unsigned int irq, unsigned int pri)
{
	struct mpic *mpic = mpic_find(irq);
	unsigned int src = virq_to_hw(irq);
	unsigned long flags;
	u32 reg;

	if (!mpic)
		return;

	raw_spin_lock_irqsave(&mpic_lock, flags);
	/* NOTE(review): mpic_is_ipi()/mpic_is_tm() are passed the virq here,
	 * while mpic_host_map() passes mpic_is_ipi() the hwirq; the helpers
	 * are defined outside this chunk — confirm which argument they expect.
	 */
	if (mpic_is_ipi(mpic, irq)) {
		reg = mpic_ipi_read(src - mpic->ipi_vecs[0]) &
			~MPIC_VECPRI_PRIORITY_MASK;
		mpic_ipi_write(src - mpic->ipi_vecs[0],
			       reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT));
	} else if (mpic_is_tm(mpic, irq)) {
		reg = mpic_tm_read(src - mpic->timer_vecs[0]) &
			~MPIC_VECPRI_PRIORITY_MASK;
		mpic_tm_write(src - mpic->timer_vecs[0],
			      reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT));
	} else {
		reg = mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI))
			& ~MPIC_VECPRI_PRIORITY_MASK;
		mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI),
			       reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT));
	}
	raw_spin_unlock_irqrestore(&mpic_lock, flags);
}

/*
 * Per-CPU bring-up on the primary MPIC: optionally add this CPU to every
 * source's destination mask, then lower the CPU task priority to 0 so the
 * CPU starts taking interrupts.  No-op on UP kernels.
 */
void mpic_setup_this_cpu(void)
{
#ifdef CONFIG_SMP
	struct mpic *mpic = mpic_primary;
	unsigned long flags;
	u32 msk = 1 << hard_smp_processor_id();
	unsigned int i;

	BUG_ON(mpic == NULL);

	DBG("%s: setup_this_cpu(%d)\n", mpic->name, hard_smp_processor_id());

	raw_spin_lock_irqsave(&mpic_lock, flags);

	/* let the mpic know we want intrs.  default affinity is 0xffffffff
	 * until changed via /proc.  That's how it's done on x86.  If we want
	 * it differently, then we should make sure we also change the default
	 * values of irq_desc[].affinity in irq.c.
	 */
	if (distribute_irqs) {
		for (i = 0; i < mpic->num_sources ; i++)
			mpic_irq_write(i, MPIC_INFO(IRQ_DESTINATION),
				mpic_irq_read(i, MPIC_INFO(IRQ_DESTINATION)) | msk);
	}

	/* Set current processor priority to 0 */
	mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), 0);

	raw_spin_unlock_irqrestore(&mpic_lock, flags);
#endif /* CONFIG_SMP */
}

/* Read this CPU's current task-priority register on the primary MPIC.
 * The local 'mpic' is referenced implicitly by the mpic_cpu_read()
 * accessor macro (defined outside this chunk).
 */
int mpic_cpu_get_priority(void)
{
	struct mpic *mpic = mpic_primary;

	return mpic_cpu_read(MPIC_INFO(CPU_CURRENT_TASK_PRI));
}

/* Write this CPU's task-priority register (masked to the valid field) */
void mpic_cpu_set_priority(int prio)
{
	struct mpic *mpic = mpic_primary;

	prio &= MPIC_CPU_TASKPRI_MASK;
	mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), prio);
}

/*
 * Per-CPU teardown (e.g. before kexec/offline): remove this CPU from every
 * source's destination mask, raise its task priority to max, and EOI any
 * pending interrupt.  @secondary is currently unused.
 */
void mpic_teardown_this_cpu(int secondary)
{
	struct mpic *mpic = mpic_primary;
	unsigned long flags;
	u32 msk = 1 << hard_smp_processor_id();
	unsigned int i;

	BUG_ON(mpic == NULL);

	DBG("%s: teardown_this_cpu(%d)\n", mpic->name, hard_smp_processor_id());
	raw_spin_lock_irqsave(&mpic_lock, flags);

	/* let the mpic know we don't want intrs.  */
	for (i = 0; i < mpic->num_sources ; i++)
		mpic_irq_write(i, MPIC_INFO(IRQ_DESTINATION),
			mpic_irq_read(i, MPIC_INFO(IRQ_DESTINATION)) & ~msk);

	/* Set current processor priority to max */
	mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), 0xf);
	/* We need to EOI the IPI since not all platforms reset the MPIC
	 * on boot and new interrupts wouldn't get delivered otherwise.
	 */
	mpic_eoi(mpic);

	raw_spin_unlock_irqrestore(&mpic_lock, flags);
}


/*
 * Acknowledge an interrupt via the per-CPU register @reg (INTACK or MCACK)
 * and translate the hardware vector to a virq.  Returns NO_IRQ for the
 * spurious vector and for protected sources (EOIing as required).
 */
static unsigned int _mpic_get_one_irq(struct mpic *mpic, int reg)
{
	u32 src;

	src = mpic_cpu_read(reg) & MPIC_INFO(VECPRI_VECTOR_MASK);
#ifdef DEBUG_LOW
	DBG("%s: get_one_irq(reg 0x%x): %d\n", mpic->name, reg, src);
#endif
	if (unlikely(src == mpic->spurious_vec)) {
		/* Some controllers require an EOI even for the spurious vector */
		if (mpic->flags & MPIC_SPV_EOI)
			mpic_eoi(mpic);
		return NO_IRQ;
	}
	if (unlikely(mpic->protected && test_bit(src, mpic->protected))) {
		if (printk_ratelimit())
			printk(KERN_WARNING "%s: Got protected source %d !\n",
			       mpic->name, (int)src);
		/* we already ack'ed it above, so EOI before dropping it */
		mpic_eoi(mpic);
		return NO_IRQ;
	}

	return irq_linear_revmap(mpic->irqhost, src);
}

/* Fetch the next pending interrupt from @mpic via the normal INTACK */
unsigned int mpic_get_one_irq(struct mpic *mpic)
{
	return _mpic_get_one_irq(mpic, MPIC_INFO(CPU_INTACK));
}

/* Fetch the next pending interrupt from the primary MPIC */
unsigned int mpic_get_irq(void)
{
	struct mpic *mpic = mpic_primary;

	BUG_ON(mpic == NULL);

	return mpic_get_one_irq(mpic);
}

/*
 * CoreInt variant: the vector is delivered in the EPR special-purpose
 * register instead of via an INTACK read.  Only meaningful on BOOKE;
 * returns NO_IRQ otherwise.  Note the protected-source path does not EOI
 * here, unlike _mpic_get_one_irq() — presumably because no INTACK cycle
 * occurred; confirm against the CoreInt documentation.
 */
unsigned int mpic_get_coreint_irq(void)
{
#ifdef CONFIG_BOOKE
	struct mpic *mpic = mpic_primary;
	u32 src;

	BUG_ON(mpic == NULL);

	src = mfspr(SPRN_EPR);

	if (unlikely(src == mpic->spurious_vec)) {
		if (mpic->flags & MPIC_SPV_EOI)
			mpic_eoi(mpic);
		return NO_IRQ;
	}
	if (unlikely(mpic->protected && test_bit(src, mpic->protected))) {
		if (printk_ratelimit())
			printk(KERN_WARNING "%s: Got protected source %d !\n",
			       mpic->name, (int)src);
		return NO_IRQ;
	}

	return irq_linear_revmap(mpic->irqhost, src);
#else
	return NO_IRQ;
#endif
}

/* Fetch a pending machine-check interrupt via the MCACK register */
unsigned int mpic_get_mcirq(void)
{
	struct mpic *mpic = mpic_primary;

	BUG_ON(mpic == NULL);

	return _mpic_get_one_irq(mpic, MPIC_INFO(CPU_MCACK));
}

#ifdef CONFIG_SMP
/* Map the 4 reserved IPI vectors to virqs and register them as SMP
 * message IPIs; a failed mapping is logged and skipped.
 */
void mpic_request_ipis(void)
{
	struct mpic *mpic = mpic_primary;
	int i;
	BUG_ON(mpic == NULL);

	printk(KERN_INFO "mpic: requesting IPIs...\n");

	for (i = 0; i < 4; i++) {
		unsigned int vipi = irq_create_mapping(mpic->irqhost,
						       mpic->ipi_vecs[0] + i);
		if (vipi == NO_IRQ) {
			printk(KERN_ERR "Failed to map %s\n", smp_ipi_name[i]);
			continue;
		}
		smp_request_message_ipi(vipi, i);
	}
}

/* Send IPI number @msg (0..3) to @cpu by writing its physical-CPU mask
 * into the corresponding IPI dispatch register.
 */
void smp_mpic_message_pass(int cpu, int msg)
{
	struct mpic *mpic = mpic_primary;
	u32 physmask;

	BUG_ON(mpic == NULL);

	/* make sure we're sending something that translates to an IPI */
	if ((unsigned int)msg > 3) {
		printk("SMP %d: smp_message_pass: unknown msg %d\n",
		       smp_processor_id(), msg);
		return;
	}

#ifdef DEBUG_IPI
	DBG("%s: send_ipi(ipi_no: %d)\n", mpic->name, msg);
#endif

	physmask = 1 << get_hard_smp_processor_id(cpu);

	mpic_cpu_write(MPIC_INFO(CPU_IPI_DISPATCH_0) +
		       msg * MPIC_INFO(CPU_IPI_DISPATCH_STRIDE), physmask);
}

/* SMP probe hook: request IPIs when more than one CPU is possible;
 * returns the possible-CPU count.
 */
int __init smp_mpic_probe(void)
{
	int nr_cpus;

	DBG("smp_mpic_probe()...\n");

	nr_cpus = cpumask_weight(cpu_possible_mask);

	DBG("nr_cpus: %d\n", nr_cpus);

	if (nr_cpus > 1)
		mpic_request_ipis();

	return nr_cpus;
}

/* SMP hook: per-CPU MPIC setup for a newly-onlined CPU */
void __devinit smp_mpic_setup_cpu(int cpu)
{
	mpic_setup_this_cpu();
}

/*
 * Reset @cpu by pulsing its bit in the processor-init register.  The
 * read-backs after each write push the MMIO writes out before continuing.
 */
void mpic_reset_core(int cpu)
{
	struct mpic *mpic = mpic_primary;
	u32 pir;
	int cpuid = get_hard_smp_processor_id(cpu);

	/* Set target bit for core reset */
	pir = mpic_read(mpic->gregs, MPIC_INFO(GREG_PROCESSOR_INIT));
	pir |= (1 << cpuid);
	mpic_write(mpic->gregs, MPIC_INFO(GREG_PROCESSOR_INIT), pir);
	mpic_read(mpic->gregs, MPIC_INFO(GREG_PROCESSOR_INIT));

	/* Restore target bit after reset complete */
	pir &= ~(1 << cpuid);
	mpic_write(mpic->gregs, MPIC_INFO(GREG_PROCESSOR_INIT), pir);
	mpic_read(mpic->gregs, MPIC_INFO(GREG_PROCESSOR_INIT));
}
#endif /* CONFIG_SMP */

#ifdef CONFIG_PM
/* Save each source's vector/priority and destination registers into
 * save_data (allocated in mpic_init()).
 */
static void mpic_suspend_one(struct mpic *mpic)
{
	int i;

	for (i = 0; i < mpic->num_sources; i++) {
		mpic->save_data[i].vecprio =
			mpic_irq_read(i, MPIC_INFO(IRQ_VECTOR_PRI));
		mpic->save_data[i].dest =
			mpic_irq_read(i, MPIC_INFO(IRQ_DESTINATION));
	}
}

/* syscore suspend: snapshot every registered MPIC */
static int mpic_suspend(void)
{
	struct mpic *mpic = mpics;

	while (mpic) {
		mpic_suspend_one(mpic);
		mpic = mpic->next;
	}

	return 0;
}

/* Restore one MPIC's per-source registers, re-applying HT fixups */
static void mpic_resume_one(struct mpic *mpic)
{
	int i;

	for (i = 0; i < mpic->num_sources; i++) {
		mpic_irq_write(i, MPIC_INFO(IRQ_VECTOR_PRI),
			       mpic->save_data[i].vecprio);
		mpic_irq_write(i, MPIC_INFO(IRQ_DESTINATION),
			       mpic->save_data[i].dest);

#ifdef CONFIG_MPIC_U3_HT_IRQS
		if (mpic->fixups) {
			struct mpic_irq_fixup *fixup = &mpic->fixups[i];

			if (fixup->base) {
				/* we use the lowest bit in an inverted meaning */
				if ((mpic->save_data[i].fixup_data & 1) == 0)
					continue;

				/* Enable and configure */
				writeb(0x10 + 2 * fixup->index, fixup->base + 2);

				writel(mpic->save_data[i].fixup_data & ~1,
				       fixup->base + 4);
			}
		}
#endif
	} /* end for loop */
}

/* syscore resume: restore every registered MPIC */
static void mpic_resume(void)
{
	struct mpic *mpic = mpics;

	while (mpic) {
		mpic_resume_one(mpic);
		mpic = mpic->next;
	}
}

static struct syscore_ops mpic_syscore_ops = {
	.resume = mpic_resume,
	.suspend = mpic_suspend,
};

/* Register the suspend/resume hooks at device_initcall time */
static int mpic_init_sys(void)
{
	register_syscore_ops(&mpic_syscore_ops);
	return 0;
}

device_initcall(mpic_init_sys);
#endif