/*
 *  arch/powerpc/kernel/mpic.c
 *
 *  Driver for interrupt controllers following the OpenPIC standard, the
 *  common implementation being IBM's MPIC. This driver also can deal
 *  with various broken implementations of this HW.
 *
 *  Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp.
 *  Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 *  This file is subject to the terms and conditions of the GNU General Public
 *  License.  See the file COPYING in the main directory of this archive
 *  for more details.
 */

#undef DEBUG
#undef DEBUG_IPI
#undef DEBUG_IRQ
#undef DEBUG_LOW

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/bootmem.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>

#include <asm/ptrace.h>
#include <asm/signal.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/machdep.h>
#include <asm/mpic.h>
#include <asm/smp.h>

#include "mpic.h"

#ifdef DEBUG
#define DBG(fmt...) printk(fmt)
#else
#define DBG(fmt...)
#endif

static struct mpic *mpics;
static struct mpic *mpic_primary;
static DEFINE_RAW_SPINLOCK(mpic_lock);

#ifdef CONFIG_PPC32	/* XXX for now */
#ifdef CONFIG_IRQ_ALL_CPUS
#define distribute_irqs	(1)
#else
#define distribute_irqs	(0)
#endif
#endif

#ifdef CONFIG_MPIC_WEIRD
/* Per-variant register layout tables, indexed by MPIC_IDX_* via the
 * MPIC_INFO() macro below.  Entry [0] is the standard OpenPIC layout,
 * entry [1] the Tsi108/109 variant.  Both must list the same registers
 * in the same order (the MPIC_IDX_* order).
 */
static u32 mpic_infos[][MPIC_IDX_END] = {
	[0] = {	/* Original OpenPIC compatible MPIC */
		MPIC_GREG_BASE,
		MPIC_GREG_FEATURE_0,
		MPIC_GREG_GLOBAL_CONF_0,
		MPIC_GREG_VENDOR_ID,
		MPIC_GREG_IPI_VECTOR_PRI_0,
		MPIC_GREG_IPI_STRIDE,
		MPIC_GREG_SPURIOUS,
		MPIC_GREG_TIMER_FREQ,

		MPIC_TIMER_BASE,
		MPIC_TIMER_STRIDE,
		MPIC_TIMER_CURRENT_CNT,
		MPIC_TIMER_BASE_CNT,
		MPIC_TIMER_VECTOR_PRI,
		MPIC_TIMER_DESTINATION,

		MPIC_CPU_BASE,
		MPIC_CPU_STRIDE,
		MPIC_CPU_IPI_DISPATCH_0,
		MPIC_CPU_IPI_DISPATCH_STRIDE,
		MPIC_CPU_CURRENT_TASK_PRI,
		MPIC_CPU_WHOAMI,
		MPIC_CPU_INTACK,
		MPIC_CPU_EOI,
		MPIC_CPU_MCACK,

		MPIC_IRQ_BASE,
		MPIC_IRQ_STRIDE,
		MPIC_IRQ_VECTOR_PRI,
		MPIC_VECPRI_VECTOR_MASK,
		MPIC_VECPRI_POLARITY_POSITIVE,
		MPIC_VECPRI_POLARITY_NEGATIVE,
		MPIC_VECPRI_SENSE_LEVEL,
		MPIC_VECPRI_SENSE_EDGE,
		MPIC_VECPRI_POLARITY_MASK,
		MPIC_VECPRI_SENSE_MASK,
		MPIC_IRQ_DESTINATION
	},
	[1] = {	/* Tsi108/109 PIC */
		TSI108_GREG_BASE,
		TSI108_GREG_FEATURE_0,
		TSI108_GREG_GLOBAL_CONF_0,
		TSI108_GREG_VENDOR_ID,
		TSI108_GREG_IPI_VECTOR_PRI_0,
		TSI108_GREG_IPI_STRIDE,
		TSI108_GREG_SPURIOUS,
		TSI108_GREG_TIMER_FREQ,

		TSI108_TIMER_BASE,
		TSI108_TIMER_STRIDE,
		TSI108_TIMER_CURRENT_CNT,
		TSI108_TIMER_BASE_CNT,
		TSI108_TIMER_VECTOR_PRI,
		TSI108_TIMER_DESTINATION,

		TSI108_CPU_BASE,
		TSI108_CPU_STRIDE,
		TSI108_CPU_IPI_DISPATCH_0,
		TSI108_CPU_IPI_DISPATCH_STRIDE,
		TSI108_CPU_CURRENT_TASK_PRI,
		TSI108_CPU_WHOAMI,
		TSI108_CPU_INTACK,
		TSI108_CPU_EOI,
		TSI108_CPU_MCACK,

		TSI108_IRQ_BASE,
		TSI108_IRQ_STRIDE,
		TSI108_IRQ_VECTOR_PRI,
		TSI108_VECPRI_VECTOR_MASK,
		TSI108_VECPRI_POLARITY_POSITIVE,
		TSI108_VECPRI_POLARITY_NEGATIVE,
		TSI108_VECPRI_SENSE_LEVEL,
		TSI108_VECPRI_SENSE_EDGE,
		TSI108_VECPRI_POLARITY_MASK,
		TSI108_VECPRI_SENSE_MASK,
		TSI108_IRQ_DESTINATION
	},
};

#define MPIC_INFO(name) mpic->hw_set[MPIC_IDX_##name]

#else /* CONFIG_MPIC_WEIRD */

#define MPIC_INFO(name) MPIC_##name

#endif /* CONFIG_MPIC_WEIRD */

/* Hard (physical) CPU number whose per-CPU register bank we should use:
 * the current CPU for the primary controller, bank 0 otherwise.
 */
static inline unsigned int mpic_processor_id(struct mpic *mpic)
{
	unsigned int cpu = 0;

	if (mpic->flags & MPIC_PRIMARY)
		cpu = hard_smp_processor_id();

	return cpu;
}

/*
 * Register accessor functions
 */


/* Read one 32-bit register from a bank, dispatching on the access method
 * (DCR, big-endian MMIO or little-endian MMIO).  'reg' is a byte offset;
 * rb->base is a u32 pointer, hence the >> 2.
 */
static inline u32 _mpic_read(enum mpic_reg_type type,
			     struct mpic_reg_bank *rb,
			     unsigned int reg)
{
	switch(type) {
#ifdef CONFIG_PPC_DCR
	case mpic_access_dcr:
		return dcr_read(rb->dhost, reg);
#endif
	case mpic_access_mmio_be:
		return in_be32(rb->base + (reg >> 2));
	case mpic_access_mmio_le:
	default:
		return in_le32(rb->base + (reg >> 2));
	}
}

/* Write counterpart of _mpic_read(); same dispatch and offset rules. */
static inline void _mpic_write(enum mpic_reg_type type,
			       struct mpic_reg_bank *rb,
			       unsigned int reg, u32 value)
{
	switch(type) {
#ifdef CONFIG_PPC_DCR
	case mpic_access_dcr:
		dcr_write(rb->dhost, reg, value);
		break;
#endif
	case mpic_access_mmio_be:
		out_be32(rb->base + (reg >> 2), value);
		break;
	case mpic_access_mmio_le:
	default:
		out_le32(rb->base + (reg >> 2), value);
		break;
	}
}

/* Read an IPI vector/priority register.  On MPIC_BROKEN_IPI hardware the
 * IPI registers read back byte-swapped, so force a big-endian read there
 * (see mpic_test_broken_ipi()).
 */
static inline u32 _mpic_ipi_read(struct mpic *mpic, unsigned int ipi)
{
	enum mpic_reg_type type = mpic->reg_type;
	unsigned int offset = MPIC_INFO(GREG_IPI_VECTOR_PRI_0) +
			      (ipi * MPIC_INFO(GREG_IPI_STRIDE));

	if ((mpic->flags & MPIC_BROKEN_IPI) && type == mpic_access_mmio_le)
		type = mpic_access_mmio_be;
	return _mpic_read(type, &mpic->gregs, offset);
}

static inline void _mpic_ipi_write(struct mpic *mpic, unsigned int ipi, u32 value)
{
	unsigned int offset = MPIC_INFO(GREG_IPI_VECTOR_PRI_0) +
			      (ipi * MPIC_INFO(GREG_IPI_STRIDE));

	_mpic_write(mpic->reg_type, &mpic->gregs, offset, value);
}

/* Read a timer vector/priority register.  Timers 4..7 live in a second
 * register group above the first; adjust the offset accordingly.
 */
static inline u32 _mpic_tm_read(struct mpic *mpic, unsigned int tm)
{
	unsigned int offset = MPIC_INFO(TIMER_VECTOR_PRI) +
			      ((tm & 3) * MPIC_INFO(TIMER_STRIDE));

	if (tm >= 4)
		offset += 0x1000 / 4;

	return _mpic_read(mpic->reg_type, &mpic->tmregs, offset);
}

/* Write counterpart of _mpic_tm_read(); same second-group adjustment. */
static inline void _mpic_tm_write(struct mpic *mpic, unsigned int tm, u32 value)
{
	unsigned int offset = MPIC_INFO(TIMER_VECTOR_PRI) +
			      ((tm & 3) * MPIC_INFO(TIMER_STRIDE));

	if (tm >= 4)
		offset += 0x1000 / 4;

	_mpic_write(mpic->reg_type, &mpic->tmregs, offset, value);
}

/* Access the per-CPU register bank of the CPU we are running on. */
static inline u32 _mpic_cpu_read(struct mpic *mpic, unsigned int reg)
{
	unsigned int cpu = mpic_processor_id(mpic);

	return _mpic_read(mpic->reg_type, &mpic->cpuregs[cpu], reg);
}

static inline void _mpic_cpu_write(struct mpic *mpic, unsigned int reg, u32 value)
{
	unsigned int cpu = mpic_processor_id(mpic);

	_mpic_write(mpic->reg_type, &mpic->cpuregs[cpu], reg, value);
}

/* Read a per-source register.  The source number is split into an ISU
 * (interrupt source unit) index and an index within that ISU.  On
 * MPIC_BROKEN_REGREAD hardware, register 0 does not read back reliably,
 * so everything except the mask/activity bits comes from a shadow copy.
 */
static inline u32 _mpic_irq_read(struct mpic *mpic, unsigned int src_no, unsigned int reg)
{
	unsigned int	isu = src_no >> mpic->isu_shift;
	unsigned int	idx = src_no & mpic->isu_mask;
	unsigned int	val;

	val = _mpic_read(mpic->reg_type, &mpic->isus[isu],
			 reg + (idx * MPIC_INFO(IRQ_STRIDE)));
#ifdef CONFIG_MPIC_BROKEN_REGREAD
	if (reg == 0)
		val = (val & (MPIC_VECPRI_MASK | MPIC_VECPRI_ACTIVITY)) |
			mpic->isu_reg0_shadow[src_no];
#endif
	return val;
}

/* Write a per-source register, updating the reg-0 shadow copy on
 * MPIC_BROKEN_REGREAD hardware (see _mpic_irq_read()).
 */
static inline void _mpic_irq_write(struct mpic *mpic, unsigned int src_no,
				   unsigned int reg, u32 value)
{
	unsigned int	isu = src_no >> mpic->isu_shift;
	unsigned int	idx = src_no & mpic->isu_mask;

	_mpic_write(mpic->reg_type, &mpic->isus[isu],
		    reg + (idx * MPIC_INFO(IRQ_STRIDE)), value);

#ifdef CONFIG_MPIC_BROKEN_REGREAD
	if (reg == 0)
		mpic->isu_reg0_shadow[src_no] =
			value & ~(MPIC_VECPRI_MASK | MPIC_VECPRI_ACTIVITY);
#endif
}

/* Convenience wrappers; all assume a local variable named 'mpic'. */
#define mpic_read(b,r)		_mpic_read(mpic->reg_type,&(b),(r))
#define mpic_write(b,r,v)	_mpic_write(mpic->reg_type,&(b),(r),(v))
#define mpic_ipi_read(i)	_mpic_ipi_read(mpic,(i))
#define mpic_ipi_write(i,v)	_mpic_ipi_write(mpic,(i),(v))
#define mpic_tm_read(i)		_mpic_tm_read(mpic,(i))
#define mpic_tm_write(i,v)	_mpic_tm_write(mpic,(i),(v))
#define mpic_cpu_read(i)	_mpic_cpu_read(mpic,(i))
#define mpic_cpu_write(i,v)	_mpic_cpu_write(mpic,(i),(v))
#define mpic_irq_read(s,r)	_mpic_irq_read(mpic,(s),(r))
#define mpic_irq_write(s,r,v)	_mpic_irq_write(mpic,(s),(r),(v))


/*
 * Low level utility functions
 */


/* Map a register bank via MMIO; BUGs on failure (boot-time only). */
static void _mpic_map_mmio(struct mpic *mpic, phys_addr_t phys_addr,
			   struct mpic_reg_bank *rb, unsigned int offset,
			   unsigned int size)
{
	rb->base = ioremap(phys_addr + offset, size);
	BUG_ON(rb->base == NULL);
}

#ifdef CONFIG_PPC_DCR
/* Map a register bank via DCR, using the device-tree "dcr-reg" base. */
static void _mpic_map_dcr(struct mpic *mpic, struct device_node *node,
			  struct mpic_reg_bank *rb,
			  unsigned int offset, unsigned int size)
{
	const u32 *dbasep;

	dbasep = of_get_property(node, "dcr-reg", NULL);

	rb->dhost = dcr_map(node, *dbasep + offset, size);
	BUG_ON(!DCR_MAP_OK(rb->dhost));
}

static inline void mpic_map(struct mpic *mpic, struct device_node *node,
			    phys_addr_t phys_addr, struct mpic_reg_bank *rb,
			    unsigned int offset, unsigned int size)
{
	if (mpic->flags & MPIC_USES_DCR)
		_mpic_map_dcr(mpic, node, rb, offset, size);
	else
		_mpic_map_mmio(mpic, phys_addr, rb, offset, size);
}
#else /* CONFIG_PPC_DCR */
#define mpic_map(m,n,p,b,o,s)	_mpic_map_mmio(m,p,b,o,s)
#endif /* !CONFIG_PPC_DCR */
/* Check if we have one of those nice broken MPICs with a flipped endian on
 * reads from IPI registers
 */
static void __init mpic_test_broken_ipi(struct mpic *mpic)
{
	u32 r;

	/* Write a known pattern, read it back: a byte-swapped readback
	 * means the IPI registers are reversed on this implementation.
	 */
	mpic_write(mpic->gregs, MPIC_INFO(GREG_IPI_VECTOR_PRI_0), MPIC_VECPRI_MASK);
	r = mpic_read(mpic->gregs, MPIC_INFO(GREG_IPI_VECTOR_PRI_0));

	if (r == le32_to_cpu(MPIC_VECPRI_MASK)) {
		printk(KERN_INFO "mpic: Detected reversed IPI registers\n");
		mpic->flags |= MPIC_BROKEN_IPI;
	}
}

#ifdef CONFIG_MPIC_U3_HT_IRQS

/* Test if an interrupt is sourced from HyperTransport (used on broken U3s)
 * to force the edge setting on the MPIC and do the ack workaround.
 */
static inline int mpic_is_ht_interrupt(struct mpic *mpic, unsigned int source)
{
	if (source >= 128 || !mpic->fixups)
		return 0;
	return mpic->fixups[source].base != NULL;
}


/* EOI an HT interrupt by poking its HT irq capability block (or the
 * Apple-specific mirror registers when applebase is set).
 */
static inline void mpic_ht_end_irq(struct mpic *mpic, unsigned int source)
{
	struct mpic_irq_fixup *fixup = &mpic->fixups[source];

	if (fixup->applebase) {
		unsigned int soff = (fixup->index >> 3) & ~3;
		unsigned int mask = 1U << (fixup->index & 0x1f);
		writel(mask, fixup->applebase + soff);
	} else {
		raw_spin_lock(&mpic->fixup_lock);
		writeb(0x11 + 2 * fixup->index, fixup->base + 2);
		writel(fixup->data, fixup->base + 4);
		raw_spin_unlock(&mpic->fixup_lock);
	}
}

/* Enable the HT irq fixup for 'source' and program its level/edge bits. */
static void mpic_startup_ht_interrupt(struct mpic *mpic, unsigned int source,
				      bool level)
{
	struct mpic_irq_fixup *fixup = &mpic->fixups[source];
	unsigned long flags;
	u32 tmp;

	if (fixup->base == NULL)
		return;

	DBG("startup_ht_interrupt(0x%x) index: %d\n",
	    source, fixup->index);
	raw_spin_lock_irqsave(&mpic->fixup_lock, flags);
	/* Enable and configure */
	writeb(0x10 + 2 * fixup->index, fixup->base + 2);
	tmp = readl(fixup->base + 4);
	tmp &= ~(0x23U);
	if (level)
		tmp |= 0x22;
	writel(tmp, fixup->base + 4);
	raw_spin_unlock_irqrestore(&mpic->fixup_lock, flags);

#ifdef CONFIG_PM
	/* use the lowest bit inverted to the actual HW,
	 * set if this fixup was enabled, clear otherwise */
	mpic->save_data[source].fixup_data = tmp | 1;
#endif
}

/* Disable the HT irq fixup for 'source' (mask bit set in the HT block). */
static void mpic_shutdown_ht_interrupt(struct mpic *mpic, unsigned int source)
{
	struct mpic_irq_fixup *fixup = &mpic->fixups[source];
	unsigned long flags;
	u32 tmp;

	if (fixup->base == NULL)
		return;

	DBG("shutdown_ht_interrupt(0x%x)\n", source);

	/* Disable */
	raw_spin_lock_irqsave(&mpic->fixup_lock, flags);
	writeb(0x10 + 2 * fixup->index, fixup->base + 2);
	tmp = readl(fixup->base + 4);
	tmp |= 1;
	writel(tmp, fixup->base + 4);
	raw_spin_unlock_irqrestore(&mpic->fixup_lock, flags);

#ifdef CONFIG_PM
	/* use the lowest bit inverted to the actual HW,
	 * set if this fixup was enabled, clear otherwise */
	mpic->save_data[source].fixup_data = tmp & ~1;
#endif
}

#ifdef CONFIG_PCI_MSI
/* Walk the device's PCI capability list looking for an HT MSI mapping
 * capability and enable it if it is present but disabled.
 */
static void __init mpic_scan_ht_msi(struct mpic *mpic, u8 __iomem *devbase,
				    unsigned int devfn)
{
	u8 __iomem *base;
	u8 pos, flags;
	u64 addr = 0;

	for (pos = readb(devbase + PCI_CAPABILITY_LIST); pos != 0;
	     pos = readb(devbase + pos + PCI_CAP_LIST_NEXT)) {
		u8 id = readb(devbase + pos + PCI_CAP_LIST_ID);
		if (id == PCI_CAP_ID_HT) {
			id = readb(devbase + pos + 3);
			if ((id & HT_5BIT_CAP_MASK) == HT_CAPTYPE_MSI_MAPPING)
				break;
		}
	}

	if (pos == 0)
		return;

	base = devbase + pos;

	flags = readb(base + HT_MSI_FLAGS);
	if (!(flags & HT_MSI_FLAGS_FIXED)) {
		addr = readl(base + HT_MSI_ADDR_LO) & HT_MSI_ADDR_LO_MASK;
		addr = addr | ((u64)readl(base + HT_MSI_ADDR_HI) << 32);
	}

	printk(KERN_DEBUG "mpic: - HT:%02x.%x %s MSI mapping found @ 0x%llx\n",
		PCI_SLOT(devfn), PCI_FUNC(devfn),
		flags & HT_MSI_FLAGS_ENABLE ? "enabled" : "disabled", addr);

	if (!(flags & HT_MSI_FLAGS_ENABLE))
		writeb(flags | HT_MSI_FLAGS_ENABLE, base + HT_MSI_FLAGS);
}
#else
static void __init mpic_scan_ht_msi(struct mpic *mpic, u8 __iomem *devbase,
				    unsigned int devfn)
{
	return;
}
#endif

/* Walk the device's capability list for an HT irq capability and record
 * one fixup entry (base, index, saved EOI data) per interrupt it exposes.
 * All interrupts are masked here and unmasked later by the irq_chip.
 */
static void __init mpic_scan_ht_pic(struct mpic *mpic, u8 __iomem *devbase,
				    unsigned int devfn, u32 vdid)
{
	int i, irq, n;
	u8 __iomem *base;
	u32 tmp;
	u8 pos;

	for (pos = readb(devbase + PCI_CAPABILITY_LIST); pos != 0;
	     pos = readb(devbase + pos + PCI_CAP_LIST_NEXT)) {
		u8 id = readb(devbase + pos + PCI_CAP_LIST_ID);
		if (id == PCI_CAP_ID_HT) {
			id = readb(devbase + pos + 3);
			if ((id & HT_5BIT_CAP_MASK) == HT_CAPTYPE_IRQ)
				break;
		}
	}
	if (pos == 0)
		return;

	base = devbase + pos;
	writeb(0x01, base + 2);
	n = (readl(base + 4) >> 16) & 0xff;

	printk(KERN_INFO "mpic:   - HT:%02x.%x [0x%02x] vendor %04x device %04x"
	       " has %d irqs\n",
	       devfn >> 3, devfn & 0x7, pos, vdid & 0xffff, vdid >> 16, n + 1);

	for (i = 0; i <= n; i++) {
		writeb(0x10 + 2 * i, base + 2);
		tmp = readl(base + 4);
		irq = (tmp >> 16) & 0xff;
		DBG("HT PIC index 0x%x, irq 0x%x, tmp: %08x\n", i, irq, tmp);
		/* mask it , will be unmasked later */
		tmp |= 0x1;
		writel(tmp, base + 4);
		mpic->fixups[irq].index = i;
		mpic->fixups[irq].base = base;
		/* Apple HT PIC has a non-standard way of doing EOIs */
		if ((vdid & 0xffff) == 0x106b)
			mpic->fixups[irq].applebase = devbase + 0x60;
		else
			mpic->fixups[irq].applebase = NULL;
		writeb(0x11 + 2 * i, base + 2);
		mpic->fixups[irq].data = readl(base + 4) | 0x80000000;
	}
}


/* Scan all devfns on the primary bus for HT PIC/MSI capabilities and
 * populate the fixups array used by the HT workarounds above.
 */
static void __init mpic_scan_ht_pics(struct mpic *mpic)
{
	unsigned int devfn;
	u8 __iomem *cfgspace;

	printk(KERN_INFO "mpic: Setting up HT PICs workarounds for U3/U4\n");

	/* Allocate fixups array */
	mpic->fixups = kzalloc(128 * sizeof(*mpic->fixups), GFP_KERNEL);
	BUG_ON(mpic->fixups == NULL);

	/* Init spinlock */
	raw_spin_lock_init(&mpic->fixup_lock);

	/* Map U3 config space. We assume all IO-APICs are on the primary bus
	 * so we only need to map 64kB.
	 */
	cfgspace = ioremap(0xf2000000, 0x10000);
	BUG_ON(cfgspace == NULL);

	/* Now we scan all slots. We do a very quick scan, we read the header
	 * type, vendor ID and device ID only, that's plenty enough
	 */
	for (devfn = 0; devfn < 0x100; devfn++) {
		u8 __iomem *devbase = cfgspace + (devfn << 8);
		u8 hdr_type = readb(devbase + PCI_HEADER_TYPE);
		u32 l = readl(devbase + PCI_VENDOR_ID);
		u16 s;

		DBG("devfn %x, l: %x\n", devfn, l);

		/* If no device, skip */
		if (l == 0xffffffff || l == 0x00000000 ||
		    l == 0x0000ffff || l == 0xffff0000)
			goto next;
		/* Check if it supports capability lists */
		s = readw(devbase + PCI_STATUS);
		if (!(s & PCI_STATUS_CAP_LIST))
			goto next;

		mpic_scan_ht_pic(mpic, devbase, devfn, l);
		mpic_scan_ht_msi(mpic, devbase, devfn);

	next:
		/* next device, if function 0 */
		if (PCI_FUNC(devfn) == 0 && (hdr_type & 0x80) == 0)
			devfn += 7;
	}
}

#else /* CONFIG_MPIC_U3_HT_IRQS */

static inline int mpic_is_ht_interrupt(struct mpic *mpic, unsigned int source)
{
	return 0;
}

static void __init mpic_scan_ht_pics(struct mpic *mpic)
{
}

#endif /* CONFIG_MPIC_U3_HT_IRQS */

/* Find an mpic associated with a given linux interrupt */
static struct mpic *mpic_find(unsigned int irq)
{
	if (irq < NUM_ISA_INTERRUPTS)
		return NULL;

	return irq_get_chip_data(irq);
}

/* Determine if the linux irq is an IPI */
static unsigned int mpic_is_ipi(struct mpic *mpic, unsigned int irq)
{
	unsigned int src = virq_to_hw(irq);

	return (src >= mpic->ipi_vecs[0] && src <= mpic->ipi_vecs[3]);
}
/* Determine if the linux irq is a timer */
static unsigned int mpic_is_tm(struct mpic *mpic, unsigned int irq)
{
	unsigned int src = virq_to_hw(irq);

	return (src >= mpic->timer_vecs[0] && src <= mpic->timer_vecs[7]);
}

/* Convert a cpu mask from logical to physical cpu numbers. */
static inline u32 mpic_physmask(u32 cpumask)
{
	int i;
	u32 mask = 0;

	for (i = 0; i < min(32, NR_CPUS); ++i, cpumask >>= 1)
		mask |= (cpumask & 1) << get_hard_smp_processor_id(i);
	return mask;
}

#ifdef CONFIG_SMP
/* Get the mpic structure from the IPI number */
static inline struct mpic * mpic_from_ipi(struct irq_data *d)
{
	return irq_data_get_irq_chip_data(d);
}
#endif

/* Get the mpic structure from the irq number */
static inline struct mpic * mpic_from_irq(unsigned int irq)
{
	return irq_get_chip_data(irq);
}

/* Get the mpic structure from the irq data */
static inline struct mpic * mpic_from_irq_data(struct irq_data *d)
{
	return irq_data_get_irq_chip_data(d);
}

/* Send an EOI */
static inline void mpic_eoi(struct mpic *mpic)
{
	mpic_cpu_write(MPIC_INFO(CPU_EOI), 0);
	/* Read back to push the EOI out to the controller */
	(void)mpic_cpu_read(MPIC_INFO(CPU_WHOAMI));
}

/*
 * Linux descriptor level callbacks
 */


/* Unmask a source: clear the mask bit in its vector/priority register and
 * poll until the controller observes the change (bounded by 'loops').
 */
void mpic_unmask_irq(struct irq_data *d)
{
	unsigned int loops = 100000;
	struct mpic *mpic = mpic_from_irq_data(d);
	unsigned int src = irqd_to_hwirq(d);

	DBG("%p: %s: enable_irq: %d (src %d)\n", mpic, mpic->name, d->irq, src);

	mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI),
		       mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) &
		       ~MPIC_VECPRI_MASK);
	/* make sure mask gets to controller before we return to user */
	do {
		if (!loops--) {
			printk(KERN_ERR "%s: timeout on hwirq %u\n",
			       __func__, src);
			break;
		}
	} while(mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) & MPIC_VECPRI_MASK);
}

/* Mask a source: set the mask bit and poll until it sticks. */
void mpic_mask_irq(struct irq_data *d)
{
	unsigned int loops = 100000;
	struct mpic *mpic = mpic_from_irq_data(d);
	unsigned int src = irqd_to_hwirq(d);

	DBG("%s: disable_irq: %d (src %d)\n", mpic->name, d->irq, src);

	mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI),
		       mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) |
		       MPIC_VECPRI_MASK);

	/* make sure mask gets to controller before we return to user */
	do {
		if (!loops--) {
			printk(KERN_ERR "%s: timeout on hwirq %u\n",
			       __func__, src);
			break;
		}
	} while(!(mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) & MPIC_VECPRI_MASK));
}

void mpic_end_irq(struct irq_data *d)
{
	struct mpic *mpic = mpic_from_irq_data(d);

#ifdef DEBUG_IRQ
	DBG("%s: end_irq: %d\n", mpic->name, d->irq);
#endif
	/* We always EOI on end_irq() even for edge interrupts since that
	 * should only lower the priority, the MPIC should have properly
	 * latched another edge interrupt coming in anyway
	 */

	mpic_eoi(mpic);
}

#ifdef CONFIG_MPIC_U3_HT_IRQS

/* Unmask plus the HT EOI workaround for level interrupts. */
static void mpic_unmask_ht_irq(struct irq_data *d)
{
	struct mpic *mpic = mpic_from_irq_data(d);
	unsigned int src = irqd_to_hwirq(d);

	mpic_unmask_irq(d);

	if (irqd_is_level_type(d))
		mpic_ht_end_irq(mpic, src);
}

static unsigned int mpic_startup_ht_irq(struct irq_data *d)
{
	struct mpic *mpic = mpic_from_irq_data(d);
	unsigned int src = irqd_to_hwirq(d);

	mpic_unmask_irq(d);
	mpic_startup_ht_interrupt(mpic, src, irqd_is_level_type(d));

	return 0;
}

static void mpic_shutdown_ht_irq(struct irq_data *d)
{
	struct mpic *mpic = mpic_from_irq_data(d);
	unsigned int src = irqd_to_hwirq(d);

	mpic_shutdown_ht_interrupt(mpic, src);
	mpic_mask_irq(d);
}

static void mpic_end_ht_irq(struct irq_data *d)
{
	struct mpic *mpic = mpic_from_irq_data(d);
	unsigned int src = irqd_to_hwirq(d);

#ifdef DEBUG_IRQ
	DBG("%s: end_irq: %d\n", mpic->name, d->irq);
#endif
	/* We always EOI on end_irq() even for edge interrupts since that
	 * should only lower the priority, the MPIC should have properly
	 * latched another edge interrupt coming in anyway
	 */

	if (irqd_is_level_type(d))
		mpic_ht_end_irq(mpic, src);
	mpic_eoi(mpic);
}
#endif /* !CONFIG_MPIC_U3_HT_IRQS */

#ifdef CONFIG_SMP

static void mpic_unmask_ipi(struct irq_data *d)
{
	struct mpic *mpic = mpic_from_ipi(d);
	unsigned int src = virq_to_hw(d->irq) - mpic->ipi_vecs[0];

	DBG("%s: enable_ipi: %d (ipi %d)\n", mpic->name, d->irq, src);
	mpic_ipi_write(src, mpic_ipi_read(src) & ~MPIC_VECPRI_MASK);
}

static void mpic_mask_ipi(struct irq_data *d)
{
	/* NEVER disable an IPI... that's just plain wrong! */
}

static void mpic_end_ipi(struct irq_data *d)
{
	struct mpic *mpic = mpic_from_ipi(d);

	/*
	 * IPIs are marked IRQ_PER_CPU. This has the side effect of
	 * preventing the IRQ_PENDING/IRQ_INPROGRESS logic from
	 * applying to them. We EOI them late to avoid re-entering.
	 * We mark IPI's with IRQF_DISABLED as they must run with
	 * irqs disabled.
	 */
	mpic_eoi(mpic);
}

#endif /* CONFIG_SMP */
804 */ 805 mpic_eoi(mpic); 806 } 807 808 #endif /* CONFIG_SMP */ 809 810 static void mpic_unmask_tm(struct irq_data *d) 811 { 812 struct mpic *mpic = mpic_from_irq_data(d); 813 unsigned int src = virq_to_hw(d->irq) - mpic->timer_vecs[0]; 814 815 DBG("%s: enable_tm: %d (tm %d)\n", mpic->name, d->irq, src); 816 mpic_tm_write(src, mpic_tm_read(src) & ~MPIC_VECPRI_MASK); 817 mpic_tm_read(src); 818 } 819 820 static void mpic_mask_tm(struct irq_data *d) 821 { 822 struct mpic *mpic = mpic_from_irq_data(d); 823 unsigned int src = virq_to_hw(d->irq) - mpic->timer_vecs[0]; 824 825 mpic_tm_write(src, mpic_tm_read(src) | MPIC_VECPRI_MASK); 826 mpic_tm_read(src); 827 } 828 829 int mpic_set_affinity(struct irq_data *d, const struct cpumask *cpumask, 830 bool force) 831 { 832 struct mpic *mpic = mpic_from_irq_data(d); 833 unsigned int src = irqd_to_hwirq(d); 834 835 if (mpic->flags & MPIC_SINGLE_DEST_CPU) { 836 int cpuid = irq_choose_cpu(cpumask); 837 838 mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION), 1 << cpuid); 839 } else { 840 u32 mask = cpumask_bits(cpumask)[0]; 841 842 mask &= cpumask_bits(cpu_online_mask)[0]; 843 844 mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION), 845 mpic_physmask(mask)); 846 } 847 848 return 0; 849 } 850 851 static unsigned int mpic_type_to_vecpri(struct mpic *mpic, unsigned int type) 852 { 853 /* Now convert sense value */ 854 switch(type & IRQ_TYPE_SENSE_MASK) { 855 case IRQ_TYPE_EDGE_RISING: 856 return MPIC_INFO(VECPRI_SENSE_EDGE) | 857 MPIC_INFO(VECPRI_POLARITY_POSITIVE); 858 case IRQ_TYPE_EDGE_FALLING: 859 case IRQ_TYPE_EDGE_BOTH: 860 return MPIC_INFO(VECPRI_SENSE_EDGE) | 861 MPIC_INFO(VECPRI_POLARITY_NEGATIVE); 862 case IRQ_TYPE_LEVEL_HIGH: 863 return MPIC_INFO(VECPRI_SENSE_LEVEL) | 864 MPIC_INFO(VECPRI_POLARITY_POSITIVE); 865 case IRQ_TYPE_LEVEL_LOW: 866 default: 867 return MPIC_INFO(VECPRI_SENSE_LEVEL) | 868 MPIC_INFO(VECPRI_POLARITY_NEGATIVE); 869 } 870 } 871 872 int mpic_set_irq_type(struct irq_data *d, unsigned int flow_type) 873 { 874 struct 
mpic *mpic = mpic_from_irq_data(d); 875 unsigned int src = irqd_to_hwirq(d); 876 unsigned int vecpri, vold, vnew; 877 878 DBG("mpic: set_irq_type(mpic:@%p,virq:%d,src:0x%x,type:0x%x)\n", 879 mpic, d->irq, src, flow_type); 880 881 if (src >= mpic->irq_count) 882 return -EINVAL; 883 884 if (flow_type == IRQ_TYPE_NONE) 885 if (mpic->senses && src < mpic->senses_count) 886 flow_type = mpic->senses[src]; 887 if (flow_type == IRQ_TYPE_NONE) 888 flow_type = IRQ_TYPE_LEVEL_LOW; 889 890 irqd_set_trigger_type(d, flow_type); 891 892 if (mpic_is_ht_interrupt(mpic, src)) 893 vecpri = MPIC_VECPRI_POLARITY_POSITIVE | 894 MPIC_VECPRI_SENSE_EDGE; 895 else 896 vecpri = mpic_type_to_vecpri(mpic, flow_type); 897 898 vold = mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)); 899 vnew = vold & ~(MPIC_INFO(VECPRI_POLARITY_MASK) | 900 MPIC_INFO(VECPRI_SENSE_MASK)); 901 vnew |= vecpri; 902 if (vold != vnew) 903 mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI), vnew); 904 905 return IRQ_SET_MASK_OK_NOCOPY;; 906 } 907 908 void mpic_set_vector(unsigned int virq, unsigned int vector) 909 { 910 struct mpic *mpic = mpic_from_irq(virq); 911 unsigned int src = virq_to_hw(virq); 912 unsigned int vecpri; 913 914 DBG("mpic: set_vector(mpic:@%p,virq:%d,src:%d,vector:0x%x)\n", 915 mpic, virq, src, vector); 916 917 if (src >= mpic->irq_count) 918 return; 919 920 vecpri = mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)); 921 vecpri = vecpri & ~MPIC_INFO(VECPRI_VECTOR_MASK); 922 vecpri |= vector; 923 mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI), vecpri); 924 } 925 926 void mpic_set_destination(unsigned int virq, unsigned int cpuid) 927 { 928 struct mpic *mpic = mpic_from_irq(virq); 929 unsigned int src = virq_to_hw(virq); 930 931 DBG("mpic: set_destination(mpic:@%p,virq:%d,src:%d,cpuid:0x%x)\n", 932 mpic, virq, src, cpuid); 933 934 if (src >= mpic->irq_count) 935 return; 936 937 mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION), 1 << cpuid); 938 } 939 940 static struct irq_chip mpic_irq_chip = { 941 .irq_mask = 
#ifdef CONFIG_SMP
static struct irq_chip mpic_ipi_chip = {
	.irq_mask	= mpic_mask_ipi,
	.irq_unmask	= mpic_unmask_ipi,
	.irq_eoi	= mpic_end_ipi,
};
#endif /* CONFIG_SMP */

static struct irq_chip mpic_tm_chip = {
	.irq_mask	= mpic_mask_tm,
	.irq_unmask	= mpic_unmask_tm,
	.irq_eoi	= mpic_end_irq,
};

#ifdef CONFIG_MPIC_U3_HT_IRQS
static struct irq_chip mpic_irq_ht_chip = {
	.irq_startup	= mpic_startup_ht_irq,
	.irq_shutdown	= mpic_shutdown_ht_irq,
	.irq_mask	= mpic_mask_irq,
	.irq_unmask	= mpic_unmask_ht_irq,
	.irq_eoi	= mpic_end_ht_irq,
	.irq_set_type	= mpic_set_irq_type,
};
#endif /* CONFIG_MPIC_U3_HT_IRQS */


static int mpic_host_match(struct irq_host *h, struct device_node *node)
{
	/* Exact match, unless mpic node is NULL */
	return h->of_node == NULL || h->of_node == node;
}

/* irq_host ->map callback: bind a virq to a hardware source, selecting
 * the right irq_chip (normal, HT, IPI or timer) and flow handler.
 */
static int mpic_host_map(struct irq_host *h, unsigned int virq,
			 irq_hw_number_t hw)
{
	struct mpic *mpic = h->host_data;
	struct irq_chip *chip;

	DBG("mpic: map virq %d, hwirq 0x%lx\n", virq, hw);

	if (hw == mpic->spurious_vec)
		return -EINVAL;
	if (mpic->protected && test_bit(hw, mpic->protected))
		return -EINVAL;

#ifdef CONFIG_SMP
	else if (hw >= mpic->ipi_vecs[0]) {
		WARN_ON(!(mpic->flags & MPIC_PRIMARY));

		DBG("mpic: mapping as IPI\n");
		irq_set_chip_data(virq, mpic);
		irq_set_chip_and_handler(virq, &mpic->hc_ipi,
					 handle_percpu_irq);
		return 0;
	}
#endif /* CONFIG_SMP */

	if (hw >= mpic->timer_vecs[0] && hw <= mpic->timer_vecs[7]) {
		WARN_ON(!(mpic->flags & MPIC_PRIMARY));

		DBG("mpic: mapping as timer\n");
		irq_set_chip_data(virq, mpic);
		irq_set_chip_and_handler(virq, &mpic->hc_tm,
					 handle_fasteoi_irq);
		return 0;
	}

	if (hw >= mpic->irq_count)
		return -EINVAL;

	mpic_msi_reserve_hwirq(mpic, hw);

	/* Default chip */
	chip = &mpic->hc_irq;

#ifdef CONFIG_MPIC_U3_HT_IRQS
	/* Check for HT interrupts, override vecpri */
	if (mpic_is_ht_interrupt(mpic, hw))
		chip = &mpic->hc_ht_irq;
#endif /* CONFIG_MPIC_U3_HT_IRQS */

	DBG("mpic: mapping to irq chip @%p\n", chip);

	irq_set_chip_data(virq, mpic);
	irq_set_chip_and_handler(virq, chip, handle_fasteoi_irq);

	/* Set default irq type */
	irq_set_irq_type(virq, IRQ_TYPE_NONE);

	/* If the MPIC was reset, then all vectors have already been
	 * initialized.  Otherwise, a per source lazy initialization
	 * is done here.
	 */
	if (!mpic_is_ipi(mpic, hw) && (mpic->flags & MPIC_NO_RESET)) {
		mpic_set_vector(virq, hw);
		mpic_set_destination(virq, mpic_processor_id(mpic));
		mpic_irq_set_priority(virq, 8);
	}

	return 0;
}

/* irq_host ->xlate callback: decode a device-tree interrupt specifier
 * into a hardware source number and linux trigger flags.
 */
static int mpic_host_xlate(struct irq_host *h, struct device_node *ct,
			   const u32 *intspec, unsigned int intsize,
			   irq_hw_number_t *out_hwirq, unsigned int *out_flags)

{
	struct mpic *mpic = h->host_data;
	static unsigned char map_mpic_senses[4] = {
		IRQ_TYPE_EDGE_RISING,
		IRQ_TYPE_LEVEL_LOW,
		IRQ_TYPE_LEVEL_HIGH,
		IRQ_TYPE_EDGE_FALLING,
	};

	*out_hwirq = intspec[0];
	if (intsize >= 4 && (mpic->flags & MPIC_FSL)) {
		/*
		 * Freescale MPIC with extended intspec:
		 * First two cells are as usual.  Third specifies
		 * an "interrupt type".  Fourth is type-specific data.
		 *
		 * See Documentation/devicetree/bindings/powerpc/fsl/mpic.txt
		 */
		switch (intspec[2]) {
		case 0:
		case 1: /* no EISR/EIMR support for now, treat as shared IRQ */
			break;
		case 2:
			if (intspec[0] >= ARRAY_SIZE(mpic->ipi_vecs))
				return -EINVAL;

			*out_hwirq = mpic->ipi_vecs[intspec[0]];
			break;
		case 3:
			if (intspec[0] >= ARRAY_SIZE(mpic->timer_vecs))
				return -EINVAL;

			*out_hwirq = mpic->timer_vecs[intspec[0]];
			break;
		default:
			pr_debug("%s: unknown irq type %u\n",
				 __func__, intspec[2]);
			return -EINVAL;
		}

		*out_flags = map_mpic_senses[intspec[1] & 3];
	} else if (intsize > 1) {
		u32 mask = 0x3;

		/* Apple invented a new race of encoding on machines with
		 * an HT APIC. They encode, among others, the index within
		 * the HT APIC. We don't care about it here since thankfully,
		 * it appears that they have the APIC already properly
		 * configured, and thus our current fixup code that reads the
		 * APIC config works fine. However, we still need to mask out
		 * bits in the specifier to make sure we only get bit 0 which
		 * is the level/edge bit (the only sense bit exposed by Apple),
		 * as their bit 1 means something else.
		 */
		if (machine_is(powermac))
			mask = 0x1;
		*out_flags = map_mpic_senses[intspec[1] & mask];
	} else
		*out_flags = IRQ_TYPE_NONE;

	DBG("mpic: xlate (%d cells: 0x%08x 0x%08x) to line 0x%lx sense 0x%x\n",
	    intsize, intspec[0], intspec[1], *out_hwirq, *out_flags);

	return 0;
}

static struct irq_host_ops mpic_host_ops = {
	.match = mpic_host_match,
	.map = mpic_host_map,
	.xlate = mpic_host_xlate,
};

/* True when the device-tree node forbids resetting the PIC at init. */
static int mpic_reset_prohibited(struct device_node *node)
{
	return node && of_get_property(node, "pic-no-reset", NULL);
}

/*
 * Exported functions
 */

/* NOTE(review): mpic_alloc() continues beyond the end of this chunk; only
 * its initial setup is visible here.
 */
struct mpic * __init mpic_alloc(struct device_node *node,
				phys_addr_t phys_addr,
				unsigned int flags,
				unsigned int isu_size,
				unsigned int irq_count,
				const char *name)
{
	struct mpic *mpic;
	u32 greg_feature;
	const char *vers;
	int i;
	int intvec_top;
	u64 paddr = phys_addr;

	mpic = kzalloc(sizeof(struct mpic), GFP_KERNEL);
	if (mpic == NULL)
		return NULL;

	mpic->name = name;

	mpic->hc_irq = mpic_irq_chip;
	mpic->hc_irq.name = name;
	if (flags & MPIC_PRIMARY)
		mpic->hc_irq.irq_set_affinity = mpic_set_affinity;
#ifdef CONFIG_MPIC_U3_HT_IRQS
	mpic->hc_ht_irq = mpic_irq_ht_chip;
	mpic->hc_ht_irq.name = name;
	if (flags & MPIC_PRIMARY)
		mpic->hc_ht_irq.irq_set_affinity = mpic_set_affinity;
#endif /* CONFIG_MPIC_U3_HT_IRQS */

#ifdef CONFIG_SMP
	mpic->hc_ipi = mpic_ipi_chip;
	mpic->hc_ipi.name = name;
#endif /* CONFIG_SMP */

	mpic->hc_tm = mpic_tm_chip;
	mpic->hc_tm.name = name;

	mpic->flags = flags;
	mpic->isu_size = isu_size;
	mpic->irq_count = irq_count;
	mpic->num_sources = 0; /* so far */

	if (flags & MPIC_LARGE_VECTORS)
		intvec_top = 2047;
	else
		intvec_top = 255;

	mpic->timer_vecs[0]
			    = intvec_top - 12;
	mpic->timer_vecs[1] = intvec_top - 11;
	mpic->timer_vecs[2] = intvec_top - 10;
	mpic->timer_vecs[3] = intvec_top - 9;
	mpic->timer_vecs[4] = intvec_top - 8;
	mpic->timer_vecs[5] = intvec_top - 7;
	mpic->timer_vecs[6] = intvec_top - 6;
	mpic->timer_vecs[7] = intvec_top - 5;
	mpic->ipi_vecs[0] = intvec_top - 4;
	mpic->ipi_vecs[1] = intvec_top - 3;
	mpic->ipi_vecs[2] = intvec_top - 2;
	mpic->ipi_vecs[3] = intvec_top - 1;
	mpic->spurious_vec = intvec_top;

	/* Check for "big-endian" in device-tree */
	if (node && of_get_property(node, "big-endian", NULL) != NULL)
		mpic->flags |= MPIC_BIG_ENDIAN;
	if (node && of_device_is_compatible(node, "fsl,mpic"))
		mpic->flags |= MPIC_FSL;

	/* Look for protected sources: a bitmap (one bit per vector) of
	 * sources this kernel must not touch or service. */
	if (node) {
		int psize;
		unsigned int bits, mapsize;
		const u32 *psrc =
			of_get_property(node, "protected-sources", &psize);
		if (psrc) {
			psize /= 4;	/* property length in bytes -> u32 count */
			bits = intvec_top + 1;
			mapsize = BITS_TO_LONGS(bits) * sizeof(unsigned long);
			mpic->protected = kzalloc(mapsize, GFP_KERNEL);
			BUG_ON(mpic->protected == NULL);
			for (i = 0; i < psize; i++) {
				/* silently ignore out-of-range entries */
				if (psrc[i] > intvec_top)
					continue;
				__set_bit(psrc[i], mpic->protected);
			}
		}
	}

#ifdef CONFIG_MPIC_WEIRD
	/* Select the register-layout table for this flavour of MPIC */
	mpic->hw_set = mpic_infos[MPIC_GET_REGSET(flags)];
#endif

	/* default register type */
	mpic->reg_type = (flags & MPIC_BIG_ENDIAN) ?
		mpic_access_mmio_be : mpic_access_mmio_le;

	/* If no physical address is passed in, a device-node is mandatory */
	BUG_ON(paddr == 0 && node == NULL);

	/* If no physical address passed in, check if it's dcr based */
	if (paddr == 0 && of_get_property(node, "dcr-reg", NULL) != NULL) {
#ifdef CONFIG_PPC_DCR
		mpic->flags |= MPIC_USES_DCR;
		mpic->reg_type = mpic_access_dcr;
#else
		BUG();	/* dcr-reg present but kernel built without DCR support */
#endif /* CONFIG_PPC_DCR */
	}

	/* If the MPIC is not DCR based, and no physical address was passed
	 * in, try to obtain one
	 */
	if (paddr == 0 && !(mpic->flags & MPIC_USES_DCR)) {
		const u32 *reg = of_get_property(node, "reg", NULL);
		BUG_ON(reg == NULL);
		paddr = of_translate_address(node, reg);
		BUG_ON(paddr == OF_BAD_ADDR);
	}

	/* Map the global registers */
	mpic_map(mpic, node, paddr, &mpic->gregs, MPIC_INFO(GREG_BASE), 0x1000);
	mpic_map(mpic, node, paddr, &mpic->tmregs, MPIC_INFO(TIMER_BASE), 0x1000);

	/* Reset */

	/* When using a device-node, reset requests are only honored if the MPIC
	 * is allowed to reset.
	 */
	if (mpic_reset_prohibited(node))
		mpic->flags |= MPIC_NO_RESET;

	if ((flags & MPIC_WANTS_RESET) && !(mpic->flags & MPIC_NO_RESET)) {
		printk(KERN_DEBUG "mpic: Resetting\n");
		mpic_write(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0),
			   mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0))
			   | MPIC_GREG_GCONF_RESET);
		/* busy-wait until the controller clears the reset bit */
		while( mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0))
		       & MPIC_GREG_GCONF_RESET)
			mb();
	}

	/* CoreInt */
	if (flags & MPIC_ENABLE_COREINT)
		mpic_write(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0),
			   mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0))
			   | MPIC_GREG_GCONF_COREINT);

	if (flags & MPIC_ENABLE_MCK)
		mpic_write(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0),
			   mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0))
			   | MPIC_GREG_GCONF_MCK);

	/* Read feature register, calculate num CPUs and, for non-ISU
	 * MPICs, num sources as well. On ISU MPICs, sources are counted
	 * as ISUs are added
	 */
	greg_feature = mpic_read(mpic->gregs, MPIC_INFO(GREG_FEATURE_0));
	mpic->num_cpus = ((greg_feature & MPIC_GREG_FEATURE_LAST_CPU_MASK)
			  >> MPIC_GREG_FEATURE_LAST_CPU_SHIFT) + 1;
	if (isu_size == 0) {
		/* some controllers report a bogus source count in FRR;
		 * trust the caller-supplied irq_count instead */
		if (flags & MPIC_BROKEN_FRR_NIRQS)
			mpic->num_sources = mpic->irq_count;
		else
			mpic->num_sources =
				((greg_feature & MPIC_GREG_FEATURE_LAST_SRC_MASK)
				 >> MPIC_GREG_FEATURE_LAST_SRC_SHIFT) + 1;
	}

	/* Map the per-CPU registers */
	for (i = 0; i < mpic->num_cpus; i++) {
		mpic_map(mpic, node, paddr, &mpic->cpuregs[i],
			 MPIC_INFO(CPU_BASE) + i * MPIC_INFO(CPU_STRIDE),
			 0x1000);
	}

	/* Initialize main ISU if none provided */
	if (mpic->isu_size == 0) {
		mpic->isu_size = mpic->num_sources;
		mpic_map(mpic, node, paddr, &mpic->isus[0],
			 MPIC_INFO(IRQ_BASE), MPIC_INFO(IRQ_STRIDE) * mpic->isu_size);
	}
	/* shift/mask used to split a hw irq number into (ISU index, offset) */
	mpic->isu_shift = 1 + __ilog2(mpic->isu_size -
				      1);
	mpic->isu_mask = (1 << mpic->isu_shift) - 1;

	/* Create the linear irq host covering all possible hw vectors */
	mpic->irqhost = irq_alloc_host(node, IRQ_HOST_MAP_LINEAR,
				       isu_size ? isu_size : mpic->num_sources,
				       &mpic_host_ops,
				       flags & MPIC_LARGE_VECTORS ? 2048 : 256);
	/* NOTE(review): the struct mpic allocated earlier (and any
	 * protected-sources bitmap) is not freed on this failure path */
	if (mpic->irqhost == NULL)
		return NULL;

	mpic->irqhost->host_data = mpic;

	/* Display version */
	switch (greg_feature & MPIC_GREG_FEATURE_VERSION_MASK) {
	case 1:
		vers = "1.0";
		break;
	case 2:
		vers = "1.2";
		break;
	case 3:
		vers = "1.3";
		break;
	default:
		vers = "<unknown>";
		break;
	}
	printk(KERN_INFO "mpic: Setting up MPIC \"%s\" version %s at %llx,"
	       " max %d CPUs\n",
	       name, vers, (unsigned long long)paddr, mpic->num_cpus);
	printk(KERN_INFO "mpic: ISU size: %d, shift: %d, mask: %x\n",
	       mpic->isu_size, mpic->isu_shift, mpic->isu_mask);

	/* link into the global list of controllers */
	mpic->next = mpics;
	mpics = mpic;

	if (flags & MPIC_PRIMARY) {
		mpic_primary = mpic;
		irq_set_default_host(mpic->irqhost);
	}

	return mpic;
}

/*
 * Map one external interrupt source unit (ISU) at physical address
 * @paddr into slot @isu_num, growing num_sources to cover it.
 */
void __init mpic_assign_isu(struct mpic *mpic, unsigned int isu_num,
			    phys_addr_t paddr)
{
	unsigned int isu_first = isu_num * mpic->isu_size;

	BUG_ON(isu_num >= MPIC_MAX_ISU);

	mpic_map(mpic, mpic->irqhost->of_node,
		 paddr, &mpic->isus[isu_num], 0,
		 MPIC_INFO(IRQ_STRIDE) * mpic->isu_size);

	if ((isu_first + mpic->isu_size) > mpic->num_sources)
		mpic->num_sources = isu_first + mpic->isu_size;
}

/* Install a platform-provided table of default sense values. */
void __init mpic_set_default_senses(struct mpic *mpic, u8 *senses, int count)
{
	mpic->senses = senses;
	mpic->senses_count = count;
}

/*
 * Bring the controller to a known state: mask the timers and IPIs on
 * their reserved vectors, program every (non-protected) source masked
 * with vector == source number targeting the CPU returned by
 * mpic_processor_id() (skipped entirely when MPIC_NO_RESET is set),
 * set the spurious vector and finally drop the task priority to 0.
 */
void __init mpic_init(struct mpic *mpic)
{
	int i;
	int cpu;

	BUG_ON(mpic->num_sources == 0);

	printk(KERN_INFO "mpic: Initializing for %d sources\n", mpic->num_sources);

	/* Set current processor priority to max */
	mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), 0xf);

	/* Initialize timers to our reserved vectors and mask them for now */
	for (i = 0; i < 4; i++) {
		mpic_write(mpic->tmregs,
			   i * MPIC_INFO(TIMER_STRIDE) +
			   MPIC_INFO(TIMER_DESTINATION),
			   1 << hard_smp_processor_id());
		mpic_write(mpic->tmregs,
			   i * MPIC_INFO(TIMER_STRIDE) +
			   MPIC_INFO(TIMER_VECTOR_PRI),
			   MPIC_VECPRI_MASK |
			   (9 << MPIC_VECPRI_PRIORITY_SHIFT) |
			   (mpic->timer_vecs[0] + i));
	}

	/* Initialize IPIs to our reserved vectors and mark them disabled for now */
	mpic_test_broken_ipi(mpic);
	for (i = 0; i < 4; i++) {
		mpic_ipi_write(i,
			       MPIC_VECPRI_MASK |
			       (10 << MPIC_VECPRI_PRIORITY_SHIFT) |
			       (mpic->ipi_vecs[0] + i));
	}

	/* Initialize interrupt sources */
	if (mpic->irq_count == 0)
		mpic->irq_count = mpic->num_sources;

	/* Do the HT PIC fixups on U3 broken mpic */
	DBG("MPIC flags: %x\n", mpic->flags);
	if ((mpic->flags & MPIC_U3_HT_IRQS) && (mpic->flags & MPIC_PRIMARY)) {
		mpic_scan_ht_pics(mpic);
		mpic_u3msi_init(mpic);
	}

	mpic_pasemi_msi_init(mpic);

	cpu = mpic_processor_id(mpic);

	/* Skip per-source init when resets are prohibited — presumably the
	 * firmware-programmed state must be preserved; TODO confirm */
	if (!(mpic->flags & MPIC_NO_RESET)) {
		for (i = 0; i < mpic->num_sources; i++) {
			/* start with vector = source number, and masked */
			u32 vecpri = MPIC_VECPRI_MASK | i |
				(8 << MPIC_VECPRI_PRIORITY_SHIFT);

			/* check if protected */
			if (mpic->protected && test_bit(i, mpic->protected))
				continue;
			/* init hw */
			mpic_irq_write(i, MPIC_INFO(IRQ_VECTOR_PRI), vecpri);
			mpic_irq_write(i, MPIC_INFO(IRQ_DESTINATION), 1 << cpu);
		}
	}

	/* Init spurious vector */
	mpic_write(mpic->gregs, MPIC_INFO(GREG_SPURIOUS), mpic->spurious_vec);

	/* Disable 8259 passthrough, if supported */
	if (!(mpic->flags & MPIC_NO_PTHROU_DIS))
		mpic_write(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0),
			   mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0))
			   | MPIC_GREG_GCONF_8259_PTHROU_DIS);

	if (mpic->flags & MPIC_NO_BIAS)
		mpic_write(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0),
			   mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0))
			   | MPIC_GREG_GCONF_NO_BIAS);

	/* Set current processor priority to 0 */
	mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), 0);

#ifdef CONFIG_PM
	/* allocate memory to save mpic state */
	mpic->save_data = kmalloc(mpic->num_sources * sizeof(*mpic->save_data),
				  GFP_KERNEL);
	BUG_ON(mpic->save_data == NULL);
#endif
}

/* Program the clock-ratio field of global configuration register 1. */
void __init mpic_set_clk_ratio(struct mpic *mpic, u32 clock_ratio)
{
	u32 v;

	v = mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_1);
	v &= ~MPIC_GREG_GLOBAL_CONF_1_CLK_RATIO_MASK;
	v |= MPIC_GREG_GLOBAL_CONF_1_CLK_RATIO(clock_ratio);
	mpic_write(mpic->gregs, MPIC_GREG_GLOBAL_CONF_1, v);
}

/* Set or clear the serial-interrupt enable bit (SIE) under mpic_lock. */
void __init mpic_set_serial_int(struct mpic *mpic, int enable)
{
	unsigned long flags;
	u32 v;

	raw_spin_lock_irqsave(&mpic_lock, flags);
	v = mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_1);
	if (enable)
		v |= MPIC_GREG_GLOBAL_CONF_1_SIE;
	else
		v &= ~MPIC_GREG_GLOBAL_CONF_1_SIE;
	mpic_write(mpic->gregs, MPIC_GREG_GLOBAL_CONF_1, v);
	raw_spin_unlock_irqrestore(&mpic_lock, flags);
}

/*
 * Rewrite the priority field of the vector/priority register backing
 * virtual irq @irq.  Dispatches on whether the source is an IPI, a
 * timer or a regular source; silently returns when @irq does not
 * belong to any registered mpic.
 */
void mpic_irq_set_priority(unsigned int irq, unsigned int pri)
{
	struct mpic *mpic = mpic_find(irq);
	unsigned int src = virq_to_hw(irq);
	unsigned long flags;
	u32 reg;

	if (!mpic)
		return;

	raw_spin_lock_irqsave(&mpic_lock, flags);
	if (mpic_is_ipi(mpic, irq)) {
		reg = mpic_ipi_read(src - mpic->ipi_vecs[0]) &
			~MPIC_VECPRI_PRIORITY_MASK;
		mpic_ipi_write(src - mpic->ipi_vecs[0],
			       reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT));
	} else if (mpic_is_tm(mpic, irq)) {
		reg = mpic_tm_read(src -
				   mpic->timer_vecs[0]) &
			~MPIC_VECPRI_PRIORITY_MASK;
		mpic_tm_write(src - mpic->timer_vecs[0],
			      reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT));
	} else {
		reg = mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI))
			& ~MPIC_VECPRI_PRIORITY_MASK;
		mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI),
			       reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT));
	}
	raw_spin_unlock_irqrestore(&mpic_lock, flags);
}

/*
 * Per-CPU bring-up on the primary MPIC: optionally add this CPU to
 * every source's destination mask, then lower this CPU's task priority
 * to 0 so interrupts can be delivered.
 */
void mpic_setup_this_cpu(void)
{
#ifdef CONFIG_SMP
	struct mpic *mpic = mpic_primary;
	unsigned long flags;
	u32 msk = 1 << hard_smp_processor_id();
	unsigned int i;

	BUG_ON(mpic == NULL);

	DBG("%s: setup_this_cpu(%d)\n", mpic->name, hard_smp_processor_id());

	raw_spin_lock_irqsave(&mpic_lock, flags);

	/* let the mpic know we want intrs. default affinity is 0xffffffff
	 * until changed via /proc. That's how it's done on x86. If we want
	 * it differently, then we should make sure we also change the default
	 * values of irq_desc[].affinity in irq.c.
	 */
	if (distribute_irqs) {
		for (i = 0; i < mpic->num_sources ; i++)
			mpic_irq_write(i, MPIC_INFO(IRQ_DESTINATION),
				mpic_irq_read(i, MPIC_INFO(IRQ_DESTINATION)) | msk);
	}

	/* Set current processor priority to 0 */
	mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), 0);

	raw_spin_unlock_irqrestore(&mpic_lock, flags);
#endif /* CONFIG_SMP */
}

/* Read this CPU's current task priority register on the primary MPIC. */
int mpic_cpu_get_priority(void)
{
	struct mpic *mpic = mpic_primary;

	/* 'mpic' is presumably referenced by the mpic_cpu_read() macro —
	 * otherwise it would be unused here */
	return mpic_cpu_read(MPIC_INFO(CPU_CURRENT_TASK_PRI));
}

/* Write this CPU's current task priority register on the primary MPIC. */
void mpic_cpu_set_priority(int prio)
{
	struct mpic *mpic = mpic_primary;

	prio &= MPIC_CPU_TASKPRI_MASK;
	mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), prio);
}

/*
 * Per-CPU teardown: remove this CPU from every source's destination
 * mask, raise the task priority to 0xf (masking delivery) and issue an
 * EOI.  NOTE(review): the 'secondary' parameter is unused here.
 */
void mpic_teardown_this_cpu(int secondary)
{
	struct mpic *mpic = mpic_primary;
	unsigned long flags;
	u32 msk = 1 << hard_smp_processor_id();
	unsigned int i;

	BUG_ON(mpic == NULL);

	DBG("%s: teardown_this_cpu(%d)\n", mpic->name, hard_smp_processor_id());
	raw_spin_lock_irqsave(&mpic_lock, flags);

	/* let the mpic know we don't want intrs.  */
	for (i = 0; i < mpic->num_sources ; i++)
		mpic_irq_write(i, MPIC_INFO(IRQ_DESTINATION),
			mpic_irq_read(i, MPIC_INFO(IRQ_DESTINATION)) & ~msk);

	/* Set current processor priority to max */
	mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), 0xf);
	/* We need to EOI the IPI since not all platforms reset the MPIC
	 * on boot and new interrupts wouldn't get delivered otherwise.
	 */
	mpic_eoi(mpic);

	raw_spin_unlock_irqrestore(&mpic_lock, flags);
}


/*
 * Fetch one pending interrupt by reading the given acknowledge register
 * (INTACK or MCACK) and map the hardware vector to a Linux virq.
 * Returns NO_IRQ for the spurious vector or for a protected source.
 */
static unsigned int _mpic_get_one_irq(struct mpic *mpic, int reg)
{
	u32 src;

	src = mpic_cpu_read(reg) & MPIC_INFO(VECPRI_VECTOR_MASK);
#ifdef DEBUG_LOW
	DBG("%s: get_one_irq(reg 0x%x): %d\n", mpic->name, reg, src);
#endif
	if (unlikely(src == mpic->spurious_vec)) {
		/* some controllers need an EOI even for spurious vectors */
		if (mpic->flags & MPIC_SPV_EOI)
			mpic_eoi(mpic);
		return NO_IRQ;
	}
	if (unlikely(mpic->protected && test_bit(src, mpic->protected))) {
		if (printk_ratelimit())
			printk(KERN_WARNING "%s: Got protected source %d !\n",
			       mpic->name, (int)src);
		mpic_eoi(mpic);
		return NO_IRQ;
	}

	return irq_linear_revmap(mpic->irqhost, src);
}

/* Acknowledge and return the next pending irq on @mpic via INTACK. */
unsigned int mpic_get_one_irq(struct mpic *mpic)
{
	return _mpic_get_one_irq(mpic, MPIC_INFO(CPU_INTACK));
}

/* Acknowledge and return the next pending irq on the primary MPIC. */
unsigned int mpic_get_irq(void)
{
	struct mpic *mpic = mpic_primary;

	BUG_ON(mpic == NULL);

	return mpic_get_one_irq(mpic);
}

/*
 * CoreInt variant: the hardware vector is read from SPRN_EPR rather
 * than via an INTACK access.  Compiled to return NO_IRQ on non-BookE.
 */
unsigned int mpic_get_coreint_irq(void)
{
#ifdef CONFIG_BOOKE
	struct mpic *mpic = mpic_primary;
	u32 src;

	BUG_ON(mpic == NULL);

	src = mfspr(SPRN_EPR);

	if (unlikely(src == mpic->spurious_vec)) {
		if (mpic->flags & MPIC_SPV_EOI)
			mpic_eoi(mpic);
		return NO_IRQ;
	}
	if (unlikely(mpic->protected && test_bit(src, mpic->protected))) {
		if (printk_ratelimit())
			printk(KERN_WARNING "%s: Got protected source %d !\n",
			       mpic->name, (int)src);
		return NO_IRQ;
	}

	return irq_linear_revmap(mpic->irqhost, src);
#else
	return NO_IRQ;
#endif
}

/* Fetch a pending machine-check interrupt via the MCACK register. */
unsigned int mpic_get_mcirq(void)
{
	struct mpic *mpic = mpic_primary;

	BUG_ON(mpic == NULL);

	return _mpic_get_one_irq(mpic, MPIC_INFO(CPU_MCACK));
}

#ifdef CONFIG_SMP
/* Map the four reserved IPI vectors and wire them to the SMP message IPIs. */
void
mpic_request_ipis(void)
{
	struct mpic *mpic = mpic_primary;
	int i;

	BUG_ON(mpic == NULL);

	printk(KERN_INFO "mpic: requesting IPIs...\n");

	for (i = 0; i < 4; i++) {
		/* create a virq for each reserved IPI vector */
		unsigned int vipi = irq_create_mapping(mpic->irqhost,
						       mpic->ipi_vecs[0] + i);
		if (vipi == NO_IRQ) {
			printk(KERN_ERR "Failed to map %s\n", smp_ipi_name[i]);
			continue;
		}
		smp_request_message_ipi(vipi, i);
	}
}

/*
 * Send SMP message @msg (0-3) as an IPI to @cpu by writing that CPU's
 * physical mask into the matching IPI dispatch register.
 */
void smp_mpic_message_pass(int cpu, int msg)
{
	struct mpic *mpic = mpic_primary;
	u32 physmask;

	BUG_ON(mpic == NULL);

	/* make sure we're sending something that translates to an IPI */
	if ((unsigned int)msg > 3) {
		printk("SMP %d: smp_message_pass: unknown msg %d\n",
		       smp_processor_id(), msg);
		return;
	}

#ifdef DEBUG_IPI
	DBG("%s: send_ipi(ipi_no: %d)\n", mpic->name, msg);
#endif

	physmask = 1 << get_hard_smp_processor_id(cpu);

	mpic_cpu_write(MPIC_INFO(CPU_IPI_DISPATCH_0) +
		       msg * MPIC_INFO(CPU_IPI_DISPATCH_STRIDE), physmask);
}

/*
 * SMP probe hook: count the possible CPUs, request the IPIs when there
 * is more than one, and return the count.
 */
int __init smp_mpic_probe(void)
{
	int nr_cpus;

	DBG("smp_mpic_probe()...\n");

	nr_cpus = cpumask_weight(cpu_possible_mask);

	DBG("nr_cpus: %d\n", nr_cpus);

	if (nr_cpus > 1)
		mpic_request_ipis();

	return nr_cpus;
}

/* Per-CPU SMP setup hook; delegates to mpic_setup_this_cpu(). */
void __devinit smp_mpic_setup_cpu(int cpu)
{
	mpic_setup_this_cpu();
}

/*
 * Pulse the processor-init bit for @cpu in GREG_PROCESSOR_INIT to
 * reset that core: set the bit, then clear it again.
 */
void mpic_reset_core(int cpu)
{
	struct mpic *mpic = mpic_primary;
	u32 pir;
	int cpuid = get_hard_smp_processor_id(cpu);

	/* Set target bit for core reset */
	pir = mpic_read(mpic->gregs, MPIC_INFO(GREG_PROCESSOR_INIT));
	pir |= (1 << cpuid);
	mpic_write(mpic->gregs, MPIC_INFO(GREG_PROCESSOR_INIT), pir);
	/* read back — presumably to flush the posted write; TODO confirm */
	mpic_read(mpic->gregs, MPIC_INFO(GREG_PROCESSOR_INIT));

	/* Restore target bit after reset complete */
	pir &= ~(1 << cpuid);
	mpic_write(mpic->gregs, MPIC_INFO(GREG_PROCESSOR_INIT), pir);
	mpic_read(mpic->gregs, MPIC_INFO(GREG_PROCESSOR_INIT));
}
#endif /* CONFIG_SMP */

#ifdef CONFIG_PM
/* Snapshot each source's vector/priority and destination for suspend. */
static void mpic_suspend_one(struct mpic *mpic)
{
	int i;

	for (i = 0; i < mpic->num_sources; i++) {
		mpic->save_data[i].vecprio =
			mpic_irq_read(i, MPIC_INFO(IRQ_VECTOR_PRI));
		mpic->save_data[i].dest =
			mpic_irq_read(i, MPIC_INFO(IRQ_DESTINATION));
	}
}

/* Syscore suspend hook: save state of every registered mpic. */
static int mpic_suspend(void)
{
	struct mpic *mpic = mpics;

	while (mpic) {
		mpic_suspend_one(mpic);
		mpic = mpic->next;
	}

	return 0;
}

/* Restore one mpic's per-source registers (and U3 HT fixups) on resume. */
static void mpic_resume_one(struct mpic *mpic)
{
	int i;

	for (i = 0; i < mpic->num_sources; i++) {
		mpic_irq_write(i, MPIC_INFO(IRQ_VECTOR_PRI),
			       mpic->save_data[i].vecprio);
		mpic_irq_write(i, MPIC_INFO(IRQ_DESTINATION),
			       mpic->save_data[i].dest);

#ifdef CONFIG_MPIC_U3_HT_IRQS
		if (mpic->fixups) {
			struct mpic_irq_fixup *fixup = &mpic->fixups[i];

			if (fixup->base) {
				/* we use the lowest bit in an inverted meaning */
				if ((mpic->save_data[i].fixup_data & 1) == 0)
					continue;

				/* Enable and configure */
				writeb(0x10 + 2 * fixup->index, fixup->base + 2);

				writel(mpic->save_data[i].fixup_data & ~1,
				       fixup->base + 4);
			}
		}
#endif
	} /* end for loop */
}

/* Syscore resume hook: restore every registered mpic. */
static void mpic_resume(void)
{
	struct mpic *mpic = mpics;

	while (mpic) {
		mpic_resume_one(mpic);
		mpic = mpic->next;
	}
}

static struct syscore_ops mpic_syscore_ops = {
	.resume = mpic_resume,
	.suspend = mpic_suspend,
};

/* Register the suspend/resume hooks at device_initcall time. */
static int mpic_init_sys(void)
{
	register_syscore_ops(&mpic_syscore_ops);
	return 0;
}

device_initcall(mpic_init_sys);
#endif