// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * arch/powerpc/sysdev/ipic.c
 *
 * IPIC routines implementations.
 *
 * Copyright 2005 Freescale Semiconductor, Inc.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/syscore_ops.h>
#include <linux/device.h>
#include <linux/spinlock.h>
#include <linux/fsl_devices.h>
#include <linux/irqdomain.h>
#include <linux/of_address.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/ipic.h>

#include "ipic.h"

static struct ipic * primary_ipic;
static struct irq_chip ipic_level_irq_chip, ipic_edge_irq_chip;
static DEFINE_RAW_SPINLOCK(ipic_lock);

static struct ipic_info ipic_info[] = {
	[1] = {
		.mask = IPIC_SIMSR_H,
		.prio = IPIC_SIPRR_C,
		.force = IPIC_SIFCR_H,
		.bit = 16,
		.prio_mask = 0,
	},
	[2] = {
		.mask = IPIC_SIMSR_H,
		.prio = IPIC_SIPRR_C,
		.force = IPIC_SIFCR_H,
		.bit = 17,
		.prio_mask = 1,
	},
	[3] = {
		.mask = IPIC_SIMSR_H,
		.prio = IPIC_SIPRR_C,
		.force = IPIC_SIFCR_H,
		.bit = 18,
		.prio_mask = 2,
	},
	[4] = {
		.mask = IPIC_SIMSR_H,
		.prio = IPIC_SIPRR_C,
		.force = IPIC_SIFCR_H,
		.bit = 19,
		.prio_mask = 3,
	},
	[5] = {
		.mask = IPIC_SIMSR_H,
		.prio = IPIC_SIPRR_C,
		.force = IPIC_SIFCR_H,
		.bit = 20,
		.prio_mask = 4,
	},
	[6] = {
		.mask = IPIC_SIMSR_H,
		.prio = IPIC_SIPRR_C,
		.force = IPIC_SIFCR_H,
		.bit = 21,
		.prio_mask = 5,
	},
	[7] = {
		.mask = IPIC_SIMSR_H,
		.prio = IPIC_SIPRR_C,
		.force = IPIC_SIFCR_H,
		.bit = 22,
		.prio_mask = 6,
	},
	[8] = {
		.mask = IPIC_SIMSR_H,
		.prio = IPIC_SIPRR_C,
		.force = IPIC_SIFCR_H,
		.bit = 23,
		.prio_mask = 7,
	},
	[9] = {
		.mask = IPIC_SIMSR_H,
		.prio = IPIC_SIPRR_D,
		.force = IPIC_SIFCR_H,
		.bit = 24,
		.prio_mask = 0,
	},
	[10] = {
		.mask = IPIC_SIMSR_H,
		.prio = IPIC_SIPRR_D,
		.force = IPIC_SIFCR_H,
		.bit = 25,
		.prio_mask = 1,
	},
	[11] = {
		.mask = IPIC_SIMSR_H,
		.prio = IPIC_SIPRR_D,
		.force = IPIC_SIFCR_H,
		.bit = 26,
		.prio_mask = 2,
	},
	[12] = {
		.mask = IPIC_SIMSR_H,
		.prio = IPIC_SIPRR_D,
		.force = IPIC_SIFCR_H,
		.bit = 27,
		.prio_mask = 3,
	},
	[13] = {
		.mask = IPIC_SIMSR_H,
		.prio = IPIC_SIPRR_D,
		.force = IPIC_SIFCR_H,
		.bit = 28,
		.prio_mask = 4,
	},
	[14] = {
		.mask = IPIC_SIMSR_H,
		.prio = IPIC_SIPRR_D,
		.force = IPIC_SIFCR_H,
		.bit = 29,
		.prio_mask = 5,
	},
	[15] = {
		.mask = IPIC_SIMSR_H,
		.prio = IPIC_SIPRR_D,
		.force = IPIC_SIFCR_H,
		.bit = 30,
		.prio_mask = 6,
	},
	[16] = {
		.mask = IPIC_SIMSR_H,
		.prio = IPIC_SIPRR_D,
		.force = IPIC_SIFCR_H,
		.bit = 31,
		.prio_mask = 7,
	},
	[17] = {
		.ack = IPIC_SEPNR,
		.mask = IPIC_SEMSR,
		.prio = IPIC_SMPRR_A,
		.force = IPIC_SEFCR,
		.bit = 1,
		.prio_mask = 5,
	},
	[18] = {
		.ack = IPIC_SEPNR,
		.mask = IPIC_SEMSR,
		.prio = IPIC_SMPRR_A,
		.force = IPIC_SEFCR,
		.bit = 2,
		.prio_mask = 6,
	},
	[19] = {
		.ack = IPIC_SEPNR,
		.mask = IPIC_SEMSR,
		.prio = IPIC_SMPRR_A,
		.force = IPIC_SEFCR,
		.bit = 3,
		.prio_mask = 7,
	},
	[20] = {
		.ack = IPIC_SEPNR,
		.mask = IPIC_SEMSR,
		.prio = IPIC_SMPRR_B,
		.force = IPIC_SEFCR,
		.bit = 4,
		.prio_mask = 4,
	},
	[21] = {
		.ack = IPIC_SEPNR,
		.mask = IPIC_SEMSR,
		.prio = IPIC_SMPRR_B,
		.force = IPIC_SEFCR,
		.bit = 5,
		.prio_mask = 5,
	},
	[22] = {
		.ack = IPIC_SEPNR,
		.mask = IPIC_SEMSR,
		.prio = IPIC_SMPRR_B,
		.force = IPIC_SEFCR,
		.bit = 6,
		.prio_mask = 6,
	},
	[23] = {
		.ack = IPIC_SEPNR,
		.mask = IPIC_SEMSR,
		.prio = IPIC_SMPRR_B,
		.force = IPIC_SEFCR,
		.bit = 7,
		.prio_mask = 7,
	},
	[32] = {
		.mask = IPIC_SIMSR_H,
		.prio = IPIC_SIPRR_A,
		.force = IPIC_SIFCR_H,
		.bit = 0,
		.prio_mask = 0,
	},
	[33] = {
		.mask = IPIC_SIMSR_H,
		.prio = IPIC_SIPRR_A,
		.force = IPIC_SIFCR_H,
		.bit = 1,
		.prio_mask = 1,
	},
	[34] = {
		.mask = IPIC_SIMSR_H,
		.prio = IPIC_SIPRR_A,
		.force = IPIC_SIFCR_H,
		.bit = 2,
		.prio_mask = 2,
	},
	[35] = {
		.mask = IPIC_SIMSR_H,
		.prio = IPIC_SIPRR_A,
		.force = IPIC_SIFCR_H,
		.bit = 3,
		.prio_mask = 3,
	},
	[36] = {
		.mask = IPIC_SIMSR_H,
		.prio = IPIC_SIPRR_A,
		.force = IPIC_SIFCR_H,
		.bit = 4,
		.prio_mask = 4,
	},
	[37] = {
		.mask = IPIC_SIMSR_H,
		.prio = IPIC_SIPRR_A,
		.force = IPIC_SIFCR_H,
		.bit = 5,
		.prio_mask = 5,
	},
	[38] = {
		.mask = IPIC_SIMSR_H,
		.prio = IPIC_SIPRR_A,
		.force = IPIC_SIFCR_H,
		.bit = 6,
		.prio_mask = 6,
	},
	[39] = {
		.mask = IPIC_SIMSR_H,
		.prio = IPIC_SIPRR_A,
		.force = IPIC_SIFCR_H,
		.bit = 7,
		.prio_mask = 7,
	},
	[40] = {
		.mask = IPIC_SIMSR_H,
		.prio = IPIC_SIPRR_B,
		.force = IPIC_SIFCR_H,
		.bit = 8,
		.prio_mask = 0,
	},
	[41] = {
		.mask = IPIC_SIMSR_H,
		.prio = IPIC_SIPRR_B,
		.force = IPIC_SIFCR_H,
		.bit = 9,
		.prio_mask = 1,
	},
	[42] = {
		.mask = IPIC_SIMSR_H,
		.prio = IPIC_SIPRR_B,
		.force = IPIC_SIFCR_H,
		.bit = 10,
		.prio_mask = 2,
	},
	[43] = {
		.mask = IPIC_SIMSR_H,
		.prio = IPIC_SIPRR_B,
		.force = IPIC_SIFCR_H,
		.bit = 11,
		.prio_mask = 3,
	},
	[44] = {
		.mask = IPIC_SIMSR_H,
		.prio = IPIC_SIPRR_B,
		.force = IPIC_SIFCR_H,
		.bit = 12,
		.prio_mask = 4,
	},
	[45] = {
		.mask = IPIC_SIMSR_H,
		.prio = IPIC_SIPRR_B,
		.force = IPIC_SIFCR_H,
		.bit = 13,
		.prio_mask = 5,
	},
	[46] = {
		.mask = IPIC_SIMSR_H,
		.prio = IPIC_SIPRR_B,
		.force = IPIC_SIFCR_H,
		.bit = 14,
		.prio_mask = 6,
	},
	[47] = {
		.mask = IPIC_SIMSR_H,
		.prio = IPIC_SIPRR_B,
		.force = IPIC_SIFCR_H,
		.bit = 15,
		.prio_mask = 7,
	},
	[48] = {
		.ack = IPIC_SEPNR,
		.mask = IPIC_SEMSR,
		.prio = IPIC_SMPRR_A,
		.force = IPIC_SEFCR,
		.bit = 0,
		.prio_mask = 4,
	},
	[64] = {
		.mask = IPIC_SIMSR_L,
		.prio = IPIC_SMPRR_A,
		.force = IPIC_SIFCR_L,
		.bit = 0,
		.prio_mask = 0,
	},
	[65] = {
		.mask = IPIC_SIMSR_L,
		.prio = IPIC_SMPRR_A,
		.force = IPIC_SIFCR_L,
		.bit = 1,
		.prio_mask = 1,
	},
	[66] = {
		.mask = IPIC_SIMSR_L,
		.prio = IPIC_SMPRR_A,
		.force = IPIC_SIFCR_L,
		.bit = 2,
		.prio_mask = 2,
	},
	[67] = {
		.mask = IPIC_SIMSR_L,
		.prio = IPIC_SMPRR_A,
		.force = IPIC_SIFCR_L,
		.bit = 3,
		.prio_mask = 3,
	},
	[68] = {
		.mask = IPIC_SIMSR_L,
		.prio = IPIC_SMPRR_B,
		.force = IPIC_SIFCR_L,
		.bit = 4,
		.prio_mask = 0,
	},
	[69] = {
		.mask = IPIC_SIMSR_L,
		.prio = IPIC_SMPRR_B,
		.force = IPIC_SIFCR_L,
		.bit = 5,
		.prio_mask = 1,
	},
	[70] = {
		.mask = IPIC_SIMSR_L,
		.prio = IPIC_SMPRR_B,
		.force = IPIC_SIFCR_L,
		.bit = 6,
		.prio_mask = 2,
	},
	[71] = {
		.mask = IPIC_SIMSR_L,
		.prio = IPIC_SMPRR_B,
		.force = IPIC_SIFCR_L,
		.bit = 7,
		.prio_mask = 3,
	},
	[72] = {
		.mask = IPIC_SIMSR_L,
		.prio = 0,
		.force = IPIC_SIFCR_L,
		.bit = 8,
	},
	[73] = {
		.mask = IPIC_SIMSR_L,
		.prio = 0,
		.force = IPIC_SIFCR_L,
		.bit = 9,
	},
	[74] = {
		.mask = IPIC_SIMSR_L,
		.prio = 0,
		.force = IPIC_SIFCR_L,
		.bit = 10,
	},
	[75] = {
		.mask = IPIC_SIMSR_L,
		.prio = 0,
		.force = IPIC_SIFCR_L,
		.bit = 11,
	},
	[76] = {
		.mask = IPIC_SIMSR_L,
		.prio = 0,
		.force = IPIC_SIFCR_L,
		.bit = 12,
	},
	[77] = {
		.mask = IPIC_SIMSR_L,
		.prio = 0,
		.force = IPIC_SIFCR_L,
		.bit = 13,
	},
	[78] = {
		.mask = IPIC_SIMSR_L,
		.prio = 0,
		.force = IPIC_SIFCR_L,
		.bit = 14,
	},
	[79] = {
		.mask = IPIC_SIMSR_L,
		.prio = 0,
		.force = IPIC_SIFCR_L,
		.bit = 15,
	},
	[80] = {
		.mask = IPIC_SIMSR_L,
		.prio = 0,
		.force = IPIC_SIFCR_L,
		.bit = 16,
	},
	[81] = {
		.mask = IPIC_SIMSR_L,
		.prio = 0,
		.force = IPIC_SIFCR_L,
		.bit = 17,
	},
	[82] = {
		.mask = IPIC_SIMSR_L,
		.prio = 0,
		.force = IPIC_SIFCR_L,
		.bit = 18,
	},
	[83] = {
		.mask = IPIC_SIMSR_L,
		.prio = 0,
		.force = IPIC_SIFCR_L,
		.bit = 19,
	},
	[84] = {
		.mask = IPIC_SIMSR_L,
		.prio = 0,
		.force = IPIC_SIFCR_L,
		.bit = 20,
	},
	[85] = {
		.mask = IPIC_SIMSR_L,
		.prio = 0,
		.force = IPIC_SIFCR_L,
		.bit = 21,
	},
	[86] = {
		.mask = IPIC_SIMSR_L,
		.prio = 0,
		.force = IPIC_SIFCR_L,
		.bit = 22,
	},
	[87] = {
		.mask = IPIC_SIMSR_L,
		.prio = 0,
		.force = IPIC_SIFCR_L,
		.bit = 23,
	},
	[88] = {
		.mask = IPIC_SIMSR_L,
		.prio = 0,
		.force = IPIC_SIFCR_L,
		.bit = 24,
	},
	[89] = {
		.mask = IPIC_SIMSR_L,
		.prio = 0,
		.force = IPIC_SIFCR_L,
		.bit = 25,
	},
	[90] = {
		.mask = IPIC_SIMSR_L,
		.prio = 0,
		.force = IPIC_SIFCR_L,
		.bit = 26,
	},
	[91] = {
		.mask = IPIC_SIMSR_L,
		.prio = 0,
		.force = IPIC_SIFCR_L,
		.bit = 27,
	},
	[94] = {
		.mask = IPIC_SIMSR_L,
		.prio = 0,
		.force = IPIC_SIFCR_L,
		.bit = 30,
	},
};

static inline u32 ipic_read(volatile u32 __iomem *base, unsigned int reg)
{
	return in_be32(base + (reg >> 2));
}

static inline void ipic_write(volatile u32 __iomem *base, unsigned int reg, u32 value)
{
	out_be32(base + (reg >> 2), value);
}

static inline struct ipic * ipic_from_irq(unsigned int virq)
{
	return primary_ipic;
}

static void ipic_unmask_irq(struct irq_data *d)
{
	struct ipic *ipic = ipic_from_irq(d->irq);
	unsigned int src = irqd_to_hwirq(d);
	unsigned long flags;
	u32 temp;

	raw_spin_lock_irqsave(&ipic_lock, flags);

	temp = ipic_read(ipic->regs, ipic_info[src].mask);
	temp |= (1 << (31 - ipic_info[src].bit));
	ipic_write(ipic->regs, ipic_info[src].mask, temp);

	raw_spin_unlock_irqrestore(&ipic_lock, flags);
}

static void ipic_mask_irq(struct irq_data *d)
{
	struct ipic *ipic = ipic_from_irq(d->irq);
	unsigned int src = irqd_to_hwirq(d);
	unsigned long flags;
	u32 temp;

	raw_spin_lock_irqsave(&ipic_lock, flags);

	temp = ipic_read(ipic->regs, ipic_info[src].mask);
	temp &= ~(1 << (31 - ipic_info[src].bit));
	ipic_write(ipic->regs, ipic_info[src].mask, temp);

	/* mb() can't guarantee that masking is finished. But it does finish
	 * for nearly all cases. */
	mb();

	raw_spin_unlock_irqrestore(&ipic_lock, flags);
}
static void ipic_ack_irq(struct irq_data *d)
{
	struct ipic *ipic = ipic_from_irq(d->irq);
	unsigned int src = irqd_to_hwirq(d);
	unsigned long flags;
	u32 temp;

	raw_spin_lock_irqsave(&ipic_lock, flags);

	temp = 1 << (31 - ipic_info[src].bit);
	ipic_write(ipic->regs, ipic_info[src].ack, temp);

	/* mb() can't guarantee that ack is finished. But it does finish
	 * for nearly all cases. */
	mb();

	raw_spin_unlock_irqrestore(&ipic_lock, flags);
}

static void ipic_mask_irq_and_ack(struct irq_data *d)
{
	struct ipic *ipic = ipic_from_irq(d->irq);
	unsigned int src = irqd_to_hwirq(d);
	unsigned long flags;
	u32 temp;

	raw_spin_lock_irqsave(&ipic_lock, flags);

	temp = ipic_read(ipic->regs, ipic_info[src].mask);
	temp &= ~(1 << (31 - ipic_info[src].bit));
	ipic_write(ipic->regs, ipic_info[src].mask, temp);

	temp = 1 << (31 - ipic_info[src].bit);
	ipic_write(ipic->regs, ipic_info[src].ack, temp);

	/* mb() can't guarantee that ack is finished. But it does finish
	 * for nearly all cases. */
	mb();

	raw_spin_unlock_irqrestore(&ipic_lock, flags);
}

static int ipic_set_irq_type(struct irq_data *d, unsigned int flow_type)
{
	struct ipic *ipic = ipic_from_irq(d->irq);
	unsigned int src = irqd_to_hwirq(d);
	unsigned int vold, vnew, edibit;

	if (flow_type == IRQ_TYPE_NONE)
		flow_type = IRQ_TYPE_LEVEL_LOW;

	/* ipic supports only low assertion and high-to-low change senses
	 */
	if (!(flow_type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING))) {
		printk(KERN_ERR "ipic: sense type 0x%x not supported\n",
			flow_type);
		return -EINVAL;
	}
	/* ipic supports only edge mode on external interrupts */
	if ((flow_type & IRQ_TYPE_EDGE_FALLING) && !ipic_info[src].ack) {
		printk(KERN_ERR "ipic: edge sense not supported on internal "
				"interrupts\n");
		return -EINVAL;

	}

	irqd_set_trigger_type(d, flow_type);
	if (flow_type & IRQ_TYPE_LEVEL_LOW) {
		irq_set_handler_locked(d, handle_level_irq);
		d->chip = &ipic_level_irq_chip;
	} else {
		irq_set_handler_locked(d, handle_edge_irq);
		d->chip = &ipic_edge_irq_chip;
	}

	/* only EXT IRQ senses are programmable on ipic
	 * internal IRQ senses are LEVEL_LOW
	 */
	if (src == IPIC_IRQ_EXT0)
		edibit = 15;
	else
		if (src >= IPIC_IRQ_EXT1 && src <= IPIC_IRQ_EXT7)
			edibit = (14 - (src - IPIC_IRQ_EXT1));
		else
			return (flow_type & IRQ_TYPE_LEVEL_LOW) ?
				0 : -EINVAL;
	vold = ipic_read(ipic->regs, IPIC_SECNR);
	if ((flow_type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_FALLING) {
		vnew = vold | (1 << edibit);
	} else {
		vnew = vold & ~(1 << edibit);
	}
	if (vold != vnew)
		ipic_write(ipic->regs, IPIC_SECNR, vnew);
	return IRQ_SET_MASK_OK_NOCOPY;
}

/* level interrupts and edge interrupts have different ack operations */
static struct irq_chip ipic_level_irq_chip = {
	.name = "IPIC",
	.irq_unmask = ipic_unmask_irq,
	.irq_mask = ipic_mask_irq,
	.irq_mask_ack = ipic_mask_irq,
	.irq_set_type = ipic_set_irq_type,
};

static struct irq_chip ipic_edge_irq_chip = {
	.name = "IPIC",
	.irq_unmask = ipic_unmask_irq,
	.irq_mask = ipic_mask_irq,
	.irq_mask_ack = ipic_mask_irq_and_ack,
	.irq_ack = ipic_ack_irq,
	.irq_set_type = ipic_set_irq_type,
};

static int ipic_host_match(struct irq_domain *h, struct device_node *node,
			   enum irq_domain_bus_token bus_token)
{
	/* Exact match, unless ipic node is NULL */
	struct device_node *of_node = irq_domain_get_of_node(h);
	return of_node == NULL || of_node == node;
}

static int ipic_host_map(struct irq_domain *h, unsigned int virq,
			 irq_hw_number_t hw)
{
	struct ipic *ipic = h->host_data;

	irq_set_chip_data(virq, ipic);
	irq_set_chip_and_handler(virq, &ipic_level_irq_chip, handle_level_irq);

	/* Set default irq type */
	irq_set_irq_type(virq, IRQ_TYPE_NONE);

	return 0;
}

static const struct irq_domain_ops ipic_host_ops = {
	.match = ipic_host_match,
	.map = ipic_host_map,
	.xlate = irq_domain_xlate_onetwocell,
};

struct ipic * __init ipic_init(struct device_node *node, unsigned int flags)
{
	struct ipic *ipic;
	struct resource res;
	u32 temp = 0, ret;

	ret = of_address_to_resource(node, 0, &res);
	if (ret)
		return NULL;

	ipic = kzalloc(sizeof(*ipic), GFP_KERNEL);
	if (ipic == NULL)
		return NULL;

	ipic->irqhost = irq_domain_add_linear(node, NR_IPIC_INTS,
					      &ipic_host_ops, ipic);
	if (ipic->irqhost == NULL) {
		kfree(ipic);
		return NULL;
	}

	ipic->regs = ioremap(res.start, resource_size(&res));

	/* init hw */
	ipic_write(ipic->regs, IPIC_SICNR, 0x0);

	/* default priority scheme is grouped. If spread mode is required
	 * configure SICFR accordingly */
	if (flags & IPIC_SPREADMODE_GRP_A)
		temp |= SICFR_IPSA;
	if (flags & IPIC_SPREADMODE_GRP_B)
		temp |= SICFR_IPSB;
	if (flags & IPIC_SPREADMODE_GRP_C)
		temp |= SICFR_IPSC;
	if (flags & IPIC_SPREADMODE_GRP_D)
		temp |= SICFR_IPSD;
	if (flags & IPIC_SPREADMODE_MIX_A)
		temp |= SICFR_MPSA;
	if (flags & IPIC_SPREADMODE_MIX_B)
		temp |= SICFR_MPSB;

	ipic_write(ipic->regs, IPIC_SICFR, temp);

	/* handle MCP route */
	temp = 0;
	if (flags & IPIC_DISABLE_MCP_OUT)
		temp = SERCR_MCPR;
	ipic_write(ipic->regs, IPIC_SERCR, temp);

	/* handle routing of IRQ0 to MCP */
	temp = ipic_read(ipic->regs, IPIC_SEMSR);

	if (flags & IPIC_IRQ0_MCP)
		temp |= SEMSR_SIRQ0;
	else
		temp &= ~SEMSR_SIRQ0;

	ipic_write(ipic->regs, IPIC_SEMSR, temp);

	primary_ipic = ipic;
	irq_set_default_host(primary_ipic->irqhost);

	ipic_write(ipic->regs, IPIC_SIMSR_H, 0);
	ipic_write(ipic->regs, IPIC_SIMSR_L, 0);

	printk("IPIC (%d IRQ sources) at %p\n", NR_IPIC_INTS,
	       primary_ipic->regs);

	return ipic;
}

void __init ipic_set_default_priority(void)
{
	ipic_write(primary_ipic->regs, IPIC_SIPRR_A, IPIC_PRIORITY_DEFAULT);
	ipic_write(primary_ipic->regs, IPIC_SIPRR_B, IPIC_PRIORITY_DEFAULT);
	ipic_write(primary_ipic->regs, IPIC_SIPRR_C, IPIC_PRIORITY_DEFAULT);
	ipic_write(primary_ipic->regs, IPIC_SIPRR_D, IPIC_PRIORITY_DEFAULT);
	ipic_write(primary_ipic->regs, IPIC_SMPRR_A, IPIC_PRIORITY_DEFAULT);
	ipic_write(primary_ipic->regs, IPIC_SMPRR_B, IPIC_PRIORITY_DEFAULT);
}

u32 ipic_get_mcp_status(void)
{
	return primary_ipic ? ipic_read(primary_ipic->regs, IPIC_SERSR) : 0;
}

void ipic_clear_mcp_status(u32 mask)
{
	ipic_write(primary_ipic->regs, IPIC_SERSR, mask);
}

/* Return an interrupt vector or 0 if no interrupt is pending. */
unsigned int ipic_get_irq(void)
{
	int irq;

	BUG_ON(primary_ipic == NULL);

#define IPIC_SIVCR_VECTOR_MASK 0x7f
	irq = ipic_read(primary_ipic->regs, IPIC_SIVCR) & IPIC_SIVCR_VECTOR_MASK;

	if (irq == 0)    /* 0 --> no irq is pending */
		return 0;

	return irq_linear_revmap(primary_ipic->irqhost, irq);
}

#ifdef CONFIG_SUSPEND
static struct {
	u32 sicfr;
	u32 siprr[2];
	u32 simsr[2];
	u32 sicnr;
	u32 smprr[2];
	u32 semsr;
	u32 secnr;
	u32 sermr;
	u32 sercr;
} ipic_saved_state;

static int ipic_suspend(void)
{
	struct ipic *ipic = primary_ipic;

	ipic_saved_state.sicfr = ipic_read(ipic->regs, IPIC_SICFR);
	ipic_saved_state.siprr[0] = ipic_read(ipic->regs, IPIC_SIPRR_A);
	ipic_saved_state.siprr[1] = ipic_read(ipic->regs, IPIC_SIPRR_D);
	ipic_saved_state.simsr[0] = ipic_read(ipic->regs, IPIC_SIMSR_H);
	ipic_saved_state.simsr[1] = ipic_read(ipic->regs, IPIC_SIMSR_L);
	ipic_saved_state.sicnr = ipic_read(ipic->regs, IPIC_SICNR);
	ipic_saved_state.smprr[0] = ipic_read(ipic->regs, IPIC_SMPRR_A);
	ipic_saved_state.smprr[1] = ipic_read(ipic->regs, IPIC_SMPRR_B);
	ipic_saved_state.semsr = ipic_read(ipic->regs, IPIC_SEMSR);
	ipic_saved_state.secnr = ipic_read(ipic->regs, IPIC_SECNR);
	ipic_saved_state.sermr = ipic_read(ipic->regs, IPIC_SERMR);
	ipic_saved_state.sercr = ipic_read(ipic->regs, IPIC_SERCR);

	if (fsl_deep_sleep()) {
		/* In deep sleep, make sure there can be no
		 * pending interrupts, as this can cause
		 * problems on 831x.
		 */
		ipic_write(ipic->regs, IPIC_SIMSR_H, 0);
		ipic_write(ipic->regs, IPIC_SIMSR_L, 0);
		ipic_write(ipic->regs, IPIC_SEMSR, 0);
		ipic_write(ipic->regs, IPIC_SERMR, 0);
	}

	return 0;
}

static void ipic_resume(void)
{
	struct ipic *ipic = primary_ipic;

	ipic_write(ipic->regs, IPIC_SICFR, ipic_saved_state.sicfr);
	ipic_write(ipic->regs, IPIC_SIPRR_A, ipic_saved_state.siprr[0]);
	ipic_write(ipic->regs, IPIC_SIPRR_D, ipic_saved_state.siprr[1]);
	ipic_write(ipic->regs, IPIC_SIMSR_H, ipic_saved_state.simsr[0]);
	ipic_write(ipic->regs, IPIC_SIMSR_L, ipic_saved_state.simsr[1]);
	ipic_write(ipic->regs, IPIC_SICNR, ipic_saved_state.sicnr);
	ipic_write(ipic->regs, IPIC_SMPRR_A, ipic_saved_state.smprr[0]);
	ipic_write(ipic->regs, IPIC_SMPRR_B, ipic_saved_state.smprr[1]);
	ipic_write(ipic->regs, IPIC_SEMSR, ipic_saved_state.semsr);
	ipic_write(ipic->regs, IPIC_SECNR, ipic_saved_state.secnr);
	ipic_write(ipic->regs, IPIC_SERMR, ipic_saved_state.sermr);
	ipic_write(ipic->regs, IPIC_SERCR, ipic_saved_state.sercr);
}
#else
#define ipic_suspend NULL
#define ipic_resume NULL
#endif

static struct syscore_ops ipic_syscore_ops = {
	.suspend = ipic_suspend,
	.resume = ipic_resume,
};

static int __init init_ipic_syscore(void)
{
	if (!primary_ipic || !primary_ipic->regs)
		return -ENODEV;

	printk(KERN_DEBUG "Registering ipic system core operations\n");
	register_syscore_ops(&ipic_syscore_ops);

	return 0;
}

subsys_initcall(init_ipic_syscore);