/*
 * Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/percpu.h>
#include <linux/slab.h>

#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic-v3.h>

#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>
#include <asm/virt.h>

#include "irq-gic-common.h"

/* One redistributor region, as described by DT or the ACPI MADT. */
struct redist_region {
	void __iomem		*redist_base;	/* virtual mapping of the region */
	phys_addr_t		phys_base;	/* physical base (logged, handed to ITS) */
	bool			single_redist;	/* region holds exactly one redistributor */
};

/* Global driver state; a system has a single GICv3 distributor. */
struct gic_chip_data {
	void __iomem		*dist_base;		/* distributor register frame */
	struct redist_region	*redist_regions;	/* array of nr_redist_regions entries */
	struct rdists		rdists;			/* per-CPU redistributor bookkeeping */
	struct irq_domain	*domain;
	u64			redist_stride;		/* 0 = use architectural stride */
	u32			nr_redist_regions;
	unsigned int		irq_nr;			/* number of SGIs+PPIs+SPIs (<= 1020) */
};

static struct gic_chip_data gic_data __read_mostly;
/* Decremented at init when EL2 is unavailable (no split EOI/deactivate). */
static struct static_key supports_deactivate = STATIC_KEY_INIT_TRUE;

/* Accessors for this CPU's redistributor frames (RD_base, then SGI_base). */
#define gic_data_rdist()		(this_cpu_ptr(gic_data.rdists.rdist))
#define gic_data_rdist_rd_base()	(gic_data_rdist()->rd_base)
#define gic_data_rdist_sgi_base()	(gic_data_rdist_rd_base() + SZ_64K)

/* Our default, arbitrary priority value.
   Linux only uses one anyway. */
#define DEFAULT_PMR_VALUE	0xf0

/* Hardware interrupt number backing this irq_data. */
static inline unsigned int gic_irq(struct irq_data *d)
{
	return d->hwirq;
}

/* SGIs and PPIs (hwirq < 32) live in the redistributor, not the distributor. */
static inline int gic_irq_in_rdist(struct irq_data *d)
{
	return gic_irq(d) < 32;
}

/* Register frame to use for this interrupt (NULL for LPIs). */
static inline void __iomem *gic_dist_base(struct irq_data *d)
{
	if (gic_irq_in_rdist(d))	/* SGI+PPI -> SGI_base for this CPU */
		return gic_data_rdist_sgi_base();

	if (d->hwirq <= 1023)		/* SPI -> dist_base */
		return gic_data.dist_base;

	return NULL;
}

/* Spin until RWP clears, i.e. the last register write has taken effect. */
static void gic_do_wait_for_rwp(void __iomem *base)
{
	u32 count = 1000000;	/* 1s! */

	while (readl_relaxed(base + GICD_CTLR) & GICD_CTLR_RWP) {
		count--;
		if (!count) {
			pr_err_ratelimited("RWP timeout, gone fishing\n");
			return;
		}
		cpu_relax();
		udelay(1);
	};
}

/* Wait for completion of a distributor change */
static void gic_dist_wait_for_rwp(void)
{
	gic_do_wait_for_rwp(gic_data.dist_base);
}

/* Wait for completion of a redistributor change */
static void gic_redist_wait_for_rwp(void)
{
	gic_do_wait_for_rwp(gic_data_rdist_rd_base());
}

#ifdef CONFIG_ARM64
static DEFINE_STATIC_KEY_FALSE(is_cavium_thunderx);

/* IAR read, routed through the ThunderX erratum workaround when needed. */
static u64 __maybe_unused gic_read_iar(void)
{
	if (static_branch_unlikely(&is_cavium_thunderx))
		return gic_read_iar_cavium_thunderx();
	else
		return gic_read_iar_common();
}
#endif

/* Wake up (enable=true) or put to sleep this CPU's redistributor. */
static void gic_enable_redist(bool enable)
{
	void __iomem *rbase;
	u32 count = 1000000;	/* 1s! */
	u32 val;

	rbase = gic_data_rdist_rd_base();

	val = readl_relaxed(rbase + GICR_WAKER);
	if (enable)
		/* Wake up this CPU redistributor */
		val &= ~GICR_WAKER_ProcessorSleep;
	else
		val |= GICR_WAKER_ProcessorSleep;
	writel_relaxed(val, rbase + GICR_WAKER);

	if (!enable) {		/* Check that GICR_WAKER is writeable */
		val = readl_relaxed(rbase + GICR_WAKER);
		if (!(val & GICR_WAKER_ProcessorSleep))
			return;	/* No PM support in this redistributor */
	}

	/* Wait for ChildrenAsleep to reflect the requested state. */
	while (count--) {
		val = readl_relaxed(rbase + GICR_WAKER);
		if (enable ^ (val & GICR_WAKER_ChildrenAsleep))
			break;
		cpu_relax();
		udelay(1);
	};
	if (!count)
		pr_err_ratelimited("redistributor failed to %s...\n",
				   enable ? "wakeup" : "sleep");
}

/*
 * Routines to disable, enable, EOI and route interrupts
 */
static int gic_peek_irq(struct irq_data *d, u32 offset)
{
	u32 mask = 1 << (gic_irq(d) % 32);
	void __iomem *base;

	if (gic_irq_in_rdist(d))
		base = gic_data_rdist_sgi_base();
	else
		base = gic_data.dist_base;

	return !!(readl_relaxed(base + offset + (gic_irq(d) / 32) * 4) & mask);
}

/* Write this interrupt's bit to a set/clear register, then wait for RWP. */
static void gic_poke_irq(struct irq_data *d, u32 offset)
{
	u32 mask = 1 << (gic_irq(d) % 32);
	void (*rwp_wait)(void);
	void __iomem *base;

	if (gic_irq_in_rdist(d)) {
		base = gic_data_rdist_sgi_base();
		rwp_wait = gic_redist_wait_for_rwp;
	} else {
		base = gic_data.dist_base;
		rwp_wait = gic_dist_wait_for_rwp;
	}

	writel_relaxed(mask, base + offset + (gic_irq(d) / 32) * 4);
	rwp_wait();
}

static void gic_mask_irq(struct irq_data *d)
{
	gic_poke_irq(d, GICD_ICENABLER);
}

static void gic_eoimode1_mask_irq(struct irq_data *d)
{
	gic_mask_irq(d);
	/*
	 * When masking a forwarded interrupt, make sure it is
	 * deactivated as well.
	 *
	 * This ensures that an interrupt that is getting
	 * disabled/masked will not get "stuck", because there is
	 * noone to deactivate it (guest is being terminated).
	 */
	if (irqd_is_forwarded_to_vcpu(d))
		gic_poke_irq(d, GICD_ICACTIVER);
}

static void gic_unmask_irq(struct irq_data *d)
{
	gic_poke_irq(d, GICD_ISENABLER);
}

static int gic_irq_set_irqchip_state(struct irq_data *d,
				     enum irqchip_irq_state which, bool val)
{
	u32 reg;

	if (d->hwirq >= gic_data.irq_nr)	/* PPI/SPI only */
		return -EINVAL;

	switch (which) {
	case IRQCHIP_STATE_PENDING:
		reg = val ? GICD_ISPENDR : GICD_ICPENDR;
		break;

	case IRQCHIP_STATE_ACTIVE:
		reg = val ? GICD_ISACTIVER : GICD_ICACTIVER;
		break;

	case IRQCHIP_STATE_MASKED:
		/* Note the inversion: "masked" means the enable bit clear. */
		reg = val ? GICD_ICENABLER : GICD_ISENABLER;
		break;

	default:
		return -EINVAL;
	}

	gic_poke_irq(d, reg);
	return 0;
}

static int gic_irq_get_irqchip_state(struct irq_data *d,
				     enum irqchip_irq_state which, bool *val)
{
	if (d->hwirq >= gic_data.irq_nr)	/* PPI/SPI only */
		return -EINVAL;

	switch (which) {
	case IRQCHIP_STATE_PENDING:
		*val = gic_peek_irq(d, GICD_ISPENDR);
		break;

	case IRQCHIP_STATE_ACTIVE:
		*val = gic_peek_irq(d, GICD_ISACTIVER);
		break;

	case IRQCHIP_STATE_MASKED:
		*val = !gic_peek_irq(d, GICD_ISENABLER);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

static void gic_eoi_irq(struct irq_data *d)
{
	gic_write_eoir(gic_irq(d));
}

static void gic_eoimode1_eoi_irq(struct irq_data *d)
{
	/*
	 * No need to deactivate an LPI, or an interrupt that
	 * is getting forwarded to a vcpu.
	 */
	if (gic_irq(d) >= 8192 || irqd_is_forwarded_to_vcpu(d))
		return;
	gic_write_dir(gic_irq(d));
}

static int gic_set_type(struct irq_data *d, unsigned int type)
{
	unsigned int irq = gic_irq(d);
	void (*rwp_wait)(void);
	void __iomem *base;

	/* Interrupt configuration for SGIs can't be changed */
	if (irq < 16)
		return -EINVAL;

	/* SPIs have restrictions on the supported types */
	if (irq >= 32 && type != IRQ_TYPE_LEVEL_HIGH &&
	    type != IRQ_TYPE_EDGE_RISING)
		return -EINVAL;

	if (gic_irq_in_rdist(d)) {
		base = gic_data_rdist_sgi_base();
		rwp_wait = gic_redist_wait_for_rwp;
	} else {
		base = gic_data.dist_base;
		rwp_wait = gic_dist_wait_for_rwp;
	}

	return gic_configure_irq(irq, type, base, rwp_wait);
}

static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
{
	if (vcpu)
		irqd_set_forwarded_to_vcpu(d);
	else
		irqd_clr_forwarded_to_vcpu(d);
	return 0;
}

/* Pack an MPIDR into the GICD_IROUTER layout (Aff3 lives at bits [39:32]). */
static u64 gic_mpidr_to_affinity(unsigned long mpidr)
{
	u64 aff;

	aff = ((u64)MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 0));

	return aff;
}

/* Low-level entry point: ack interrupts, dispatch them, and complete them. */
static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
	u32 irqnr;

	do {
		irqnr = gic_read_iar();

		/* PPIs/SPIs (16..1019) and LPIs (>= 8192) go to the domain. */
		if (likely(irqnr > 15 && irqnr < 1020) || irqnr >= 8192) {
			int err;

			if (static_key_true(&supports_deactivate))
				gic_write_eoir(irqnr);

			err = handle_domain_irq(gic_data.domain, irqnr, regs);
			if (err) {
				WARN_ONCE(true, "Unexpected interrupt received!\n");
				/* Complete the spurious ack ourselves. */
				if (static_key_true(&supports_deactivate)) {
					if (irqnr < 8192)
						gic_write_dir(irqnr);
				} else {
					gic_write_eoir(irqnr);
				}
			}
			continue;
		}
		if (irqnr < 16) {	/* SGI: the core kernel's IPIs */
			gic_write_eoir(irqnr);
			if (static_key_true(&supports_deactivate))
				gic_write_dir(irqnr);
#ifdef CONFIG_SMP
			handle_IPI(irqnr, regs);
#else
			WARN_ONCE(true, "Unexpected SGI received!\n");
#endif
			continue;
		}
	} while (irqnr != ICC_IAR1_EL1_SPURIOUS);
}

static void __init gic_dist_init(void)
{
	unsigned int i;
	u64 affinity;
	void __iomem *base = gic_data.dist_base;

	/* Disable the distributor */
	writel_relaxed(0, base + GICD_CTLR);
	gic_dist_wait_for_rwp();

	gic_dist_config(base, gic_data.irq_nr, gic_dist_wait_for_rwp);

	/* Enable distributor with ARE, Group1 */
	writel_relaxed(GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1,
		       base + GICD_CTLR);

	/*
	 * Set all global interrupts to the boot CPU only. ARE must be
	 * enabled.
	 */
	affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id()));
	for (i = 32; i < gic_data.irq_nr; i++)
		gic_write_irouter(affinity, base + GICD_IROUTER + i * 8);
}

/*
 * Locate this CPU's redistributor by walking every region until a
 * GICR_TYPER affinity match is found; records rd_base and phys_base.
 */
static int gic_populate_rdist(void)
{
	unsigned long mpidr = cpu_logical_map(smp_processor_id());
	u64 typer;
	u32 aff;
	int i;

	/*
	 * Convert affinity to a 32bit value that can be matched to
	 * GICR_TYPER bits [63:32].
	 */
	aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 0));

	for (i = 0; i < gic_data.nr_redist_regions; i++) {
		void __iomem *ptr = gic_data.redist_regions[i].redist_base;
		u32 reg;

		reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK;
		if (reg != GIC_PIDR2_ARCH_GICv3 &&
		    reg != GIC_PIDR2_ARCH_GICv4) { /* We're in trouble... */
			pr_warn("No redistributor present @%p\n", ptr);
			break;
		}

		do {
			typer = gic_read_typer(ptr + GICR_TYPER);
			if ((typer >> 32) == aff) {
				u64 offset = ptr - gic_data.redist_regions[i].redist_base;
				gic_data_rdist_rd_base() = ptr;
				gic_data_rdist()->phys_base = gic_data.redist_regions[i].phys_base + offset;
				pr_info("CPU%d: found redistributor %lx region %d:%pa\n",
					smp_processor_id(), mpidr, i,
					&gic_data_rdist()->phys_base);
				return 0;
			}

			if (gic_data.redist_regions[i].single_redist)
				break;

			if (gic_data.redist_stride) {
				ptr += gic_data.redist_stride;
			} else {
				ptr += SZ_64K * 2; /* Skip RD_base + SGI_base */
				if (typer & GICR_TYPER_VLPIS)
					ptr += SZ_64K * 2; /* Skip VLPI_base + reserved page */
			}
		} while (!(typer & GICR_TYPER_LAST));
	}

	/* We couldn't even deal with ourselves... */
	WARN(true, "CPU%d: mpidr %lx has no re-distributor!\n",
	     smp_processor_id(), mpidr);
	return -ENODEV;
}

/* Program the CPU interface system registers (SRE, PMR, CTLR, group enable). */
static void gic_cpu_sys_reg_init(void)
{
	/*
	 * Need to check that the SRE bit has actually been set. If
	 * not, it means that SRE is disabled at EL2. We're going to
	 * die painfully, and there is nothing we can do about it.
	 *
	 * Kindly inform the luser.
	 */
	if (!gic_enable_sre())
		pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n");

	/* Set priority mask register */
	gic_write_pmr(DEFAULT_PMR_VALUE);

	if (static_key_true(&supports_deactivate)) {
		/* EOI drops priority only (mode 1) */
		gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop);
	} else {
		/* EOI deactivates interrupt too (mode 0) */
		gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop_dir);
	}

	/* ... and let's hit the road...
	 */
	gic_write_grpen1(1);
}

static int gic_dist_supports_lpis(void)
{
	return !!(readl_relaxed(gic_data.dist_base + GICD_TYPER) & GICD_TYPER_LPIS);
}

/* Per-CPU bring-up: find our redistributor, wake it, configure SGI/PPI. */
static void gic_cpu_init(void)
{
	void __iomem *rbase;

	/* Register ourselves with the rest of the world */
	if (gic_populate_rdist())
		return;

	gic_enable_redist(true);

	rbase = gic_data_rdist_sgi_base();

	gic_cpu_config(rbase, gic_redist_wait_for_rwp);

	/* Give LPIs a spin */
	if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis())
		its_cpu_init();

	/* initialise system registers */
	gic_cpu_sys_reg_init();
}

#ifdef CONFIG_SMP
static int gic_secondary_init(struct notifier_block *nfb,
			      unsigned long action, void *hcpu)
{
	if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
		gic_cpu_init();
	return NOTIFY_OK;
}

/*
 * Notifier for enabling the GIC CPU interface. Set an arbitrarily high
 * priority because the GIC needs to be up before the ARM generic timers.
 */
static struct notifier_block gic_cpu_notifier = {
	.notifier_call = gic_secondary_init,
	.priority = 100,
};

/*
 * Build the ICC_SGI1R target list (one bit per Aff0 value) for the CPUs
 * in @mask that share @cluster_id, starting at *base_cpu. On return,
 * *base_cpu points at the last CPU consumed.
 */
static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
				   unsigned long cluster_id)
{
	int cpu = *base_cpu;
	unsigned long mpidr = cpu_logical_map(cpu);
	u16 tlist = 0;

	while (cpu < nr_cpu_ids) {
		/*
		 * If we ever get a cluster of more than 16 CPUs, just
		 * scream and skip that CPU.
		 */
		if (WARN_ON((mpidr & 0xff) >= 16))
			goto out;

		tlist |= 1 << (mpidr & 0xf);

		cpu = cpumask_next(cpu, mask);
		if (cpu >= nr_cpu_ids)
			goto out;

		mpidr = cpu_logical_map(cpu);

		if (cluster_id != (mpidr & ~0xffUL)) {
			/* Next CPU is in another cluster; let the caller redo it. */
			cpu--;
			goto out;
		}
	}
out:
	*base_cpu = cpu;
	return tlist;
}

#define MPIDR_TO_SGI_AFFINITY(cluster_id, level) \
	(MPIDR_AFFINITY_LEVEL(cluster_id, level) \
		<< ICC_SGI1R_AFFINITY_## level ##_SHIFT)

static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq)
{
	u64 val;

	val = (MPIDR_TO_SGI_AFFINITY(cluster_id, 3) |
	       MPIDR_TO_SGI_AFFINITY(cluster_id, 2) |
	       irq << ICC_SGI1R_SGI_ID_SHIFT |
	       MPIDR_TO_SGI_AFFINITY(cluster_id, 1) |
	       tlist << ICC_SGI1R_TARGET_LIST_SHIFT);

	pr_debug("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
	gic_write_sgi1r(val);
}

/* IPI entry point: send SGI @irq to every CPU in @mask, cluster by cluster. */
static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
{
	int cpu;

	if (WARN_ON(irq >= 16))
		return;

	/*
	 * Ensure that stores to Normal memory are visible to the
	 * other CPUs before issuing the IPI.
592 */ 593 smp_wmb(); 594 595 for_each_cpu(cpu, mask) { 596 unsigned long cluster_id = cpu_logical_map(cpu) & ~0xffUL; 597 u16 tlist; 598 599 tlist = gic_compute_target_list(&cpu, mask, cluster_id); 600 gic_send_sgi(cluster_id, tlist, irq); 601 } 602 603 /* Force the above writes to ICC_SGI1R_EL1 to be executed */ 604 isb(); 605 } 606 607 static void gic_smp_init(void) 608 { 609 set_smp_cross_call(gic_raise_softirq); 610 register_cpu_notifier(&gic_cpu_notifier); 611 } 612 613 static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, 614 bool force) 615 { 616 unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask); 617 void __iomem *reg; 618 int enabled; 619 u64 val; 620 621 if (gic_irq_in_rdist(d)) 622 return -EINVAL; 623 624 /* If interrupt was enabled, disable it first */ 625 enabled = gic_peek_irq(d, GICD_ISENABLER); 626 if (enabled) 627 gic_mask_irq(d); 628 629 reg = gic_dist_base(d) + GICD_IROUTER + (gic_irq(d) * 8); 630 val = gic_mpidr_to_affinity(cpu_logical_map(cpu)); 631 632 gic_write_irouter(val, reg); 633 634 /* 635 * If the interrupt was enabled, enabled it again. Otherwise, 636 * just wait for the distributor to have digested our changes. 
637 */ 638 if (enabled) 639 gic_unmask_irq(d); 640 else 641 gic_dist_wait_for_rwp(); 642 643 return IRQ_SET_MASK_OK_DONE; 644 } 645 #else 646 #define gic_set_affinity NULL 647 #define gic_smp_init() do { } while(0) 648 #endif 649 650 #ifdef CONFIG_CPU_PM 651 static int gic_cpu_pm_notifier(struct notifier_block *self, 652 unsigned long cmd, void *v) 653 { 654 if (cmd == CPU_PM_EXIT) { 655 gic_enable_redist(true); 656 gic_cpu_sys_reg_init(); 657 } else if (cmd == CPU_PM_ENTER) { 658 gic_write_grpen1(0); 659 gic_enable_redist(false); 660 } 661 return NOTIFY_OK; 662 } 663 664 static struct notifier_block gic_cpu_pm_notifier_block = { 665 .notifier_call = gic_cpu_pm_notifier, 666 }; 667 668 static void gic_cpu_pm_init(void) 669 { 670 cpu_pm_register_notifier(&gic_cpu_pm_notifier_block); 671 } 672 673 #else 674 static inline void gic_cpu_pm_init(void) { } 675 #endif /* CONFIG_CPU_PM */ 676 677 static struct irq_chip gic_chip = { 678 .name = "GICv3", 679 .irq_mask = gic_mask_irq, 680 .irq_unmask = gic_unmask_irq, 681 .irq_eoi = gic_eoi_irq, 682 .irq_set_type = gic_set_type, 683 .irq_set_affinity = gic_set_affinity, 684 .irq_get_irqchip_state = gic_irq_get_irqchip_state, 685 .irq_set_irqchip_state = gic_irq_set_irqchip_state, 686 .flags = IRQCHIP_SET_TYPE_MASKED, 687 }; 688 689 static struct irq_chip gic_eoimode1_chip = { 690 .name = "GICv3", 691 .irq_mask = gic_eoimode1_mask_irq, 692 .irq_unmask = gic_unmask_irq, 693 .irq_eoi = gic_eoimode1_eoi_irq, 694 .irq_set_type = gic_set_type, 695 .irq_set_affinity = gic_set_affinity, 696 .irq_get_irqchip_state = gic_irq_get_irqchip_state, 697 .irq_set_irqchip_state = gic_irq_set_irqchip_state, 698 .irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity, 699 .flags = IRQCHIP_SET_TYPE_MASKED, 700 }; 701 702 #define GIC_ID_NR (1U << gic_data.rdists.id_bits) 703 704 static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, 705 irq_hw_number_t hw) 706 { 707 struct irq_chip *chip = &gic_chip; 708 709 if 
(static_key_true(&supports_deactivate)) 710 chip = &gic_eoimode1_chip; 711 712 /* SGIs are private to the core kernel */ 713 if (hw < 16) 714 return -EPERM; 715 /* Nothing here */ 716 if (hw >= gic_data.irq_nr && hw < 8192) 717 return -EPERM; 718 /* Off limits */ 719 if (hw >= GIC_ID_NR) 720 return -EPERM; 721 722 /* PPIs */ 723 if (hw < 32) { 724 irq_set_percpu_devid(irq); 725 irq_domain_set_info(d, irq, hw, chip, d->host_data, 726 handle_percpu_devid_irq, NULL, NULL); 727 irq_set_status_flags(irq, IRQ_NOAUTOEN); 728 } 729 /* SPIs */ 730 if (hw >= 32 && hw < gic_data.irq_nr) { 731 irq_domain_set_info(d, irq, hw, chip, d->host_data, 732 handle_fasteoi_irq, NULL, NULL); 733 irq_set_probe(irq); 734 } 735 /* LPIs */ 736 if (hw >= 8192 && hw < GIC_ID_NR) { 737 if (!gic_dist_supports_lpis()) 738 return -EPERM; 739 irq_domain_set_info(d, irq, hw, chip, d->host_data, 740 handle_fasteoi_irq, NULL, NULL); 741 } 742 743 return 0; 744 } 745 746 static int gic_irq_domain_translate(struct irq_domain *d, 747 struct irq_fwspec *fwspec, 748 unsigned long *hwirq, 749 unsigned int *type) 750 { 751 if (is_of_node(fwspec->fwnode)) { 752 if (fwspec->param_count < 3) 753 return -EINVAL; 754 755 switch (fwspec->param[0]) { 756 case 0: /* SPI */ 757 *hwirq = fwspec->param[1] + 32; 758 break; 759 case 1: /* PPI */ 760 *hwirq = fwspec->param[1] + 16; 761 break; 762 case GIC_IRQ_TYPE_LPI: /* LPI */ 763 *hwirq = fwspec->param[1]; 764 break; 765 default: 766 return -EINVAL; 767 } 768 769 *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK; 770 return 0; 771 } 772 773 if (is_fwnode_irqchip(fwspec->fwnode)) { 774 if(fwspec->param_count != 2) 775 return -EINVAL; 776 777 *hwirq = fwspec->param[0]; 778 *type = fwspec->param[1]; 779 return 0; 780 } 781 782 return -EINVAL; 783 } 784 785 static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, 786 unsigned int nr_irqs, void *arg) 787 { 788 int i, ret; 789 irq_hw_number_t hwirq; 790 unsigned int type = IRQ_TYPE_NONE; 791 struct 
irq_fwspec *fwspec = arg; 792 793 ret = gic_irq_domain_translate(domain, fwspec, &hwirq, &type); 794 if (ret) 795 return ret; 796 797 for (i = 0; i < nr_irqs; i++) 798 gic_irq_domain_map(domain, virq + i, hwirq + i); 799 800 return 0; 801 } 802 803 static void gic_irq_domain_free(struct irq_domain *domain, unsigned int virq, 804 unsigned int nr_irqs) 805 { 806 int i; 807 808 for (i = 0; i < nr_irqs; i++) { 809 struct irq_data *d = irq_domain_get_irq_data(domain, virq + i); 810 irq_set_handler(virq + i, NULL); 811 irq_domain_reset_irq_data(d); 812 } 813 } 814 815 static const struct irq_domain_ops gic_irq_domain_ops = { 816 .translate = gic_irq_domain_translate, 817 .alloc = gic_irq_domain_alloc, 818 .free = gic_irq_domain_free, 819 }; 820 821 static void gicv3_enable_quirks(void) 822 { 823 #ifdef CONFIG_ARM64 824 if (cpus_have_cap(ARM64_WORKAROUND_CAVIUM_23154)) 825 static_branch_enable(&is_cavium_thunderx); 826 #endif 827 } 828 829 static int __init gic_init_bases(void __iomem *dist_base, 830 struct redist_region *rdist_regs, 831 u32 nr_redist_regions, 832 u64 redist_stride, 833 struct fwnode_handle *handle) 834 { 835 struct device_node *node; 836 u32 typer; 837 int gic_irqs; 838 int err; 839 840 if (!is_hyp_mode_available()) 841 static_key_slow_dec(&supports_deactivate); 842 843 if (static_key_true(&supports_deactivate)) 844 pr_info("GIC: Using split EOI/Deactivate mode\n"); 845 846 gic_data.dist_base = dist_base; 847 gic_data.redist_regions = rdist_regs; 848 gic_data.nr_redist_regions = nr_redist_regions; 849 gic_data.redist_stride = redist_stride; 850 851 gicv3_enable_quirks(); 852 853 /* 854 * Find out how many interrupts are supported. 
	 * The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI)
	 */
	typer = readl_relaxed(gic_data.dist_base + GICD_TYPER);
	gic_data.rdists.id_bits = GICD_TYPER_ID_BITS(typer);
	gic_irqs = GICD_TYPER_IRQS(typer);
	if (gic_irqs > 1020)
		gic_irqs = 1020;
	gic_data.irq_nr = gic_irqs;

	gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops,
						 &gic_data);
	gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist));

	if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) {
		err = -ENOMEM;
		goto out_free;
	}

	set_handle_irq(gic_handle_irq);

	node = to_of_node(handle);
	if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis() &&
	    node) /* Temp hack to prevent ITS init for ACPI */
		its_init(node, &gic_data.rdists, gic_data.domain);

	gic_smp_init();
	gic_dist_init();
	gic_cpu_init();
	gic_cpu_pm_init();

	return 0;

out_free:
	if (gic_data.domain)
		irq_domain_remove(gic_data.domain);
	free_percpu(gic_data.rdists.rdist);
	return err;
}

/* Check GICD_PIDR2 to confirm we are actually driving a GICv3 or GICv4. */
static int __init gic_validate_dist_version(void __iomem *dist_base)
{
	u32 reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;

	if (reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4)
		return -ENODEV;

	return 0;
}

/* Device Tree probe path. */
static int __init gic_of_init(struct device_node *node, struct device_node *parent)
{
	void __iomem *dist_base;
	struct redist_region *rdist_regs;
	u64 redist_stride;
	u32 nr_redist_regions;
	int err, i;

	dist_base = of_iomap(node, 0);
	if (!dist_base) {
		pr_err("%s: unable to map gic dist registers\n",
		       node->full_name);
		return -ENXIO;
	}

	err = gic_validate_dist_version(dist_base);
	if (err) {
		pr_err("%s: no distributor detected, giving up\n",
		       node->full_name);
		goto out_unmap_dist;
	}

	if (of_property_read_u32(node, "#redistributor-regions", &nr_redist_regions))
		nr_redist_regions = 1;	/* default: a single region */

	rdist_regs = kzalloc(sizeof(*rdist_regs) * nr_redist_regions, GFP_KERNEL);
	if (!rdist_regs) {
		err = -ENOMEM;
		goto out_unmap_dist;
	}

	/* Map each redistributor region (reg index 0 is the distributor). */
	for (i = 0; i < nr_redist_regions; i++) {
		struct resource res;
		int ret;

		ret = of_address_to_resource(node, 1 + i, &res);
		rdist_regs[i].redist_base = of_iomap(node, 1 + i);
		if (ret || !rdist_regs[i].redist_base) {
			pr_err("%s: couldn't map region %d\n",
			       node->full_name, i);
			err = -ENODEV;
			goto out_unmap_rdist;
		}
		rdist_regs[i].phys_base = res.start;
	}

	if (of_property_read_u64(node, "redistributor-stride", &redist_stride))
		redist_stride = 0;

	err = gic_init_bases(dist_base, rdist_regs, nr_redist_regions,
			     redist_stride, &node->fwnode);
	if (!err)
		return 0;

out_unmap_rdist:
	for (i = 0; i < nr_redist_regions; i++)
		if (rdist_regs[i].redist_base)
			iounmap(rdist_regs[i].redist_base);
	kfree(rdist_regs);
out_unmap_dist:
	iounmap(dist_base);
	return err;
}

IRQCHIP_DECLARE(gic_v3, "arm,gic-v3", gic_of_init);

#ifdef CONFIG_ACPI
/* ACPI probe state, filled in while walking the MADT. */
static void __iomem *dist_base;
static struct redist_region *redist_regs __initdata;
static u32 nr_redist_regions __initdata;
static bool single_redist;	/* GICC-described (one RD per CPU) vs GICR regions */

static void __init
gic_acpi_register_redist(phys_addr_t phys_base, void __iomem *redist_base)
{
	static int count = 0;	/* next free slot in redist_regs[] */

	redist_regs[count].phys_base = phys_base;
	redist_regs[count].redist_base = redist_base;
	redist_regs[count].single_redist = single_redist;
	count++;
}

/* MADT GICR subtable handler: map one redistributor region. */
static int __init
gic_acpi_parse_madt_redist(struct acpi_subtable_header *header,
			   const unsigned long end)
{
	struct acpi_madt_generic_redistributor *redist =
		(struct acpi_madt_generic_redistributor *)header;
	void __iomem *redist_base;

	redist_base = ioremap(redist->base_address,
 redist->length);
	if (!redist_base) {
		pr_err("Couldn't map GICR region @%llx\n", redist->base_address);
		return -ENOMEM;
	}

	gic_acpi_register_redist(redist->base_address, redist_base);
	return 0;
}

/* MADT GICC subtable handler: one redistributor frame per CPU interface. */
static int __init
gic_acpi_parse_madt_gicc(struct acpi_subtable_header *header,
			 const unsigned long end)
{
	struct acpi_madt_generic_interrupt *gicc =
		(struct acpi_madt_generic_interrupt *)header;
	/* GICv4 frames are 4x64K (add VLPI + reserved pages), GICv3 2x64K. */
	u32 reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
	u32 size = reg == GIC_PIDR2_ARCH_GICv4 ? SZ_64K * 4 : SZ_64K * 2;
	void __iomem *redist_base;

	redist_base = ioremap(gicc->gicr_base_address, size);
	if (!redist_base)
		return -ENOMEM;

	gic_acpi_register_redist(gicc->gicr_base_address, redist_base);
	return 0;
}

static int __init gic_acpi_collect_gicr_base(void)
{
	acpi_tbl_entry_handler redist_parser;
	enum acpi_madt_type type;

	if (single_redist) {
		type = ACPI_MADT_TYPE_GENERIC_INTERRUPT;
		redist_parser = gic_acpi_parse_madt_gicc;
	} else {
		type = ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR;
		redist_parser = gic_acpi_parse_madt_redist;
	}

	/* Collect redistributor base addresses in GICR entries */
	if (acpi_table_parse_madt(type, redist_parser, 0) > 0)
		return 0;

	pr_info("No valid GICR entries exist\n");
	return -ENODEV;
}

static int __init gic_acpi_match_gicr(struct acpi_subtable_header *header,
				      const unsigned long end)
{
	/* Subtable presence means that redist exists, that's it */
	return 0;
}

static int __init gic_acpi_match_gicc(struct acpi_subtable_header *header,
				      const unsigned long end)
{
	struct acpi_madt_generic_interrupt *gicc =
		(struct acpi_madt_generic_interrupt *)header;

	/*
	 * If GICC is enabled and has valid gicr base address, then it means
	 * GICR base is presented via GICC
	 */
	if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address)
		return 0;

	return -ENODEV;
}

static int __init gic_acpi_count_gicr_regions(void)
{
	int count;

	/*
	 * Count how many redistributor regions we have. It is not allowed
	 * to mix redistributor description, GICR and GICC subtables have to be
	 * mutually exclusive.
	 */
	count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR,
				      gic_acpi_match_gicr, 0);
	if (count > 0) {
		single_redist = false;
		return count;
	}

	count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
				      gic_acpi_match_gicc, 0);
	if (count > 0)
		single_redist = true;

	return count;
}

static bool __init acpi_validate_gic_table(struct acpi_subtable_header *header,
					   struct acpi_probe_entry *ape)
{
	struct acpi_madt_generic_distributor *dist;
	int count;

	dist = (struct acpi_madt_generic_distributor *)header;
	if (dist->version != ape->driver_data)
		return false;

	/* We need to do that exercise anyway, the sooner the better */
	count = gic_acpi_count_gicr_regions();
	if (count <= 0)
		return false;

	nr_redist_regions = count;
	return true;
}

#define ACPI_GICV3_DIST_MEM_SIZE (SZ_64K)

/* ACPI probe path: map GICD, collect GICRs, hand off to gic_init_bases(). */
static int __init
gic_acpi_init(struct acpi_subtable_header *header, const unsigned long end)
{
	struct acpi_madt_generic_distributor *dist;
	struct fwnode_handle *domain_handle;
	int i, err;

	/* Get distributor base address */
	dist = (struct acpi_madt_generic_distributor *)header;
	dist_base = ioremap(dist->base_address, ACPI_GICV3_DIST_MEM_SIZE);
	if (!dist_base) {
		pr_err("Unable to map GICD registers\n");
		return -ENOMEM;
	}

	err = gic_validate_dist_version(dist_base);
	if (err) {
		pr_err("No distributor detected at @%p, giving up", dist_base);
		goto out_dist_unmap;
	}

	redist_regs = kzalloc(sizeof(*redist_regs) * nr_redist_regions,
			      GFP_KERNEL);
	if (!redist_regs) {
		err = -ENOMEM;
		goto out_dist_unmap;
	}

	err = gic_acpi_collect_gicr_base();
	if (err)
		goto out_redist_unmap;

	domain_handle = irq_domain_alloc_fwnode(dist_base);
	if (!domain_handle) {
		err = -ENOMEM;
		goto out_redist_unmap;
	}

	err = gic_init_bases(dist_base, redist_regs, nr_redist_regions, 0,
			     domain_handle);
	if (err)
		goto out_fwhandle_free;

	acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, domain_handle);
	return 0;

out_fwhandle_free:
	irq_domain_free_fwnode(domain_handle);
out_redist_unmap:
	for (i = 0; i < nr_redist_regions; i++)
		if (redist_regs[i].redist_base)
			iounmap(redist_regs[i].redist_base);
	kfree(redist_regs);
out_dist_unmap:
	iounmap(dist_base);
	return err;
}
IRQCHIP_ACPI_DECLARE(gic_v3, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
		     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V3,
		     gic_acpi_init);
IRQCHIP_ACPI_DECLARE(gic_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
		     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V4,
		     gic_acpi_init);
IRQCHIP_ACPI_DECLARE(gic_v3_or_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
		     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_NONE,
		     gic_acpi_init);
#endif