// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#define pr_fmt(fmt)	"GICv3: " fmt

#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/percpu.h>
#include <linux/refcount.h>
#include <linux/slab.h>

#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic-common.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/irqchip/irq-partition-percpu.h>

#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>
#include <asm/virt.h>

#include "irq-gic-common.h"

#define GICD_INT_NMI_PRI	(GICD_INT_DEF_PRI & ~0x80)

#define FLAGS_WORKAROUND_GICR_WAKER_MSM8996	(1ULL << 0)

struct redist_region {
	void __iomem		*redist_base;
	phys_addr_t		phys_base;
	bool			single_redist;
};

struct gic_chip_data {
	struct fwnode_handle	*fwnode;
	void __iomem		*dist_base;
	struct redist_region	*redist_regions;
	struct rdists		rdists;
	struct irq_domain	*domain;
	u64			redist_stride;
	u32			nr_redist_regions;
	u64			flags;
	bool			has_rss;
	unsigned int		ppi_nr;
	struct partition_desc	**ppi_descs;
};

static struct gic_chip_data gic_data __read_mostly;
static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);

#define GIC_ID_NR	(1U << GICD_TYPER_ID_BITS(gic_data.rdists.gicd_typer))
#define GIC_LINE_NR	min(GICD_TYPER_SPIS(gic_data.rdists.gicd_typer), 1020U)
#define GIC_ESPI_NR	GICD_TYPER_ESPIS(gic_data.rdists.gicd_typer)

/*
 * The behaviours of RPR and PMR registers differ depending on the value of
 * SCR_EL3.FIQ, and the behaviour of non-secure priority registers of the
 * distributor and redistributors depends on whether security is enabled in
 * the GIC.
 *
 * When security is enabled, non-secure priority values from the
 * (re)distributor are presented to the GIC CPUIF as follows:
 *     (GIC_(R)DIST_PRI[irq] >> 1) | 0x80;
 * e.g. a priority of 0xd0 programmed into GICD_IPRIORITYR is seen as
 * (0xd0 >> 1) | 0x80 = 0xe8 by the CPU interface.
 *
 * If SCR_EL3.FIQ == 1, the values written to/read from PMR and RPR at
 * non-secure EL1 are subject to a similar operation, thus matching the
 * priorities presented from the (re)distributor when security is enabled.
 *
 * see GICv3/GICv4 Architecture Specification (IHI0069D):
 * - section 4.8.1 Non-secure accesses to register fields for Secure interrupt
 *   priorities.
 * - Figure 4-7 Secure read of the priority field for a Non-secure Group 1
 *   interrupt.
 *
 * For now, we only support pseudo-NMIs if we have a non-secure view of
 * priorities.
 */
static DEFINE_STATIC_KEY_FALSE(supports_pseudo_nmis);

/*
 * Global static key controlling whether an update to PMR allowing more
 * interrupts needs to be propagated to the redistributor (DSB SY).
 * And this needs to be exported for modules to be able to enable
 * interrupts...
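 *
 * The key is only switched on from gic_enable_nmi_support(), further down,
 * when ICC_CTLR_EL1.PMHE is set.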
95 */ 96 DEFINE_STATIC_KEY_FALSE(gic_pmr_sync); 97 EXPORT_SYMBOL(gic_pmr_sync); 98 99 /* ppi_nmi_refs[n] == number of cpus having ppi[n + 16] set as NMI */ 100 static refcount_t *ppi_nmi_refs; 101 102 static struct gic_kvm_info gic_v3_kvm_info; 103 static DEFINE_PER_CPU(bool, has_rss); 104 105 #define MPIDR_RS(mpidr) (((mpidr) & 0xF0UL) >> 4) 106 #define gic_data_rdist() (this_cpu_ptr(gic_data.rdists.rdist)) 107 #define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base) 108 #define gic_data_rdist_sgi_base() (gic_data_rdist_rd_base() + SZ_64K) 109 110 /* Our default, arbitrary priority value. Linux only uses one anyway. */ 111 #define DEFAULT_PMR_VALUE 0xf0 112 113 enum gic_intid_range { 114 PPI_RANGE, 115 SPI_RANGE, 116 EPPI_RANGE, 117 ESPI_RANGE, 118 LPI_RANGE, 119 __INVALID_RANGE__ 120 }; 121 122 static enum gic_intid_range __get_intid_range(irq_hw_number_t hwirq) 123 { 124 switch (hwirq) { 125 case 16 ... 31: 126 return PPI_RANGE; 127 case 32 ... 1019: 128 return SPI_RANGE; 129 case EPPI_BASE_INTID ... (EPPI_BASE_INTID + 63): 130 return EPPI_RANGE; 131 case ESPI_BASE_INTID ... (ESPI_BASE_INTID + 1023): 132 return ESPI_RANGE; 133 case 8192 ... GENMASK(23, 0): 134 return LPI_RANGE; 135 default: 136 return __INVALID_RANGE__; 137 } 138 } 139 140 static enum gic_intid_range get_intid_range(struct irq_data *d) 141 { 142 return __get_intid_range(d->hwirq); 143 } 144 145 static inline unsigned int gic_irq(struct irq_data *d) 146 { 147 return d->hwirq; 148 } 149 150 static inline int gic_irq_in_rdist(struct irq_data *d) 151 { 152 enum gic_intid_range range = get_intid_range(d); 153 return range == PPI_RANGE || range == EPPI_RANGE; 154 } 155 156 static inline void __iomem *gic_dist_base(struct irq_data *d) 157 { 158 switch (get_intid_range(d)) { 159 case PPI_RANGE: 160 case EPPI_RANGE: 161 /* SGI+PPI -> SGI_base for this CPU */ 162 return gic_data_rdist_sgi_base(); 163 164 case SPI_RANGE: 165 case ESPI_RANGE: 166 /* SPI -> dist_base */ 167 return gic_data.dist_base; 168 169 default: 170 return NULL; 171 } 172 } 173 174 static void gic_do_wait_for_rwp(void __iomem *base) 175 { 176 u32 count = 1000000; /* 1s! */ 177 178 while (readl_relaxed(base + GICD_CTLR) & GICD_CTLR_RWP) { 179 count--; 180 if (!count) { 181 pr_err_ratelimited("RWP timeout, gone fishing\n"); 182 return; 183 } 184 cpu_relax(); 185 udelay(1); 186 } 187 } 188 189 /* Wait for completion of a distributor change */ 190 static void gic_dist_wait_for_rwp(void) 191 { 192 gic_do_wait_for_rwp(gic_data.dist_base); 193 } 194 195 /* Wait for completion of a redistributor change */ 196 static void gic_redist_wait_for_rwp(void) 197 { 198 gic_do_wait_for_rwp(gic_data_rdist_rd_base()); 199 } 200 201 #ifdef CONFIG_ARM64 202 203 static u64 __maybe_unused gic_read_iar(void) 204 { 205 if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_23154)) 206 return gic_read_iar_cavium_thunderx(); 207 else 208 return gic_read_iar_common(); 209 } 210 #endif 211 212 static void gic_enable_redist(bool enable) 213 { 214 void __iomem *rbase; 215 u32 count = 1000000; /* 1s! 
*/ 216 u32 val; 217 218 if (gic_data.flags & FLAGS_WORKAROUND_GICR_WAKER_MSM8996) 219 return; 220 221 rbase = gic_data_rdist_rd_base(); 222 223 val = readl_relaxed(rbase + GICR_WAKER); 224 if (enable) 225 /* Wake up this CPU redistributor */ 226 val &= ~GICR_WAKER_ProcessorSleep; 227 else 228 val |= GICR_WAKER_ProcessorSleep; 229 writel_relaxed(val, rbase + GICR_WAKER); 230 231 if (!enable) { /* Check that GICR_WAKER is writeable */ 232 val = readl_relaxed(rbase + GICR_WAKER); 233 if (!(val & GICR_WAKER_ProcessorSleep)) 234 return; /* No PM support in this redistributor */ 235 } 236 237 while (--count) { 238 val = readl_relaxed(rbase + GICR_WAKER); 239 if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep)) 240 break; 241 cpu_relax(); 242 udelay(1); 243 } 244 if (!count) 245 pr_err_ratelimited("redistributor failed to %s...\n", 246 enable ? "wakeup" : "sleep"); 247 } 248 249 /* 250 * Routines to disable, enable, EOI and route interrupts 251 */ 252 static u32 convert_offset_index(struct irq_data *d, u32 offset, u32 *index) 253 { 254 switch (get_intid_range(d)) { 255 case PPI_RANGE: 256 case SPI_RANGE: 257 *index = d->hwirq; 258 return offset; 259 case EPPI_RANGE: 260 /* 261 * Contrary to the ESPI range, the EPPI range is contiguous 262 * to the PPI range in the registers, so let's adjust the 263 * displacement accordingly. Consistency is overrated. 264 */ 265 *index = d->hwirq - EPPI_BASE_INTID + 32; 266 return offset; 267 case ESPI_RANGE: 268 *index = d->hwirq - ESPI_BASE_INTID; 269 switch (offset) { 270 case GICD_ISENABLER: 271 return GICD_ISENABLERnE; 272 case GICD_ICENABLER: 273 return GICD_ICENABLERnE; 274 case GICD_ISPENDR: 275 return GICD_ISPENDRnE; 276 case GICD_ICPENDR: 277 return GICD_ICPENDRnE; 278 case GICD_ISACTIVER: 279 return GICD_ISACTIVERnE; 280 case GICD_ICACTIVER: 281 return GICD_ICACTIVERnE; 282 case GICD_IPRIORITYR: 283 return GICD_IPRIORITYRnE; 284 case GICD_ICFGR: 285 return GICD_ICFGRnE; 286 case GICD_IROUTER: 287 return GICD_IROUTERnE; 288 default: 289 break; 290 } 291 break; 292 default: 293 break; 294 } 295 296 WARN_ON(1); 297 *index = d->hwirq; 298 return offset; 299 } 300 301 static int gic_peek_irq(struct irq_data *d, u32 offset) 302 { 303 void __iomem *base; 304 u32 index, mask; 305 306 offset = convert_offset_index(d, offset, &index); 307 mask = 1 << (index % 32); 308 309 if (gic_irq_in_rdist(d)) 310 base = gic_data_rdist_sgi_base(); 311 else 312 base = gic_data.dist_base; 313 314 return !!(readl_relaxed(base + offset + (index / 32) * 4) & mask); 315 } 316 317 static void gic_poke_irq(struct irq_data *d, u32 offset) 318 { 319 void (*rwp_wait)(void); 320 void __iomem *base; 321 u32 index, mask; 322 323 offset = convert_offset_index(d, offset, &index); 324 mask = 1 << (index % 32); 325 326 if (gic_irq_in_rdist(d)) { 327 base = gic_data_rdist_sgi_base(); 328 rwp_wait = gic_redist_wait_for_rwp; 329 } else { 330 base = gic_data.dist_base; 331 rwp_wait = gic_dist_wait_for_rwp; 332 } 333 334 writel_relaxed(mask, base + offset + (index / 32) * 4); 335 rwp_wait(); 336 } 337 338 static void gic_mask_irq(struct irq_data *d) 339 { 340 gic_poke_irq(d, GICD_ICENABLER); 341 } 342 343 static void gic_eoimode1_mask_irq(struct irq_data *d) 344 { 345 gic_mask_irq(d); 346 /* 347 * When masking a forwarded interrupt, make sure it is 348 * deactivated as well. 349 * 350 * This ensures that an interrupt that is getting 351 * disabled/masked will not get "stuck", because there is 352 * noone to deactivate it (guest is being terminated). 
353 */ 354 if (irqd_is_forwarded_to_vcpu(d)) 355 gic_poke_irq(d, GICD_ICACTIVER); 356 } 357 358 static void gic_unmask_irq(struct irq_data *d) 359 { 360 gic_poke_irq(d, GICD_ISENABLER); 361 } 362 363 static inline bool gic_supports_nmi(void) 364 { 365 return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && 366 static_branch_likely(&supports_pseudo_nmis); 367 } 368 369 static int gic_irq_set_irqchip_state(struct irq_data *d, 370 enum irqchip_irq_state which, bool val) 371 { 372 u32 reg; 373 374 if (d->hwirq >= 8192) /* PPI/SPI only */ 375 return -EINVAL; 376 377 switch (which) { 378 case IRQCHIP_STATE_PENDING: 379 reg = val ? GICD_ISPENDR : GICD_ICPENDR; 380 break; 381 382 case IRQCHIP_STATE_ACTIVE: 383 reg = val ? GICD_ISACTIVER : GICD_ICACTIVER; 384 break; 385 386 case IRQCHIP_STATE_MASKED: 387 reg = val ? GICD_ICENABLER : GICD_ISENABLER; 388 break; 389 390 default: 391 return -EINVAL; 392 } 393 394 gic_poke_irq(d, reg); 395 return 0; 396 } 397 398 static int gic_irq_get_irqchip_state(struct irq_data *d, 399 enum irqchip_irq_state which, bool *val) 400 { 401 if (d->hwirq >= 8192) /* PPI/SPI only */ 402 return -EINVAL; 403 404 switch (which) { 405 case IRQCHIP_STATE_PENDING: 406 *val = gic_peek_irq(d, GICD_ISPENDR); 407 break; 408 409 case IRQCHIP_STATE_ACTIVE: 410 *val = gic_peek_irq(d, GICD_ISACTIVER); 411 break; 412 413 case IRQCHIP_STATE_MASKED: 414 *val = !gic_peek_irq(d, GICD_ISENABLER); 415 break; 416 417 default: 418 return -EINVAL; 419 } 420 421 return 0; 422 } 423 424 static void gic_irq_set_prio(struct irq_data *d, u8 prio) 425 { 426 void __iomem *base = gic_dist_base(d); 427 u32 offset, index; 428 429 offset = convert_offset_index(d, GICD_IPRIORITYR, &index); 430 431 writeb_relaxed(prio, base + offset + index); 432 } 433 434 static u32 gic_get_ppi_index(struct irq_data *d) 435 { 436 switch (get_intid_range(d)) { 437 case PPI_RANGE: 438 return d->hwirq - 16; 439 case EPPI_RANGE: 440 return d->hwirq - EPPI_BASE_INTID + 16; 441 default: 442 unreachable(); 443 } 444 } 445 446 static int gic_irq_nmi_setup(struct irq_data *d) 447 { 448 struct irq_desc *desc = irq_to_desc(d->irq); 449 450 if (!gic_supports_nmi()) 451 return -EINVAL; 452 453 if (gic_peek_irq(d, GICD_ISENABLER)) { 454 pr_err("Cannot set NMI property of enabled IRQ %u\n", d->irq); 455 return -EINVAL; 456 } 457 458 /* 459 * A secondary irq_chip should be in charge of LPI request, 460 * it should not be possible to get there 461 */ 462 if (WARN_ON(gic_irq(d) >= 8192)) 463 return -EINVAL; 464 465 /* desc lock should already be held */ 466 if (gic_irq_in_rdist(d)) { 467 u32 idx = gic_get_ppi_index(d); 468 469 /* Setting up PPI as NMI, only switch handler for first NMI */ 470 if (!refcount_inc_not_zero(&ppi_nmi_refs[idx])) { 471 refcount_set(&ppi_nmi_refs[idx], 1); 472 desc->handle_irq = handle_percpu_devid_fasteoi_nmi; 473 } 474 } else { 475 desc->handle_irq = handle_fasteoi_nmi; 476 } 477 478 gic_irq_set_prio(d, GICD_INT_NMI_PRI); 479 480 return 0; 481 } 482 483 static void gic_irq_nmi_teardown(struct irq_data *d) 484 { 485 struct irq_desc *desc = irq_to_desc(d->irq); 486 487 if (WARN_ON(!gic_supports_nmi())) 488 return; 489 490 if (gic_peek_irq(d, GICD_ISENABLER)) { 491 pr_err("Cannot set NMI property of enabled IRQ %u\n", d->irq); 492 return; 493 } 494 495 /* 496 * A secondary irq_chip should be in charge of LPI request, 497 * it should not be possible to get there 498 */ 499 if (WARN_ON(gic_irq(d) >= 8192)) 500 return; 501 502 /* desc lock should already be held */ 503 if (gic_irq_in_rdist(d)) { 504 u32 idx = gic_get_ppi_index(d); 
505 506 /* Tearing down NMI, only switch handler for last NMI */ 507 if (refcount_dec_and_test(&ppi_nmi_refs[idx])) 508 desc->handle_irq = handle_percpu_devid_irq; 509 } else { 510 desc->handle_irq = handle_fasteoi_irq; 511 } 512 513 gic_irq_set_prio(d, GICD_INT_DEF_PRI); 514 } 515 516 static void gic_eoi_irq(struct irq_data *d) 517 { 518 gic_write_eoir(gic_irq(d)); 519 } 520 521 static void gic_eoimode1_eoi_irq(struct irq_data *d) 522 { 523 /* 524 * No need to deactivate an LPI, or an interrupt that 525 * is is getting forwarded to a vcpu. 526 */ 527 if (gic_irq(d) >= 8192 || irqd_is_forwarded_to_vcpu(d)) 528 return; 529 gic_write_dir(gic_irq(d)); 530 } 531 532 static int gic_set_type(struct irq_data *d, unsigned int type) 533 { 534 enum gic_intid_range range; 535 unsigned int irq = gic_irq(d); 536 void (*rwp_wait)(void); 537 void __iomem *base; 538 u32 offset, index; 539 int ret; 540 541 /* Interrupt configuration for SGIs can't be changed */ 542 if (irq < 16) 543 return -EINVAL; 544 545 range = get_intid_range(d); 546 547 /* SPIs have restrictions on the supported types */ 548 if ((range == SPI_RANGE || range == ESPI_RANGE) && 549 type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING) 550 return -EINVAL; 551 552 if (gic_irq_in_rdist(d)) { 553 base = gic_data_rdist_sgi_base(); 554 rwp_wait = gic_redist_wait_for_rwp; 555 } else { 556 base = gic_data.dist_base; 557 rwp_wait = gic_dist_wait_for_rwp; 558 } 559 560 offset = convert_offset_index(d, GICD_ICFGR, &index); 561 562 ret = gic_configure_irq(index, type, base + offset, rwp_wait); 563 if (ret && (range == PPI_RANGE || range == EPPI_RANGE)) { 564 /* Misconfigured PPIs are usually not fatal */ 565 pr_warn("GIC: PPI INTID%d is secure or misconfigured\n", irq); 566 ret = 0; 567 } 568 569 return ret; 570 } 571 572 static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu) 573 { 574 if (vcpu) 575 irqd_set_forwarded_to_vcpu(d); 576 else 577 irqd_clr_forwarded_to_vcpu(d); 578 return 0; 579 } 580 581 static u64 gic_mpidr_to_affinity(unsigned long mpidr) 582 { 583 u64 aff; 584 585 aff = ((u64)MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 | 586 MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 | 587 MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 | 588 MPIDR_AFFINITY_LEVEL(mpidr, 0)); 589 590 return aff; 591 } 592 593 static void gic_deactivate_unhandled(u32 irqnr) 594 { 595 if (static_branch_likely(&supports_deactivate_key)) { 596 if (irqnr < 8192) 597 gic_write_dir(irqnr); 598 } else { 599 gic_write_eoir(irqnr); 600 } 601 } 602 603 static inline void gic_handle_nmi(u32 irqnr, struct pt_regs *regs) 604 { 605 bool irqs_enabled = interrupts_enabled(regs); 606 int err; 607 608 if (irqs_enabled) 609 nmi_enter(); 610 611 if (static_branch_likely(&supports_deactivate_key)) 612 gic_write_eoir(irqnr); 613 /* 614 * Leave the PSR.I bit set to prevent other NMIs to be 615 * received while handling this one. 616 * PSR.I will be restored when we ERET to the 617 * interrupted context. 
 */
	err = handle_domain_nmi(gic_data.domain, irqnr, regs);
	if (err)
		gic_deactivate_unhandled(irqnr);

	if (irqs_enabled)
		nmi_exit();
}

static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
	u32 irqnr;

	irqnr = gic_read_iar();

	if (gic_supports_nmi() &&
	    unlikely(gic_read_rpr() == GICD_INT_NMI_PRI)) {
		gic_handle_nmi(irqnr, regs);
		return;
	}

	if (gic_prio_masking_enabled()) {
		gic_pmr_mask_irqs();
		gic_arch_enable_irqs();
	}

	/* Check for special IDs first */
	if ((irqnr >= 1020 && irqnr <= 1023))
		return;

	/* Treat anything but SGIs in a uniform way */
	if (likely(irqnr > 15)) {
		int err;

		if (static_branch_likely(&supports_deactivate_key))
			gic_write_eoir(irqnr);
		else
			isb();

		err = handle_domain_irq(gic_data.domain, irqnr, regs);
		if (err) {
			WARN_ONCE(true, "Unexpected interrupt received!\n");
			gic_deactivate_unhandled(irqnr);
		}
		return;
	}
	if (irqnr < 16) {
		gic_write_eoir(irqnr);
		if (static_branch_likely(&supports_deactivate_key))
			gic_write_dir(irqnr);
#ifdef CONFIG_SMP
		/*
		 * Unlike GICv2, we don't need an smp_rmb() here.
		 * The control dependency from gic_read_iar to
		 * the ISB in gic_write_eoir is enough to ensure
		 * that any shared data read by handle_IPI will
		 * be read after the ACK.
		 */
		handle_IPI(irqnr, regs);
#else
		WARN_ONCE(true, "Unexpected SGI received!\n");
#endif
	}
}

static u32 gic_get_pribits(void)
{
	u32 pribits;

	pribits = gic_read_ctlr();
	pribits &= ICC_CTLR_EL1_PRI_BITS_MASK;
	pribits >>= ICC_CTLR_EL1_PRI_BITS_SHIFT;
	pribits++;

	return pribits;
}

static bool gic_has_group0(void)
{
	u32 val;
	u32 old_pmr;

	old_pmr = gic_read_pmr();

	/*
	 * Let's find out if Group0 is under control of EL3 or not by
	 * setting the highest possible, non-zero priority in PMR.
	 *
	 * If SCR_EL3.FIQ is set, the priority gets shifted down in
	 * order for the CPU interface to set bit 7, and keep the
	 * actual priority in the non-secure range. In the process, it
	 * loses the least significant bit and the actual priority
	 * becomes 0x80. Reading it back returns 0, indicating that
	 * we don't have access to Group0.
	 */
	gic_write_pmr(BIT(8 - gic_get_pribits()));
	val = gic_read_pmr();

	gic_write_pmr(old_pmr);

	return val != 0;
}

static void __init gic_dist_init(void)
{
	unsigned int i;
	u64 affinity;
	void __iomem *base = gic_data.dist_base;

	/* Disable the distributor */
	writel_relaxed(0, base + GICD_CTLR);
	gic_dist_wait_for_rwp();

	/*
	 * Configure SPIs as non-secure Group-1. This will only matter
	 * if the GIC only has a single security state. This will not
	 * do the right thing if the kernel is running in secure mode,
	 * but that's not the intended use case anyway.
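	 * Whether the GIC has a single security state can be read back
	 * from GICD_CTLR.DS (see gic_dist_security_disabled()).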
736 */ 737 for (i = 32; i < GIC_LINE_NR; i += 32) 738 writel_relaxed(~0, base + GICD_IGROUPR + i / 8); 739 740 /* Extended SPI range, not handled by the GICv2/GICv3 common code */ 741 for (i = 0; i < GIC_ESPI_NR; i += 32) { 742 writel_relaxed(~0U, base + GICD_ICENABLERnE + i / 8); 743 writel_relaxed(~0U, base + GICD_ICACTIVERnE + i / 8); 744 } 745 746 for (i = 0; i < GIC_ESPI_NR; i += 32) 747 writel_relaxed(~0U, base + GICD_IGROUPRnE + i / 8); 748 749 for (i = 0; i < GIC_ESPI_NR; i += 16) 750 writel_relaxed(0, base + GICD_ICFGRnE + i / 4); 751 752 for (i = 0; i < GIC_ESPI_NR; i += 4) 753 writel_relaxed(GICD_INT_DEF_PRI_X4, base + GICD_IPRIORITYRnE + i); 754 755 /* Now do the common stuff, and wait for the distributor to drain */ 756 gic_dist_config(base, GIC_LINE_NR, gic_dist_wait_for_rwp); 757 758 /* Enable distributor with ARE, Group1 */ 759 writel_relaxed(GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1, 760 base + GICD_CTLR); 761 762 /* 763 * Set all global interrupts to the boot CPU only. ARE must be 764 * enabled. 765 */ 766 affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id())); 767 for (i = 32; i < GIC_LINE_NR; i++) 768 gic_write_irouter(affinity, base + GICD_IROUTER + i * 8); 769 770 for (i = 0; i < GIC_ESPI_NR; i++) 771 gic_write_irouter(affinity, base + GICD_IROUTERnE + i * 8); 772 } 773 774 static int gic_iterate_rdists(int (*fn)(struct redist_region *, void __iomem *)) 775 { 776 int ret = -ENODEV; 777 int i; 778 779 for (i = 0; i < gic_data.nr_redist_regions; i++) { 780 void __iomem *ptr = gic_data.redist_regions[i].redist_base; 781 u64 typer; 782 u32 reg; 783 784 reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK; 785 if (reg != GIC_PIDR2_ARCH_GICv3 && 786 reg != GIC_PIDR2_ARCH_GICv4) { /* We're in trouble... */ 787 pr_warn("No redistributor present @%p\n", ptr); 788 break; 789 } 790 791 do { 792 typer = gic_read_typer(ptr + GICR_TYPER); 793 ret = fn(gic_data.redist_regions + i, ptr); 794 if (!ret) 795 return 0; 796 797 if (gic_data.redist_regions[i].single_redist) 798 break; 799 800 if (gic_data.redist_stride) { 801 ptr += gic_data.redist_stride; 802 } else { 803 ptr += SZ_64K * 2; /* Skip RD_base + SGI_base */ 804 if (typer & GICR_TYPER_VLPIS) 805 ptr += SZ_64K * 2; /* Skip VLPI_base + reserved page */ 806 } 807 } while (!(typer & GICR_TYPER_LAST)); 808 } 809 810 return ret ? -ENODEV : 0; 811 } 812 813 static int __gic_populate_rdist(struct redist_region *region, void __iomem *ptr) 814 { 815 unsigned long mpidr = cpu_logical_map(smp_processor_id()); 816 u64 typer; 817 u32 aff; 818 819 /* 820 * Convert affinity to a 32bit value that can be matched to 821 * GICR_TYPER bits [63:32]. 822 */ 823 aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 | 824 MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 | 825 MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 | 826 MPIDR_AFFINITY_LEVEL(mpidr, 0)); 827 828 typer = gic_read_typer(ptr + GICR_TYPER); 829 if ((typer >> 32) == aff) { 830 u64 offset = ptr - region->redist_base; 831 gic_data_rdist_rd_base() = ptr; 832 gic_data_rdist()->phys_base = region->phys_base + offset; 833 834 pr_info("CPU%d: found redistributor %lx region %d:%pa\n", 835 smp_processor_id(), mpidr, 836 (int)(region - gic_data.redist_regions), 837 &gic_data_rdist()->phys_base); 838 return 0; 839 } 840 841 /* Try next one */ 842 return 1; 843 } 844 845 static int gic_populate_rdist(void) 846 { 847 if (gic_iterate_rdists(__gic_populate_rdist) == 0) 848 return 0; 849 850 /* We couldn't even deal with ourselves... 
 */
	WARN(true, "CPU%d: mpidr %lx has no re-distributor!\n",
	     smp_processor_id(),
	     (unsigned long)cpu_logical_map(smp_processor_id()));
	return -ENODEV;
}

static int __gic_update_rdist_properties(struct redist_region *region,
					 void __iomem *ptr)
{
	u64 typer = gic_read_typer(ptr + GICR_TYPER);

	gic_data.rdists.has_vlpis &= !!(typer & GICR_TYPER_VLPIS);

	/* RVPEID implies some form of DirectLPI, no matter what the doc says... :-/ */
	gic_data.rdists.has_rvpeid &= !!(typer & GICR_TYPER_RVPEID);
	gic_data.rdists.has_direct_lpi &= (!!(typer & GICR_TYPER_DirectLPIS) |
					   gic_data.rdists.has_rvpeid);

	/* Detect non-sensical configurations */
	if (WARN_ON_ONCE(gic_data.rdists.has_rvpeid && !gic_data.rdists.has_vlpis)) {
		gic_data.rdists.has_direct_lpi = false;
		gic_data.rdists.has_vlpis = false;
		gic_data.rdists.has_rvpeid = false;
	}

	gic_data.ppi_nr = min(GICR_TYPER_NR_PPIS(typer), gic_data.ppi_nr);

	return 1;
}

static void gic_update_rdist_properties(void)
{
	gic_data.ppi_nr = UINT_MAX;
	gic_iterate_rdists(__gic_update_rdist_properties);
	if (WARN_ON(gic_data.ppi_nr == UINT_MAX))
		gic_data.ppi_nr = 0;
	pr_info("%d PPIs implemented\n", gic_data.ppi_nr);
	pr_info("%sVLPI support, %sdirect LPI support, %sRVPEID support\n",
		!gic_data.rdists.has_vlpis ? "no " : "",
		!gic_data.rdists.has_direct_lpi ? "no " : "",
		!gic_data.rdists.has_rvpeid ? "no " : "");
}

/* Check whether it's a single security state view */
static inline bool gic_dist_security_disabled(void)
{
	return readl_relaxed(gic_data.dist_base + GICD_CTLR) & GICD_CTLR_DS;
}

static void gic_cpu_sys_reg_init(void)
{
	int i, cpu = smp_processor_id();
	u64 mpidr = cpu_logical_map(cpu);
	u64 need_rss = MPIDR_RS(mpidr);
	bool group0;
	u32 pribits;

	/*
	 * Need to check that the SRE bit has actually been set. If
	 * not, it means that SRE is disabled at EL2. We're going to
	 * die painfully, and there is nothing we can do about it.
	 *
	 * Kindly inform the luser.
	 */
	if (!gic_enable_sre())
		pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n");

	pribits = gic_get_pribits();

	group0 = gic_has_group0();

	/* Set priority mask register */
	if (!gic_prio_masking_enabled()) {
		write_gicreg(DEFAULT_PMR_VALUE, ICC_PMR_EL1);
	} else {
		/*
		 * Mismatched configuration with the boot CPU; the system is
		 * likely to die as interrupt masking will not work properly
		 * on all CPUs
		 */
		WARN_ON(gic_supports_nmi() && group0 &&
			!gic_dist_security_disabled());
	}

	/*
	 * Some firmwares hand over to the kernel with the BPR changed from
	 * its reset value (and with a value large enough to prevent
	 * any pre-emptive interrupts from working at all). Writing a zero
	 * to BPR restores its reset value.
	 */
	gic_write_bpr1(0);

	if (static_branch_likely(&supports_deactivate_key)) {
		/* EOI drops priority only (mode 1) */
		gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop);
	} else {
		/* EOI deactivates interrupt too (mode 0) */
		gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop_dir);
	}

	/* Always whack Group0 before Group1 */
	if (group0) {
		switch(pribits) {
		case 8:
		case 7:
			write_gicreg(0, ICC_AP0R3_EL1);
			write_gicreg(0, ICC_AP0R2_EL1);
		/* Fall through */
		case 6:
			write_gicreg(0, ICC_AP0R1_EL1);
		/* Fall through */
		case 5:
		case 4:
			write_gicreg(0, ICC_AP0R0_EL1);
		}

		isb();
	}

	switch(pribits) {
	case 8:
	case 7:
		write_gicreg(0, ICC_AP1R3_EL1);
		write_gicreg(0, ICC_AP1R2_EL1);
	/* Fall through */
	case 6:
		write_gicreg(0, ICC_AP1R1_EL1);
	/* Fall through */
	case 5:
	case 4:
		write_gicreg(0, ICC_AP1R0_EL1);
	}

	isb();

	/* ... and let's hit the road... */
	gic_write_grpen1(1);

	/* Keep the RSS capability status in per_cpu variable */
	per_cpu(has_rss, cpu) = !!(gic_read_ctlr() & ICC_CTLR_EL1_RSS);

	/* Check that all the CPUs are capable of sending SGIs to other CPUs */
	for_each_online_cpu(i) {
		bool have_rss = per_cpu(has_rss, i) && per_cpu(has_rss, cpu);

		need_rss |= MPIDR_RS(cpu_logical_map(i));
		if (need_rss && (!have_rss))
			pr_crit("CPU%d (%lx) can't SGI CPU%d (%lx), no RSS\n",
				cpu, (unsigned long)mpidr,
				i, (unsigned long)cpu_logical_map(i));
	}

	/*
	 * The GIC spec says that, when ICC_CTLR_EL1.RSS==1 and
	 * GICD_TYPER.RSS==0, writing the ICC_ASGI1R_EL1 register with
	 * RS != 0 is a CONSTRAINED UNPREDICTABLE choice of:
	 * - The write is ignored.
	 * - The RS field is treated as 0.
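	 * Either way, an SGI whose target needs RS != 0 cannot be delivered
	 * reliably, which is what the pr_crit_once() below complains about.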
1009 */ 1010 if (need_rss && (!gic_data.has_rss)) 1011 pr_crit_once("RSS is required but GICD doesn't support it\n"); 1012 } 1013 1014 static bool gicv3_nolpi; 1015 1016 static int __init gicv3_nolpi_cfg(char *buf) 1017 { 1018 return strtobool(buf, &gicv3_nolpi); 1019 } 1020 early_param("irqchip.gicv3_nolpi", gicv3_nolpi_cfg); 1021 1022 static int gic_dist_supports_lpis(void) 1023 { 1024 return (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && 1025 !!(readl_relaxed(gic_data.dist_base + GICD_TYPER) & GICD_TYPER_LPIS) && 1026 !gicv3_nolpi); 1027 } 1028 1029 static void gic_cpu_init(void) 1030 { 1031 void __iomem *rbase; 1032 int i; 1033 1034 /* Register ourselves with the rest of the world */ 1035 if (gic_populate_rdist()) 1036 return; 1037 1038 gic_enable_redist(true); 1039 1040 WARN((gic_data.ppi_nr > 16 || GIC_ESPI_NR != 0) && 1041 !(gic_read_ctlr() & ICC_CTLR_EL1_ExtRange), 1042 "Distributor has extended ranges, but CPU%d doesn't\n", 1043 smp_processor_id()); 1044 1045 rbase = gic_data_rdist_sgi_base(); 1046 1047 /* Configure SGIs/PPIs as non-secure Group-1 */ 1048 for (i = 0; i < gic_data.ppi_nr + 16; i += 32) 1049 writel_relaxed(~0, rbase + GICR_IGROUPR0 + i / 8); 1050 1051 gic_cpu_config(rbase, gic_data.ppi_nr + 16, gic_redist_wait_for_rwp); 1052 1053 /* initialise system registers */ 1054 gic_cpu_sys_reg_init(); 1055 } 1056 1057 #ifdef CONFIG_SMP 1058 1059 #define MPIDR_TO_SGI_RS(mpidr) (MPIDR_RS(mpidr) << ICC_SGI1R_RS_SHIFT) 1060 #define MPIDR_TO_SGI_CLUSTER_ID(mpidr) ((mpidr) & ~0xFUL) 1061 1062 static int gic_starting_cpu(unsigned int cpu) 1063 { 1064 gic_cpu_init(); 1065 1066 if (gic_dist_supports_lpis()) 1067 its_cpu_init(); 1068 1069 return 0; 1070 } 1071 1072 static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask, 1073 unsigned long cluster_id) 1074 { 1075 int next_cpu, cpu = *base_cpu; 1076 unsigned long mpidr = cpu_logical_map(cpu); 1077 u16 tlist = 0; 1078 1079 while (cpu < nr_cpu_ids) { 1080 tlist |= 1 << (mpidr & 0xf); 1081 1082 next_cpu = cpumask_next(cpu, mask); 1083 if (next_cpu >= nr_cpu_ids) 1084 goto out; 1085 cpu = next_cpu; 1086 1087 mpidr = cpu_logical_map(cpu); 1088 1089 if (cluster_id != MPIDR_TO_SGI_CLUSTER_ID(mpidr)) { 1090 cpu--; 1091 goto out; 1092 } 1093 } 1094 out: 1095 *base_cpu = cpu; 1096 return tlist; 1097 } 1098 1099 #define MPIDR_TO_SGI_AFFINITY(cluster_id, level) \ 1100 (MPIDR_AFFINITY_LEVEL(cluster_id, level) \ 1101 << ICC_SGI1R_AFFINITY_## level ##_SHIFT) 1102 1103 static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq) 1104 { 1105 u64 val; 1106 1107 val = (MPIDR_TO_SGI_AFFINITY(cluster_id, 3) | 1108 MPIDR_TO_SGI_AFFINITY(cluster_id, 2) | 1109 irq << ICC_SGI1R_SGI_ID_SHIFT | 1110 MPIDR_TO_SGI_AFFINITY(cluster_id, 1) | 1111 MPIDR_TO_SGI_RS(cluster_id) | 1112 tlist << ICC_SGI1R_TARGET_LIST_SHIFT); 1113 1114 pr_devel("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val); 1115 gic_write_sgi1r(val); 1116 } 1117 1118 static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq) 1119 { 1120 int cpu; 1121 1122 if (WARN_ON(irq >= 16)) 1123 return; 1124 1125 /* 1126 * Ensure that stores to Normal memory are visible to the 1127 * other CPUs before issuing the IPI. 
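 * That is what the wmb() below provides; the isb() at the end of this
 * function then makes sure the ICC_SGI1R_EL1 writes have been executed
 * before we return.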
1128 */ 1129 wmb(); 1130 1131 for_each_cpu(cpu, mask) { 1132 u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(cpu_logical_map(cpu)); 1133 u16 tlist; 1134 1135 tlist = gic_compute_target_list(&cpu, mask, cluster_id); 1136 gic_send_sgi(cluster_id, tlist, irq); 1137 } 1138 1139 /* Force the above writes to ICC_SGI1R_EL1 to be executed */ 1140 isb(); 1141 } 1142 1143 static void gic_smp_init(void) 1144 { 1145 set_smp_cross_call(gic_raise_softirq); 1146 cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING, 1147 "irqchip/arm/gicv3:starting", 1148 gic_starting_cpu, NULL); 1149 } 1150 1151 static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, 1152 bool force) 1153 { 1154 unsigned int cpu; 1155 u32 offset, index; 1156 void __iomem *reg; 1157 int enabled; 1158 u64 val; 1159 1160 if (force) 1161 cpu = cpumask_first(mask_val); 1162 else 1163 cpu = cpumask_any_and(mask_val, cpu_online_mask); 1164 1165 if (cpu >= nr_cpu_ids) 1166 return -EINVAL; 1167 1168 if (gic_irq_in_rdist(d)) 1169 return -EINVAL; 1170 1171 /* If interrupt was enabled, disable it first */ 1172 enabled = gic_peek_irq(d, GICD_ISENABLER); 1173 if (enabled) 1174 gic_mask_irq(d); 1175 1176 offset = convert_offset_index(d, GICD_IROUTER, &index); 1177 reg = gic_dist_base(d) + offset + (index * 8); 1178 val = gic_mpidr_to_affinity(cpu_logical_map(cpu)); 1179 1180 gic_write_irouter(val, reg); 1181 1182 /* 1183 * If the interrupt was enabled, enabled it again. Otherwise, 1184 * just wait for the distributor to have digested our changes. 1185 */ 1186 if (enabled) 1187 gic_unmask_irq(d); 1188 else 1189 gic_dist_wait_for_rwp(); 1190 1191 irq_data_update_effective_affinity(d, cpumask_of(cpu)); 1192 1193 return IRQ_SET_MASK_OK_DONE; 1194 } 1195 #else 1196 #define gic_set_affinity NULL 1197 #define gic_smp_init() do { } while(0) 1198 #endif 1199 1200 #ifdef CONFIG_CPU_PM 1201 static int gic_cpu_pm_notifier(struct notifier_block *self, 1202 unsigned long cmd, void *v) 1203 { 1204 if (cmd == CPU_PM_EXIT) { 1205 if (gic_dist_security_disabled()) 1206 gic_enable_redist(true); 1207 gic_cpu_sys_reg_init(); 1208 } else if (cmd == CPU_PM_ENTER && gic_dist_security_disabled()) { 1209 gic_write_grpen1(0); 1210 gic_enable_redist(false); 1211 } 1212 return NOTIFY_OK; 1213 } 1214 1215 static struct notifier_block gic_cpu_pm_notifier_block = { 1216 .notifier_call = gic_cpu_pm_notifier, 1217 }; 1218 1219 static void gic_cpu_pm_init(void) 1220 { 1221 cpu_pm_register_notifier(&gic_cpu_pm_notifier_block); 1222 } 1223 1224 #else 1225 static inline void gic_cpu_pm_init(void) { } 1226 #endif /* CONFIG_CPU_PM */ 1227 1228 static struct irq_chip gic_chip = { 1229 .name = "GICv3", 1230 .irq_mask = gic_mask_irq, 1231 .irq_unmask = gic_unmask_irq, 1232 .irq_eoi = gic_eoi_irq, 1233 .irq_set_type = gic_set_type, 1234 .irq_set_affinity = gic_set_affinity, 1235 .irq_get_irqchip_state = gic_irq_get_irqchip_state, 1236 .irq_set_irqchip_state = gic_irq_set_irqchip_state, 1237 .irq_nmi_setup = gic_irq_nmi_setup, 1238 .irq_nmi_teardown = gic_irq_nmi_teardown, 1239 .flags = IRQCHIP_SET_TYPE_MASKED | 1240 IRQCHIP_SKIP_SET_WAKE | 1241 IRQCHIP_MASK_ON_SUSPEND, 1242 }; 1243 1244 static struct irq_chip gic_eoimode1_chip = { 1245 .name = "GICv3", 1246 .irq_mask = gic_eoimode1_mask_irq, 1247 .irq_unmask = gic_unmask_irq, 1248 .irq_eoi = gic_eoimode1_eoi_irq, 1249 .irq_set_type = gic_set_type, 1250 .irq_set_affinity = gic_set_affinity, 1251 .irq_get_irqchip_state = gic_irq_get_irqchip_state, 1252 .irq_set_irqchip_state = gic_irq_set_irqchip_state, 1253 .irq_set_vcpu_affinity 
= gic_irq_set_vcpu_affinity, 1254 .irq_nmi_setup = gic_irq_nmi_setup, 1255 .irq_nmi_teardown = gic_irq_nmi_teardown, 1256 .flags = IRQCHIP_SET_TYPE_MASKED | 1257 IRQCHIP_SKIP_SET_WAKE | 1258 IRQCHIP_MASK_ON_SUSPEND, 1259 }; 1260 1261 static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, 1262 irq_hw_number_t hw) 1263 { 1264 struct irq_chip *chip = &gic_chip; 1265 1266 if (static_branch_likely(&supports_deactivate_key)) 1267 chip = &gic_eoimode1_chip; 1268 1269 switch (__get_intid_range(hw)) { 1270 case PPI_RANGE: 1271 case EPPI_RANGE: 1272 irq_set_percpu_devid(irq); 1273 irq_domain_set_info(d, irq, hw, chip, d->host_data, 1274 handle_percpu_devid_irq, NULL, NULL); 1275 irq_set_status_flags(irq, IRQ_NOAUTOEN); 1276 break; 1277 1278 case SPI_RANGE: 1279 case ESPI_RANGE: 1280 irq_domain_set_info(d, irq, hw, chip, d->host_data, 1281 handle_fasteoi_irq, NULL, NULL); 1282 irq_set_probe(irq); 1283 irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq))); 1284 break; 1285 1286 case LPI_RANGE: 1287 if (!gic_dist_supports_lpis()) 1288 return -EPERM; 1289 irq_domain_set_info(d, irq, hw, chip, d->host_data, 1290 handle_fasteoi_irq, NULL, NULL); 1291 break; 1292 1293 default: 1294 return -EPERM; 1295 } 1296 1297 return 0; 1298 } 1299 1300 #define GIC_IRQ_TYPE_PARTITION (GIC_IRQ_TYPE_LPI + 1) 1301 1302 static int gic_irq_domain_translate(struct irq_domain *d, 1303 struct irq_fwspec *fwspec, 1304 unsigned long *hwirq, 1305 unsigned int *type) 1306 { 1307 if (is_of_node(fwspec->fwnode)) { 1308 if (fwspec->param_count < 3) 1309 return -EINVAL; 1310 1311 switch (fwspec->param[0]) { 1312 case 0: /* SPI */ 1313 *hwirq = fwspec->param[1] + 32; 1314 break; 1315 case 1: /* PPI */ 1316 *hwirq = fwspec->param[1] + 16; 1317 break; 1318 case 2: /* ESPI */ 1319 *hwirq = fwspec->param[1] + ESPI_BASE_INTID; 1320 break; 1321 case 3: /* EPPI */ 1322 *hwirq = fwspec->param[1] + EPPI_BASE_INTID; 1323 break; 1324 case GIC_IRQ_TYPE_LPI: /* LPI */ 1325 *hwirq = fwspec->param[1]; 1326 break; 1327 case GIC_IRQ_TYPE_PARTITION: 1328 *hwirq = fwspec->param[1]; 1329 if (fwspec->param[1] >= 16) 1330 *hwirq += EPPI_BASE_INTID - 16; 1331 else 1332 *hwirq += 16; 1333 break; 1334 default: 1335 return -EINVAL; 1336 } 1337 1338 *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK; 1339 1340 /* 1341 * Make it clear that broken DTs are... broken. 1342 * Partitionned PPIs are an unfortunate exception. 
1343 */ 1344 WARN_ON(*type == IRQ_TYPE_NONE && 1345 fwspec->param[0] != GIC_IRQ_TYPE_PARTITION); 1346 return 0; 1347 } 1348 1349 if (is_fwnode_irqchip(fwspec->fwnode)) { 1350 if(fwspec->param_count != 2) 1351 return -EINVAL; 1352 1353 *hwirq = fwspec->param[0]; 1354 *type = fwspec->param[1]; 1355 1356 WARN_ON(*type == IRQ_TYPE_NONE); 1357 return 0; 1358 } 1359 1360 return -EINVAL; 1361 } 1362 1363 static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, 1364 unsigned int nr_irqs, void *arg) 1365 { 1366 int i, ret; 1367 irq_hw_number_t hwirq; 1368 unsigned int type = IRQ_TYPE_NONE; 1369 struct irq_fwspec *fwspec = arg; 1370 1371 ret = gic_irq_domain_translate(domain, fwspec, &hwirq, &type); 1372 if (ret) 1373 return ret; 1374 1375 for (i = 0; i < nr_irqs; i++) { 1376 ret = gic_irq_domain_map(domain, virq + i, hwirq + i); 1377 if (ret) 1378 return ret; 1379 } 1380 1381 return 0; 1382 } 1383 1384 static void gic_irq_domain_free(struct irq_domain *domain, unsigned int virq, 1385 unsigned int nr_irqs) 1386 { 1387 int i; 1388 1389 for (i = 0; i < nr_irqs; i++) { 1390 struct irq_data *d = irq_domain_get_irq_data(domain, virq + i); 1391 irq_set_handler(virq + i, NULL); 1392 irq_domain_reset_irq_data(d); 1393 } 1394 } 1395 1396 static int gic_irq_domain_select(struct irq_domain *d, 1397 struct irq_fwspec *fwspec, 1398 enum irq_domain_bus_token bus_token) 1399 { 1400 /* Not for us */ 1401 if (fwspec->fwnode != d->fwnode) 1402 return 0; 1403 1404 /* If this is not DT, then we have a single domain */ 1405 if (!is_of_node(fwspec->fwnode)) 1406 return 1; 1407 1408 /* 1409 * If this is a PPI and we have a 4th (non-null) parameter, 1410 * then we need to match the partition domain. 1411 */ 1412 if (fwspec->param_count >= 4 && 1413 fwspec->param[0] == 1 && fwspec->param[3] != 0 && 1414 gic_data.ppi_descs) 1415 return d == partition_get_domain(gic_data.ppi_descs[fwspec->param[1]]); 1416 1417 return d == gic_data.domain; 1418 } 1419 1420 static const struct irq_domain_ops gic_irq_domain_ops = { 1421 .translate = gic_irq_domain_translate, 1422 .alloc = gic_irq_domain_alloc, 1423 .free = gic_irq_domain_free, 1424 .select = gic_irq_domain_select, 1425 }; 1426 1427 static int partition_domain_translate(struct irq_domain *d, 1428 struct irq_fwspec *fwspec, 1429 unsigned long *hwirq, 1430 unsigned int *type) 1431 { 1432 struct device_node *np; 1433 int ret; 1434 1435 if (!gic_data.ppi_descs) 1436 return -ENOMEM; 1437 1438 np = of_find_node_by_phandle(fwspec->param[3]); 1439 if (WARN_ON(!np)) 1440 return -EINVAL; 1441 1442 ret = partition_translate_id(gic_data.ppi_descs[fwspec->param[1]], 1443 of_node_to_fwnode(np)); 1444 if (ret < 0) 1445 return ret; 1446 1447 *hwirq = ret; 1448 *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK; 1449 1450 return 0; 1451 } 1452 1453 static const struct irq_domain_ops partition_domain_ops = { 1454 .translate = partition_domain_translate, 1455 .select = gic_irq_domain_select, 1456 }; 1457 1458 static bool gic_enable_quirk_msm8996(void *data) 1459 { 1460 struct gic_chip_data *d = data; 1461 1462 d->flags |= FLAGS_WORKAROUND_GICR_WAKER_MSM8996; 1463 1464 return true; 1465 } 1466 1467 static bool gic_enable_quirk_hip06_07(void *data) 1468 { 1469 struct gic_chip_data *d = data; 1470 1471 /* 1472 * HIP06 GICD_IIDR clashes with GIC-600 product number (despite 1473 * not being an actual ARM implementation). The saving grace is 1474 * that GIC-600 doesn't have ESPI, so nothing to do in that case. 
1475 * HIP07 doesn't even have a proper IIDR, and still pretends to 1476 * have ESPI. In both cases, put them right. 1477 */ 1478 if (d->rdists.gicd_typer & GICD_TYPER_ESPI) { 1479 /* Zero both ESPI and the RES0 field next to it... */ 1480 d->rdists.gicd_typer &= ~GENMASK(9, 8); 1481 return true; 1482 } 1483 1484 return false; 1485 } 1486 1487 static const struct gic_quirk gic_quirks[] = { 1488 { 1489 .desc = "GICv3: Qualcomm MSM8996 broken firmware", 1490 .compatible = "qcom,msm8996-gic-v3", 1491 .init = gic_enable_quirk_msm8996, 1492 }, 1493 { 1494 .desc = "GICv3: HIP06 erratum 161010803", 1495 .iidr = 0x0204043b, 1496 .mask = 0xffffffff, 1497 .init = gic_enable_quirk_hip06_07, 1498 }, 1499 { 1500 .desc = "GICv3: HIP07 erratum 161010803", 1501 .iidr = 0x00000000, 1502 .mask = 0xffffffff, 1503 .init = gic_enable_quirk_hip06_07, 1504 }, 1505 { 1506 } 1507 }; 1508 1509 static void gic_enable_nmi_support(void) 1510 { 1511 int i; 1512 1513 if (!gic_prio_masking_enabled()) 1514 return; 1515 1516 if (gic_has_group0() && !gic_dist_security_disabled()) { 1517 pr_warn("SCR_EL3.FIQ is cleared, cannot enable use of pseudo-NMIs\n"); 1518 return; 1519 } 1520 1521 ppi_nmi_refs = kcalloc(gic_data.ppi_nr, sizeof(*ppi_nmi_refs), GFP_KERNEL); 1522 if (!ppi_nmi_refs) 1523 return; 1524 1525 for (i = 0; i < gic_data.ppi_nr; i++) 1526 refcount_set(&ppi_nmi_refs[i], 0); 1527 1528 /* 1529 * Linux itself doesn't use 1:N distribution, so has no need to 1530 * set PMHE. The only reason to have it set is if EL3 requires it 1531 * (and we can't change it). 1532 */ 1533 if (gic_read_ctlr() & ICC_CTLR_EL1_PMHE_MASK) 1534 static_branch_enable(&gic_pmr_sync); 1535 1536 pr_info("%s ICC_PMR_EL1 synchronisation\n", 1537 static_branch_unlikely(&gic_pmr_sync) ? "Forcing" : "Relaxing"); 1538 1539 static_branch_enable(&supports_pseudo_nmis); 1540 1541 if (static_branch_likely(&supports_deactivate_key)) 1542 gic_eoimode1_chip.flags |= IRQCHIP_SUPPORTS_NMI; 1543 else 1544 gic_chip.flags |= IRQCHIP_SUPPORTS_NMI; 1545 } 1546 1547 static int __init gic_init_bases(void __iomem *dist_base, 1548 struct redist_region *rdist_regs, 1549 u32 nr_redist_regions, 1550 u64 redist_stride, 1551 struct fwnode_handle *handle) 1552 { 1553 u32 typer; 1554 int err; 1555 1556 if (!is_hyp_mode_available()) 1557 static_branch_disable(&supports_deactivate_key); 1558 1559 if (static_branch_likely(&supports_deactivate_key)) 1560 pr_info("GIC: Using split EOI/Deactivate mode\n"); 1561 1562 gic_data.fwnode = handle; 1563 gic_data.dist_base = dist_base; 1564 gic_data.redist_regions = rdist_regs; 1565 gic_data.nr_redist_regions = nr_redist_regions; 1566 gic_data.redist_stride = redist_stride; 1567 1568 /* 1569 * Find out how many interrupts are supported. 
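	 * The GICD_TYPER value cached just below is what the GIC_LINE_NR,
	 * GIC_ESPI_NR and GIC_ID_NR macros at the top of the file decode.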
1570 */ 1571 typer = readl_relaxed(gic_data.dist_base + GICD_TYPER); 1572 gic_data.rdists.gicd_typer = typer; 1573 1574 gic_enable_quirks(readl_relaxed(gic_data.dist_base + GICD_IIDR), 1575 gic_quirks, &gic_data); 1576 1577 pr_info("%d SPIs implemented\n", GIC_LINE_NR - 32); 1578 pr_info("%d Extended SPIs implemented\n", GIC_ESPI_NR); 1579 1580 gic_data.rdists.gicd_typer2 = readl_relaxed(gic_data.dist_base + GICD_TYPER2); 1581 1582 gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops, 1583 &gic_data); 1584 irq_domain_update_bus_token(gic_data.domain, DOMAIN_BUS_WIRED); 1585 gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist)); 1586 gic_data.rdists.has_rvpeid = true; 1587 gic_data.rdists.has_vlpis = true; 1588 gic_data.rdists.has_direct_lpi = true; 1589 1590 if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) { 1591 err = -ENOMEM; 1592 goto out_free; 1593 } 1594 1595 gic_data.has_rss = !!(typer & GICD_TYPER_RSS); 1596 pr_info("Distributor has %sRange Selector support\n", 1597 gic_data.has_rss ? "" : "no "); 1598 1599 if (typer & GICD_TYPER_MBIS) { 1600 err = mbi_init(handle, gic_data.domain); 1601 if (err) 1602 pr_err("Failed to initialize MBIs\n"); 1603 } 1604 1605 set_handle_irq(gic_handle_irq); 1606 1607 gic_update_rdist_properties(); 1608 1609 gic_smp_init(); 1610 gic_dist_init(); 1611 gic_cpu_init(); 1612 gic_cpu_pm_init(); 1613 1614 if (gic_dist_supports_lpis()) { 1615 its_init(handle, &gic_data.rdists, gic_data.domain); 1616 its_cpu_init(); 1617 } else { 1618 if (IS_ENABLED(CONFIG_ARM_GIC_V2M)) 1619 gicv2m_init(handle, gic_data.domain); 1620 } 1621 1622 gic_enable_nmi_support(); 1623 1624 return 0; 1625 1626 out_free: 1627 if (gic_data.domain) 1628 irq_domain_remove(gic_data.domain); 1629 free_percpu(gic_data.rdists.rdist); 1630 return err; 1631 } 1632 1633 static int __init gic_validate_dist_version(void __iomem *dist_base) 1634 { 1635 u32 reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK; 1636 1637 if (reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4) 1638 return -ENODEV; 1639 1640 return 0; 1641 } 1642 1643 /* Create all possible partitions at boot time */ 1644 static void __init gic_populate_ppi_partitions(struct device_node *gic_node) 1645 { 1646 struct device_node *parts_node, *child_part; 1647 int part_idx = 0, i; 1648 int nr_parts; 1649 struct partition_affinity *parts; 1650 1651 parts_node = of_get_child_by_name(gic_node, "ppi-partitions"); 1652 if (!parts_node) 1653 return; 1654 1655 gic_data.ppi_descs = kcalloc(gic_data.ppi_nr, sizeof(*gic_data.ppi_descs), GFP_KERNEL); 1656 if (!gic_data.ppi_descs) 1657 return; 1658 1659 nr_parts = of_get_child_count(parts_node); 1660 1661 if (!nr_parts) 1662 goto out_put_node; 1663 1664 parts = kcalloc(nr_parts, sizeof(*parts), GFP_KERNEL); 1665 if (WARN_ON(!parts)) 1666 goto out_put_node; 1667 1668 for_each_child_of_node(parts_node, child_part) { 1669 struct partition_affinity *part; 1670 int n; 1671 1672 part = &parts[part_idx]; 1673 1674 part->partition_id = of_node_to_fwnode(child_part); 1675 1676 pr_info("GIC: PPI partition %pOFn[%d] { ", 1677 child_part, part_idx); 1678 1679 n = of_property_count_elems_of_size(child_part, "affinity", 1680 sizeof(u32)); 1681 WARN_ON(n <= 0); 1682 1683 for (i = 0; i < n; i++) { 1684 int err, cpu; 1685 u32 cpu_phandle; 1686 struct device_node *cpu_node; 1687 1688 err = of_property_read_u32_index(child_part, "affinity", 1689 i, &cpu_phandle); 1690 if (WARN_ON(err)) 1691 continue; 1692 1693 cpu_node = 
of_find_node_by_phandle(cpu_phandle); 1694 if (WARN_ON(!cpu_node)) 1695 continue; 1696 1697 cpu = of_cpu_node_to_id(cpu_node); 1698 if (WARN_ON(cpu < 0)) 1699 continue; 1700 1701 pr_cont("%pOF[%d] ", cpu_node, cpu); 1702 1703 cpumask_set_cpu(cpu, &part->mask); 1704 } 1705 1706 pr_cont("}\n"); 1707 part_idx++; 1708 } 1709 1710 for (i = 0; i < gic_data.ppi_nr; i++) { 1711 unsigned int irq; 1712 struct partition_desc *desc; 1713 struct irq_fwspec ppi_fwspec = { 1714 .fwnode = gic_data.fwnode, 1715 .param_count = 3, 1716 .param = { 1717 [0] = GIC_IRQ_TYPE_PARTITION, 1718 [1] = i, 1719 [2] = IRQ_TYPE_NONE, 1720 }, 1721 }; 1722 1723 irq = irq_create_fwspec_mapping(&ppi_fwspec); 1724 if (WARN_ON(!irq)) 1725 continue; 1726 desc = partition_create_desc(gic_data.fwnode, parts, nr_parts, 1727 irq, &partition_domain_ops); 1728 if (WARN_ON(!desc)) 1729 continue; 1730 1731 gic_data.ppi_descs[i] = desc; 1732 } 1733 1734 out_put_node: 1735 of_node_put(parts_node); 1736 } 1737 1738 static void __init gic_of_setup_kvm_info(struct device_node *node) 1739 { 1740 int ret; 1741 struct resource r; 1742 u32 gicv_idx; 1743 1744 gic_v3_kvm_info.type = GIC_V3; 1745 1746 gic_v3_kvm_info.maint_irq = irq_of_parse_and_map(node, 0); 1747 if (!gic_v3_kvm_info.maint_irq) 1748 return; 1749 1750 if (of_property_read_u32(node, "#redistributor-regions", 1751 &gicv_idx)) 1752 gicv_idx = 1; 1753 1754 gicv_idx += 3; /* Also skip GICD, GICC, GICH */ 1755 ret = of_address_to_resource(node, gicv_idx, &r); 1756 if (!ret) 1757 gic_v3_kvm_info.vcpu = r; 1758 1759 gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis; 1760 gic_set_kvm_info(&gic_v3_kvm_info); 1761 } 1762 1763 static int __init gic_of_init(struct device_node *node, struct device_node *parent) 1764 { 1765 void __iomem *dist_base; 1766 struct redist_region *rdist_regs; 1767 u64 redist_stride; 1768 u32 nr_redist_regions; 1769 int err, i; 1770 1771 dist_base = of_iomap(node, 0); 1772 if (!dist_base) { 1773 pr_err("%pOF: unable to map gic dist registers\n", node); 1774 return -ENXIO; 1775 } 1776 1777 err = gic_validate_dist_version(dist_base); 1778 if (err) { 1779 pr_err("%pOF: no distributor detected, giving up\n", node); 1780 goto out_unmap_dist; 1781 } 1782 1783 if (of_property_read_u32(node, "#redistributor-regions", &nr_redist_regions)) 1784 nr_redist_regions = 1; 1785 1786 rdist_regs = kcalloc(nr_redist_regions, sizeof(*rdist_regs), 1787 GFP_KERNEL); 1788 if (!rdist_regs) { 1789 err = -ENOMEM; 1790 goto out_unmap_dist; 1791 } 1792 1793 for (i = 0; i < nr_redist_regions; i++) { 1794 struct resource res; 1795 int ret; 1796 1797 ret = of_address_to_resource(node, 1 + i, &res); 1798 rdist_regs[i].redist_base = of_iomap(node, 1 + i); 1799 if (ret || !rdist_regs[i].redist_base) { 1800 pr_err("%pOF: couldn't map region %d\n", node, i); 1801 err = -ENODEV; 1802 goto out_unmap_rdist; 1803 } 1804 rdist_regs[i].phys_base = res.start; 1805 } 1806 1807 if (of_property_read_u64(node, "redistributor-stride", &redist_stride)) 1808 redist_stride = 0; 1809 1810 gic_enable_of_quirks(node, gic_quirks, &gic_data); 1811 1812 err = gic_init_bases(dist_base, rdist_regs, nr_redist_regions, 1813 redist_stride, &node->fwnode); 1814 if (err) 1815 goto out_unmap_rdist; 1816 1817 gic_populate_ppi_partitions(node); 1818 1819 if (static_branch_likely(&supports_deactivate_key)) 1820 gic_of_setup_kvm_info(node); 1821 return 0; 1822 1823 out_unmap_rdist: 1824 for (i = 0; i < nr_redist_regions; i++) 1825 if (rdist_regs[i].redist_base) 1826 iounmap(rdist_regs[i].redist_base); 1827 kfree(rdist_regs); 1828 
out_unmap_dist: 1829 iounmap(dist_base); 1830 return err; 1831 } 1832 1833 IRQCHIP_DECLARE(gic_v3, "arm,gic-v3", gic_of_init); 1834 1835 #ifdef CONFIG_ACPI 1836 static struct 1837 { 1838 void __iomem *dist_base; 1839 struct redist_region *redist_regs; 1840 u32 nr_redist_regions; 1841 bool single_redist; 1842 int enabled_rdists; 1843 u32 maint_irq; 1844 int maint_irq_mode; 1845 phys_addr_t vcpu_base; 1846 } acpi_data __initdata; 1847 1848 static void __init 1849 gic_acpi_register_redist(phys_addr_t phys_base, void __iomem *redist_base) 1850 { 1851 static int count = 0; 1852 1853 acpi_data.redist_regs[count].phys_base = phys_base; 1854 acpi_data.redist_regs[count].redist_base = redist_base; 1855 acpi_data.redist_regs[count].single_redist = acpi_data.single_redist; 1856 count++; 1857 } 1858 1859 static int __init 1860 gic_acpi_parse_madt_redist(union acpi_subtable_headers *header, 1861 const unsigned long end) 1862 { 1863 struct acpi_madt_generic_redistributor *redist = 1864 (struct acpi_madt_generic_redistributor *)header; 1865 void __iomem *redist_base; 1866 1867 redist_base = ioremap(redist->base_address, redist->length); 1868 if (!redist_base) { 1869 pr_err("Couldn't map GICR region @%llx\n", redist->base_address); 1870 return -ENOMEM; 1871 } 1872 1873 gic_acpi_register_redist(redist->base_address, redist_base); 1874 return 0; 1875 } 1876 1877 static int __init 1878 gic_acpi_parse_madt_gicc(union acpi_subtable_headers *header, 1879 const unsigned long end) 1880 { 1881 struct acpi_madt_generic_interrupt *gicc = 1882 (struct acpi_madt_generic_interrupt *)header; 1883 u32 reg = readl_relaxed(acpi_data.dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK; 1884 u32 size = reg == GIC_PIDR2_ARCH_GICv4 ? SZ_64K * 4 : SZ_64K * 2; 1885 void __iomem *redist_base; 1886 1887 /* GICC entry which has !ACPI_MADT_ENABLED is not unusable so skip */ 1888 if (!(gicc->flags & ACPI_MADT_ENABLED)) 1889 return 0; 1890 1891 redist_base = ioremap(gicc->gicr_base_address, size); 1892 if (!redist_base) 1893 return -ENOMEM; 1894 1895 gic_acpi_register_redist(gicc->gicr_base_address, redist_base); 1896 return 0; 1897 } 1898 1899 static int __init gic_acpi_collect_gicr_base(void) 1900 { 1901 acpi_tbl_entry_handler redist_parser; 1902 enum acpi_madt_type type; 1903 1904 if (acpi_data.single_redist) { 1905 type = ACPI_MADT_TYPE_GENERIC_INTERRUPT; 1906 redist_parser = gic_acpi_parse_madt_gicc; 1907 } else { 1908 type = ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR; 1909 redist_parser = gic_acpi_parse_madt_redist; 1910 } 1911 1912 /* Collect redistributor base addresses in GICR entries */ 1913 if (acpi_table_parse_madt(type, redist_parser, 0) > 0) 1914 return 0; 1915 1916 pr_info("No valid GICR entries exist\n"); 1917 return -ENODEV; 1918 } 1919 1920 static int __init gic_acpi_match_gicr(union acpi_subtable_headers *header, 1921 const unsigned long end) 1922 { 1923 /* Subtable presence means that redist exists, that's it */ 1924 return 0; 1925 } 1926 1927 static int __init gic_acpi_match_gicc(union acpi_subtable_headers *header, 1928 const unsigned long end) 1929 { 1930 struct acpi_madt_generic_interrupt *gicc = 1931 (struct acpi_madt_generic_interrupt *)header; 1932 1933 /* 1934 * If GICC is enabled and has valid gicr base address, then it means 1935 * GICR base is presented via GICC 1936 */ 1937 if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address) { 1938 acpi_data.enabled_rdists++; 1939 return 0; 1940 } 1941 1942 /* 1943 * It's perfectly valid firmware can pass disabled GICC entry, driver 1944 * should not treat as errors, 
skip the entry instead of probe fail. 1945 */ 1946 if (!(gicc->flags & ACPI_MADT_ENABLED)) 1947 return 0; 1948 1949 return -ENODEV; 1950 } 1951 1952 static int __init gic_acpi_count_gicr_regions(void) 1953 { 1954 int count; 1955 1956 /* 1957 * Count how many redistributor regions we have. It is not allowed 1958 * to mix redistributor description, GICR and GICC subtables have to be 1959 * mutually exclusive. 1960 */ 1961 count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR, 1962 gic_acpi_match_gicr, 0); 1963 if (count > 0) { 1964 acpi_data.single_redist = false; 1965 return count; 1966 } 1967 1968 count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT, 1969 gic_acpi_match_gicc, 0); 1970 if (count > 0) { 1971 acpi_data.single_redist = true; 1972 count = acpi_data.enabled_rdists; 1973 } 1974 1975 return count; 1976 } 1977 1978 static bool __init acpi_validate_gic_table(struct acpi_subtable_header *header, 1979 struct acpi_probe_entry *ape) 1980 { 1981 struct acpi_madt_generic_distributor *dist; 1982 int count; 1983 1984 dist = (struct acpi_madt_generic_distributor *)header; 1985 if (dist->version != ape->driver_data) 1986 return false; 1987 1988 /* We need to do that exercise anyway, the sooner the better */ 1989 count = gic_acpi_count_gicr_regions(); 1990 if (count <= 0) 1991 return false; 1992 1993 acpi_data.nr_redist_regions = count; 1994 return true; 1995 } 1996 1997 static int __init gic_acpi_parse_virt_madt_gicc(union acpi_subtable_headers *header, 1998 const unsigned long end) 1999 { 2000 struct acpi_madt_generic_interrupt *gicc = 2001 (struct acpi_madt_generic_interrupt *)header; 2002 int maint_irq_mode; 2003 static int first_madt = true; 2004 2005 /* Skip unusable CPUs */ 2006 if (!(gicc->flags & ACPI_MADT_ENABLED)) 2007 return 0; 2008 2009 maint_irq_mode = (gicc->flags & ACPI_MADT_VGIC_IRQ_MODE) ? 
2010 ACPI_EDGE_SENSITIVE : ACPI_LEVEL_SENSITIVE; 2011 2012 if (first_madt) { 2013 first_madt = false; 2014 2015 acpi_data.maint_irq = gicc->vgic_interrupt; 2016 acpi_data.maint_irq_mode = maint_irq_mode; 2017 acpi_data.vcpu_base = gicc->gicv_base_address; 2018 2019 return 0; 2020 } 2021 2022 /* 2023 * The maintenance interrupt and GICV should be the same for every CPU 2024 */ 2025 if ((acpi_data.maint_irq != gicc->vgic_interrupt) || 2026 (acpi_data.maint_irq_mode != maint_irq_mode) || 2027 (acpi_data.vcpu_base != gicc->gicv_base_address)) 2028 return -EINVAL; 2029 2030 return 0; 2031 } 2032 2033 static bool __init gic_acpi_collect_virt_info(void) 2034 { 2035 int count; 2036 2037 count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT, 2038 gic_acpi_parse_virt_madt_gicc, 0); 2039 2040 return (count > 0); 2041 } 2042 2043 #define ACPI_GICV3_DIST_MEM_SIZE (SZ_64K) 2044 #define ACPI_GICV2_VCTRL_MEM_SIZE (SZ_4K) 2045 #define ACPI_GICV2_VCPU_MEM_SIZE (SZ_8K) 2046 2047 static void __init gic_acpi_setup_kvm_info(void) 2048 { 2049 int irq; 2050 2051 if (!gic_acpi_collect_virt_info()) { 2052 pr_warn("Unable to get hardware information used for virtualization\n"); 2053 return; 2054 } 2055 2056 gic_v3_kvm_info.type = GIC_V3; 2057 2058 irq = acpi_register_gsi(NULL, acpi_data.maint_irq, 2059 acpi_data.maint_irq_mode, 2060 ACPI_ACTIVE_HIGH); 2061 if (irq <= 0) 2062 return; 2063 2064 gic_v3_kvm_info.maint_irq = irq; 2065 2066 if (acpi_data.vcpu_base) { 2067 struct resource *vcpu = &gic_v3_kvm_info.vcpu; 2068 2069 vcpu->flags = IORESOURCE_MEM; 2070 vcpu->start = acpi_data.vcpu_base; 2071 vcpu->end = vcpu->start + ACPI_GICV2_VCPU_MEM_SIZE - 1; 2072 } 2073 2074 gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis; 2075 gic_set_kvm_info(&gic_v3_kvm_info); 2076 } 2077 2078 static int __init 2079 gic_acpi_init(struct acpi_subtable_header *header, const unsigned long end) 2080 { 2081 struct acpi_madt_generic_distributor *dist; 2082 struct fwnode_handle *domain_handle; 2083 size_t size; 2084 int i, err; 2085 2086 /* Get distributor base address */ 2087 dist = (struct acpi_madt_generic_distributor *)header; 2088 acpi_data.dist_base = ioremap(dist->base_address, 2089 ACPI_GICV3_DIST_MEM_SIZE); 2090 if (!acpi_data.dist_base) { 2091 pr_err("Unable to map GICD registers\n"); 2092 return -ENOMEM; 2093 } 2094 2095 err = gic_validate_dist_version(acpi_data.dist_base); 2096 if (err) { 2097 pr_err("No distributor detected at @%p, giving up\n", 2098 acpi_data.dist_base); 2099 goto out_dist_unmap; 2100 } 2101 2102 size = sizeof(*acpi_data.redist_regs) * acpi_data.nr_redist_regions; 2103 acpi_data.redist_regs = kzalloc(size, GFP_KERNEL); 2104 if (!acpi_data.redist_regs) { 2105 err = -ENOMEM; 2106 goto out_dist_unmap; 2107 } 2108 2109 err = gic_acpi_collect_gicr_base(); 2110 if (err) 2111 goto out_redist_unmap; 2112 2113 domain_handle = irq_domain_alloc_fwnode(&dist->base_address); 2114 if (!domain_handle) { 2115 err = -ENOMEM; 2116 goto out_redist_unmap; 2117 } 2118 2119 err = gic_init_bases(acpi_data.dist_base, acpi_data.redist_regs, 2120 acpi_data.nr_redist_regions, 0, domain_handle); 2121 if (err) 2122 goto out_fwhandle_free; 2123 2124 acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, domain_handle); 2125 2126 if (static_branch_likely(&supports_deactivate_key)) 2127 gic_acpi_setup_kvm_info(); 2128 2129 return 0; 2130 2131 out_fwhandle_free: 2132 irq_domain_free_fwnode(domain_handle); 2133 out_redist_unmap: 2134 for (i = 0; i < acpi_data.nr_redist_regions; i++) 2135 if (acpi_data.redist_regs[i].redist_base) 2136 
iounmap(acpi_data.redist_regs[i].redist_base); 2137 kfree(acpi_data.redist_regs); 2138 out_dist_unmap: 2139 iounmap(acpi_data.dist_base); 2140 return err; 2141 } 2142 IRQCHIP_ACPI_DECLARE(gic_v3, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR, 2143 acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V3, 2144 gic_acpi_init); 2145 IRQCHIP_ACPI_DECLARE(gic_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR, 2146 acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V4, 2147 gic_acpi_init); 2148 IRQCHIP_ACPI_DECLARE(gic_v3_or_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR, 2149 acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_NONE, 2150 gic_acpi_init); 2151 #endif 2152
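#if 0
/*
 * Purely illustrative, not part of this driver: a minimal sketch of how a
 * consumer typically uses one of the per-CPU (PPI) interrupts that
 * gic_irq_domain_map() sets up above (percpu_devid + IRQ_NOAUTOEN). All the
 * "example_*" names below are made up for the sketch; "irq" is an already
 * mapped Linux interrupt number.
 */
static irqreturn_t example_ppi_handler(int irq, void *percpu_dev)
{
	/* Runs on the CPU that took the PPI */
	return IRQ_HANDLED;
}

static int example_ppi_setup(unsigned int irq, void __percpu *percpu_dev)
{
	int err;

	/* One registration covers all CPUs... */
	err = request_percpu_irq(irq, example_ppi_handler, "example-ppi",
				 percpu_dev);
	if (err)
		return err;

	/*
	 * ...but the interrupt must then be enabled on each CPU
	 * individually, typically from a CPU hotplug "starting" callback
	 * running on that CPU.
	 */
	enable_percpu_irq(irq, IRQ_TYPE_NONE);
	return 0;
}
#endif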