/*
 * Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Interrupt architecture for the GIC:
 *
 * o There is one Interrupt Distributor, which receives interrupts
 *   from system devices and sends them to the Interrupt Controllers.
 *
 * o There is one CPU Interface per CPU, which sends interrupts sent
 *   by the Distributor, and interrupts generated locally, to the
 *   associated CPU. The base address of the CPU interface is usually
 *   aliased so that the same address points to different chips depending
 *   on the CPU it is accessed from.
 *
 * Note that IRQs 0-31 are special - they are local to each CPU.
 * As such, the enable set/clear, pending set/clear and active bit
 * registers are banked per-cpu for these sources.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/cpumask.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/acpi.h>
#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqchip/arm-gic.h>

#include <asm/cputype.h>
#include <asm/irq.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>
#include <asm/virt.h>

#include "irq-gic-common.h"

#ifdef CONFIG_ARM64
#include <asm/cpufeature.h>

static void gic_check_cpu_features(void)
{
	WARN_TAINT_ONCE(cpus_have_cap(ARM64_HAS_SYSREG_GIC_CPUIF),
			TAINT_CPU_OUT_OF_SPEC,
			"GICv3 system registers enabled, broken firmware!\n");
}
#else
#define gic_check_cpu_features()	do { } while (0)
#endif

union gic_base {
	void __iomem *common_base;
	void __percpu * __iomem *percpu_base;
};

struct gic_chip_data {
	struct irq_chip chip;
	union gic_base dist_base;
	union gic_base cpu_base;
#ifdef CONFIG_CPU_PM
	u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)];
	u32 saved_spi_active[DIV_ROUND_UP(1020, 32)];
	u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)];
	u32 saved_spi_target[DIV_ROUND_UP(1020, 4)];
	u32 __percpu *saved_ppi_enable;
	u32 __percpu *saved_ppi_active;
	u32 __percpu *saved_ppi_conf;
#endif
	struct irq_domain *domain;
	unsigned int gic_irqs;
#ifdef CONFIG_GIC_NON_BANKED
	void __iomem *(*get_base)(union gic_base *);
#endif
};

static DEFINE_RAW_SPINLOCK(irq_controller_lock);

/*
 * The GIC mapping of CPU interfaces does not necessarily match
 * the logical CPU numbering. Let's use a mapping as returned
 * by the GIC itself.
 */
#define NR_GIC_CPU_IF 8
static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;

static struct static_key supports_deactivate = STATIC_KEY_INIT_TRUE;

static struct gic_chip_data gic_data[CONFIG_ARM_GIC_MAX_NR] __read_mostly;

#ifdef CONFIG_GIC_NON_BANKED
static void __iomem *gic_get_percpu_base(union gic_base *base)
{
	return raw_cpu_read(*base->percpu_base);
}

static void __iomem *gic_get_common_base(union gic_base *base)
{
	return base->common_base;
}

static inline void __iomem *gic_data_dist_base(struct gic_chip_data *data)
{
	return data->get_base(&data->dist_base);
}

static inline void __iomem *gic_data_cpu_base(struct gic_chip_data *data)
{
	return data->get_base(&data->cpu_base);
}

static inline void gic_set_base_accessor(struct gic_chip_data *data,
					 void __iomem *(*f)(union gic_base *))
{
	data->get_base = f;
}
#else
#define gic_data_dist_base(d)	((d)->dist_base.common_base)
#define gic_data_cpu_base(d)	((d)->cpu_base.common_base)
#define gic_set_base_accessor(d, f)
#endif

static inline void __iomem *gic_dist_base(struct irq_data *d)
{
	struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
	return gic_data_dist_base(gic_data);
}

static inline void __iomem *gic_cpu_base(struct irq_data *d)
{
	struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
	return gic_data_cpu_base(gic_data);
}

static inline unsigned int gic_irq(struct irq_data *d)
{
	return d->hwirq;
}

static inline bool cascading_gic_irq(struct irq_data *d)
{
	void *data = irq_data_get_irq_handler_data(d);

	/*
	 * If handler_data is set, this is a cascading interrupt, and
	 * it cannot possibly be forwarded.
	 */
	return data != NULL;
}

/*
 * Routines to acknowledge, disable and enable interrupts
 */
static void gic_poke_irq(struct irq_data *d, u32 offset)
{
	u32 mask = 1 << (gic_irq(d) % 32);
	writel_relaxed(mask, gic_dist_base(d) + offset + (gic_irq(d) / 32) * 4);
}

static int gic_peek_irq(struct irq_data *d, u32 offset)
{
	u32 mask = 1 << (gic_irq(d) % 32);
	return !!(readl_relaxed(gic_dist_base(d) + offset + (gic_irq(d) / 32) * 4) & mask);
}

static void gic_mask_irq(struct irq_data *d)
{
	gic_poke_irq(d, GIC_DIST_ENABLE_CLEAR);
}

static void gic_eoimode1_mask_irq(struct irq_data *d)
{
	gic_mask_irq(d);
	/*
	 * When masking a forwarded interrupt, make sure it is
	 * deactivated as well.
	 *
	 * This ensures that an interrupt that is getting
	 * disabled/masked will not get "stuck", because there is
	 * no one to deactivate it (the guest is being terminated).
	 */
	if (irqd_is_forwarded_to_vcpu(d))
		gic_poke_irq(d, GIC_DIST_ACTIVE_CLEAR);
}

static void gic_unmask_irq(struct irq_data *d)
{
	gic_poke_irq(d, GIC_DIST_ENABLE_SET);
}

static void gic_eoi_irq(struct irq_data *d)
{
	writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
}

static void gic_eoimode1_eoi_irq(struct irq_data *d)
{
	/* Do not deactivate an IRQ forwarded to a vcpu. */
	if (irqd_is_forwarded_to_vcpu(d))
		return;

	writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_DEACTIVATE);
}

static int gic_irq_set_irqchip_state(struct irq_data *d,
				     enum irqchip_irq_state which, bool val)
{
	u32 reg;

	switch (which) {
	case IRQCHIP_STATE_PENDING:
		reg = val ? GIC_DIST_PENDING_SET : GIC_DIST_PENDING_CLEAR;
		break;

	case IRQCHIP_STATE_ACTIVE:
		reg = val ? GIC_DIST_ACTIVE_SET : GIC_DIST_ACTIVE_CLEAR;
		break;

	case IRQCHIP_STATE_MASKED:
		reg = val ? GIC_DIST_ENABLE_CLEAR : GIC_DIST_ENABLE_SET;
		break;

	default:
		return -EINVAL;
	}

	gic_poke_irq(d, reg);
	return 0;
}

static int gic_irq_get_irqchip_state(struct irq_data *d,
				     enum irqchip_irq_state which, bool *val)
{
	switch (which) {
	case IRQCHIP_STATE_PENDING:
		*val = gic_peek_irq(d, GIC_DIST_PENDING_SET);
		break;

	case IRQCHIP_STATE_ACTIVE:
		*val = gic_peek_irq(d, GIC_DIST_ACTIVE_SET);
		break;

	case IRQCHIP_STATE_MASKED:
		*val = !gic_peek_irq(d, GIC_DIST_ENABLE_SET);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

static int gic_set_type(struct irq_data *d, unsigned int type)
{
	void __iomem *base = gic_dist_base(d);
	unsigned int gicirq = gic_irq(d);

	/* Interrupt configuration for SGIs can't be changed */
	if (gicirq < 16)
		return -EINVAL;

	/* SPIs have restrictions on the supported types */
	if (gicirq >= 32 && type != IRQ_TYPE_LEVEL_HIGH &&
	    type != IRQ_TYPE_EDGE_RISING)
		return -EINVAL;

	return gic_configure_irq(gicirq, type, base, NULL);
}

static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
{
	/* Only interrupts on the primary GIC can be forwarded to a vcpu. */
	if (cascading_gic_irq(d))
		return -EINVAL;

	if (vcpu)
		irqd_set_forwarded_to_vcpu(d);
	else
		irqd_clr_forwarded_to_vcpu(d);
	return 0;
}

#ifdef CONFIG_SMP
static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
			    bool force)
{
	void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
	unsigned int cpu, shift = (gic_irq(d) % 4) * 8;
	u32 val, mask, bit;
	unsigned long flags;

	if (!force)
		cpu = cpumask_any_and(mask_val, cpu_online_mask);
	else
		cpu = cpumask_first(mask_val);

	if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
		return -EINVAL;

	raw_spin_lock_irqsave(&irq_controller_lock, flags);
	mask = 0xff << shift;
	bit = gic_cpu_map[cpu] << shift;
	val = readl_relaxed(reg) & ~mask;
	writel_relaxed(val | bit, reg);
	raw_spin_unlock_irqrestore(&irq_controller_lock, flags);

	return IRQ_SET_MASK_OK;
}
#endif

static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
	u32 irqstat, irqnr;
	struct gic_chip_data *gic = &gic_data[0];
	void __iomem *cpu_base = gic_data_cpu_base(gic);

	do {
		irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK);
		irqnr = irqstat & GICC_IAR_INT_ID_MASK;

		if (likely(irqnr > 15 && irqnr < 1020)) {
			if (static_key_true(&supports_deactivate))
				writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
			handle_domain_irq(gic->domain, irqnr, regs);
			continue;
		}
		if (irqnr < 16) {
			writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
			if (static_key_true(&supports_deactivate))
				writel_relaxed(irqstat, cpu_base + GIC_CPU_DEACTIVATE);
#ifdef CONFIG_SMP
			handle_IPI(irqnr, regs);
#endif
			continue;
		}
		break;
	} while (1);
}

static void gic_handle_cascade_irq(struct irq_desc *desc)
{
	struct gic_chip_data *chip_data = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned int cascade_irq, gic_irq;
	unsigned long status;

	chained_irq_enter(chip, desc);

	raw_spin_lock(&irq_controller_lock);
	status = readl_relaxed(gic_data_cpu_base(chip_data) + GIC_CPU_INTACK);
	raw_spin_unlock(&irq_controller_lock);

	gic_irq = (status & GICC_IAR_INT_ID_MASK);
	if (gic_irq == GICC_INT_SPURIOUS)
		goto out;

	cascade_irq = irq_find_mapping(chip_data->domain, gic_irq);
	if (unlikely(gic_irq < 32 || gic_irq > 1020))
		handle_bad_irq(desc);
	else
		generic_handle_irq(cascade_irq);

 out:
	chained_irq_exit(chip, desc);
}

static struct irq_chip gic_chip = {
	.irq_mask		= gic_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_eoi		= gic_eoi_irq,
	.irq_set_type		= gic_set_type,
	.irq_get_irqchip_state	= gic_irq_get_irqchip_state,
	.irq_set_irqchip_state	= gic_irq_set_irqchip_state,
	.flags			= IRQCHIP_SET_TYPE_MASKED |
				  IRQCHIP_SKIP_SET_WAKE |
				  IRQCHIP_MASK_ON_SUSPEND,
};

static struct irq_chip gic_eoimode1_chip = {
	.name			= "GICv2",
	.irq_mask		= gic_eoimode1_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_eoi		= gic_eoimode1_eoi_irq,
	.irq_set_type		= gic_set_type,
	.irq_get_irqchip_state	= gic_irq_get_irqchip_state,
	.irq_set_irqchip_state	= gic_irq_set_irqchip_state,
	.irq_set_vcpu_affinity	= gic_irq_set_vcpu_affinity,
	.flags			= IRQCHIP_SET_TYPE_MASKED |
				  IRQCHIP_SKIP_SET_WAKE |
				  IRQCHIP_MASK_ON_SUSPEND,
};
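/*
 * gic_cascade_irq() - cascade a secondary GIC behind a parent interrupt.
 * @gic_nr: index of the secondary GIC in gic_data[]
 * @irq: the parent IRQ line the secondary GIC output is wired to
 *
 * Installs gic_handle_cascade_irq() as the chained handler for @irq so
 * that interrupts raised by the secondary GIC are demultiplexed through
 * its own irq domain.
 */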
void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
{
	BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR);
	irq_set_chained_handler_and_data(irq, gic_handle_cascade_irq,
					 &gic_data[gic_nr]);
}

static u8 gic_get_cpumask(struct gic_chip_data *gic)
{
	void __iomem *base = gic_data_dist_base(gic);
	u32 mask, i;

	for (i = mask = 0; i < 32; i += 4) {
		mask = readl_relaxed(base + GIC_DIST_TARGET + i);
		mask |= mask >> 16;
		mask |= mask >> 8;
		if (mask)
			break;
	}

	if (!mask && num_possible_cpus() > 1)
		pr_crit("GIC CPU mask not found - kernel will fail to boot.\n");

	return mask;
}

static void gic_cpu_if_up(struct gic_chip_data *gic)
{
	void __iomem *cpu_base = gic_data_cpu_base(gic);
	u32 bypass = 0;
	u32 mode = 0;

	if (gic == &gic_data[0] && static_key_true(&supports_deactivate))
		mode = GIC_CPU_CTRL_EOImodeNS;

	/*
	 * Preserve bypass disable bits to be written back later
	 */
	bypass = readl(cpu_base + GIC_CPU_CTRL);
	bypass &= GICC_DIS_BYPASS_MASK;

	writel_relaxed(bypass | mode | GICC_ENABLE, cpu_base + GIC_CPU_CTRL);
}

static void __init gic_dist_init(struct gic_chip_data *gic)
{
	unsigned int i;
	u32 cpumask;
	unsigned int gic_irqs = gic->gic_irqs;
	void __iomem *base = gic_data_dist_base(gic);

	writel_relaxed(GICD_DISABLE, base + GIC_DIST_CTRL);

	/*
	 * Set all global interrupts to this CPU only.
	 */
	cpumask = gic_get_cpumask(gic);
	cpumask |= cpumask << 8;
	cpumask |= cpumask << 16;
	for (i = 32; i < gic_irqs; i += 4)
		writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);

	gic_dist_config(base, gic_irqs, NULL);

	writel_relaxed(GICD_ENABLE, base + GIC_DIST_CTRL);
}

static void gic_cpu_init(struct gic_chip_data *gic)
{
	void __iomem *dist_base = gic_data_dist_base(gic);
	void __iomem *base = gic_data_cpu_base(gic);
	unsigned int cpu_mask, cpu = smp_processor_id();
	int i;

	/*
	 * Setting up the CPU map is only relevant for the primary GIC
	 * because any nested/secondary GICs do not directly interface
	 * with the CPU(s).
	 */
	if (gic == &gic_data[0]) {
		/*
		 * Get what the GIC says our CPU mask is.
		 */
		BUG_ON(cpu >= NR_GIC_CPU_IF);
		cpu_mask = gic_get_cpumask(gic);
		gic_cpu_map[cpu] = cpu_mask;

		/*
		 * Clear our mask from the other map entries in case they're
		 * still undefined.
		 */
		for (i = 0; i < NR_GIC_CPU_IF; i++)
			if (i != cpu)
				gic_cpu_map[i] &= ~cpu_mask;
	}

	gic_cpu_config(dist_base, NULL);

	writel_relaxed(GICC_INT_PRI_THRESHOLD, base + GIC_CPU_PRIMASK);
	gic_cpu_if_up(gic);
}

int gic_cpu_if_down(unsigned int gic_nr)
{
	void __iomem *cpu_base;
	u32 val = 0;

	if (gic_nr >= CONFIG_ARM_GIC_MAX_NR)
		return -EINVAL;

	cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);
	val = readl(cpu_base + GIC_CPU_CTRL);
	val &= ~GICC_ENABLE;
	writel_relaxed(val, cpu_base + GIC_CPU_CTRL);

	return 0;
}

#ifdef CONFIG_CPU_PM
/*
 * Saves the GIC distributor registers during suspend or idle. Must be called
 * with interrupts disabled but before powering down the GIC. After calling
 * this function, no interrupts will be delivered by the GIC, and another
 * platform-specific wakeup source must be enabled.
 */
static void gic_dist_save(unsigned int gic_nr)
{
	unsigned int gic_irqs;
	void __iomem *dist_base;
	int i;

	BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR);

	gic_irqs = gic_data[gic_nr].gic_irqs;
	dist_base = gic_data_dist_base(&gic_data[gic_nr]);

	if (!dist_base)
		return;

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
		gic_data[gic_nr].saved_spi_conf[i] =
			readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		gic_data[gic_nr].saved_spi_target[i] =
			readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
		gic_data[gic_nr].saved_spi_enable[i] =
			readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
		gic_data[gic_nr].saved_spi_active[i] =
			readl_relaxed(dist_base + GIC_DIST_ACTIVE_SET + i * 4);
}

/*
 * Restores the GIC distributor registers during resume or when coming out of
 * idle. Must be called before enabling interrupts. If a level interrupt
 * that occurred while the GIC was suspended is still present, it will be
 * handled normally, but any edge interrupts that occurred will not be seen by
 * the GIC and need to be handled by the platform-specific wakeup source.
 */
static void gic_dist_restore(unsigned int gic_nr)
{
	unsigned int gic_irqs;
	unsigned int i;
	void __iomem *dist_base;

	BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR);

	gic_irqs = gic_data[gic_nr].gic_irqs;
	dist_base = gic_data_dist_base(&gic_data[gic_nr]);

	if (!dist_base)
		return;

	writel_relaxed(GICD_DISABLE, dist_base + GIC_DIST_CTRL);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
		writel_relaxed(gic_data[gic_nr].saved_spi_conf[i],
			       dist_base + GIC_DIST_CONFIG + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		writel_relaxed(GICD_INT_DEF_PRI_X4,
			       dist_base + GIC_DIST_PRI + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		writel_relaxed(gic_data[gic_nr].saved_spi_target[i],
			       dist_base + GIC_DIST_TARGET + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) {
		writel_relaxed(GICD_INT_EN_CLR_X32,
			       dist_base + GIC_DIST_ENABLE_CLEAR + i * 4);
		writel_relaxed(gic_data[gic_nr].saved_spi_enable[i],
			       dist_base + GIC_DIST_ENABLE_SET + i * 4);
	}

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) {
		writel_relaxed(GICD_INT_EN_CLR_X32,
			       dist_base + GIC_DIST_ACTIVE_CLEAR + i * 4);
		writel_relaxed(gic_data[gic_nr].saved_spi_active[i],
			       dist_base + GIC_DIST_ACTIVE_SET + i * 4);
	}

	writel_relaxed(GICD_ENABLE, dist_base + GIC_DIST_CTRL);
}

static void gic_cpu_save(unsigned int gic_nr)
{
	int i;
	u32 *ptr;
	void __iomem *dist_base;
	void __iomem *cpu_base;

	BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR);

	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
	cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);

	if (!dist_base || !cpu_base)
		return;

	ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
	for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
		ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);

	ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_active);
	for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
		ptr[i] = readl_relaxed(dist_base + GIC_DIST_ACTIVE_SET + i * 4);

	ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
	for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
		ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
}

static void gic_cpu_restore(unsigned int gic_nr)
{
	int i;
	u32 *ptr;
	void __iomem *dist_base;
	void __iomem *cpu_base;

	BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR);

	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
	cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);

	if (!dist_base || !cpu_base)
		return;

	ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
	for (i = 0; i < DIV_ROUND_UP(32, 32); i++) {
		writel_relaxed(GICD_INT_EN_CLR_X32,
			       dist_base + GIC_DIST_ENABLE_CLEAR + i * 4);
		writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4);
	}

	ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_active);
	for (i = 0; i < DIV_ROUND_UP(32, 32); i++) {
		writel_relaxed(GICD_INT_EN_CLR_X32,
			       dist_base + GIC_DIST_ACTIVE_CLEAR + i * 4);
		writel_relaxed(ptr[i], dist_base + GIC_DIST_ACTIVE_SET + i * 4);
	}

	ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
	for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
		writel_relaxed(ptr[i], dist_base + GIC_DIST_CONFIG + i * 4);

	for (i = 0; i < DIV_ROUND_UP(32, 4); i++)
		writel_relaxed(GICD_INT_DEF_PRI_X4,
			       dist_base + GIC_DIST_PRI + i * 4);

	writel_relaxed(GICC_INT_PRI_THRESHOLD, cpu_base + GIC_CPU_PRIMASK);
	gic_cpu_if_up(&gic_data[gic_nr]);
}

static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v)
{
	int i;

	for (i = 0; i < CONFIG_ARM_GIC_MAX_NR; i++) {
#ifdef CONFIG_GIC_NON_BANKED
		/* Skip over unused GICs */
		if (!gic_data[i].get_base)
			continue;
#endif
		switch (cmd) {
		case CPU_PM_ENTER:
			gic_cpu_save(i);
			break;
		case CPU_PM_ENTER_FAILED:
		case CPU_PM_EXIT:
			gic_cpu_restore(i);
			break;
		case CPU_CLUSTER_PM_ENTER:
			gic_dist_save(i);
			break;
		case CPU_CLUSTER_PM_ENTER_FAILED:
		case CPU_CLUSTER_PM_EXIT:
			gic_dist_restore(i);
			break;
		}
	}

	return NOTIFY_OK;
}

static struct notifier_block gic_notifier_block = {
	.notifier_call = gic_notifier,
};

static void __init gic_pm_init(struct gic_chip_data *gic)
{
	gic->saved_ppi_enable = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
					       sizeof(u32));
	BUG_ON(!gic->saved_ppi_enable);

	gic->saved_ppi_active = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
					       sizeof(u32));
	BUG_ON(!gic->saved_ppi_active);

	gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4,
					     sizeof(u32));
	BUG_ON(!gic->saved_ppi_conf);

	if (gic == &gic_data[0])
		cpu_pm_register_notifier(&gic_notifier_block);
}
#else
static void __init gic_pm_init(struct gic_chip_data *gic)
{
}
#endif

#ifdef CONFIG_SMP
static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
{
	int cpu;
	unsigned long flags, map = 0;

	raw_spin_lock_irqsave(&irq_controller_lock, flags);

	/* Convert our logical CPU mask into a physical one. */
	for_each_cpu(cpu, mask)
		map |= gic_cpu_map[cpu];

	/*
	 * Ensure that stores to Normal memory are visible to the
	 * other CPUs before they observe us issuing the IPI.
	 */
	dmb(ishst);

	/* this always happens on GIC0 */
	writel_relaxed(map << 16 | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);

	raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
}
#endif

#ifdef CONFIG_BL_SWITCHER
/*
 * gic_send_sgi - send an SGI directly to given CPU interface number
 *
 * cpu_id: the ID for the destination CPU interface
 * irq: the IPI number to send an SGI for
 */
void gic_send_sgi(unsigned int cpu_id, unsigned int irq)
{
	BUG_ON(cpu_id >= NR_GIC_CPU_IF);
	cpu_id = 1 << cpu_id;
	/* this always happens on GIC0 */
	writel_relaxed((cpu_id << 16) | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
}

/*
 * gic_get_cpu_id - get the CPU interface ID for the specified CPU
 *
 * @cpu: the logical CPU number to get the GIC ID for.
 *
 * Return the CPU interface ID for the given logical CPU number,
 * or -1 if the CPU number is too large or the interface ID is
 * unknown (more than one bit set).
 */
int gic_get_cpu_id(unsigned int cpu)
{
	unsigned int cpu_bit;

	if (cpu >= NR_GIC_CPU_IF)
		return -1;
	cpu_bit = gic_cpu_map[cpu];
	if (cpu_bit & (cpu_bit - 1))
		return -1;
	return __ffs(cpu_bit);
}

/*
 * gic_migrate_target - migrate IRQs to another CPU interface
 *
 * @new_cpu_id: the CPU target ID to migrate IRQs to
 *
 * Migrate all peripheral interrupts with a target matching the current CPU
 * to the interface corresponding to @new_cpu_id. The CPU interface mapping
 * is also updated. Targets to other CPU interfaces are unchanged.
 * This must be called with IRQs locally disabled.
 */
void gic_migrate_target(unsigned int new_cpu_id)
{
	unsigned int cur_cpu_id, gic_irqs, gic_nr = 0;
	void __iomem *dist_base;
	int i, ror_val, cpu = smp_processor_id();
	u32 val, cur_target_mask, active_mask;

	BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR);

	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
	if (!dist_base)
		return;
	gic_irqs = gic_data[gic_nr].gic_irqs;

	cur_cpu_id = __ffs(gic_cpu_map[cpu]);
	cur_target_mask = 0x01010101 << cur_cpu_id;
	ror_val = (cur_cpu_id - new_cpu_id) & 31;

	raw_spin_lock(&irq_controller_lock);

	/* Update the target interface for this logical CPU */
	gic_cpu_map[cpu] = 1 << new_cpu_id;

	/*
	 * Find all the peripheral interrupts targeting the current
	 * CPU interface and migrate them to the new CPU interface.
	 * We skip DIST_TARGET 0 to 7 as they are read-only.
	 */
	for (i = 8; i < DIV_ROUND_UP(gic_irqs, 4); i++) {
		val = readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);
		active_mask = val & cur_target_mask;
		if (active_mask) {
			val &= ~active_mask;
			val |= ror32(active_mask, ror_val);
			writel_relaxed(val, dist_base + GIC_DIST_TARGET + i * 4);
		}
	}

	raw_spin_unlock(&irq_controller_lock);

	/*
	 * Now let's migrate and clear any potential SGIs that might be
	 * pending for us (cur_cpu_id). Since GIC_DIST_SGI_PENDING_SET
	 * is a banked register, we can only forward the SGI using
	 * GIC_DIST_SOFTINT. The original SGI source is lost but Linux
	 * doesn't use that information anyway.
	 *
	 * For the same reason we do not adjust SGI source information
	 * for previously sent SGIs by us to other CPUs either.
	 */
	for (i = 0; i < 16; i += 4) {
		int j;
		val = readl_relaxed(dist_base + GIC_DIST_SGI_PENDING_SET + i);
		if (!val)
			continue;
		writel_relaxed(val, dist_base + GIC_DIST_SGI_PENDING_CLEAR + i);
		for (j = i; j < i + 4; j++) {
			if (val & 0xff)
				writel_relaxed((1 << (new_cpu_id + 16)) | j,
					       dist_base + GIC_DIST_SOFTINT);
			val >>= 8;
		}
	}
}

/*
 * gic_get_sgir_physaddr - get the physical address for the SGI register
 *
 * Return the physical address of the SGI register to be used
 * by some early assembly code when the kernel is not yet available.
 */
static unsigned long gic_dist_physaddr;

unsigned long gic_get_sgir_physaddr(void)
{
	if (!gic_dist_physaddr)
		return 0;
	return gic_dist_physaddr + GIC_DIST_SOFTINT;
}

void __init gic_init_physaddr(struct device_node *node)
{
	struct resource res;
	if (of_address_to_resource(node, 0, &res) == 0) {
		gic_dist_physaddr = res.start;
		pr_info("GIC physical location is %#lx\n", gic_dist_physaddr);
	}
}

#else
#define gic_init_physaddr(node)  do { } while (0)
#endif

static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
			      irq_hw_number_t hw)
{
	struct gic_chip_data *gic = d->host_data;

	if (hw < 32) {
		irq_set_percpu_devid(irq);
		irq_domain_set_info(d, irq, hw, &gic->chip, d->host_data,
				    handle_percpu_devid_irq, NULL, NULL);
		irq_set_status_flags(irq, IRQ_NOAUTOEN);
	} else {
		irq_domain_set_info(d, irq, hw, &gic->chip, d->host_data,
				    handle_fasteoi_irq, NULL, NULL);
		irq_set_probe(irq);
	}
	return 0;
}

static void gic_irq_domain_unmap(struct irq_domain *d, unsigned int irq)
{
}

static int gic_irq_domain_translate(struct irq_domain *d,
				    struct irq_fwspec *fwspec,
				    unsigned long *hwirq,
				    unsigned int *type)
{
	if (is_of_node(fwspec->fwnode)) {
		if (fwspec->param_count < 3)
			return -EINVAL;

		/* Get the interrupt number and add 16 to skip over SGIs */
		*hwirq = fwspec->param[1] + 16;

		/*
		 * For SPIs, we need to add 16 more to get the GIC irq
		 * ID number
		 */
		if (!fwspec->param[0])
			*hwirq += 16;

		*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
		return 0;
	}

	if (is_fwnode_irqchip(fwspec->fwnode)) {
		if (fwspec->param_count != 2)
			return -EINVAL;

		*hwirq = fwspec->param[0];
		*type = fwspec->param[1];
		return 0;
	}

	return -EINVAL;
}

#ifdef CONFIG_SMP
static int gic_secondary_init(struct notifier_block *nfb, unsigned long action,
			      void *hcpu)
{
	if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
		gic_cpu_init(&gic_data[0]);
	return NOTIFY_OK;
}

/*
 * Notifier for enabling the GIC CPU interface. Set an arbitrarily high
 * priority because the GIC needs to be up before the ARM generic timers.
 */
static struct notifier_block gic_cpu_notifier = {
	.notifier_call = gic_secondary_init,
	.priority = 100,
};
#endif

static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	int i, ret;
	irq_hw_number_t hwirq;
	unsigned int type = IRQ_TYPE_NONE;
	struct irq_fwspec *fwspec = arg;

	ret = gic_irq_domain_translate(domain, fwspec, &hwirq, &type);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++)
		gic_irq_domain_map(domain, virq + i, hwirq + i);

	return 0;
}

static const struct irq_domain_ops gic_irq_domain_hierarchy_ops = {
	.translate = gic_irq_domain_translate,
	.alloc = gic_irq_domain_alloc,
	.free = irq_domain_free_irqs_top,
};

static const struct irq_domain_ops gic_irq_domain_ops = {
	.map = gic_irq_domain_map,
	.unmap = gic_irq_domain_unmap,
};

static void __init __gic_init_bases(unsigned int gic_nr, int irq_start,
				    void __iomem *dist_base, void __iomem *cpu_base,
				    u32 percpu_offset, struct fwnode_handle *handle)
{
	irq_hw_number_t hwirq_base;
	struct gic_chip_data *gic;
	int gic_irqs, irq_base, i;

	BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR);

	gic_check_cpu_features();

	gic = &gic_data[gic_nr];

	/* Initialize irq_chip */
	if (static_key_true(&supports_deactivate) && gic_nr == 0) {
		gic->chip = gic_eoimode1_chip;
	} else {
		gic->chip = gic_chip;
		gic->chip.name = kasprintf(GFP_KERNEL, "GIC-%d", gic_nr);
	}

#ifdef CONFIG_SMP
	if (gic_nr == 0)
		gic->chip.irq_set_affinity = gic_set_affinity;
#endif

#ifdef CONFIG_GIC_NON_BANKED
	if (percpu_offset) { /* Franken-GIC without banked registers... */
		unsigned int cpu;

		gic->dist_base.percpu_base = alloc_percpu(void __iomem *);
		gic->cpu_base.percpu_base = alloc_percpu(void __iomem *);
		if (WARN_ON(!gic->dist_base.percpu_base ||
			    !gic->cpu_base.percpu_base)) {
			free_percpu(gic->dist_base.percpu_base);
			free_percpu(gic->cpu_base.percpu_base);
			return;
		}

		for_each_possible_cpu(cpu) {
			u32 mpidr = cpu_logical_map(cpu);
			u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
			unsigned long offset = percpu_offset * core_id;
			*per_cpu_ptr(gic->dist_base.percpu_base, cpu) = dist_base + offset;
			*per_cpu_ptr(gic->cpu_base.percpu_base, cpu) = cpu_base + offset;
		}

		gic_set_base_accessor(gic, gic_get_percpu_base);
	} else
#endif
	{			/* Normal, sane GIC... */
		WARN(percpu_offset,
		     "GIC_NON_BANKED not enabled, ignoring %08x offset!",
		     percpu_offset);
		gic->dist_base.common_base = dist_base;
		gic->cpu_base.common_base = cpu_base;
		gic_set_base_accessor(gic, gic_get_common_base);
	}

	/*
	 * Find out how many interrupts are supported.
	 * The GIC only supports up to 1020 interrupt sources.
	 */
	gic_irqs = readl_relaxed(gic_data_dist_base(gic) + GIC_DIST_CTR) & 0x1f;
	gic_irqs = (gic_irqs + 1) * 32;
	if (gic_irqs > 1020)
		gic_irqs = 1020;
	gic->gic_irqs = gic_irqs;

	if (handle) {		/* DT/ACPI */
		gic->domain = irq_domain_create_linear(handle, gic_irqs,
						       &gic_irq_domain_hierarchy_ops,
						       gic);
	} else {		/* Legacy support */
		/*
		 * For primary GICs, skip over SGIs.
		 * For secondary GICs, skip over PPIs, too.
		 */
		if (gic_nr == 0 && (irq_start & 31) > 0) {
			hwirq_base = 16;
			if (irq_start != -1)
				irq_start = (irq_start & ~31) + 16;
		} else {
			hwirq_base = 32;
		}

		gic_irqs -= hwirq_base; /* calculate # of irqs to allocate */

		irq_base = irq_alloc_descs(irq_start, 16, gic_irqs,
					   numa_node_id());
		if (IS_ERR_VALUE(irq_base)) {
			WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
			     irq_start);
			irq_base = irq_start;
		}

		gic->domain = irq_domain_add_legacy(NULL, gic_irqs, irq_base,
						    hwirq_base, &gic_irq_domain_ops, gic);
	}

	if (WARN_ON(!gic->domain))
		return;

	if (gic_nr == 0) {
		/*
		 * Initialize the CPU interface map to all CPUs.
		 * It will be refined as each CPU probes its ID.
		 * This is only necessary for the primary GIC.
		 */
		for (i = 0; i < NR_GIC_CPU_IF; i++)
			gic_cpu_map[i] = 0xff;
#ifdef CONFIG_SMP
		set_smp_cross_call(gic_raise_softirq);
		register_cpu_notifier(&gic_cpu_notifier);
#endif
		set_handle_irq(gic_handle_irq);
		if (static_key_true(&supports_deactivate))
			pr_info("GIC: Using split EOI/Deactivate mode\n");
	}

	gic_dist_init(gic);
	gic_cpu_init(gic);
	gic_pm_init(gic);
}

void __init gic_init(unsigned int gic_nr, int irq_start,
		     void __iomem *dist_base, void __iomem *cpu_base)
{
	/*
	 * Non-DT/ACPI systems won't run a hypervisor, so let's not
	 * bother with these...
	 */
	static_key_slow_dec(&supports_deactivate);
	__gic_init_bases(gic_nr, irq_start, dist_base, cpu_base, 0, NULL);
}

#ifdef CONFIG_OF
static int gic_cnt __initdata;

static bool gic_check_eoimode(struct device_node *node, void __iomem **base)
{
	struct resource cpuif_res;

	of_address_to_resource(node, 1, &cpuif_res);

	if (!is_hyp_mode_available())
		return false;
	if (resource_size(&cpuif_res) < SZ_8K)
		return false;
	if (resource_size(&cpuif_res) == SZ_128K) {
		u32 val_low, val_high;

		/*
		 * Verify that we have the first 4kB of a GIC400
		 * aliased over the first 64kB by checking the
		 * GICC_IIDR register on both ends.
		 */
		val_low = readl_relaxed(*base + GIC_CPU_IDENT);
		val_high = readl_relaxed(*base + GIC_CPU_IDENT + 0xf000);
		if ((val_low & 0xffff0fff) != 0x0202043B ||
		    val_low != val_high)
			return false;

		/*
		 * Move the base up by 60kB, so that we have an 8kB
		 * contiguous region, which allows us to use GICC_DIR
		 * at its normal offset. Please pass me that bucket.
		 */
		*base += 0xf000;
		cpuif_res.start += 0xf000;
		pr_warn("GIC: Adjusting CPU interface base to %pa",
			&cpuif_res.start);
	}

	return true;
}

int __init
gic_of_init(struct device_node *node, struct device_node *parent)
{
	void __iomem *cpu_base;
	void __iomem *dist_base;
	u32 percpu_offset;
	int irq;

	if (WARN_ON(!node))
		return -ENODEV;

	dist_base = of_iomap(node, 0);
	WARN(!dist_base, "unable to map gic dist registers\n");

	cpu_base = of_iomap(node, 1);
	WARN(!cpu_base, "unable to map gic cpu registers\n");

	/*
	 * Disable split EOI/Deactivate if either HYP is not available
	 * or the CPU interface is too small.
	 */
	if (gic_cnt == 0 && !gic_check_eoimode(node, &cpu_base))
		static_key_slow_dec(&supports_deactivate);

	if (of_property_read_u32(node, "cpu-offset", &percpu_offset))
		percpu_offset = 0;

	__gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset,
			 &node->fwnode);
	if (!gic_cnt)
		gic_init_physaddr(node);

	if (parent) {
		irq = irq_of_parse_and_map(node, 0);
		gic_cascade_irq(gic_cnt, irq);
	}

	if (IS_ENABLED(CONFIG_ARM_GIC_V2M))
		gicv2m_init(&node->fwnode, gic_data[gic_cnt].domain);

	gic_cnt++;
	return 0;
}
IRQCHIP_DECLARE(gic_400, "arm,gic-400", gic_of_init);
IRQCHIP_DECLARE(arm11mp_gic, "arm,arm11mp-gic", gic_of_init);
IRQCHIP_DECLARE(arm1176jzf_dc_gic, "arm,arm1176jzf-devchip-gic", gic_of_init);
IRQCHIP_DECLARE(cortex_a15_gic, "arm,cortex-a15-gic", gic_of_init);
IRQCHIP_DECLARE(cortex_a9_gic, "arm,cortex-a9-gic", gic_of_init);
IRQCHIP_DECLARE(cortex_a7_gic, "arm,cortex-a7-gic", gic_of_init);
IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init);
IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init);
IRQCHIP_DECLARE(pl390, "arm,pl390", gic_of_init);

#endif

#ifdef CONFIG_ACPI
static phys_addr_t cpu_phy_base __initdata;

static int __init
gic_acpi_parse_madt_cpu(struct acpi_subtable_header *header,
			const unsigned long end)
{
	struct acpi_madt_generic_interrupt *processor;
	phys_addr_t gic_cpu_base;
	static int cpu_base_assigned;

	processor = (struct acpi_madt_generic_interrupt *)header;

	if (BAD_MADT_GICC_ENTRY(processor, end))
		return -EINVAL;

	/*
	 * There is no support for non-banked GICv1/2 registers in the ACPI
	 * spec. All CPU interface addresses have to be the same.
	 */
	gic_cpu_base = processor->base_address;
	if (cpu_base_assigned && gic_cpu_base != cpu_phy_base)
		return -EINVAL;

	cpu_phy_base = gic_cpu_base;
	cpu_base_assigned = 1;
	return 0;
}

/* The things you have to do to just *count* something... */
static int __init acpi_dummy_func(struct acpi_subtable_header *header,
				  const unsigned long end)
{
	return 0;
}

static bool __init acpi_gic_redist_is_present(void)
{
	return acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR,
				     acpi_dummy_func, 0) > 0;
}

static bool __init gic_validate_dist(struct acpi_subtable_header *header,
				     struct acpi_probe_entry *ape)
{
	struct acpi_madt_generic_distributor *dist;
	dist = (struct acpi_madt_generic_distributor *)header;

	return (dist->version == ape->driver_data &&
		(dist->version != ACPI_MADT_GIC_VERSION_NONE ||
		 !acpi_gic_redist_is_present()));
}

#define ACPI_GICV2_DIST_MEM_SIZE	(SZ_4K)
#define ACPI_GIC_CPU_IF_MEM_SIZE	(SZ_8K)

static int __init gic_v2_acpi_init(struct acpi_subtable_header *header,
				   const unsigned long end)
{
	struct acpi_madt_generic_distributor *dist;
	void __iomem *cpu_base, *dist_base;
	struct fwnode_handle *domain_handle;
	int count;

	/* Collect CPU base addresses */
	count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
				      gic_acpi_parse_madt_cpu, 0);
	if (count <= 0) {
		pr_err("No valid GICC entries exist\n");
		return -EINVAL;
	}

	cpu_base = ioremap(cpu_phy_base, ACPI_GIC_CPU_IF_MEM_SIZE);
	if (!cpu_base) {
		pr_err("Unable to map GICC registers\n");
		return -ENOMEM;
	}

	dist = (struct acpi_madt_generic_distributor *)header;
	dist_base = ioremap(dist->base_address, ACPI_GICV2_DIST_MEM_SIZE);
	if (!dist_base) {
		pr_err("Unable to map GICD registers\n");
		iounmap(cpu_base);
		return -ENOMEM;
	}

	/*
	 * Disable split EOI/Deactivate if HYP is not available. ACPI
	 * guarantees that we'll always have a GICv2, so the CPU
	 * interface will always be the right size.
	 */
	if (!is_hyp_mode_available())
		static_key_slow_dec(&supports_deactivate);

	/*
	 * Initialize GIC instance zero (no multi-GIC support).
	 */
	domain_handle = irq_domain_alloc_fwnode(dist_base);
	if (!domain_handle) {
		pr_err("Unable to allocate domain handle\n");
		iounmap(cpu_base);
		iounmap(dist_base);
		return -ENOMEM;
	}

	__gic_init_bases(0, -1, dist_base, cpu_base, 0, domain_handle);

	acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, domain_handle);

	if (IS_ENABLED(CONFIG_ARM_GIC_V2M))
		gicv2m_init(NULL, gic_data[0].domain);

	return 0;
}
IRQCHIP_ACPI_DECLARE(gic_v2, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
		     gic_validate_dist, ACPI_MADT_GIC_VERSION_V2,
		     gic_v2_acpi_init);
IRQCHIP_ACPI_DECLARE(gic_v2_maybe, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
		     gic_validate_dist, ACPI_MADT_GIC_VERSION_NONE,
		     gic_v2_acpi_init);
#endif