/*
 * Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Interrupt architecture for the GIC:
 *
 * o There is one Interrupt Distributor, which receives interrupts
 *   from system devices and sends them to the Interrupt Controllers.
 *
 * o There is one CPU Interface per CPU, which sends interrupts sent
 *   by the Distributor, and interrupts generated locally, to the
 *   associated CPU. The base address of the CPU interface is usually
 *   aliased so that the same address points to different chips depending
 *   on the CPU it is accessed from.
 *
 * Note that IRQs 0-31 are special - they are local to each CPU.
 * As such, the enable set/clear, pending set/clear and active bit
 * registers are banked per-cpu for these sources.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/cpumask.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/acpi.h>
#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqchip/arm-gic.h>

#include <asm/cputype.h>
#include <asm/irq.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>
#include <asm/virt.h>

#include "irq-gic-common.h"

#ifdef CONFIG_ARM64
#include <asm/cpufeature.h>

static void gic_check_cpu_features(void)
{
	WARN_TAINT_ONCE(this_cpu_has_cap(ARM64_HAS_SYSREG_GIC_CPUIF),
			TAINT_CPU_OUT_OF_SPEC,
			"GICv3 system registers enabled, broken firmware!\n");
}
#else
#define gic_check_cpu_features()	do { } while(0)
#endif

union gic_base {
	void __iomem *common_base;
	void __percpu * __iomem *percpu_base;
};

struct gic_chip_data {
	struct irq_chip chip;
	union gic_base dist_base;
	union gic_base cpu_base;
	void __iomem *raw_dist_base;
	void __iomem *raw_cpu_base;
	u32 percpu_offset;
#if defined(CONFIG_CPU_PM) || defined(CONFIG_ARM_GIC_PM)
	u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)];
	u32 saved_spi_active[DIV_ROUND_UP(1020, 32)];
	u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)];
	u32 saved_spi_target[DIV_ROUND_UP(1020, 4)];
	u32 __percpu *saved_ppi_enable;
	u32 __percpu *saved_ppi_active;
	u32 __percpu *saved_ppi_conf;
#endif
	struct irq_domain *domain;
	unsigned int gic_irqs;
#ifdef CONFIG_GIC_NON_BANKED
	void __iomem *(*get_base)(union gic_base *);
#endif
};

#ifdef CONFIG_BL_SWITCHER

static DEFINE_RAW_SPINLOCK(cpu_map_lock);

#define gic_lock_irqsave(f)		\
	raw_spin_lock_irqsave(&cpu_map_lock, (f))
#define gic_unlock_irqrestore(f)	\
	raw_spin_unlock_irqrestore(&cpu_map_lock, (f))

#define gic_lock()			raw_spin_lock(&cpu_map_lock)
#define gic_unlock()			raw_spin_unlock(&cpu_map_lock)

#else

#define gic_lock_irqsave(f)		do { (void)(f); } while(0)
#define gic_unlock_irqrestore(f)	do { (void)(f); } while(0)

#define gic_lock()			do { } while(0)
#define gic_unlock()			do { } while(0)

#endif

/*
 * The GIC mapping of CPU interfaces does not necessarily match
 * the logical CPU numbering. Let's use a mapping as returned
 * by the GIC itself.
 */
#define NR_GIC_CPU_IF 8
static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;

static struct static_key supports_deactivate = STATIC_KEY_INIT_TRUE;

static struct gic_chip_data gic_data[CONFIG_ARM_GIC_MAX_NR] __read_mostly;

static struct gic_kvm_info gic_v2_kvm_info;

#ifdef CONFIG_GIC_NON_BANKED
static void __iomem *gic_get_percpu_base(union gic_base *base)
{
	return raw_cpu_read(*base->percpu_base);
}

static void __iomem *gic_get_common_base(union gic_base *base)
{
	return base->common_base;
}

static inline void __iomem *gic_data_dist_base(struct gic_chip_data *data)
{
	return data->get_base(&data->dist_base);
}

static inline void __iomem *gic_data_cpu_base(struct gic_chip_data *data)
{
	return data->get_base(&data->cpu_base);
}

static inline void gic_set_base_accessor(struct gic_chip_data *data,
					 void __iomem *(*f)(union gic_base *))
{
	data->get_base = f;
}
#else
#define gic_data_dist_base(d)	((d)->dist_base.common_base)
#define gic_data_cpu_base(d)	((d)->cpu_base.common_base)
#define gic_set_base_accessor(d, f)
#endif

static inline void __iomem *gic_dist_base(struct irq_data *d)
{
	struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
	return gic_data_dist_base(gic_data);
}

static inline void __iomem *gic_cpu_base(struct irq_data *d)
{
	struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
	return gic_data_cpu_base(gic_data);
}

static inline unsigned int gic_irq(struct irq_data *d)
{
	return d->hwirq;
}

static inline bool cascading_gic_irq(struct irq_data *d)
{
	void *data = irq_data_get_irq_handler_data(d);

	/*
	 * If handler_data is set, this is a cascading interrupt, and
	 * it cannot possibly be forwarded.
	 */
	return data != NULL;
}

/*
 * Routines to acknowledge, disable and enable interrupts
 */
static void gic_poke_irq(struct irq_data *d, u32 offset)
{
	u32 mask = 1 << (gic_irq(d) % 32);
	writel_relaxed(mask, gic_dist_base(d) + offset + (gic_irq(d) / 32) * 4);
}

static int gic_peek_irq(struct irq_data *d, u32 offset)
{
	u32 mask = 1 << (gic_irq(d) % 32);
	return !!(readl_relaxed(gic_dist_base(d) + offset + (gic_irq(d) / 32) * 4) & mask);
}

static void gic_mask_irq(struct irq_data *d)
{
	gic_poke_irq(d, GIC_DIST_ENABLE_CLEAR);
}

static void gic_eoimode1_mask_irq(struct irq_data *d)
{
	gic_mask_irq(d);
	/*
	 * When masking a forwarded interrupt, make sure it is
	 * deactivated as well.
	 *
	 * This ensures that an interrupt that is getting
	 * disabled/masked will not get "stuck", because there is
	 * no one to deactivate it (guest is being terminated).
	 */
	if (irqd_is_forwarded_to_vcpu(d))
		gic_poke_irq(d, GIC_DIST_ACTIVE_CLEAR);
}

static void gic_unmask_irq(struct irq_data *d)
{
	gic_poke_irq(d, GIC_DIST_ENABLE_SET);
}

static void gic_eoi_irq(struct irq_data *d)
{
	writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
}

static void gic_eoimode1_eoi_irq(struct irq_data *d)
{
	/* Do not deactivate an IRQ forwarded to a vcpu. */
	if (irqd_is_forwarded_to_vcpu(d))
		return;

	writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_DEACTIVATE);
}

static int gic_irq_set_irqchip_state(struct irq_data *d,
				     enum irqchip_irq_state which, bool val)
{
	u32 reg;

	switch (which) {
	case IRQCHIP_STATE_PENDING:
		reg = val ? GIC_DIST_PENDING_SET : GIC_DIST_PENDING_CLEAR;
		break;

	case IRQCHIP_STATE_ACTIVE:
		reg = val ? GIC_DIST_ACTIVE_SET : GIC_DIST_ACTIVE_CLEAR;
		break;

	case IRQCHIP_STATE_MASKED:
		reg = val ? GIC_DIST_ENABLE_CLEAR : GIC_DIST_ENABLE_SET;
		break;

	default:
		return -EINVAL;
	}

	gic_poke_irq(d, reg);
	return 0;
}

static int gic_irq_get_irqchip_state(struct irq_data *d,
				     enum irqchip_irq_state which, bool *val)
{
	switch (which) {
	case IRQCHIP_STATE_PENDING:
		*val = gic_peek_irq(d, GIC_DIST_PENDING_SET);
		break;

	case IRQCHIP_STATE_ACTIVE:
		*val = gic_peek_irq(d, GIC_DIST_ACTIVE_SET);
		break;

	case IRQCHIP_STATE_MASKED:
		*val = !gic_peek_irq(d, GIC_DIST_ENABLE_SET);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

static int gic_set_type(struct irq_data *d, unsigned int type)
{
	void __iomem *base = gic_dist_base(d);
	unsigned int gicirq = gic_irq(d);

	/* Interrupt configuration for SGIs can't be changed */
	if (gicirq < 16)
		return -EINVAL;

	/* SPIs have restrictions on the supported types */
	if (gicirq >= 32 && type != IRQ_TYPE_LEVEL_HIGH &&
	    type != IRQ_TYPE_EDGE_RISING)
		return -EINVAL;

	return gic_configure_irq(gicirq, type, base, NULL);
}

static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
{
	/* Only interrupts on the primary GIC can be forwarded to a vcpu. */
	if (cascading_gic_irq(d))
		return -EINVAL;

	if (vcpu)
		irqd_set_forwarded_to_vcpu(d);
	else
		irqd_clr_forwarded_to_vcpu(d);
	return 0;
}

#ifdef CONFIG_SMP
static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
			    bool force)
{
	void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
	unsigned int cpu, shift = (gic_irq(d) % 4) * 8;
	u32 val, mask, bit;
	unsigned long flags;

	if (!force)
		cpu = cpumask_any_and(mask_val, cpu_online_mask);
	else
		cpu = cpumask_first(mask_val);

	if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
		return -EINVAL;

	gic_lock_irqsave(flags);
	mask = 0xff << shift;
	bit = gic_cpu_map[cpu] << shift;
	val = readl_relaxed(reg) & ~mask;
	writel_relaxed(val | bit, reg);
	gic_unlock_irqrestore(flags);

	return IRQ_SET_MASK_OK_DONE;
}
#endif

static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
	u32 irqstat, irqnr;
	struct gic_chip_data *gic = &gic_data[0];
	void __iomem *cpu_base = gic_data_cpu_base(gic);

	do {
		irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK);
		irqnr = irqstat & GICC_IAR_INT_ID_MASK;

		if (likely(irqnr > 15 && irqnr < 1020)) {
			if (static_key_true(&supports_deactivate))
				writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
			isb();
			handle_domain_irq(gic->domain, irqnr, regs);
			continue;
		}
		if (irqnr < 16) {
			writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
			if (static_key_true(&supports_deactivate))
				writel_relaxed(irqstat, cpu_base + GIC_CPU_DEACTIVATE);
#ifdef CONFIG_SMP
			/*
			 * Ensure any shared data written by the CPU sending
			 * the IPI is read after we've read the ACK register
			 * on the GIC.
			 *
			 * Pairs with the write barrier in gic_raise_softirq
			 */
			smp_rmb();
			handle_IPI(irqnr, regs);
#endif
			continue;
		}
		break;
	} while (1);
}

static void gic_handle_cascade_irq(struct irq_desc *desc)
{
	struct gic_chip_data *chip_data = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned int cascade_irq, gic_irq;
	unsigned long status;

	chained_irq_enter(chip, desc);

	status = readl_relaxed(gic_data_cpu_base(chip_data) + GIC_CPU_INTACK);

	gic_irq = (status & GICC_IAR_INT_ID_MASK);
	if (gic_irq == GICC_INT_SPURIOUS)
		goto out;

	cascade_irq = irq_find_mapping(chip_data->domain, gic_irq);
	if (unlikely(gic_irq < 32 || gic_irq > 1020)) {
		handle_bad_irq(desc);
	} else {
		isb();
		generic_handle_irq(cascade_irq);
	}

 out:
	chained_irq_exit(chip, desc);
}

static struct irq_chip gic_chip = {
	.irq_mask		= gic_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_eoi		= gic_eoi_irq,
	.irq_set_type		= gic_set_type,
	.irq_get_irqchip_state	= gic_irq_get_irqchip_state,
	.irq_set_irqchip_state	= gic_irq_set_irqchip_state,
	.flags			= IRQCHIP_SET_TYPE_MASKED |
				  IRQCHIP_SKIP_SET_WAKE |
				  IRQCHIP_MASK_ON_SUSPEND,
};

void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
{
	BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR);
	irq_set_chained_handler_and_data(irq, gic_handle_cascade_irq,
					 &gic_data[gic_nr]);
}

static u8 gic_get_cpumask(struct gic_chip_data *gic)
{
	void __iomem *base = gic_data_dist_base(gic);
	u32 mask, i;

	for (i = mask = 0; i < 32; i += 4) {
		mask = readl_relaxed(base + GIC_DIST_TARGET + i);
		mask |= mask >> 16;
		mask |= mask >> 8;
		if (mask)
			break;
	}

	if (!mask && num_possible_cpus() > 1)
		pr_crit("GIC CPU mask not found - kernel will fail to boot.\n");

	return mask;
}

static void gic_cpu_if_up(struct gic_chip_data *gic)
{
	void __iomem *cpu_base = gic_data_cpu_base(gic);
	u32 bypass = 0;
	u32 mode = 0;

	if (gic == &gic_data[0] && static_key_true(&supports_deactivate))
		mode = GIC_CPU_CTRL_EOImodeNS;

	/*
	 * Preserve bypass disable bits to be written back later
	 */
	bypass = readl(cpu_base + GIC_CPU_CTRL);
	bypass &= GICC_DIS_BYPASS_MASK;

	writel_relaxed(bypass | mode | GICC_ENABLE, cpu_base + GIC_CPU_CTRL);
}


static void gic_dist_init(struct gic_chip_data *gic)
{
	unsigned int i;
	u32 cpumask;
	unsigned int gic_irqs = gic->gic_irqs;
	void __iomem *base = gic_data_dist_base(gic);

	writel_relaxed(GICD_DISABLE, base + GIC_DIST_CTRL);

	/*
	 * Set all global interrupts to this CPU only.
	 */
	cpumask = gic_get_cpumask(gic);
	cpumask |= cpumask << 8;
	cpumask |= cpumask << 16;
	for (i = 32; i < gic_irqs; i += 4)
		writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);

	gic_dist_config(base, gic_irqs, NULL);

	writel_relaxed(GICD_ENABLE, base + GIC_DIST_CTRL);
}

static int gic_cpu_init(struct gic_chip_data *gic)
{
	void __iomem *dist_base = gic_data_dist_base(gic);
	void __iomem *base = gic_data_cpu_base(gic);
	unsigned int cpu_mask, cpu = smp_processor_id();
	int i;

	/*
	 * Setting up the CPU map is only relevant for the primary GIC
	 * because any nested/secondary GICs do not directly interface
	 * with the CPU(s).
	 */
	if (gic == &gic_data[0]) {
		/*
		 * Get what the GIC says our CPU mask is.
		 */
		if (WARN_ON(cpu >= NR_GIC_CPU_IF))
			return -EINVAL;

		gic_check_cpu_features();
		cpu_mask = gic_get_cpumask(gic);
		gic_cpu_map[cpu] = cpu_mask;

		/*
		 * Clear our mask from the other map entries in case they're
		 * still undefined.
		 */
		for (i = 0; i < NR_GIC_CPU_IF; i++)
			if (i != cpu)
				gic_cpu_map[i] &= ~cpu_mask;
	}

	gic_cpu_config(dist_base, NULL);

	writel_relaxed(GICC_INT_PRI_THRESHOLD, base + GIC_CPU_PRIMASK);
	gic_cpu_if_up(gic);

	return 0;
}

int gic_cpu_if_down(unsigned int gic_nr)
{
	void __iomem *cpu_base;
	u32 val = 0;

	if (gic_nr >= CONFIG_ARM_GIC_MAX_NR)
		return -EINVAL;

	cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);
	val = readl(cpu_base + GIC_CPU_CTRL);
	val &= ~GICC_ENABLE;
	writel_relaxed(val, cpu_base + GIC_CPU_CTRL);

	return 0;
}

#if defined(CONFIG_CPU_PM) || defined(CONFIG_ARM_GIC_PM)
/*
 * Saves the GIC distributor registers during suspend or idle.  Must be called
 * with interrupts disabled but before powering down the GIC.  After calling
 * this function, no interrupts will be delivered by the GIC, and another
 * platform-specific wakeup source must be enabled.
 */
void gic_dist_save(struct gic_chip_data *gic)
{
	unsigned int gic_irqs;
	void __iomem *dist_base;
	int i;

	if (WARN_ON(!gic))
		return;

	gic_irqs = gic->gic_irqs;
	dist_base = gic_data_dist_base(gic);

	if (!dist_base)
		return;

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
		gic->saved_spi_conf[i] =
			readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		gic->saved_spi_target[i] =
			readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
		gic->saved_spi_enable[i] =
			readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
		gic->saved_spi_active[i] =
			readl_relaxed(dist_base + GIC_DIST_ACTIVE_SET + i * 4);
}

/*
 * Restores the GIC distributor registers during resume or when coming out of
 * idle.  Must be called before enabling interrupts.  If a level interrupt
 * that occurred while the GIC was suspended is still present, it will be
 * handled normally, but any edge interrupts that occurred will not be seen by
 * the GIC and need to be handled by the platform-specific wakeup source.
 */
void gic_dist_restore(struct gic_chip_data *gic)
{
	unsigned int gic_irqs;
	unsigned int i;
	void __iomem *dist_base;

	if (WARN_ON(!gic))
		return;

	gic_irqs = gic->gic_irqs;
	dist_base = gic_data_dist_base(gic);

	if (!dist_base)
		return;

	writel_relaxed(GICD_DISABLE, dist_base + GIC_DIST_CTRL);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
		writel_relaxed(gic->saved_spi_conf[i],
			dist_base + GIC_DIST_CONFIG + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		writel_relaxed(GICD_INT_DEF_PRI_X4,
			dist_base + GIC_DIST_PRI + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		writel_relaxed(gic->saved_spi_target[i],
			dist_base + GIC_DIST_TARGET + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) {
		writel_relaxed(GICD_INT_EN_CLR_X32,
			dist_base + GIC_DIST_ENABLE_CLEAR + i * 4);
		writel_relaxed(gic->saved_spi_enable[i],
			dist_base + GIC_DIST_ENABLE_SET + i * 4);
	}

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) {
		writel_relaxed(GICD_INT_EN_CLR_X32,
			dist_base + GIC_DIST_ACTIVE_CLEAR + i * 4);
		writel_relaxed(gic->saved_spi_active[i],
			dist_base + GIC_DIST_ACTIVE_SET + i * 4);
	}

	writel_relaxed(GICD_ENABLE, dist_base + GIC_DIST_CTRL);
}

void gic_cpu_save(struct gic_chip_data *gic)
{
	int i;
	u32 *ptr;
	void __iomem *dist_base;
	void __iomem *cpu_base;

	if (WARN_ON(!gic))
		return;

	dist_base = gic_data_dist_base(gic);
	cpu_base = gic_data_cpu_base(gic);

	if (!dist_base || !cpu_base)
		return;

	ptr = raw_cpu_ptr(gic->saved_ppi_enable);
	for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
		ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);

	ptr = raw_cpu_ptr(gic->saved_ppi_active);
	for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
		ptr[i] = readl_relaxed(dist_base + GIC_DIST_ACTIVE_SET + i * 4);

	ptr = raw_cpu_ptr(gic->saved_ppi_conf);
	for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
		ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);

}

void gic_cpu_restore(struct gic_chip_data *gic)
{
	int i;
	u32 *ptr;
	void __iomem *dist_base;
	void __iomem *cpu_base;

	if (WARN_ON(!gic))
		return;

	dist_base = gic_data_dist_base(gic);
	cpu_base = gic_data_cpu_base(gic);

	if (!dist_base || !cpu_base)
		return;

	ptr = raw_cpu_ptr(gic->saved_ppi_enable);
	for (i = 0; i < DIV_ROUND_UP(32, 32); i++) {
		writel_relaxed(GICD_INT_EN_CLR_X32,
			       dist_base + GIC_DIST_ENABLE_CLEAR + i * 4);
		writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4);
	}

	ptr = raw_cpu_ptr(gic->saved_ppi_active);
	for (i = 0; i < DIV_ROUND_UP(32, 32); i++) {
		writel_relaxed(GICD_INT_EN_CLR_X32,
			       dist_base + GIC_DIST_ACTIVE_CLEAR + i * 4);
		writel_relaxed(ptr[i], dist_base + GIC_DIST_ACTIVE_SET + i * 4);
	}

	ptr = raw_cpu_ptr(gic->saved_ppi_conf);
	for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
		writel_relaxed(ptr[i], dist_base + GIC_DIST_CONFIG + i * 4);

	for (i = 0; i < DIV_ROUND_UP(32, 4); i++)
		writel_relaxed(GICD_INT_DEF_PRI_X4,
			       dist_base + GIC_DIST_PRI + i * 4);

	writel_relaxed(GICC_INT_PRI_THRESHOLD, cpu_base + GIC_CPU_PRIMASK);
	gic_cpu_if_up(gic);
}

static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v)
{
	int i;

	for (i = 0; i < CONFIG_ARM_GIC_MAX_NR; i++) {
#ifdef CONFIG_GIC_NON_BANKED
		/* Skip over unused GICs */
		if (!gic_data[i].get_base)
			continue;
#endif
		switch (cmd) {
		case CPU_PM_ENTER:
			gic_cpu_save(&gic_data[i]);
			break;
		case CPU_PM_ENTER_FAILED:
		case CPU_PM_EXIT:
			gic_cpu_restore(&gic_data[i]);
			break;
		case CPU_CLUSTER_PM_ENTER:
			gic_dist_save(&gic_data[i]);
			break;
		case CPU_CLUSTER_PM_ENTER_FAILED:
		case CPU_CLUSTER_PM_EXIT:
			gic_dist_restore(&gic_data[i]);
			break;
		}
	}

	return NOTIFY_OK;
}

static struct notifier_block gic_notifier_block = {
	.notifier_call = gic_notifier,
};

static int gic_pm_init(struct gic_chip_data *gic)
{
	gic->saved_ppi_enable = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
					       sizeof(u32));
	if (WARN_ON(!gic->saved_ppi_enable))
		return -ENOMEM;

	gic->saved_ppi_active = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
					       sizeof(u32));
	if (WARN_ON(!gic->saved_ppi_active))
		goto free_ppi_enable;

	gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4,
					     sizeof(u32));
	if (WARN_ON(!gic->saved_ppi_conf))
		goto free_ppi_active;

	if (gic == &gic_data[0])
		cpu_pm_register_notifier(&gic_notifier_block);

	return 0;

free_ppi_active:
	free_percpu(gic->saved_ppi_active);
free_ppi_enable:
	free_percpu(gic->saved_ppi_enable);

	return -ENOMEM;
}
#else
static int gic_pm_init(struct gic_chip_data *gic)
{
	return 0;
}
#endif

#ifdef CONFIG_SMP
static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
{
	int cpu;
	unsigned long flags, map = 0;

	if (unlikely(nr_cpu_ids == 1)) {
		/* Only one CPU? let's do a self-IPI... */
		writel_relaxed(2 << 24 | irq,
			       gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
		return;
	}

	gic_lock_irqsave(flags);

	/* Convert our logical CPU mask into a physical one. */
	for_each_cpu(cpu, mask)
		map |= gic_cpu_map[cpu];

	/*
	 * Ensure that stores to Normal memory are visible to the
	 * other CPUs before they observe us issuing the IPI.
	 */
	dmb(ishst);

	/* this always happens on GIC0 */
	writel_relaxed(map << 16 | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);

	gic_unlock_irqrestore(flags);
}
#endif

#ifdef CONFIG_BL_SWITCHER
/*
 * gic_send_sgi - send a SGI directly to given CPU interface number
 *
 * cpu_id: the ID for the destination CPU interface
 * irq: the IPI number to send a SGI for
 */
void gic_send_sgi(unsigned int cpu_id, unsigned int irq)
{
	BUG_ON(cpu_id >= NR_GIC_CPU_IF);
	cpu_id = 1 << cpu_id;
	/* this always happens on GIC0 */
	writel_relaxed((cpu_id << 16) | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
}

/*
 * gic_get_cpu_id - get the CPU interface ID for the specified CPU
 *
 * @cpu: the logical CPU number to get the GIC ID for.
 *
 * Return the CPU interface ID for the given logical CPU number,
 * or -1 if the CPU number is too large or the interface ID is
 * unknown (more than one bit set).
 */
int gic_get_cpu_id(unsigned int cpu)
{
	unsigned int cpu_bit;

	if (cpu >= NR_GIC_CPU_IF)
		return -1;
	cpu_bit = gic_cpu_map[cpu];
	if (cpu_bit & (cpu_bit - 1))
		return -1;
	return __ffs(cpu_bit);
}

/*
 * gic_migrate_target - migrate IRQs to another CPU interface
 *
 * @new_cpu_id: the CPU target ID to migrate IRQs to
 *
 * Migrate all peripheral interrupts with a target matching the current CPU
 * to the interface corresponding to @new_cpu_id.  The CPU interface mapping
 * is also updated.  Targets to other CPU interfaces are unchanged.
 * This must be called with IRQs locally disabled.
 */
void gic_migrate_target(unsigned int new_cpu_id)
{
	unsigned int cur_cpu_id, gic_irqs, gic_nr = 0;
	void __iomem *dist_base;
	int i, ror_val, cpu = smp_processor_id();
	u32 val, cur_target_mask, active_mask;

	BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR);

	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
	if (!dist_base)
		return;
	gic_irqs = gic_data[gic_nr].gic_irqs;

	cur_cpu_id = __ffs(gic_cpu_map[cpu]);
	cur_target_mask = 0x01010101 << cur_cpu_id;
	ror_val = (cur_cpu_id - new_cpu_id) & 31;

	gic_lock();

	/* Update the target interface for this logical CPU */
	gic_cpu_map[cpu] = 1 << new_cpu_id;

	/*
	 * Find all the peripheral interrupts targeting the current
	 * CPU interface and migrate them to the new CPU interface.
	 * We skip DIST_TARGET 0 to 7 as they are read-only.
	 */
	for (i = 8; i < DIV_ROUND_UP(gic_irqs, 4); i++) {
		val = readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);
		active_mask = val & cur_target_mask;
		if (active_mask) {
			val &= ~active_mask;
			val |= ror32(active_mask, ror_val);
			writel_relaxed(val, dist_base + GIC_DIST_TARGET + i * 4);
		}
	}

	gic_unlock();

	/*
	 * Now let's migrate and clear any potential SGIs that might be
	 * pending for us (cur_cpu_id).  Since GIC_DIST_SGI_PENDING_SET
	 * is a banked register, we can only forward the SGI using
	 * GIC_DIST_SOFTINT.  The original SGI source is lost but Linux
	 * doesn't use that information anyway.
	 *
	 * For the same reason we do not adjust SGI source information
	 * for previously sent SGIs by us to other CPUs either.
	 */
	for (i = 0; i < 16; i += 4) {
		int j;
		val = readl_relaxed(dist_base + GIC_DIST_SGI_PENDING_SET + i);
		if (!val)
			continue;
		writel_relaxed(val, dist_base + GIC_DIST_SGI_PENDING_CLEAR + i);
		for (j = i; j < i + 4; j++) {
			if (val & 0xff)
				writel_relaxed((1 << (new_cpu_id + 16)) | j,
					       dist_base + GIC_DIST_SOFTINT);
			val >>= 8;
		}
	}
}

/*
 * gic_get_sgir_physaddr - get the physical address for the SGI register
 *
 * Return the physical address of the SGI register to be used
 * by some early assembly code when the kernel is not yet available.
 */
static unsigned long gic_dist_physaddr;

unsigned long gic_get_sgir_physaddr(void)
{
	if (!gic_dist_physaddr)
		return 0;
	return gic_dist_physaddr + GIC_DIST_SOFTINT;
}

static void __init gic_init_physaddr(struct device_node *node)
{
	struct resource res;
	if (of_address_to_resource(node, 0, &res) == 0) {
		gic_dist_physaddr = res.start;
		pr_info("GIC physical location is %#lx\n", gic_dist_physaddr);
	}
}

#else
#define gic_init_physaddr(node)  do { } while (0)
#endif

static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
			      irq_hw_number_t hw)
{
	struct gic_chip_data *gic = d->host_data;

	if (hw < 32) {
		irq_set_percpu_devid(irq);
		irq_domain_set_info(d, irq, hw, &gic->chip, d->host_data,
				    handle_percpu_devid_irq, NULL, NULL);
		irq_set_status_flags(irq, IRQ_NOAUTOEN);
	} else {
		irq_domain_set_info(d, irq, hw, &gic->chip, d->host_data,
				    handle_fasteoi_irq, NULL, NULL);
		irq_set_probe(irq);
	}
	return 0;
}

static void gic_irq_domain_unmap(struct irq_domain *d, unsigned int irq)
{
}

static int gic_irq_domain_translate(struct irq_domain *d,
				    struct irq_fwspec *fwspec,
				    unsigned long *hwirq,
				    unsigned int *type)
{
	if (is_of_node(fwspec->fwnode)) {
		if (fwspec->param_count < 3)
			return -EINVAL;

		/* Get the interrupt number and add 16 to skip over SGIs */
		*hwirq = fwspec->param[1] + 16;

		/*
		 * For SPIs, we need to add 16 more to get the GIC irq
		 * ID number
		 */
		if (!fwspec->param[0])
			*hwirq += 16;

		*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
		return 0;
	}

	if (is_fwnode_irqchip(fwspec->fwnode)) {
		if (fwspec->param_count != 2)
			return -EINVAL;

		*hwirq = fwspec->param[0];
		*type = fwspec->param[1];
		return 0;
	}

	return -EINVAL;
}

static int gic_starting_cpu(unsigned int cpu)
{
	gic_cpu_init(&gic_data[0]);
	return 0;
}

static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	int i, ret;
	irq_hw_number_t hwirq;
	unsigned int type = IRQ_TYPE_NONE;
	struct irq_fwspec *fwspec = arg;

	ret = gic_irq_domain_translate(domain, fwspec, &hwirq, &type);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++) {
		ret = gic_irq_domain_map(domain, virq + i, hwirq + i);
		if (ret)
			return ret;
	}

	return 0;
}

static const struct irq_domain_ops gic_irq_domain_hierarchy_ops = {
	.translate = gic_irq_domain_translate,
	.alloc = gic_irq_domain_alloc,
	.free = irq_domain_free_irqs_top,
};

static const struct irq_domain_ops gic_irq_domain_ops = {
	.map = gic_irq_domain_map,
	.unmap = gic_irq_domain_unmap,
};

static void gic_init_chip(struct gic_chip_data *gic, struct device *dev,
			  const char *name, bool use_eoimode1)
{
	/* Initialize irq_chip */
	gic->chip = gic_chip;
	gic->chip.name = name;
	gic->chip.parent_device = dev;

	if (use_eoimode1) {
		gic->chip.irq_mask = gic_eoimode1_mask_irq;
		gic->chip.irq_eoi = gic_eoimode1_eoi_irq;
		gic->chip.irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity;
	}

#ifdef CONFIG_SMP
	if (gic == &gic_data[0])
		gic->chip.irq_set_affinity = gic_set_affinity;
#endif
}

static int gic_init_bases(struct gic_chip_data *gic, int irq_start,
			  struct fwnode_handle *handle)
{
	irq_hw_number_t hwirq_base;
	int gic_irqs, irq_base, ret;

	if (IS_ENABLED(CONFIG_GIC_NON_BANKED) && gic->percpu_offset) {
		/* Franken-GIC without banked registers... */
		unsigned int cpu;

		gic->dist_base.percpu_base = alloc_percpu(void __iomem *);
		gic->cpu_base.percpu_base = alloc_percpu(void __iomem *);
		if (WARN_ON(!gic->dist_base.percpu_base ||
			    !gic->cpu_base.percpu_base)) {
			ret = -ENOMEM;
			goto error;
		}

		for_each_possible_cpu(cpu) {
			u32 mpidr = cpu_logical_map(cpu);
			u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
			unsigned long offset = gic->percpu_offset * core_id;
			*per_cpu_ptr(gic->dist_base.percpu_base, cpu) =
				gic->raw_dist_base + offset;
			*per_cpu_ptr(gic->cpu_base.percpu_base, cpu) =
				gic->raw_cpu_base + offset;
		}

		gic_set_base_accessor(gic, gic_get_percpu_base);
	} else {
		/* Normal, sane GIC... */
		WARN(gic->percpu_offset,
		     "GIC_NON_BANKED not enabled, ignoring %08x offset!",
		     gic->percpu_offset);
		gic->dist_base.common_base = gic->raw_dist_base;
		gic->cpu_base.common_base = gic->raw_cpu_base;
		gic_set_base_accessor(gic, gic_get_common_base);
	}

	/*
	 * Find out how many interrupts are supported.
	 * The GIC only supports up to 1020 interrupt sources.
	 */
	gic_irqs = readl_relaxed(gic_data_dist_base(gic) + GIC_DIST_CTR) & 0x1f;
	gic_irqs = (gic_irqs + 1) * 32;
	if (gic_irqs > 1020)
		gic_irqs = 1020;
	gic->gic_irqs = gic_irqs;

	if (handle) {		/* DT/ACPI */
		gic->domain = irq_domain_create_linear(handle, gic_irqs,
						       &gic_irq_domain_hierarchy_ops,
						       gic);
	} else {		/* Legacy support */
		/*
		 * For primary GICs, skip over SGIs.
		 * For secondary GICs, skip over PPIs, too.
		 */
		if (gic == &gic_data[0] && (irq_start & 31) > 0) {
			hwirq_base = 16;
			if (irq_start != -1)
				irq_start = (irq_start & ~31) + 16;
		} else {
			hwirq_base = 32;
		}

		gic_irqs -= hwirq_base; /* calculate # of irqs to allocate */

		irq_base = irq_alloc_descs(irq_start, 16, gic_irqs,
					   numa_node_id());
		if (irq_base < 0) {
			WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
			     irq_start);
			irq_base = irq_start;
		}

		gic->domain = irq_domain_add_legacy(NULL, gic_irqs, irq_base,
						    hwirq_base, &gic_irq_domain_ops, gic);
	}

	if (WARN_ON(!gic->domain)) {
		ret = -ENODEV;
		goto error;
	}

	gic_dist_init(gic);
	ret = gic_cpu_init(gic);
	if (ret)
		goto error;

	ret = gic_pm_init(gic);
	if (ret)
		goto error;

	return 0;

error:
	if (IS_ENABLED(CONFIG_GIC_NON_BANKED) && gic->percpu_offset) {
		free_percpu(gic->dist_base.percpu_base);
		free_percpu(gic->cpu_base.percpu_base);
	}

	return ret;
}

static int __init __gic_init_bases(struct gic_chip_data *gic,
				   int irq_start,
				   struct fwnode_handle *handle)
{
	char *name;
	int i, ret;

	if (WARN_ON(!gic || gic->domain))
		return -EINVAL;

	if (gic == &gic_data[0]) {
		/*
		 * Initialize the CPU interface map to all CPUs.
		 * It will be refined as each CPU probes its ID.
		 * This is only necessary for the primary GIC.
		 */
		for (i = 0; i < NR_GIC_CPU_IF; i++)
			gic_cpu_map[i] = 0xff;
#ifdef CONFIG_SMP
		set_smp_cross_call(gic_raise_softirq);
#endif
		cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING,
					  "irqchip/arm/gic:starting",
					  gic_starting_cpu, NULL);
		set_handle_irq(gic_handle_irq);
		if (static_key_true(&supports_deactivate))
			pr_info("GIC: Using split EOI/Deactivate mode\n");
	}

	if (static_key_true(&supports_deactivate) && gic == &gic_data[0]) {
		name = kasprintf(GFP_KERNEL, "GICv2");
		gic_init_chip(gic, NULL, name, true);
	} else {
		name = kasprintf(GFP_KERNEL, "GIC-%d", (int)(gic - &gic_data[0]));
		gic_init_chip(gic, NULL, name, false);
	}

	ret = gic_init_bases(gic, irq_start, handle);
	if (ret)
		kfree(name);

	return ret;
}

void __init gic_init(unsigned int gic_nr, int irq_start,
		     void __iomem *dist_base, void __iomem *cpu_base)
{
	struct gic_chip_data *gic;

	if (WARN_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR))
		return;

	/*
	 * Non-DT/ACPI systems won't run a hypervisor, so let's not
	 * bother with these...
	 */
	static_key_slow_dec(&supports_deactivate);

	gic = &gic_data[gic_nr];
	gic->raw_dist_base = dist_base;
	gic->raw_cpu_base = cpu_base;

	__gic_init_bases(gic, irq_start, NULL);
}

static void gic_teardown(struct gic_chip_data *gic)
{
	if (WARN_ON(!gic))
		return;

	if (gic->raw_dist_base)
		iounmap(gic->raw_dist_base);
	if (gic->raw_cpu_base)
		iounmap(gic->raw_cpu_base);
}

#ifdef CONFIG_OF
static int gic_cnt __initdata;

static bool gic_check_eoimode(struct device_node *node, void __iomem **base)
{
	struct resource cpuif_res;

	of_address_to_resource(node, 1, &cpuif_res);

	if (!is_hyp_mode_available())
		return false;
	if (resource_size(&cpuif_res) < SZ_8K)
		return false;
	if (resource_size(&cpuif_res) == SZ_128K) {
		u32 val_low, val_high;

		/*
		 * Verify that we have the first 4kB of a GIC400
		 * aliased over the first 64kB by checking the
		 * GICC_IIDR register on both ends.
		 */
		val_low = readl_relaxed(*base + GIC_CPU_IDENT);
		val_high = readl_relaxed(*base + GIC_CPU_IDENT + 0xf000);
		if ((val_low & 0xffff0fff) != 0x0202043B ||
		    val_low != val_high)
			return false;

		/*
		 * Move the base up by 60kB, so that we have an 8kB
		 * contiguous region, which allows us to use GICC_DIR
		 * at its normal offset. Please pass me that bucket.
		 */
		*base += 0xf000;
		cpuif_res.start += 0xf000;
		pr_warn("GIC: Adjusting CPU interface base to %pa\n",
			&cpuif_res.start);
	}

	return true;
}

static int gic_of_setup(struct gic_chip_data *gic, struct device_node *node)
{
	if (!gic || !node)
		return -EINVAL;

	gic->raw_dist_base = of_iomap(node, 0);
	if (WARN(!gic->raw_dist_base, "unable to map gic dist registers\n"))
		goto error;

	gic->raw_cpu_base = of_iomap(node, 1);
	if (WARN(!gic->raw_cpu_base, "unable to map gic cpu registers\n"))
		goto error;

	if (of_property_read_u32(node, "cpu-offset", &gic->percpu_offset))
		gic->percpu_offset = 0;

	return 0;

error:
	gic_teardown(gic);

	return -ENOMEM;
}

int gic_of_init_child(struct device *dev, struct gic_chip_data **gic, int irq)
{
	int ret;

	if (!dev || !dev->of_node || !gic || !irq)
		return -EINVAL;

	*gic = devm_kzalloc(dev, sizeof(**gic), GFP_KERNEL);
	if (!*gic)
		return -ENOMEM;

	gic_init_chip(*gic, dev, dev->of_node->name, false);

	ret = gic_of_setup(*gic, dev->of_node);
	if (ret)
		return ret;

	ret = gic_init_bases(*gic, -1, &dev->of_node->fwnode);
	if (ret) {
		gic_teardown(*gic);
		return ret;
	}

	irq_set_chained_handler_and_data(irq, gic_handle_cascade_irq, *gic);

	return 0;
}

static void __init gic_of_setup_kvm_info(struct device_node *node)
{
	int ret;
	struct resource *vctrl_res = &gic_v2_kvm_info.vctrl;
	struct resource *vcpu_res = &gic_v2_kvm_info.vcpu;

	gic_v2_kvm_info.type = GIC_V2;

	gic_v2_kvm_info.maint_irq = irq_of_parse_and_map(node, 0);
	if (!gic_v2_kvm_info.maint_irq)
		return;

	ret = of_address_to_resource(node, 2, vctrl_res);
	if (ret)
		return;

	ret = of_address_to_resource(node, 3, vcpu_res);
	if (ret)
		return;

	gic_set_kvm_info(&gic_v2_kvm_info);
}

int __init
gic_of_init(struct device_node *node, struct device_node *parent)
{
	struct gic_chip_data *gic;
	int irq, ret;

	if (WARN_ON(!node))
		return -ENODEV;

	if (WARN_ON(gic_cnt >= CONFIG_ARM_GIC_MAX_NR))
		return -EINVAL;

	gic = &gic_data[gic_cnt];

	ret = gic_of_setup(gic, node);
	if (ret)
		return ret;

	/*
	 * Disable split EOI/Deactivate if either HYP is not available
	 * or the CPU interface is too small.
	 */
	if (gic_cnt == 0 && !gic_check_eoimode(node, &gic->raw_cpu_base))
		static_key_slow_dec(&supports_deactivate);

	ret = __gic_init_bases(gic, -1, &node->fwnode);
	if (ret) {
		gic_teardown(gic);
		return ret;
	}

	if (!gic_cnt) {
		gic_init_physaddr(node);
		gic_of_setup_kvm_info(node);
	}

	if (parent) {
		irq = irq_of_parse_and_map(node, 0);
		gic_cascade_irq(gic_cnt, irq);
	}

	if (IS_ENABLED(CONFIG_ARM_GIC_V2M))
		gicv2m_init(&node->fwnode, gic_data[gic_cnt].domain);

	gic_cnt++;
	return 0;
}
IRQCHIP_DECLARE(gic_400, "arm,gic-400", gic_of_init);
IRQCHIP_DECLARE(arm11mp_gic, "arm,arm11mp-gic", gic_of_init);
IRQCHIP_DECLARE(arm1176jzf_dc_gic, "arm,arm1176jzf-devchip-gic", gic_of_init);
IRQCHIP_DECLARE(cortex_a15_gic, "arm,cortex-a15-gic", gic_of_init);
IRQCHIP_DECLARE(cortex_a9_gic, "arm,cortex-a9-gic", gic_of_init);
IRQCHIP_DECLARE(cortex_a7_gic, "arm,cortex-a7-gic", gic_of_init);
IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init);
IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init);
IRQCHIP_DECLARE(pl390, "arm,pl390", gic_of_init);
#else
int gic_of_init_child(struct device *dev, struct gic_chip_data **gic, int irq)
{
	return -ENOTSUPP;
}
#endif

#ifdef CONFIG_ACPI
static struct
{
	phys_addr_t cpu_phys_base;
	u32 maint_irq;
	int maint_irq_mode;
	phys_addr_t vctrl_base;
	phys_addr_t vcpu_base;
} acpi_data __initdata;

static int __init
gic_acpi_parse_madt_cpu(struct acpi_subtable_header *header,
			const unsigned long end)
{
	struct acpi_madt_generic_interrupt *processor;
	phys_addr_t gic_cpu_base;
	static int cpu_base_assigned;

	processor = (struct acpi_madt_generic_interrupt *)header;

	if (BAD_MADT_GICC_ENTRY(processor, end))
		return -EINVAL;

	/*
	 * There is no support for non-banked GICv1/2 registers in the ACPI
	 * spec. All CPU interface addresses have to be the same.
	 */
	gic_cpu_base = processor->base_address;
	if (cpu_base_assigned && gic_cpu_base != acpi_data.cpu_phys_base)
		return -EINVAL;

	acpi_data.cpu_phys_base = gic_cpu_base;
	acpi_data.maint_irq = processor->vgic_interrupt;
	acpi_data.maint_irq_mode = (processor->flags & ACPI_MADT_VGIC_IRQ_MODE) ?
				    ACPI_EDGE_SENSITIVE : ACPI_LEVEL_SENSITIVE;
	acpi_data.vctrl_base = processor->gich_base_address;
	acpi_data.vcpu_base = processor->gicv_base_address;

	cpu_base_assigned = 1;
	return 0;
}

/* The things you have to do to just *count* something... */
static int __init acpi_dummy_func(struct acpi_subtable_header *header,
				  const unsigned long end)
{
	return 0;
}

static bool __init acpi_gic_redist_is_present(void)
{
	return acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR,
				     acpi_dummy_func, 0) > 0;
}

static bool __init gic_validate_dist(struct acpi_subtable_header *header,
				     struct acpi_probe_entry *ape)
{
	struct acpi_madt_generic_distributor *dist;
	dist = (struct acpi_madt_generic_distributor *)header;

	return (dist->version == ape->driver_data &&
		(dist->version != ACPI_MADT_GIC_VERSION_NONE ||
		 !acpi_gic_redist_is_present()));
}

#define ACPI_GICV2_DIST_MEM_SIZE	(SZ_4K)
#define ACPI_GIC_CPU_IF_MEM_SIZE	(SZ_8K)
#define ACPI_GICV2_VCTRL_MEM_SIZE	(SZ_4K)
#define ACPI_GICV2_VCPU_MEM_SIZE	(SZ_8K)

static void __init gic_acpi_setup_kvm_info(void)
{
	int irq;
	struct resource *vctrl_res = &gic_v2_kvm_info.vctrl;
	struct resource *vcpu_res = &gic_v2_kvm_info.vcpu;

	gic_v2_kvm_info.type = GIC_V2;

	if (!acpi_data.vctrl_base)
		return;

	vctrl_res->flags = IORESOURCE_MEM;
	vctrl_res->start = acpi_data.vctrl_base;
	vctrl_res->end = vctrl_res->start + ACPI_GICV2_VCTRL_MEM_SIZE - 1;

	if (!acpi_data.vcpu_base)
		return;

	vcpu_res->flags = IORESOURCE_MEM;
	vcpu_res->start = acpi_data.vcpu_base;
	vcpu_res->end = vcpu_res->start + ACPI_GICV2_VCPU_MEM_SIZE - 1;

	irq = acpi_register_gsi(NULL, acpi_data.maint_irq,
				acpi_data.maint_irq_mode,
				ACPI_ACTIVE_HIGH);
	if (irq <= 0)
		return;

	gic_v2_kvm_info.maint_irq = irq;

	gic_set_kvm_info(&gic_v2_kvm_info);
}

static int __init gic_v2_acpi_init(struct acpi_subtable_header *header,
				   const unsigned long end)
{
	struct acpi_madt_generic_distributor *dist;
	struct fwnode_handle *domain_handle;
	struct gic_chip_data *gic = &gic_data[0];
	int count, ret;

	/* Collect CPU base addresses */
	count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
				      gic_acpi_parse_madt_cpu, 0);
	if (count <= 0) {
		pr_err("No valid GICC entries exist\n");
		return -EINVAL;
	}

	gic->raw_cpu_base = ioremap(acpi_data.cpu_phys_base, ACPI_GIC_CPU_IF_MEM_SIZE);
	if (!gic->raw_cpu_base) {
		pr_err("Unable to map GICC registers\n");
		return -ENOMEM;
	}

	dist = (struct acpi_madt_generic_distributor *)header;
	gic->raw_dist_base = ioremap(dist->base_address,
				     ACPI_GICV2_DIST_MEM_SIZE);
	if (!gic->raw_dist_base) {
		pr_err("Unable to map GICD registers\n");
		gic_teardown(gic);
		return -ENOMEM;
	}

	/*
	 * Disable split EOI/Deactivate if HYP is not available. ACPI
	 * guarantees that we'll always have a GICv2, so the CPU
	 * interface will always be the right size.
	 */
	if (!is_hyp_mode_available())
		static_key_slow_dec(&supports_deactivate);

	/*
	 * Initialize GIC instance zero (no multi-GIC support).
	 */
	domain_handle = irq_domain_alloc_fwnode(gic->raw_dist_base);
	if (!domain_handle) {
		pr_err("Unable to allocate domain handle\n");
		gic_teardown(gic);
		return -ENOMEM;
	}

	ret = __gic_init_bases(gic, -1, domain_handle);
	if (ret) {
		pr_err("Failed to initialise GIC\n");
		irq_domain_free_fwnode(domain_handle);
		gic_teardown(gic);
		return ret;
	}

	acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, domain_handle);

	if (IS_ENABLED(CONFIG_ARM_GIC_V2M))
		gicv2m_init(NULL, gic_data[0].domain);

	gic_acpi_setup_kvm_info();

	return 0;
}
IRQCHIP_ACPI_DECLARE(gic_v2, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
		     gic_validate_dist, ACPI_MADT_GIC_VERSION_V2,
		     gic_v2_acpi_init);
IRQCHIP_ACPI_DECLARE(gic_v2_maybe, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
		     gic_validate_dist, ACPI_MADT_GIC_VERSION_NONE,
		     gic_v2_acpi_init);
#endif