/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2012 MIPS Technologies, Inc.  All rights reserved.
 */

#define pr_fmt(fmt) "irq-mips-gic: " fmt

#include <linux/bitmap.h>
#include <linux/clocksource.h>
#include <linux/cpuhotplug.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/of_address.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/smp.h>

#include <asm/mips-cps.h>
#include <asm/setup.h>
#include <asm/traps.h>

#include <dt-bindings/interrupt-controller/mips-gic.h>

#define GIC_MAX_INTRS		256
#define GIC_MAX_LONGS		BITS_TO_LONGS(GIC_MAX_INTRS)

/* Add 2 to convert GIC CPU pin to core interrupt */
#define GIC_CPU_PIN_OFFSET	2

/* Mapped interrupt to pin X, then GIC will generate the vector (X+1). */
#define GIC_PIN_TO_VEC_OFFSET	1

/* Convert between local/shared IRQ number and GIC HW IRQ number. */
#define GIC_LOCAL_HWIRQ_BASE	0
#define GIC_LOCAL_TO_HWIRQ(x)	(GIC_LOCAL_HWIRQ_BASE + (x))
#define GIC_HWIRQ_TO_LOCAL(x)	((x) - GIC_LOCAL_HWIRQ_BASE)
#define GIC_SHARED_HWIRQ_BASE	GIC_NUM_LOCAL_INTRS
#define GIC_SHARED_TO_HWIRQ(x)	(GIC_SHARED_HWIRQ_BASE + (x))
#define GIC_HWIRQ_TO_SHARED(x)	((x) - GIC_SHARED_HWIRQ_BASE)

void __iomem *mips_gic_base;

DEFINE_PER_CPU_READ_MOSTLY(unsigned long[GIC_MAX_LONGS], pcpu_masks);

static DEFINE_SPINLOCK(gic_lock);
static struct irq_domain *gic_irq_domain;
static struct irq_domain *gic_ipi_domain;
static int gic_shared_intrs;
static unsigned int gic_cpu_pin;
static unsigned int timer_cpu_pin;
static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller;
static DECLARE_BITMAP(ipi_resrv, GIC_MAX_INTRS);
static DECLARE_BITMAP(ipi_available, GIC_MAX_INTRS);

static struct gic_all_vpes_chip_data {
	u32	map;
	bool	mask;
} gic_all_vpes_chip_data[GIC_NUM_LOCAL_INTRS];

static void gic_clear_pcpu_masks(unsigned int intr)
{
	unsigned int i;

	/* Clear the interrupt's bit in all pcpu_masks */
	for_each_possible_cpu(i)
		clear_bit(intr, per_cpu_ptr(pcpu_masks, i));
}
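/*
 * Worked example of the hwirq layout built by the conversion macros above,
 * assuming the usual GIC_NUM_LOCAL_INTRS value of 7 from the GIC headers:
 * local interrupts occupy hwirqs 0..6, so GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_TIMER)
 * yields hwirq 2, while shared interrupts start at hwirq 7, so shared
 * interrupt 3 becomes GIC_SHARED_TO_HWIRQ(3) == 10.
 */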
static bool gic_local_irq_is_routable(int intr)
{
	u32 vpe_ctl;

	/* All local interrupts are routable in EIC mode */
	if (cpu_has_veic)
		return true;

	vpe_ctl = read_gic_vl_ctl();
	switch (intr) {
	case GIC_LOCAL_INT_TIMER:
		return vpe_ctl & GIC_VX_CTL_TIMER_ROUTABLE;
	case GIC_LOCAL_INT_PERFCTR:
		return vpe_ctl & GIC_VX_CTL_PERFCNT_ROUTABLE;
	case GIC_LOCAL_INT_FDC:
		return vpe_ctl & GIC_VX_CTL_FDC_ROUTABLE;
	case GIC_LOCAL_INT_SWINT0:
	case GIC_LOCAL_INT_SWINT1:
		return vpe_ctl & GIC_VX_CTL_SWINT_ROUTABLE;
	default:
		return true;
	}
}

static void gic_bind_eic_interrupt(int irq, int set)
{
	/* Convert irq vector # to hw int # */
	irq -= GIC_PIN_TO_VEC_OFFSET;

	/* Set irq to use shadow set */
	write_gic_vl_eic_shadow_set(irq, set);
}

static void gic_send_ipi(struct irq_data *d, unsigned int cpu)
{
	irq_hw_number_t hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(d));

	write_gic_wedge(GIC_WEDGE_RW | hwirq);
}

int gic_get_c0_compare_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER))
		return MIPS_CPU_IRQ_BASE + cp0_compare_irq;
	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_TIMER));
}

int gic_get_c0_perfcount_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_PERFCTR)) {
		/* Is the performance counter shared with the timer? */
		if (cp0_perfcount_irq < 0)
			return -1;
		return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
	}
	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_PERFCTR));
}

int gic_get_c0_fdc_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_FDC)) {
		/* Is the FDC IRQ even present? */
		if (cp0_fdc_irq < 0)
			return -1;
		return MIPS_CPU_IRQ_BASE + cp0_fdc_irq;
	}

	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_FDC));
}

static void gic_handle_shared_int(bool chained)
{
	unsigned int intr, virq;
	unsigned long *pcpu_mask;
	DECLARE_BITMAP(pending, GIC_MAX_INTRS);

	/* Get per-cpu bitmaps */
	pcpu_mask = this_cpu_ptr(pcpu_masks);

	if (mips_cm_is64)
		__ioread64_copy(pending, addr_gic_pend(),
				DIV_ROUND_UP(gic_shared_intrs, 64));
	else
		__ioread32_copy(pending, addr_gic_pend(),
				DIV_ROUND_UP(gic_shared_intrs, 32));

	bitmap_and(pending, pending, pcpu_mask, gic_shared_intrs);

	for_each_set_bit(intr, pending, gic_shared_intrs) {
		virq = irq_linear_revmap(gic_irq_domain,
					 GIC_SHARED_TO_HWIRQ(intr));
		if (chained)
			generic_handle_irq(virq);
		else
			do_IRQ(virq);
	}
}

static void gic_mask_irq(struct irq_data *d)
{
	unsigned int intr = GIC_HWIRQ_TO_SHARED(d->hwirq);

	write_gic_rmask(intr);
	gic_clear_pcpu_masks(intr);
}

static void gic_unmask_irq(struct irq_data *d)
{
	unsigned int intr = GIC_HWIRQ_TO_SHARED(d->hwirq);
	unsigned int cpu;

	write_gic_smask(intr);

	gic_clear_pcpu_masks(intr);
	cpu = cpumask_first(irq_data_get_effective_affinity_mask(d));
	set_bit(intr, per_cpu_ptr(pcpu_masks, cpu));
}

static void gic_ack_irq(struct irq_data *d)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);

	write_gic_wedge(irq);
}
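/*
 * Summary of how gic_set_type() below maps a Linux trigger type onto the
 * GIC's per-interrupt polarity/trigger/dual-edge configuration (derived
 * from the switch statement itself):
 *
 *	IRQ_TYPE_EDGE_FALLING	falling polarity, edge trigger, single edge
 *	IRQ_TYPE_EDGE_RISING	rising polarity,  edge trigger, single edge
 *	IRQ_TYPE_EDGE_BOTH	polarity ignored, edge trigger, dual edge
 *	IRQ_TYPE_LEVEL_LOW	active low,       level trigger
 *	IRQ_TYPE_LEVEL_HIGH	active high,      level trigger (default)
 *
 * Edge types additionally switch the irq to the edge chip & flow handler,
 * level types to the level chip & flow handler.
 */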
static int gic_set_type(struct irq_data *d, unsigned int type)
{
	unsigned int irq, pol, trig, dual;
	unsigned long flags;

	irq = GIC_HWIRQ_TO_SHARED(d->hwirq);

	spin_lock_irqsave(&gic_lock, flags);
	switch (type & IRQ_TYPE_SENSE_MASK) {
	case IRQ_TYPE_EDGE_FALLING:
		pol = GIC_POL_FALLING_EDGE;
		trig = GIC_TRIG_EDGE;
		dual = GIC_DUAL_SINGLE;
		break;
	case IRQ_TYPE_EDGE_RISING:
		pol = GIC_POL_RISING_EDGE;
		trig = GIC_TRIG_EDGE;
		dual = GIC_DUAL_SINGLE;
		break;
	case IRQ_TYPE_EDGE_BOTH:
		pol = 0; /* Doesn't matter */
		trig = GIC_TRIG_EDGE;
		dual = GIC_DUAL_DUAL;
		break;
	case IRQ_TYPE_LEVEL_LOW:
		pol = GIC_POL_ACTIVE_LOW;
		trig = GIC_TRIG_LEVEL;
		dual = GIC_DUAL_SINGLE;
		break;
	case IRQ_TYPE_LEVEL_HIGH:
	default:
		pol = GIC_POL_ACTIVE_HIGH;
		trig = GIC_TRIG_LEVEL;
		dual = GIC_DUAL_SINGLE;
		break;
	}

	change_gic_pol(irq, pol);
	change_gic_trig(irq, trig);
	change_gic_dual(irq, dual);

	if (trig == GIC_TRIG_EDGE)
		irq_set_chip_handler_name_locked(d, &gic_edge_irq_controller,
						 handle_edge_irq, NULL);
	else
		irq_set_chip_handler_name_locked(d, &gic_level_irq_controller,
						 handle_level_irq, NULL);
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}

#ifdef CONFIG_SMP
static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
			    bool force)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
	unsigned long flags;
	unsigned int cpu;

	cpu = cpumask_first_and(cpumask, cpu_online_mask);
	if (cpu >= NR_CPUS)
		return -EINVAL;

	/* Assumption : cpumask refers to a single CPU */
	spin_lock_irqsave(&gic_lock, flags);

	/* Re-route this IRQ */
	write_gic_map_vp(irq, BIT(mips_cm_vp_id(cpu)));

	/* Update the pcpu_masks */
	gic_clear_pcpu_masks(irq);
	if (read_gic_mask(irq))
		set_bit(irq, per_cpu_ptr(pcpu_masks, cpu));

	irq_data_update_effective_affinity(d, cpumask_of(cpu));
	spin_unlock_irqrestore(&gic_lock, flags);

	return IRQ_SET_MASK_OK;
}
#endif

static struct irq_chip gic_level_irq_controller = {
	.name			= "MIPS GIC",
	.irq_mask		= gic_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_set_type		= gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	= gic_set_affinity,
#endif
};

static struct irq_chip gic_edge_irq_controller = {
	.name			= "MIPS GIC",
	.irq_ack		= gic_ack_irq,
	.irq_mask		= gic_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_set_type		= gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	= gic_set_affinity,
#endif
	.ipi_send_single	= gic_send_ipi,
};

static void gic_handle_local_int(bool chained)
{
	unsigned long pending, masked;
	unsigned int intr, virq;

	pending = read_gic_vl_pend();
	masked = read_gic_vl_mask();

	bitmap_and(&pending, &pending, &masked, GIC_NUM_LOCAL_INTRS);

	for_each_set_bit(intr, &pending, GIC_NUM_LOCAL_INTRS) {
		virq = irq_linear_revmap(gic_irq_domain,
					 GIC_LOCAL_TO_HWIRQ(intr));
		if (chained)
			generic_handle_irq(virq);
		else
			do_IRQ(virq);
	}
}

static void gic_mask_local_irq(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);

	write_gic_vl_rmask(BIT(intr));
}

static void gic_unmask_local_irq(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);

	write_gic_vl_smask(BIT(intr));
}

static struct irq_chip gic_local_irq_controller = {
	.name			= "MIPS GIC Local",
	.irq_mask		= gic_mask_local_irq,
	.irq_unmask		= gic_unmask_local_irq,
};
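/*
 * The "all VPEs" variants below apply local interrupt mask changes to every
 * online VP via the GIC's "other" register view, rather than only to the
 * local VP. The map/mask state cached in struct gic_all_vpes_chip_data is
 * what allows gic_all_vpes_irq_cpu_online() to replay the configuration on
 * a CPU that is brought online later.
 */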
static void gic_mask_local_irq_all_vpes(struct irq_data *d)
{
	struct gic_all_vpes_chip_data *cd;
	unsigned long flags;
	int intr, cpu;

	intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	cd = irq_data_get_irq_chip_data(d);
	cd->mask = false;

	spin_lock_irqsave(&gic_lock, flags);
	for_each_online_cpu(cpu) {
		write_gic_vl_other(mips_cm_vp_id(cpu));
		write_gic_vo_rmask(BIT(intr));
	}
	spin_unlock_irqrestore(&gic_lock, flags);
}

static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
{
	struct gic_all_vpes_chip_data *cd;
	unsigned long flags;
	int intr, cpu;

	intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	cd = irq_data_get_irq_chip_data(d);
	cd->mask = true;

	spin_lock_irqsave(&gic_lock, flags);
	for_each_online_cpu(cpu) {
		write_gic_vl_other(mips_cm_vp_id(cpu));
		write_gic_vo_smask(BIT(intr));
	}
	spin_unlock_irqrestore(&gic_lock, flags);
}

static void gic_all_vpes_irq_cpu_online(struct irq_data *d)
{
	struct gic_all_vpes_chip_data *cd;
	unsigned int intr;

	intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	cd = irq_data_get_irq_chip_data(d);

	write_gic_vl_map(mips_gic_vx_map_reg(intr), cd->map);
	if (cd->mask)
		write_gic_vl_smask(BIT(intr));
}

static struct irq_chip gic_all_vpes_local_irq_controller = {
	.name			= "MIPS GIC Local",
	.irq_mask		= gic_mask_local_irq_all_vpes,
	.irq_unmask		= gic_unmask_local_irq_all_vpes,
	.irq_cpu_online		= gic_all_vpes_irq_cpu_online,
};

static void __gic_irq_dispatch(void)
{
	gic_handle_local_int(false);
	gic_handle_shared_int(false);
}

static void gic_irq_dispatch(struct irq_desc *desc)
{
	gic_handle_local_int(true);
	gic_handle_shared_int(true);
}

static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
				     irq_hw_number_t hw, unsigned int cpu)
{
	int intr = GIC_HWIRQ_TO_SHARED(hw);
	struct irq_data *data;
	unsigned long flags;

	data = irq_get_irq_data(virq);

	spin_lock_irqsave(&gic_lock, flags);
	write_gic_map_pin(intr, GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin);
	write_gic_map_vp(intr, BIT(mips_cm_vp_id(cpu)));
	irq_data_update_effective_affinity(data, cpumask_of(cpu));
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}

static int gic_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
				const u32 *intspec, unsigned int intsize,
				irq_hw_number_t *out_hwirq,
				unsigned int *out_type)
{
	if (intsize != 3)
		return -EINVAL;

	if (intspec[0] == GIC_SHARED)
		*out_hwirq = GIC_SHARED_TO_HWIRQ(intspec[1]);
	else if (intspec[0] == GIC_LOCAL)
		*out_hwirq = GIC_LOCAL_TO_HWIRQ(intspec[1]);
	else
		return -EINVAL;
	*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;

	return 0;
}
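/*
 * For reference, the three-cell specifier decoded by gic_irq_domain_xlate()
 * above is the one described in the mti,gic device tree binding: the first
 * cell selects GIC_SHARED or GIC_LOCAL, the second the interrupt number
 * within that space, the third the trigger type. A shared interrupt might,
 * for instance, be requested as:
 *
 *	interrupts = <GIC_SHARED 24 IRQ_TYPE_LEVEL_HIGH>;
 */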
static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
			      irq_hw_number_t hwirq)
{
	struct gic_all_vpes_chip_data *cd;
	unsigned long flags;
	unsigned int intr;
	int err, cpu;
	u32 map;

	if (hwirq >= GIC_SHARED_HWIRQ_BASE) {
		/* verify that shared irqs don't conflict with an IPI irq */
		if (test_bit(GIC_HWIRQ_TO_SHARED(hwirq), ipi_resrv))
			return -EBUSY;

		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
						    &gic_level_irq_controller,
						    NULL);
		if (err)
			return err;

		irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq)));
		return gic_shared_irq_domain_map(d, virq, hwirq, 0);
	}

	intr = GIC_HWIRQ_TO_LOCAL(hwirq);
	map = GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin;

	switch (intr) {
	case GIC_LOCAL_INT_TIMER:
		/* CONFIG_MIPS_CMP workaround (see __gic_init) */
		map = GIC_MAP_PIN_MAP_TO_PIN | timer_cpu_pin;
		/* fall-through */
	case GIC_LOCAL_INT_PERFCTR:
	case GIC_LOCAL_INT_FDC:
		/*
		 * HACK: These are all really percpu interrupts, but
		 * the rest of the MIPS kernel code does not use the
		 * percpu IRQ API for them.
		 */
		cd = &gic_all_vpes_chip_data[intr];
		cd->map = map;
		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
						    &gic_all_vpes_local_irq_controller,
						    cd);
		if (err)
			return err;

		irq_set_handler(virq, handle_percpu_irq);
		break;

	default:
		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
						    &gic_local_irq_controller,
						    NULL);
		if (err)
			return err;

		irq_set_handler(virq, handle_percpu_devid_irq);
		irq_set_percpu_devid(virq);
		break;
	}

	if (!gic_local_irq_is_routable(intr))
		return -EPERM;

	spin_lock_irqsave(&gic_lock, flags);
	for_each_online_cpu(cpu) {
		write_gic_vl_other(mips_cm_vp_id(cpu));
		write_gic_vo_map(mips_gic_vx_map_reg(intr), map);
	}
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}

static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	struct irq_fwspec *fwspec = arg;
	irq_hw_number_t hwirq;

	if (fwspec->param[0] == GIC_SHARED)
		hwirq = GIC_SHARED_TO_HWIRQ(fwspec->param[1]);
	else
		hwirq = GIC_LOCAL_TO_HWIRQ(fwspec->param[1]);

	return gic_irq_domain_map(d, virq, hwirq);
}

static void gic_irq_domain_free(struct irq_domain *d, unsigned int virq,
				unsigned int nr_irqs)
{
}

static const struct irq_domain_ops gic_irq_domain_ops = {
	.xlate = gic_irq_domain_xlate,
	.alloc = gic_irq_domain_alloc,
	.free = gic_irq_domain_free,
	.map = gic_irq_domain_map,
};
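/*
 * IPI handling: hwirqs for IPIs are carved out of the shared interrupt
 * range reserved in ipi_resrv (see gic_of_init()). gic_ipi_domain_alloc()
 * below claims one shared interrupt per CPU in the requested mask and
 * routes each one to its target CPU, so sending an IPI later is a single
 * write of that CPU's hwirq to the WEDGE register (see gic_send_ipi()).
 */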
static int gic_ipi_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
				const u32 *intspec, unsigned int intsize,
				irq_hw_number_t *out_hwirq,
				unsigned int *out_type)
{
	/*
	 * There's nothing to translate here. hwirq is dynamically allocated and
	 * the irq type is always edge triggered.
	 */
	*out_hwirq = 0;
	*out_type = IRQ_TYPE_EDGE_RISING;

	return 0;
}

static int gic_ipi_domain_alloc(struct irq_domain *d, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	struct cpumask *ipimask = arg;
	irq_hw_number_t hwirq, base_hwirq;
	int cpu, ret, i;

	base_hwirq = find_first_bit(ipi_available, gic_shared_intrs);
	if (base_hwirq == gic_shared_intrs)
		return -ENOMEM;

	/* check that we have enough space */
	for (i = base_hwirq; i < base_hwirq + nr_irqs; i++) {
		if (!test_bit(i, ipi_available))
			return -EBUSY;
	}
	bitmap_clear(ipi_available, base_hwirq, nr_irqs);

	/* map the hwirq for each cpu consecutively */
	i = 0;
	for_each_cpu(cpu, ipimask) {
		hwirq = GIC_SHARED_TO_HWIRQ(base_hwirq + i);

		ret = irq_domain_set_hwirq_and_chip(d, virq + i, hwirq,
						    &gic_edge_irq_controller,
						    NULL);
		if (ret)
			goto error;

		ret = irq_domain_set_hwirq_and_chip(d->parent, virq + i, hwirq,
						    &gic_edge_irq_controller,
						    NULL);
		if (ret)
			goto error;

		ret = irq_set_irq_type(virq + i, IRQ_TYPE_EDGE_RISING);
		if (ret)
			goto error;

		ret = gic_shared_irq_domain_map(d, virq + i, hwirq, cpu);
		if (ret)
			goto error;

		i++;
	}

	return 0;
error:
	bitmap_set(ipi_available, base_hwirq, nr_irqs);
	return ret;
}

static void gic_ipi_domain_free(struct irq_domain *d, unsigned int virq,
				unsigned int nr_irqs)
{
	irq_hw_number_t base_hwirq;
	struct irq_data *data;

	data = irq_get_irq_data(virq);
	if (!data)
		return;

	base_hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(data));
	bitmap_set(ipi_available, base_hwirq, nr_irqs);
}

static int gic_ipi_domain_match(struct irq_domain *d, struct device_node *node,
				enum irq_domain_bus_token bus_token)
{
	bool is_ipi;

	switch (bus_token) {
	case DOMAIN_BUS_IPI:
		is_ipi = d->bus_token == bus_token;
		return (!node || to_of_node(d->fwnode) == node) && is_ipi;
	default:
		return 0;
	}
}

static const struct irq_domain_ops gic_ipi_domain_ops = {
	.xlate = gic_ipi_domain_xlate,
	.alloc = gic_ipi_domain_alloc,
	.free = gic_ipi_domain_free,
	.match = gic_ipi_domain_match,
};

static int gic_cpu_startup(unsigned int cpu)
{
	/* Enable or disable EIC */
	change_gic_vl_ctl(GIC_VX_CTL_EIC,
			  cpu_has_veic ? GIC_VX_CTL_EIC : 0);

	/* Clear all local IRQ masks (ie. disable all local interrupts) */
	write_gic_vl_rmask(~0);

	/* Invoke irq_cpu_online callbacks to enable desired interrupts */
	irq_cpu_online();

	return 0;
}
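/*
 * gic_of_init() below avoids CPU interrupt vectors that the platform has
 * reserved. The two software interrupt vectors (C_SW0/C_SW1) are always
 * reserved for IPIs; a board can reserve further vectors in its device
 * tree, for example with:
 *
 *	mti,reserved-cpu-vectors = <7>;
 */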
static int __init gic_of_init(struct device_node *node,
			      struct device_node *parent)
{
	unsigned int cpu_vec, i, gicconfig, v[2], num_ipis;
	unsigned long reserved;
	phys_addr_t gic_base;
	struct resource res;
	size_t gic_len;

	/* Find the first available CPU vector. */
	i = 0;
	reserved = (C_SW0 | C_SW1) >> __ffs(C_SW0);
	while (!of_property_read_u32_index(node, "mti,reserved-cpu-vectors",
					   i++, &cpu_vec))
		reserved |= BIT(cpu_vec);

	cpu_vec = find_first_zero_bit(&reserved, hweight_long(ST0_IM));
	if (cpu_vec == hweight_long(ST0_IM)) {
		pr_err("No CPU vectors available\n");
		return -ENODEV;
	}

	if (of_address_to_resource(node, 0, &res)) {
		/*
		 * Probe the CM for the GIC base address if not specified
		 * in the device-tree.
		 */
		if (mips_cm_present()) {
			gic_base = read_gcr_gic_base() &
				~CM_GCR_GIC_BASE_GICEN;
			gic_len = 0x20000;
			pr_warn("Using inherited base address %pa\n",
				&gic_base);
		} else {
			pr_err("Failed to get memory range\n");
			return -ENODEV;
		}
	} else {
		gic_base = res.start;
		gic_len = resource_size(&res);
	}

	if (mips_cm_present()) {
		write_gcr_gic_base(gic_base | CM_GCR_GIC_BASE_GICEN);
		/* Ensure GIC region is enabled before trying to access it */
		__sync();
	}

	mips_gic_base = ioremap_nocache(gic_base, gic_len);

	gicconfig = read_gic_config();
	gic_shared_intrs = gicconfig & GIC_CONFIG_NUMINTERRUPTS;
	gic_shared_intrs >>= __ffs(GIC_CONFIG_NUMINTERRUPTS);
	gic_shared_intrs = (gic_shared_intrs + 1) * 8;

	if (cpu_has_veic) {
		/* Always use vector 1 in EIC mode */
		gic_cpu_pin = 0;
		timer_cpu_pin = gic_cpu_pin;
		set_vi_handler(gic_cpu_pin + GIC_PIN_TO_VEC_OFFSET,
			       __gic_irq_dispatch);
	} else {
		gic_cpu_pin = cpu_vec - GIC_CPU_PIN_OFFSET;
		irq_set_chained_handler(MIPS_CPU_IRQ_BASE + cpu_vec,
					gic_irq_dispatch);
		/*
		 * With the CMP implementation of SMP (deprecated), other CPUs
		 * are started by the bootloader and put into a timer based
		 * waiting poll loop. We must not re-route those CPU's local
		 * timer interrupts as the wait instruction will never finish,
		 * so just handle whatever CPU interrupt it is routed to by
		 * default.
		 *
		 * This workaround should be removed when CMP support is
		 * dropped.
		 */
		if (IS_ENABLED(CONFIG_MIPS_CMP) &&
		    gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER)) {
			timer_cpu_pin = read_gic_vl_timer_map() & GIC_MAP_PIN_MAP;
			irq_set_chained_handler(MIPS_CPU_IRQ_BASE +
						GIC_CPU_PIN_OFFSET +
						timer_cpu_pin,
						gic_irq_dispatch);
		} else {
			timer_cpu_pin = gic_cpu_pin;
		}
	}

	gic_irq_domain = irq_domain_add_simple(node, GIC_NUM_LOCAL_INTRS +
					       gic_shared_intrs, 0,
					       &gic_irq_domain_ops, NULL);
	if (!gic_irq_domain) {
		pr_err("Failed to add IRQ domain\n");
		return -ENXIO;
	}

	gic_ipi_domain = irq_domain_add_hierarchy(gic_irq_domain,
						  IRQ_DOMAIN_FLAG_IPI_PER_CPU,
						  GIC_NUM_LOCAL_INTRS + gic_shared_intrs,
						  node, &gic_ipi_domain_ops, NULL);
	if (!gic_ipi_domain) {
		pr_err("Failed to add IPI domain\n");
		return -ENXIO;
	}

	irq_domain_update_bus_token(gic_ipi_domain, DOMAIN_BUS_IPI);

	if (node &&
	    !of_property_read_u32_array(node, "mti,reserved-ipi-vectors", v, 2)) {
		bitmap_set(ipi_resrv, v[0], v[1]);
	} else {
		/*
		 * Reserve 2 interrupts per possible CPU/VP for use as IPIs,
		 * meeting the requirements of arch/mips SMP.
		 */
		num_ipis = 2 * num_possible_cpus();
		bitmap_set(ipi_resrv, gic_shared_intrs - num_ipis, num_ipis);
	}

	bitmap_copy(ipi_available, ipi_resrv, GIC_MAX_INTRS);

	board_bind_eic_interrupt = &gic_bind_eic_interrupt;

	/* Setup defaults */
	for (i = 0; i < gic_shared_intrs; i++) {
		change_gic_pol(i, GIC_POL_ACTIVE_HIGH);
		change_gic_trig(i, GIC_TRIG_LEVEL);
		write_gic_rmask(i);
	}

	return cpuhp_setup_state(CPUHP_AP_IRQ_MIPS_GIC_STARTING,
				 "irqchip/mips/gic:starting",
				 gic_cpu_startup, NULL);
}
IRQCHIP_DECLARE(mips_gic, "mti,gic", gic_of_init);
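/*
 * For reference, a device tree node matched by the IRQCHIP_DECLARE() above
 * might look like the following (per the mti,gic binding; the address and
 * size are only illustrative):
 *
 *	gic: interrupt-controller@1bdc0000 {
 *		compatible = "mti,gic";
 *		reg = <0x1bdc0000 0x20000>;
 *		interrupt-controller;
 *		#interrupt-cells = <3>;
 *	};
 */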