/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 */

#define pr_fmt(fmt) "irq-mips-gic: " fmt

#include <linux/bitmap.h>
#include <linux/clocksource.h>
#include <linux/cpuhotplug.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/of_address.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/smp.h>

#include <asm/mips-cps.h>
#include <asm/setup.h>
#include <asm/traps.h>

#include <dt-bindings/interrupt-controller/mips-gic.h>

#define GIC_MAX_INTRS		256
#define GIC_MAX_LONGS		BITS_TO_LONGS(GIC_MAX_INTRS)

/* Add 2 to convert GIC CPU pin to core interrupt */
#define GIC_CPU_PIN_OFFSET	2

/* Mapped interrupt to pin X, then GIC will generate the vector (X+1). */
#define GIC_PIN_TO_VEC_OFFSET	1

/* Convert between local/shared IRQ number and GIC HW IRQ number. */
#define GIC_LOCAL_HWIRQ_BASE	0
#define GIC_LOCAL_TO_HWIRQ(x)	(GIC_LOCAL_HWIRQ_BASE + (x))
#define GIC_HWIRQ_TO_LOCAL(x)	((x) - GIC_LOCAL_HWIRQ_BASE)
#define GIC_SHARED_HWIRQ_BASE	GIC_NUM_LOCAL_INTRS
#define GIC_SHARED_TO_HWIRQ(x)	(GIC_SHARED_HWIRQ_BASE + (x))
#define GIC_HWIRQ_TO_SHARED(x)	((x) - GIC_SHARED_HWIRQ_BASE)

void __iomem *mips_gic_base;

DEFINE_PER_CPU_READ_MOSTLY(unsigned long[GIC_MAX_LONGS], pcpu_masks);

static DEFINE_SPINLOCK(gic_lock);
static struct irq_domain *gic_irq_domain;
static struct irq_domain *gic_ipi_domain;
static int gic_shared_intrs;
static unsigned int gic_cpu_pin;
static unsigned int timer_cpu_pin;
static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller;
static DECLARE_BITMAP(ipi_resrv, GIC_MAX_INTRS);
static DECLARE_BITMAP(ipi_available, GIC_MAX_INTRS);

static struct gic_all_vpes_chip_data {
	u32	map;
	bool	mask;
} gic_all_vpes_chip_data[GIC_NUM_LOCAL_INTRS];

static void gic_clear_pcpu_masks(unsigned int intr)
{
	unsigned int i;

	/* Clear the interrupt's bit in all pcpu_masks */
	for_each_possible_cpu(i)
		clear_bit(intr, per_cpu_ptr(pcpu_masks, i));
}

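/*
 * Not every local interrupt can be routed through the GIC on all hardware:
 * in non-EIC mode the VPE-local control register advertises which of the
 * timer, performance counter, FDC and software interrupts are routable.
 * The gic_get_c0_*() helpers below fall back to the corresponding CP0
 * interrupt line when GIC routing is unavailable.
 */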
static bool gic_local_irq_is_routable(int intr)
{
	u32 vpe_ctl;

	/* All local interrupts are routable in EIC mode. */
	if (cpu_has_veic)
		return true;

	vpe_ctl = read_gic_vl_ctl();
	switch (intr) {
	case GIC_LOCAL_INT_TIMER:
		return vpe_ctl & GIC_VX_CTL_TIMER_ROUTABLE;
	case GIC_LOCAL_INT_PERFCTR:
		return vpe_ctl & GIC_VX_CTL_PERFCNT_ROUTABLE;
	case GIC_LOCAL_INT_FDC:
		return vpe_ctl & GIC_VX_CTL_FDC_ROUTABLE;
	case GIC_LOCAL_INT_SWINT0:
	case GIC_LOCAL_INT_SWINT1:
		return vpe_ctl & GIC_VX_CTL_SWINT_ROUTABLE;
	default:
		return true;
	}
}

static void gic_bind_eic_interrupt(int irq, int set)
{
	/* Convert irq vector # to hw int # */
	irq -= GIC_PIN_TO_VEC_OFFSET;

	/* Set irq to use shadow set */
	write_gic_vl_eic_shadow_set(irq, set);
}

static void gic_send_ipi(struct irq_data *d, unsigned int cpu)
{
	irq_hw_number_t hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(d));

	write_gic_wedge(GIC_WEDGE_RW | hwirq);
}

int gic_get_c0_compare_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER))
		return MIPS_CPU_IRQ_BASE + cp0_compare_irq;
	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_TIMER));
}

int gic_get_c0_perfcount_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_PERFCTR)) {
		/* Is the performance counter shared with the timer? */
		if (cp0_perfcount_irq < 0)
			return -1;
		return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
	}
	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_PERFCTR));
}

int gic_get_c0_fdc_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_FDC)) {
		/* Is the FDC IRQ even present? */
		if (cp0_fdc_irq < 0)
			return -1;
		return MIPS_CPU_IRQ_BASE + cp0_fdc_irq;
	}

	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_FDC));
}

static void gic_handle_shared_int(bool chained)
{
	unsigned int intr, virq;
	unsigned long *pcpu_mask;
	DECLARE_BITMAP(pending, GIC_MAX_INTRS);

	/* Get per-cpu bitmaps */
	pcpu_mask = this_cpu_ptr(pcpu_masks);

	if (mips_cm_is64)
		__ioread64_copy(pending, addr_gic_pend(),
				DIV_ROUND_UP(gic_shared_intrs, 64));
	else
		__ioread32_copy(pending, addr_gic_pend(),
				DIV_ROUND_UP(gic_shared_intrs, 32));

	bitmap_and(pending, pending, pcpu_mask, gic_shared_intrs);

	for_each_set_bit(intr, pending, gic_shared_intrs) {
		virq = irq_linear_revmap(gic_irq_domain,
					 GIC_SHARED_TO_HWIRQ(intr));
		if (chained)
			generic_handle_irq(virq);
		else
			do_IRQ(virq);
	}
}

static void gic_mask_irq(struct irq_data *d)
{
	unsigned int intr = GIC_HWIRQ_TO_SHARED(d->hwirq);

	write_gic_rmask(intr);
	gic_clear_pcpu_masks(intr);
}

static void gic_unmask_irq(struct irq_data *d)
{
	unsigned int intr = GIC_HWIRQ_TO_SHARED(d->hwirq);
	unsigned int cpu;

	write_gic_smask(intr);

	gic_clear_pcpu_masks(intr);
	cpu = cpumask_first(irq_data_get_effective_affinity_mask(d));
	set_bit(intr, per_cpu_ptr(pcpu_masks, cpu));
}

static void gic_ack_irq(struct irq_data *d)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);

	write_gic_wedge(irq);
}

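/*
 * Program the polarity, trigger and dual-edge registers of a shared
 * interrupt to match the requested IRQ type, then switch the descriptor
 * between the edge and level irq_chip/flow-handler pairs accordingly.
 */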
static int gic_set_type(struct irq_data *d, unsigned int type)
{
	unsigned int irq, pol, trig, dual;
	unsigned long flags;

	irq = GIC_HWIRQ_TO_SHARED(d->hwirq);

	spin_lock_irqsave(&gic_lock, flags);
	switch (type & IRQ_TYPE_SENSE_MASK) {
	case IRQ_TYPE_EDGE_FALLING:
		pol = GIC_POL_FALLING_EDGE;
		trig = GIC_TRIG_EDGE;
		dual = GIC_DUAL_SINGLE;
		break;
	case IRQ_TYPE_EDGE_RISING:
		pol = GIC_POL_RISING_EDGE;
		trig = GIC_TRIG_EDGE;
		dual = GIC_DUAL_SINGLE;
		break;
	case IRQ_TYPE_EDGE_BOTH:
		pol = 0; /* Doesn't matter */
		trig = GIC_TRIG_EDGE;
		dual = GIC_DUAL_DUAL;
		break;
	case IRQ_TYPE_LEVEL_LOW:
		pol = GIC_POL_ACTIVE_LOW;
		trig = GIC_TRIG_LEVEL;
		dual = GIC_DUAL_SINGLE;
		break;
	case IRQ_TYPE_LEVEL_HIGH:
	default:
		pol = GIC_POL_ACTIVE_HIGH;
		trig = GIC_TRIG_LEVEL;
		dual = GIC_DUAL_SINGLE;
		break;
	}

	change_gic_pol(irq, pol);
	change_gic_trig(irq, trig);
	change_gic_dual(irq, dual);

	if (trig == GIC_TRIG_EDGE)
		irq_set_chip_handler_name_locked(d, &gic_edge_irq_controller,
						 handle_edge_irq, NULL);
	else
		irq_set_chip_handler_name_locked(d, &gic_level_irq_controller,
						 handle_level_irq, NULL);
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}

#ifdef CONFIG_SMP
static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
			    bool force)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
	unsigned long flags;
	unsigned int cpu;

	cpu = cpumask_first_and(cpumask, cpu_online_mask);
	if (cpu >= NR_CPUS)
		return -EINVAL;

	/* Assumption : cpumask refers to a single CPU */
	spin_lock_irqsave(&gic_lock, flags);

	/* Re-route this IRQ */
	write_gic_map_vp(irq, BIT(mips_cm_vp_id(cpu)));

	/* Update the pcpu_masks */
	gic_clear_pcpu_masks(irq);
	if (read_gic_mask(irq))
		set_bit(irq, per_cpu_ptr(pcpu_masks, cpu));

	irq_data_update_effective_affinity(d, cpumask_of(cpu));
	spin_unlock_irqrestore(&gic_lock, flags);

	return IRQ_SET_MASK_OK;
}
#endif

static struct irq_chip gic_level_irq_controller = {
	.name			= "MIPS GIC",
	.irq_mask		= gic_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_set_type		= gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	= gic_set_affinity,
#endif
};

static struct irq_chip gic_edge_irq_controller = {
	.name			= "MIPS GIC",
	.irq_ack		= gic_ack_irq,
	.irq_mask		= gic_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_set_type		= gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	= gic_set_affinity,
#endif
	.ipi_send_single	= gic_send_ipi,
};

static void gic_handle_local_int(bool chained)
{
	unsigned long pending, masked;
	unsigned int intr, virq;

	pending = read_gic_vl_pend();
	masked = read_gic_vl_mask();

	bitmap_and(&pending, &pending, &masked, GIC_NUM_LOCAL_INTRS);

	for_each_set_bit(intr, &pending, GIC_NUM_LOCAL_INTRS) {
		virq = irq_linear_revmap(gic_irq_domain,
					 GIC_LOCAL_TO_HWIRQ(intr));
		if (chained)
			generic_handle_irq(virq);
		else
			do_IRQ(virq);
	}
}

static void gic_mask_local_irq(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);

	write_gic_vl_rmask(BIT(intr));
}

static void gic_unmask_local_irq(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);

	write_gic_vl_smask(BIT(intr));
}

static struct irq_chip gic_local_irq_controller = {
	.name			= "MIPS GIC Local",
	.irq_mask		= gic_mask_local_irq,
	.irq_unmask		= gic_unmask_local_irq,
};

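/*
 * The timer, performance counter and FDC local interrupts are handled
 * via the "all VPEs" chip below: mask/unmask is applied to every online
 * VP through the other-VP register window, and the map/mask state is
 * cached in gic_all_vpes_chip_data so gic_all_vpes_irq_cpu_online() can
 * restore it on a VP that comes online later.
 */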
static void gic_mask_local_irq_all_vpes(struct irq_data *d)
{
	struct gic_all_vpes_chip_data *cd;
	unsigned long flags;
	int intr, cpu;

	intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	cd = irq_data_get_irq_chip_data(d);
	cd->mask = false;

	spin_lock_irqsave(&gic_lock, flags);
	for_each_online_cpu(cpu) {
		write_gic_vl_other(mips_cm_vp_id(cpu));
		write_gic_vo_rmask(BIT(intr));
	}
	spin_unlock_irqrestore(&gic_lock, flags);
}

static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
{
	struct gic_all_vpes_chip_data *cd;
	unsigned long flags;
	int intr, cpu;

	intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	cd = irq_data_get_irq_chip_data(d);
	cd->mask = true;

	spin_lock_irqsave(&gic_lock, flags);
	for_each_online_cpu(cpu) {
		write_gic_vl_other(mips_cm_vp_id(cpu));
		write_gic_vo_smask(BIT(intr));
	}
	spin_unlock_irqrestore(&gic_lock, flags);
}

static void gic_all_vpes_irq_cpu_online(struct irq_data *d)
{
	struct gic_all_vpes_chip_data *cd;
	unsigned int intr;

	intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	cd = irq_data_get_irq_chip_data(d);

	write_gic_vl_map(intr, cd->map);
	if (cd->mask)
		write_gic_vl_smask(BIT(intr));
}

static struct irq_chip gic_all_vpes_local_irq_controller = {
	.name			= "MIPS GIC Local",
	.irq_mask		= gic_mask_local_irq_all_vpes,
	.irq_unmask		= gic_unmask_local_irq_all_vpes,
	.irq_cpu_online		= gic_all_vpes_irq_cpu_online,
};

static void __gic_irq_dispatch(void)
{
	gic_handle_local_int(false);
	gic_handle_shared_int(false);
}

static void gic_irq_dispatch(struct irq_desc *desc)
{
	gic_handle_local_int(true);
	gic_handle_shared_int(true);
}

static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
				     irq_hw_number_t hw, unsigned int cpu)
{
	int intr = GIC_HWIRQ_TO_SHARED(hw);
	struct irq_data *data;
	unsigned long flags;

	data = irq_get_irq_data(virq);

	spin_lock_irqsave(&gic_lock, flags);
	write_gic_map_pin(intr, GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin);
	write_gic_map_vp(intr, BIT(mips_cm_vp_id(cpu)));
	gic_clear_pcpu_masks(intr);
	set_bit(intr, per_cpu_ptr(pcpu_masks, cpu));
	irq_data_update_effective_affinity(data, cpumask_of(cpu));
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}

static int gic_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
				const u32 *intspec, unsigned int intsize,
				irq_hw_number_t *out_hwirq,
				unsigned int *out_type)
{
	if (intsize != 3)
		return -EINVAL;

	if (intspec[0] == GIC_SHARED)
		*out_hwirq = GIC_SHARED_TO_HWIRQ(intspec[1]);
	else if (intspec[0] == GIC_LOCAL)
		*out_hwirq = GIC_LOCAL_TO_HWIRQ(intspec[1]);
	else
		return -EINVAL;
	*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;

	return 0;
}

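/*
 * Map a hwirq in the main GIC domain. Shared interrupts are rejected if
 * they collide with the reserved IPI range, are given the level chip by
 * default (gic_set_type() may switch them to the edge chip later) and
 * are initially routed to CPU 0. Local interrupts are routed to the
 * chosen CPU pin on every online VP, using either the per-cpu chip or
 * the "all VPEs" chip above.
 */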
static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
			      irq_hw_number_t hwirq)
{
	struct gic_all_vpes_chip_data *cd;
	unsigned long flags;
	unsigned int intr;
	int err, cpu;
	u32 map;

	if (hwirq >= GIC_SHARED_HWIRQ_BASE) {
		/* verify that shared irqs don't conflict with an IPI irq */
		if (test_bit(GIC_HWIRQ_TO_SHARED(hwirq), ipi_resrv))
			return -EBUSY;

		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
						    &gic_level_irq_controller,
						    NULL);
		if (err)
			return err;

		irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq)));
		return gic_shared_irq_domain_map(d, virq, hwirq, 0);
	}

	intr = GIC_HWIRQ_TO_LOCAL(hwirq);
	map = GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin;

	switch (intr) {
	case GIC_LOCAL_INT_TIMER:
		/* CONFIG_MIPS_CMP workaround (see __gic_init) */
		map = GIC_MAP_PIN_MAP_TO_PIN | timer_cpu_pin;
		/* fall-through */
	case GIC_LOCAL_INT_PERFCTR:
	case GIC_LOCAL_INT_FDC:
		/*
		 * HACK: These are all really percpu interrupts, but
		 * the rest of the MIPS kernel code does not use the
		 * percpu IRQ API for them.
		 */
		cd = &gic_all_vpes_chip_data[intr];
		cd->map = map;
		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
						    &gic_all_vpes_local_irq_controller,
						    cd);
		if (err)
			return err;

		irq_set_handler(virq, handle_percpu_irq);
		break;

	default:
		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
						    &gic_local_irq_controller,
						    NULL);
		if (err)
			return err;

		irq_set_handler(virq, handle_percpu_devid_irq);
		irq_set_percpu_devid(virq);
		break;
	}

	if (!gic_local_irq_is_routable(intr))
		return -EPERM;

	spin_lock_irqsave(&gic_lock, flags);
	for_each_online_cpu(cpu) {
		write_gic_vl_other(mips_cm_vp_id(cpu));
		write_gic_vo_map(intr, map);
	}
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}

static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	struct irq_fwspec *fwspec = arg;
	irq_hw_number_t hwirq;

	if (fwspec->param[0] == GIC_SHARED)
		hwirq = GIC_SHARED_TO_HWIRQ(fwspec->param[1]);
	else
		hwirq = GIC_LOCAL_TO_HWIRQ(fwspec->param[1]);

	return gic_irq_domain_map(d, virq, hwirq);
}

void gic_irq_domain_free(struct irq_domain *d, unsigned int virq,
			 unsigned int nr_irqs)
{
}

static const struct irq_domain_ops gic_irq_domain_ops = {
	.xlate = gic_irq_domain_xlate,
	.alloc = gic_irq_domain_alloc,
	.free = gic_irq_domain_free,
	.map = gic_irq_domain_map,
};

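/*
 * IPIs are built on top of shared interrupts taken from the reserved
 * ipi_resrv/ipi_available range: gic_ipi_domain_alloc() claims one
 * shared interrupt per CPU in the requested mask and routes each one to
 * its target VP, and gic_send_ipi() raises it by writing the WEDGE
 * register.
 */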
static int gic_ipi_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
				const u32 *intspec, unsigned int intsize,
				irq_hw_number_t *out_hwirq,
				unsigned int *out_type)
{
	/*
	 * There's nothing to translate here. hwirq is dynamically allocated and
	 * the irq type is always edge triggered.
	 */
	*out_hwirq = 0;
	*out_type = IRQ_TYPE_EDGE_RISING;

	return 0;
}

static int gic_ipi_domain_alloc(struct irq_domain *d, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	struct cpumask *ipimask = arg;
	irq_hw_number_t hwirq, base_hwirq;
	int cpu, ret, i;

	base_hwirq = find_first_bit(ipi_available, gic_shared_intrs);
	if (base_hwirq == gic_shared_intrs)
		return -ENOMEM;

	/* check that we have enough space */
	for (i = base_hwirq; i < nr_irqs; i++) {
		if (!test_bit(i, ipi_available))
			return -EBUSY;
	}
	bitmap_clear(ipi_available, base_hwirq, nr_irqs);

	/* map the hwirq for each cpu consecutively */
	i = 0;
	for_each_cpu(cpu, ipimask) {
		hwirq = GIC_SHARED_TO_HWIRQ(base_hwirq + i);

		ret = irq_domain_set_hwirq_and_chip(d, virq + i, hwirq,
						    &gic_edge_irq_controller,
						    NULL);
		if (ret)
			goto error;

		ret = irq_domain_set_hwirq_and_chip(d->parent, virq + i, hwirq,
						    &gic_edge_irq_controller,
						    NULL);
		if (ret)
			goto error;

		ret = irq_set_irq_type(virq + i, IRQ_TYPE_EDGE_RISING);
		if (ret)
			goto error;

		ret = gic_shared_irq_domain_map(d, virq + i, hwirq, cpu);
		if (ret)
			goto error;

		i++;
	}

	return 0;
error:
	bitmap_set(ipi_available, base_hwirq, nr_irqs);
	return ret;
}

void gic_ipi_domain_free(struct irq_domain *d, unsigned int virq,
			 unsigned int nr_irqs)
{
	irq_hw_number_t base_hwirq;
	struct irq_data *data;

	data = irq_get_irq_data(virq);
	if (!data)
		return;

	base_hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(data));
	bitmap_set(ipi_available, base_hwirq, nr_irqs);
}

int gic_ipi_domain_match(struct irq_domain *d, struct device_node *node,
			 enum irq_domain_bus_token bus_token)
{
	bool is_ipi;

	switch (bus_token) {
	case DOMAIN_BUS_IPI:
		is_ipi = d->bus_token == bus_token;
		return (!node || to_of_node(d->fwnode) == node) && is_ipi;
	default:
		return 0;
	}
}

static const struct irq_domain_ops gic_ipi_domain_ops = {
	.xlate = gic_ipi_domain_xlate,
	.alloc = gic_ipi_domain_alloc,
	.free = gic_ipi_domain_free,
	.match = gic_ipi_domain_match,
};

static int gic_cpu_startup(unsigned int cpu)
{
	/* Enable or disable EIC */
	change_gic_vl_ctl(GIC_VX_CTL_EIC,
			  cpu_has_veic ? GIC_VX_CTL_EIC : 0);

	/* Clear all local IRQ masks (ie. disable all local interrupts) */
	write_gic_vl_rmask(~0);

	/* Invoke irq_cpu_online callbacks to enable desired interrupts */
	irq_cpu_online();

	return 0;
}

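/*
 * Probe/init entry point, matched on the "mti,gic" compatible string.
 * For reference, a device-tree user of this driver looks roughly like
 * the illustrative fragment below (addresses and values are examples
 * only): interrupt specifiers use three cells - GIC_SHARED or GIC_LOCAL,
 * the interrupt number, and the IRQ type - as decoded by
 * gic_irq_domain_xlate(), and "mti,reserved-cpu-vectors" /
 * "mti,reserved-ipi-vectors" are the optional properties parsed here.
 *
 *	gic: interrupt-controller@1bdc0000 {
 *		compatible = "mti,gic";
 *		reg = <0x1bdc0000 0x20000>;
 *		interrupt-controller;
 *		#interrupt-cells = <3>;
 *		mti,reserved-cpu-vectors = <7>;
 *	};
 *
 * with a consumer node specifying e.g.:
 *
 *	interrupts = <GIC_SHARED 3 IRQ_TYPE_LEVEL_HIGH>;
 */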
static int __init gic_of_init(struct device_node *node,
			      struct device_node *parent)
{
	unsigned int cpu_vec, i, gicconfig, v[2], num_ipis;
	unsigned long reserved;
	phys_addr_t gic_base;
	struct resource res;
	size_t gic_len;

	/* Find the first available CPU vector. */
	i = 0;
	reserved = (C_SW0 | C_SW1) >> __ffs(C_SW0);
	while (!of_property_read_u32_index(node, "mti,reserved-cpu-vectors",
					   i++, &cpu_vec))
		reserved |= BIT(cpu_vec);

	cpu_vec = find_first_zero_bit(&reserved, hweight_long(ST0_IM));
	if (cpu_vec == hweight_long(ST0_IM)) {
		pr_err("No CPU vectors available\n");
		return -ENODEV;
	}

	if (of_address_to_resource(node, 0, &res)) {
		/*
		 * Probe the CM for the GIC base address if not specified
		 * in the device-tree.
		 */
		if (mips_cm_present()) {
			gic_base = read_gcr_gic_base() &
				~CM_GCR_GIC_BASE_GICEN;
			gic_len = 0x20000;
			pr_warn("Using inherited base address %pa\n",
				&gic_base);
		} else {
			pr_err("Failed to get memory range\n");
			return -ENODEV;
		}
	} else {
		gic_base = res.start;
		gic_len = resource_size(&res);
	}

	if (mips_cm_present()) {
		write_gcr_gic_base(gic_base | CM_GCR_GIC_BASE_GICEN);
		/* Ensure GIC region is enabled before trying to access it */
		__sync();
	}

	mips_gic_base = ioremap_nocache(gic_base, gic_len);

	gicconfig = read_gic_config();
	gic_shared_intrs = gicconfig & GIC_CONFIG_NUMINTERRUPTS;
	gic_shared_intrs >>= __ffs(GIC_CONFIG_NUMINTERRUPTS);
	gic_shared_intrs = (gic_shared_intrs + 1) * 8;

	if (cpu_has_veic) {
		/* Always use vector 1 in EIC mode */
		gic_cpu_pin = 0;
		timer_cpu_pin = gic_cpu_pin;
		set_vi_handler(gic_cpu_pin + GIC_PIN_TO_VEC_OFFSET,
			       __gic_irq_dispatch);
	} else {
		gic_cpu_pin = cpu_vec - GIC_CPU_PIN_OFFSET;
		irq_set_chained_handler(MIPS_CPU_IRQ_BASE + cpu_vec,
					gic_irq_dispatch);
		/*
		 * With the CMP implementation of SMP (deprecated), other CPUs
		 * are started by the bootloader and put into a timer based
		 * waiting poll loop. We must not re-route those CPU's local
		 * timer interrupts as the wait instruction will never finish,
		 * so just handle whatever CPU interrupt it is routed to by
		 * default.
		 *
		 * This workaround should be removed when CMP support is
		 * dropped.
		 */
		if (IS_ENABLED(CONFIG_MIPS_CMP) &&
		    gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER)) {
			timer_cpu_pin = read_gic_vl_timer_map() & GIC_MAP_PIN_MAP;
			irq_set_chained_handler(MIPS_CPU_IRQ_BASE +
						GIC_CPU_PIN_OFFSET +
						timer_cpu_pin,
						gic_irq_dispatch);
		} else {
			timer_cpu_pin = gic_cpu_pin;
		}
	}

	gic_irq_domain = irq_domain_add_simple(node, GIC_NUM_LOCAL_INTRS +
					       gic_shared_intrs, 0,
					       &gic_irq_domain_ops, NULL);
	if (!gic_irq_domain) {
		pr_err("Failed to add IRQ domain");
		return -ENXIO;
	}

	gic_ipi_domain = irq_domain_add_hierarchy(gic_irq_domain,
						  IRQ_DOMAIN_FLAG_IPI_PER_CPU,
						  GIC_NUM_LOCAL_INTRS + gic_shared_intrs,
						  node, &gic_ipi_domain_ops, NULL);
	if (!gic_ipi_domain) {
		pr_err("Failed to add IPI domain");
		return -ENXIO;
	}

	irq_domain_update_bus_token(gic_ipi_domain, DOMAIN_BUS_IPI);

	if (node &&
	    !of_property_read_u32_array(node, "mti,reserved-ipi-vectors", v, 2)) {
		bitmap_set(ipi_resrv, v[0], v[1]);
	} else {
		/*
		 * Reserve 2 interrupts per possible CPU/VP for use as IPIs,
		 * meeting the requirements of arch/mips SMP.
		 */
		num_ipis = 2 * num_possible_cpus();
		bitmap_set(ipi_resrv, gic_shared_intrs - num_ipis, num_ipis);
	}

	bitmap_copy(ipi_available, ipi_resrv, GIC_MAX_INTRS);

	board_bind_eic_interrupt = &gic_bind_eic_interrupt;

	/* Setup defaults */
	for (i = 0; i < gic_shared_intrs; i++) {
		change_gic_pol(i, GIC_POL_ACTIVE_HIGH);
		change_gic_trig(i, GIC_TRIG_LEVEL);
		write_gic_rmask(i);
	}

	return cpuhp_setup_state(CPUHP_AP_IRQ_MIPS_GIC_STARTING,
				 "irqchip/mips/gic:starting",
				 gic_cpu_startup, NULL);
}
IRQCHIP_DECLARE(mips_gic, "mti,gic", gic_of_init);