/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2012 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/bitmap.h>
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/of_address.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/smp.h>

#include <asm/mips-cps.h>
#include <asm/setup.h>
#include <asm/traps.h>

#include <dt-bindings/interrupt-controller/mips-gic.h>

#define GIC_MAX_INTRS		256
#define GIC_MAX_LONGS		BITS_TO_LONGS(GIC_MAX_INTRS)

/* Add 2 to convert GIC CPU pin to core interrupt */
#define GIC_CPU_PIN_OFFSET	2

/* When an interrupt is mapped to pin X, the GIC will generate vector (X+1). */
#define GIC_PIN_TO_VEC_OFFSET	1

/* Convert between local/shared IRQ number and GIC HW IRQ number. */
#define GIC_LOCAL_HWIRQ_BASE	0
#define GIC_LOCAL_TO_HWIRQ(x)	(GIC_LOCAL_HWIRQ_BASE + (x))
#define GIC_HWIRQ_TO_LOCAL(x)	((x) - GIC_LOCAL_HWIRQ_BASE)
#define GIC_SHARED_HWIRQ_BASE	GIC_NUM_LOCAL_INTRS
#define GIC_SHARED_TO_HWIRQ(x)	(GIC_SHARED_HWIRQ_BASE + (x))
#define GIC_HWIRQ_TO_SHARED(x)	((x) - GIC_SHARED_HWIRQ_BASE)

void __iomem *mips_gic_base;

/* Per-CPU bitmap of the shared interrupts currently routed to that CPU */
DEFINE_PER_CPU_READ_MOSTLY(unsigned long[GIC_MAX_LONGS], pcpu_masks);

static DEFINE_SPINLOCK(gic_lock);
static struct irq_domain *gic_irq_domain;
static struct irq_domain *gic_ipi_domain;
static int gic_shared_intrs;
static int gic_vpes;
static unsigned int gic_cpu_pin;
static unsigned int timer_cpu_pin;
static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller;
static DECLARE_BITMAP(ipi_resrv, GIC_MAX_INTRS);
static DECLARE_BITMAP(ipi_available, GIC_MAX_INTRS);

static void gic_clear_pcpu_masks(unsigned int intr)
{
	unsigned int i;

	/* Clear the interrupt's bit in all pcpu_masks */
	for_each_possible_cpu(i)
		clear_bit(intr, per_cpu_ptr(pcpu_masks, i));
}

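/*
 * Each VP(E) has a set of local interrupts (timer, perf counter, FDC and
 * software interrupts) in addition to the shared external interrupts.
 * Whether a given local interrupt may be re-routed through the GIC is
 * advertised by the VP-local control register, except in EIC mode where
 * every local interrupt is routable.
 */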
static bool gic_local_irq_is_routable(int intr)
{
	u32 vpe_ctl;

	/* All local interrupts are routable in EIC mode. */
	if (cpu_has_veic)
		return true;

	vpe_ctl = read_gic_vl_ctl();
	switch (intr) {
	case GIC_LOCAL_INT_TIMER:
		return vpe_ctl & GIC_VX_CTL_TIMER_ROUTABLE;
	case GIC_LOCAL_INT_PERFCTR:
		return vpe_ctl & GIC_VX_CTL_PERFCNT_ROUTABLE;
	case GIC_LOCAL_INT_FDC:
		return vpe_ctl & GIC_VX_CTL_FDC_ROUTABLE;
	case GIC_LOCAL_INT_SWINT0:
	case GIC_LOCAL_INT_SWINT1:
		return vpe_ctl & GIC_VX_CTL_SWINT_ROUTABLE;
	default:
		return true;
	}
}

static void gic_bind_eic_interrupt(int irq, int set)
{
	/* Convert irq vector # to hw int # */
	irq -= GIC_PIN_TO_VEC_OFFSET;

	/* Set irq to use shadow set */
	write_gic_vl_eic_shadow_set(irq, set);
}

static void gic_send_ipi(struct irq_data *d, unsigned int cpu)
{
	irq_hw_number_t hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(d));

	/* Writing the hwirq with GIC_WEDGE_RW set pends the interrupt */
	write_gic_wedge(GIC_WEDGE_RW | hwirq);
}

int gic_get_c0_compare_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER))
		return MIPS_CPU_IRQ_BASE + cp0_compare_irq;
	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_TIMER));
}

int gic_get_c0_perfcount_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_PERFCTR)) {
		/* Is the performance counter shared with the timer? */
		if (cp0_perfcount_irq < 0)
			return -1;
		return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
	}
	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_PERFCTR));
}

int gic_get_c0_fdc_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_FDC)) {
		/* Is the FDC IRQ even present? */
		if (cp0_fdc_irq < 0)
			return -1;
		return MIPS_CPU_IRQ_BASE + cp0_fdc_irq;
	}

	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_FDC));
}

static void gic_handle_shared_int(bool chained)
{
	unsigned int intr, virq;
	unsigned long *pcpu_mask;
	DECLARE_BITMAP(pending, GIC_MAX_INTRS);

	/* Get per-cpu bitmaps */
	pcpu_mask = this_cpu_ptr(pcpu_masks);

	if (mips_cm_is64)
		__ioread64_copy(pending, addr_gic_pend(),
				DIV_ROUND_UP(gic_shared_intrs, 64));
	else
		__ioread32_copy(pending, addr_gic_pend(),
				DIV_ROUND_UP(gic_shared_intrs, 32));

	/* Only handle the interrupts routed to this CPU */
	bitmap_and(pending, pending, pcpu_mask, gic_shared_intrs);

	for_each_set_bit(intr, pending, gic_shared_intrs) {
		virq = irq_linear_revmap(gic_irq_domain,
					 GIC_SHARED_TO_HWIRQ(intr));
		if (chained)
			generic_handle_irq(virq);
		else
			do_IRQ(virq);
	}
}

static void gic_mask_irq(struct irq_data *d)
{
	unsigned int intr = GIC_HWIRQ_TO_SHARED(d->hwirq);

	write_gic_rmask(BIT(intr));
	gic_clear_pcpu_masks(intr);
}

static void gic_unmask_irq(struct irq_data *d)
{
	struct cpumask *affinity = irq_data_get_affinity_mask(d);
	unsigned int intr = GIC_HWIRQ_TO_SHARED(d->hwirq);
	unsigned int cpu;

	write_gic_smask(BIT(intr));

	gic_clear_pcpu_masks(intr);
	cpu = cpumask_first_and(affinity, cpu_online_mask);
	set_bit(intr, per_cpu_ptr(pcpu_masks, cpu));
}

static void gic_ack_irq(struct irq_data *d)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);

	/* Writing the hwirq with GIC_WEDGE_RW clear acks the edge interrupt */
	write_gic_wedge(irq);
}

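/*
 * Program the polarity, trigger & dual-edge registers to match the
 * requested IRQ_TYPE_* sense, and switch the flow handler between the
 * level & edge irq_chips accordingly.
 */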
static int gic_set_type(struct irq_data *d, unsigned int type)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
	unsigned long flags;
	bool is_edge;

	spin_lock_irqsave(&gic_lock, flags);
	switch (type & IRQ_TYPE_SENSE_MASK) {
	case IRQ_TYPE_EDGE_FALLING:
		change_gic_pol(irq, GIC_POL_FALLING_EDGE);
		change_gic_trig(irq, GIC_TRIG_EDGE);
		change_gic_dual(irq, GIC_DUAL_SINGLE);
		is_edge = true;
		break;
	case IRQ_TYPE_EDGE_RISING:
		change_gic_pol(irq, GIC_POL_RISING_EDGE);
		change_gic_trig(irq, GIC_TRIG_EDGE);
		change_gic_dual(irq, GIC_DUAL_SINGLE);
		is_edge = true;
		break;
	case IRQ_TYPE_EDGE_BOTH:
		/* polarity is irrelevant in this case */
		change_gic_trig(irq, GIC_TRIG_EDGE);
		change_gic_dual(irq, GIC_DUAL_DUAL);
		is_edge = true;
		break;
	case IRQ_TYPE_LEVEL_LOW:
		change_gic_pol(irq, GIC_POL_ACTIVE_LOW);
		change_gic_trig(irq, GIC_TRIG_LEVEL);
		change_gic_dual(irq, GIC_DUAL_SINGLE);
		is_edge = false;
		break;
	case IRQ_TYPE_LEVEL_HIGH:
	default:
		change_gic_pol(irq, GIC_POL_ACTIVE_HIGH);
		change_gic_trig(irq, GIC_TRIG_LEVEL);
		change_gic_dual(irq, GIC_DUAL_SINGLE);
		is_edge = false;
		break;
	}

	if (is_edge)
		irq_set_chip_handler_name_locked(d, &gic_edge_irq_controller,
						 handle_edge_irq, NULL);
	else
		irq_set_chip_handler_name_locked(d, &gic_level_irq_controller,
						 handle_level_irq, NULL);
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}

#ifdef CONFIG_SMP
static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
			    bool force)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
	unsigned long flags;
	unsigned int cpu;

	cpu = cpumask_first_and(cpumask, cpu_online_mask);
	if (cpu >= NR_CPUS)
		return -EINVAL;

	/* Assumption: cpumask refers to a single CPU */
	spin_lock_irqsave(&gic_lock, flags);

	/* Re-route this IRQ */
	write_gic_map_vp(irq, BIT(mips_cm_vp_id(cpu)));

	/* Update the pcpu_masks */
	gic_clear_pcpu_masks(irq);
	if (read_gic_mask(irq))
		set_bit(irq, per_cpu_ptr(pcpu_masks, cpu));

	irq_data_update_effective_affinity(d, cpumask_of(cpu));
	spin_unlock_irqrestore(&gic_lock, flags);

	return IRQ_SET_MASK_OK;
}
#endif

static struct irq_chip gic_level_irq_controller = {
	.name			= "MIPS GIC",
	.irq_mask		= gic_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_set_type		= gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	= gic_set_affinity,
#endif
};

static struct irq_chip gic_edge_irq_controller = {
	.name			= "MIPS GIC",
	.irq_ack		= gic_ack_irq,
	.irq_mask		= gic_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_set_type		= gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	= gic_set_affinity,
#endif
	.ipi_send_single	= gic_send_ipi,
};

static void gic_handle_local_int(bool chained)
{
	unsigned long pending, masked;
	unsigned int intr, virq;

	pending = read_gic_vl_pend();
	masked = read_gic_vl_mask();

	bitmap_and(&pending, &pending, &masked, GIC_NUM_LOCAL_INTRS);

	for_each_set_bit(intr, &pending, GIC_NUM_LOCAL_INTRS) {
		virq = irq_linear_revmap(gic_irq_domain,
					 GIC_LOCAL_TO_HWIRQ(intr));
		if (chained)
			generic_handle_irq(virq);
		else
			do_IRQ(virq);
	}
}

static void gic_mask_local_irq(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);

	write_gic_vl_rmask(BIT(intr));
}

static void gic_unmask_local_irq(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);

	write_gic_vl_smask(BIT(intr));
}

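/*
 * This chip masks & unmasks a local interrupt only on the VP(E) taking
 * it, via the VP-local register block; the *_all_vpes variant below
 * instead walks every VP(E) through the "other" register block.
 */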
static struct irq_chip gic_local_irq_controller = {
	.name			= "MIPS GIC Local",
	.irq_mask		= gic_mask_local_irq,
	.irq_unmask		= gic_unmask_local_irq,
};

static void gic_mask_local_irq_all_vpes(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	int i;
	unsigned long flags;

	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		write_gic_vl_other(mips_cm_vp_id(i));
		write_gic_vo_rmask(BIT(intr));
	}
	spin_unlock_irqrestore(&gic_lock, flags);
}

static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	int i;
	unsigned long flags;

	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		write_gic_vl_other(mips_cm_vp_id(i));
		write_gic_vo_smask(BIT(intr));
	}
	spin_unlock_irqrestore(&gic_lock, flags);
}

static struct irq_chip gic_all_vpes_local_irq_controller = {
	.name			= "MIPS GIC Local",
	.irq_mask		= gic_mask_local_irq_all_vpes,
	.irq_unmask		= gic_unmask_local_irq_all_vpes,
};

static void __gic_irq_dispatch(void)
{
	gic_handle_local_int(false);
	gic_handle_shared_int(false);
}

static void gic_irq_dispatch(struct irq_desc *desc)
{
	gic_handle_local_int(true);
	gic_handle_shared_int(true);
}

static int gic_local_irq_domain_map(struct irq_domain *d, unsigned int virq,
				    irq_hw_number_t hw)
{
	int intr = GIC_HWIRQ_TO_LOCAL(hw);
	int i;
	unsigned long flags;
	u32 val;

	if (!gic_local_irq_is_routable(intr))
		return -EPERM;

	if (intr > GIC_LOCAL_INT_FDC) {
		pr_err("Invalid local IRQ %d\n", intr);
		return -EINVAL;
	}

	if (intr == GIC_LOCAL_INT_TIMER) {
		/* CONFIG_MIPS_CMP workaround (see gic_of_init) */
		val = GIC_MAP_PIN_MAP_TO_PIN | timer_cpu_pin;
	} else {
		val = GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin;
	}

	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		write_gic_vl_other(mips_cm_vp_id(i));
		write_gic_vo_map(intr, val);
	}
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}

static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
				     irq_hw_number_t hw, unsigned int cpu)
{
	int intr = GIC_HWIRQ_TO_SHARED(hw);
	unsigned long flags;

	spin_lock_irqsave(&gic_lock, flags);
	write_gic_map_pin(intr, GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin);
	write_gic_map_vp(intr, BIT(mips_cm_vp_id(cpu)));
	gic_clear_pcpu_masks(intr);
	set_bit(intr, per_cpu_ptr(pcpu_masks, cpu));
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}

static int gic_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
				const u32 *intspec, unsigned int intsize,
				irq_hw_number_t *out_hwirq,
				unsigned int *out_type)
{
	if (intsize != 3)
		return -EINVAL;

	if (intspec[0] == GIC_SHARED)
		*out_hwirq = GIC_SHARED_TO_HWIRQ(intspec[1]);
	else if (intspec[0] == GIC_LOCAL)
		*out_hwirq = GIC_LOCAL_TO_HWIRQ(intspec[1]);
	else
		return -EINVAL;
	*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;

	return 0;
}

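/*
 * Shared interrupts map through the level irq_chip by default (gic_set_type
 * may later switch them to the edge chip) and are initially routed to CPU 0;
 * the timer, perf counter & FDC local interrupts use the all-VP(E)s chip
 * because the rest of the MIPS kernel drives them through the non-percpu IRQ
 * APIs; the remaining local interrupts are handled as genuine percpu IRQs.
 */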
static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
			      irq_hw_number_t hwirq)
{
	int err;

	if (hwirq >= GIC_SHARED_HWIRQ_BASE) {
		/* verify that shared irqs don't conflict with an IPI irq */
		if (test_bit(GIC_HWIRQ_TO_SHARED(hwirq), ipi_resrv))
			return -EBUSY;

		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
						    &gic_level_irq_controller,
						    NULL);
		if (err)
			return err;

		irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq)));
		return gic_shared_irq_domain_map(d, virq, hwirq, 0);
	}

	switch (GIC_HWIRQ_TO_LOCAL(hwirq)) {
	case GIC_LOCAL_INT_TIMER:
	case GIC_LOCAL_INT_PERFCTR:
	case GIC_LOCAL_INT_FDC:
		/*
		 * HACK: These are all really percpu interrupts, but
		 * the rest of the MIPS kernel code does not use the
		 * percpu IRQ API for them.
		 */
		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
						    &gic_all_vpes_local_irq_controller,
						    NULL);
		if (err)
			return err;

		irq_set_handler(virq, handle_percpu_irq);
		break;

	default:
		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
						    &gic_local_irq_controller,
						    NULL);
		if (err)
			return err;

		irq_set_handler(virq, handle_percpu_devid_irq);
		irq_set_percpu_devid(virq);
		break;
	}

	return gic_local_irq_domain_map(d, virq, hwirq);
}

static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	struct irq_fwspec *fwspec = arg;
	irq_hw_number_t hwirq;

	if (fwspec->param[0] == GIC_SHARED)
		hwirq = GIC_SHARED_TO_HWIRQ(fwspec->param[1]);
	else
		hwirq = GIC_LOCAL_TO_HWIRQ(fwspec->param[1]);

	return gic_irq_domain_map(d, virq, hwirq);
}

static void gic_irq_domain_free(struct irq_domain *d, unsigned int virq,
				unsigned int nr_irqs)
{
}

static const struct irq_domain_ops gic_irq_domain_ops = {
	.xlate = gic_irq_domain_xlate,
	.alloc = gic_irq_domain_alloc,
	.free = gic_irq_domain_free,
	.map = gic_irq_domain_map,
};

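/*
 * The IPI domain allocates a block of consecutive shared interrupts from
 * the reserved IPI range, one per CPU in the requested mask, and targets
 * each at its CPU so that gic_send_ipi() can pend it through the wedge
 * register.
 */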
static int gic_ipi_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
				const u32 *intspec, unsigned int intsize,
				irq_hw_number_t *out_hwirq,
				unsigned int *out_type)
{
	/*
	 * There's nothing to translate here. hwirq is dynamically allocated
	 * and the irq type is always edge triggered.
	 */
	*out_hwirq = 0;
	*out_type = IRQ_TYPE_EDGE_RISING;

	return 0;
}

static int gic_ipi_domain_alloc(struct irq_domain *d, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	struct cpumask *ipimask = arg;
	irq_hw_number_t hwirq, base_hwirq;
	int cpu, ret, i;

	base_hwirq = find_first_bit(ipi_available, gic_shared_intrs);
	if (base_hwirq == gic_shared_intrs)
		return -ENOMEM;

	/* check that we have enough space */
	for (i = base_hwirq; i < base_hwirq + nr_irqs; i++) {
		if (!test_bit(i, ipi_available))
			return -EBUSY;
	}
	bitmap_clear(ipi_available, base_hwirq, nr_irqs);

	/* map the hwirq for each cpu consecutively */
	i = 0;
	for_each_cpu(cpu, ipimask) {
		hwirq = GIC_SHARED_TO_HWIRQ(base_hwirq + i);

		/* Set the chip in both the IPI domain & its parent */
		ret = irq_domain_set_hwirq_and_chip(d, virq + i, hwirq,
						    &gic_edge_irq_controller,
						    NULL);
		if (ret)
			goto error;

		ret = irq_domain_set_hwirq_and_chip(d->parent, virq + i, hwirq,
						    &gic_edge_irq_controller,
						    NULL);
		if (ret)
			goto error;

		ret = irq_set_irq_type(virq + i, IRQ_TYPE_EDGE_RISING);
		if (ret)
			goto error;

		ret = gic_shared_irq_domain_map(d, virq + i, hwirq, cpu);
		if (ret)
			goto error;

		i++;
	}

	return 0;
error:
	bitmap_set(ipi_available, base_hwirq, nr_irqs);
	return ret;
}

static void gic_ipi_domain_free(struct irq_domain *d, unsigned int virq,
				unsigned int nr_irqs)
{
	irq_hw_number_t base_hwirq;
	struct irq_data *data;

	data = irq_get_irq_data(virq);
	if (!data)
		return;

	base_hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(data));
	bitmap_set(ipi_available, base_hwirq, nr_irqs);
}

static int gic_ipi_domain_match(struct irq_domain *d, struct device_node *node,
				enum irq_domain_bus_token bus_token)
{
	bool is_ipi;

	switch (bus_token) {
	case DOMAIN_BUS_IPI:
		is_ipi = d->bus_token == bus_token;
		return (!node || to_of_node(d->fwnode) == node) && is_ipi;
	default:
		return 0;
	}
}

static const struct irq_domain_ops gic_ipi_domain_ops = {
	.xlate = gic_ipi_domain_xlate,
	.alloc = gic_ipi_domain_alloc,
	.free = gic_ipi_domain_free,
	.match = gic_ipi_domain_match,
};

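/*
 * Probe & initialise the GIC: discover the register region (from the
 * device-tree or the CM GCRs), pick the CPU vector it will interrupt on,
 * then hand all shared & local interrupts over to the irq domains
 * registered below.
 */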
static int __init gic_of_init(struct device_node *node,
			      struct device_node *parent)
{
	unsigned int cpu_vec, i, j, gicconfig, cpu, v[2];
	unsigned long reserved;
	phys_addr_t gic_base;
	struct resource res;
	size_t gic_len;

	/* Find the first available CPU vector. */
	i = 0;
	reserved = (C_SW0 | C_SW1) >> __fls(C_SW0);
	while (!of_property_read_u32_index(node, "mti,reserved-cpu-vectors",
					   i++, &cpu_vec))
		reserved |= BIT(cpu_vec);

	cpu_vec = find_first_zero_bit(&reserved, hweight_long(ST0_IM));
	if (cpu_vec == hweight_long(ST0_IM)) {
		pr_err("No CPU vectors available for GIC\n");
		return -ENODEV;
	}

	if (of_address_to_resource(node, 0, &res)) {
		/*
		 * Probe the CM for the GIC base address if not specified
		 * in the device-tree.
		 */
		if (mips_cm_present()) {
			gic_base = read_gcr_gic_base() &
				~CM_GCR_GIC_BASE_GICEN;
			gic_len = 0x20000;
		} else {
			pr_err("Failed to get GIC memory range\n");
			return -ENODEV;
		}
	} else {
		gic_base = res.start;
		gic_len = resource_size(&res);
	}

	if (mips_cm_present()) {
		write_gcr_gic_base(gic_base | CM_GCR_GIC_BASE_GICEN);
		/* Ensure GIC region is enabled before trying to access it */
		__sync();
	}

	mips_gic_base = ioremap_nocache(gic_base, gic_len);

	gicconfig = read_gic_config();

	/* NUMINTERRUPTS encodes the shared interrupt count as (N / 8) - 1 */
	gic_shared_intrs = gicconfig & GIC_CONFIG_NUMINTERRUPTS;
	gic_shared_intrs >>= __fls(GIC_CONFIG_NUMINTERRUPTS);
	gic_shared_intrs = (gic_shared_intrs + 1) * 8;

	/* PVPS encodes the number of VP(E)s minus one */
	gic_vpes = gicconfig & GIC_CONFIG_PVPS;
	gic_vpes >>= __fls(GIC_CONFIG_PVPS);
	gic_vpes = gic_vpes + 1;

	if (cpu_has_veic) {
		/* Set EIC mode for all VPEs */
		for_each_present_cpu(cpu) {
			write_gic_vl_other(mips_cm_vp_id(cpu));
			write_gic_vo_ctl(GIC_VX_CTL_EIC);
		}

		/* Always use vector 1 in EIC mode */
		gic_cpu_pin = 0;
		timer_cpu_pin = gic_cpu_pin;
		set_vi_handler(gic_cpu_pin + GIC_PIN_TO_VEC_OFFSET,
			       __gic_irq_dispatch);
	} else {
		gic_cpu_pin = cpu_vec - GIC_CPU_PIN_OFFSET;
		irq_set_chained_handler(MIPS_CPU_IRQ_BASE + cpu_vec,
					gic_irq_dispatch);
		/*
		 * With the CMP implementation of SMP (deprecated), other CPUs
		 * are started by the bootloader and put into a timer based
		 * waiting poll loop. We must not re-route those CPUs' local
		 * timer interrupts as the wait instruction will never finish,
		 * so just handle whatever CPU interrupt it is routed to by
		 * default.
		 *
		 * This workaround should be removed when CMP support is
		 * dropped.
		 */
		if (IS_ENABLED(CONFIG_MIPS_CMP) &&
		    gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER)) {
			timer_cpu_pin = read_gic_vl_timer_map() & GIC_MAP_PIN_MAP;
			irq_set_chained_handler(MIPS_CPU_IRQ_BASE +
						GIC_CPU_PIN_OFFSET +
						timer_cpu_pin,
						gic_irq_dispatch);
		} else {
			timer_cpu_pin = gic_cpu_pin;
		}
	}

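	/*
	 * Register one irq domain covering both local & shared hwirq
	 * numbers, plus a stacked IPI domain on top of it from which
	 * per-CPU IPIs are allocated.
	 */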
	gic_irq_domain = irq_domain_add_simple(node, GIC_NUM_LOCAL_INTRS +
					       gic_shared_intrs, 0,
					       &gic_irq_domain_ops, NULL);
	if (!gic_irq_domain) {
		pr_err("Failed to add GIC IRQ domain\n");
		return -ENXIO;
	}

	gic_ipi_domain = irq_domain_add_hierarchy(gic_irq_domain,
						  IRQ_DOMAIN_FLAG_IPI_PER_CPU,
						  GIC_NUM_LOCAL_INTRS + gic_shared_intrs,
						  node, &gic_ipi_domain_ops, NULL);
	if (!gic_ipi_domain) {
		pr_err("Failed to add GIC IPI domain\n");
		return -ENXIO;
	}

	irq_domain_update_bus_token(gic_ipi_domain, DOMAIN_BUS_IPI);

	if (node &&
	    !of_property_read_u32_array(node, "mti,reserved-ipi-vectors", v, 2)) {
		bitmap_set(ipi_resrv, v[0], v[1]);
	} else {
		/* Make the last 2 * gic_vpes available for IPIs */
		bitmap_set(ipi_resrv,
			   gic_shared_intrs - 2 * gic_vpes,
			   2 * gic_vpes);
	}

	bitmap_copy(ipi_available, ipi_resrv, GIC_MAX_INTRS);

	board_bind_eic_interrupt = &gic_bind_eic_interrupt;

	/* Setup defaults */
	for (i = 0; i < gic_shared_intrs; i++) {
		change_gic_pol(i, GIC_POL_ACTIVE_HIGH);
		change_gic_trig(i, GIC_TRIG_LEVEL);
		write_gic_rmask(BIT(i));
	}

	for (i = 0; i < gic_vpes; i++) {
		write_gic_vl_other(mips_cm_vp_id(i));
		for (j = 0; j < GIC_NUM_LOCAL_INTRS; j++) {
			if (!gic_local_irq_is_routable(j))
				continue;
			write_gic_vo_rmask(BIT(j));
		}
	}

	return 0;
}
IRQCHIP_DECLARE(mips_gic, "mti,gic", gic_of_init);