1 /* 2 * This file is subject to the terms and conditions of the GNU General Public 3 * License. See the file "COPYING" in the main directory of this archive 4 * for more details. 5 * 6 * Copyright (C) 2008 Ralf Baechle (ralf@linux-mips.org) 7 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. 8 */ 9 #include <linux/bitmap.h> 10 #include <linux/clocksource.h> 11 #include <linux/init.h> 12 #include <linux/interrupt.h> 13 #include <linux/irq.h> 14 #include <linux/irqchip.h> 15 #include <linux/of_address.h> 16 #include <linux/percpu.h> 17 #include <linux/sched.h> 18 #include <linux/smp.h> 19 20 #include <asm/mips-cps.h> 21 #include <asm/setup.h> 22 #include <asm/traps.h> 23 24 #include <dt-bindings/interrupt-controller/mips-gic.h> 25 26 #define GIC_MAX_INTRS 256 27 #define GIC_MAX_LONGS BITS_TO_LONGS(GIC_MAX_INTRS) 28 29 /* Add 2 to convert GIC CPU pin to core interrupt */ 30 #define GIC_CPU_PIN_OFFSET 2 31 32 /* Mapped interrupt to pin X, then GIC will generate the vector (X+1). */ 33 #define GIC_PIN_TO_VEC_OFFSET 1 34 35 /* Convert between local/shared IRQ number and GIC HW IRQ number. */ 36 #define GIC_LOCAL_HWIRQ_BASE 0 37 #define GIC_LOCAL_TO_HWIRQ(x) (GIC_LOCAL_HWIRQ_BASE + (x)) 38 #define GIC_HWIRQ_TO_LOCAL(x) ((x) - GIC_LOCAL_HWIRQ_BASE) 39 #define GIC_SHARED_HWIRQ_BASE GIC_NUM_LOCAL_INTRS 40 #define GIC_SHARED_TO_HWIRQ(x) (GIC_SHARED_HWIRQ_BASE + (x)) 41 #define GIC_HWIRQ_TO_SHARED(x) ((x) - GIC_SHARED_HWIRQ_BASE) 42 43 void __iomem *mips_gic_base; 44 45 DEFINE_PER_CPU_READ_MOSTLY(unsigned long[GIC_MAX_LONGS], pcpu_masks); 46 47 static DEFINE_SPINLOCK(gic_lock); 48 static struct irq_domain *gic_irq_domain; 49 static struct irq_domain *gic_ipi_domain; 50 static int gic_shared_intrs; 51 static int gic_vpes; 52 static unsigned int gic_cpu_pin; 53 static unsigned int timer_cpu_pin; 54 static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller; 55 DECLARE_BITMAP(ipi_resrv, GIC_MAX_INTRS); 56 DECLARE_BITMAP(ipi_available, GIC_MAX_INTRS); 57 58 static void gic_clear_pcpu_masks(unsigned int intr) 59 { 60 unsigned int i; 61 62 /* Clear the interrupt's bit in all pcpu_masks */ 63 for_each_possible_cpu(i) 64 clear_bit(intr, per_cpu_ptr(pcpu_masks, i)); 65 } 66 67 static bool gic_local_irq_is_routable(int intr) 68 { 69 u32 vpe_ctl; 70 71 /* All local interrupts are routable in EIC mode. */ 72 if (cpu_has_veic) 73 return true; 74 75 vpe_ctl = read_gic_vl_ctl(); 76 switch (intr) { 77 case GIC_LOCAL_INT_TIMER: 78 return vpe_ctl & GIC_VX_CTL_TIMER_ROUTABLE; 79 case GIC_LOCAL_INT_PERFCTR: 80 return vpe_ctl & GIC_VX_CTL_PERFCNT_ROUTABLE; 81 case GIC_LOCAL_INT_FDC: 82 return vpe_ctl & GIC_VX_CTL_FDC_ROUTABLE; 83 case GIC_LOCAL_INT_SWINT0: 84 case GIC_LOCAL_INT_SWINT1: 85 return vpe_ctl & GIC_VX_CTL_SWINT_ROUTABLE; 86 default: 87 return true; 88 } 89 } 90 91 static void gic_bind_eic_interrupt(int irq, int set) 92 { 93 /* Convert irq vector # to hw int # */ 94 irq -= GIC_PIN_TO_VEC_OFFSET; 95 96 /* Set irq to use shadow set */ 97 write_gic_vl_eic_shadow_set(irq, set); 98 } 99 100 static void gic_send_ipi(struct irq_data *d, unsigned int cpu) 101 { 102 irq_hw_number_t hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(d)); 103 104 write_gic_wedge(GIC_WEDGE_RW | hwirq); 105 } 106 107 int gic_get_c0_compare_int(void) 108 { 109 if (!gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER)) 110 return MIPS_CPU_IRQ_BASE + cp0_compare_irq; 111 return irq_create_mapping(gic_irq_domain, 112 GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_TIMER)); 113 } 114 115 int gic_get_c0_perfcount_int(void) 116 { 117 if (!gic_local_irq_is_routable(GIC_LOCAL_INT_PERFCTR)) { 118 /* Is the performance counter shared with the timer? */ 119 if (cp0_perfcount_irq < 0) 120 return -1; 121 return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq; 122 } 123 return irq_create_mapping(gic_irq_domain, 124 GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_PERFCTR)); 125 } 126 127 int gic_get_c0_fdc_int(void) 128 { 129 if (!gic_local_irq_is_routable(GIC_LOCAL_INT_FDC)) { 130 /* Is the FDC IRQ even present? */ 131 if (cp0_fdc_irq < 0) 132 return -1; 133 return MIPS_CPU_IRQ_BASE + cp0_fdc_irq; 134 } 135 136 return irq_create_mapping(gic_irq_domain, 137 GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_FDC)); 138 } 139 140 static void gic_handle_shared_int(bool chained) 141 { 142 unsigned int intr, virq; 143 unsigned long *pcpu_mask; 144 DECLARE_BITMAP(pending, GIC_MAX_INTRS); 145 146 /* Get per-cpu bitmaps */ 147 pcpu_mask = this_cpu_ptr(pcpu_masks); 148 149 if (mips_cm_is64) 150 __ioread64_copy(pending, addr_gic_pend(), 151 DIV_ROUND_UP(gic_shared_intrs, 64)); 152 else 153 __ioread32_copy(pending, addr_gic_pend(), 154 DIV_ROUND_UP(gic_shared_intrs, 32)); 155 156 bitmap_and(pending, pending, pcpu_mask, gic_shared_intrs); 157 158 for_each_set_bit(intr, pending, gic_shared_intrs) { 159 virq = irq_linear_revmap(gic_irq_domain, 160 GIC_SHARED_TO_HWIRQ(intr)); 161 if (chained) 162 generic_handle_irq(virq); 163 else 164 do_IRQ(virq); 165 } 166 } 167 168 static void gic_mask_irq(struct irq_data *d) 169 { 170 unsigned int intr = GIC_HWIRQ_TO_SHARED(d->hwirq); 171 172 write_gic_rmask(BIT(intr)); 173 gic_clear_pcpu_masks(intr); 174 } 175 176 static void gic_unmask_irq(struct irq_data *d) 177 { 178 struct cpumask *affinity = irq_data_get_affinity_mask(d); 179 unsigned int intr = GIC_HWIRQ_TO_SHARED(d->hwirq); 180 unsigned int cpu; 181 182 write_gic_smask(BIT(intr)); 183 184 gic_clear_pcpu_masks(intr); 185 cpu = cpumask_first_and(affinity, cpu_online_mask); 186 set_bit(intr, per_cpu_ptr(pcpu_masks, cpu)); 187 } 188 189 static void gic_ack_irq(struct irq_data *d) 190 { 191 unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq); 192 193 write_gic_wedge(irq); 194 } 195 196 static int gic_set_type(struct irq_data *d, unsigned int type) 197 { 198 unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq); 199 unsigned long flags; 200 bool is_edge; 201 202 spin_lock_irqsave(&gic_lock, flags); 203 switch (type & IRQ_TYPE_SENSE_MASK) { 204 case IRQ_TYPE_EDGE_FALLING: 205 change_gic_pol(irq, GIC_POL_FALLING_EDGE); 206 change_gic_trig(irq, GIC_TRIG_EDGE); 207 change_gic_dual(irq, GIC_DUAL_SINGLE); 208 is_edge = true; 209 break; 210 case IRQ_TYPE_EDGE_RISING: 211 change_gic_pol(irq, GIC_POL_RISING_EDGE); 212 change_gic_trig(irq, GIC_TRIG_EDGE); 213 change_gic_dual(irq, GIC_DUAL_SINGLE); 214 is_edge = true; 215 break; 216 case IRQ_TYPE_EDGE_BOTH: 217 /* polarity is irrelevant in this case */ 218 change_gic_trig(irq, GIC_TRIG_EDGE); 219 change_gic_dual(irq, GIC_DUAL_DUAL); 220 is_edge = true; 221 break; 222 case IRQ_TYPE_LEVEL_LOW: 223 change_gic_pol(irq, GIC_POL_ACTIVE_LOW); 224 change_gic_trig(irq, GIC_TRIG_LEVEL); 225 change_gic_dual(irq, GIC_DUAL_SINGLE); 226 is_edge = false; 227 break; 228 case IRQ_TYPE_LEVEL_HIGH: 229 default: 230 change_gic_pol(irq, GIC_POL_ACTIVE_HIGH); 231 change_gic_trig(irq, GIC_TRIG_LEVEL); 232 change_gic_dual(irq, GIC_DUAL_SINGLE); 233 is_edge = false; 234 break; 235 } 236 237 if (is_edge) 238 irq_set_chip_handler_name_locked(d, &gic_edge_irq_controller, 239 handle_edge_irq, NULL); 240 else 241 irq_set_chip_handler_name_locked(d, &gic_level_irq_controller, 242 handle_level_irq, NULL); 243 spin_unlock_irqrestore(&gic_lock, flags); 244 245 return 0; 246 } 247 248 #ifdef CONFIG_SMP 249 static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask, 250 bool force) 251 { 252 unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq); 253 unsigned long flags; 254 unsigned int cpu; 255 256 cpu = cpumask_first_and(cpumask, cpu_online_mask); 257 if (cpu >= NR_CPUS) 258 return -EINVAL; 259 260 /* Assumption : cpumask refers to a single CPU */ 261 spin_lock_irqsave(&gic_lock, flags); 262 263 /* Re-route this IRQ */ 264 write_gic_map_vp(irq, BIT(mips_cm_vp_id(cpu))); 265 266 /* Update the pcpu_masks */ 267 gic_clear_pcpu_masks(irq); 268 if (read_gic_mask(irq)) 269 set_bit(irq, per_cpu_ptr(pcpu_masks, cpu)); 270 271 spin_unlock_irqrestore(&gic_lock, flags); 272 273 return IRQ_SET_MASK_OK; 274 } 275 #endif 276 277 static struct irq_chip gic_level_irq_controller = { 278 .name = "MIPS GIC", 279 .irq_mask = gic_mask_irq, 280 .irq_unmask = gic_unmask_irq, 281 .irq_set_type = gic_set_type, 282 #ifdef CONFIG_SMP 283 .irq_set_affinity = gic_set_affinity, 284 #endif 285 }; 286 287 static struct irq_chip gic_edge_irq_controller = { 288 .name = "MIPS GIC", 289 .irq_ack = gic_ack_irq, 290 .irq_mask = gic_mask_irq, 291 .irq_unmask = gic_unmask_irq, 292 .irq_set_type = gic_set_type, 293 #ifdef CONFIG_SMP 294 .irq_set_affinity = gic_set_affinity, 295 #endif 296 .ipi_send_single = gic_send_ipi, 297 }; 298 299 static void gic_handle_local_int(bool chained) 300 { 301 unsigned long pending, masked; 302 unsigned int intr, virq; 303 304 pending = read_gic_vl_pend(); 305 masked = read_gic_vl_mask(); 306 307 bitmap_and(&pending, &pending, &masked, GIC_NUM_LOCAL_INTRS); 308 309 for_each_set_bit(intr, &pending, GIC_NUM_LOCAL_INTRS) { 310 virq = irq_linear_revmap(gic_irq_domain, 311 GIC_LOCAL_TO_HWIRQ(intr)); 312 if (chained) 313 generic_handle_irq(virq); 314 else 315 do_IRQ(virq); 316 } 317 } 318 319 static void gic_mask_local_irq(struct irq_data *d) 320 { 321 int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq); 322 323 write_gic_vl_rmask(BIT(intr)); 324 } 325 326 static void gic_unmask_local_irq(struct irq_data *d) 327 { 328 int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq); 329 330 write_gic_vl_smask(BIT(intr)); 331 } 332 333 static struct irq_chip gic_local_irq_controller = { 334 .name = "MIPS GIC Local", 335 .irq_mask = gic_mask_local_irq, 336 .irq_unmask = gic_unmask_local_irq, 337 }; 338 339 static void gic_mask_local_irq_all_vpes(struct irq_data *d) 340 { 341 int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq); 342 int i; 343 unsigned long flags; 344 345 spin_lock_irqsave(&gic_lock, flags); 346 for (i = 0; i < gic_vpes; i++) { 347 write_gic_vl_other(mips_cm_vp_id(i)); 348 write_gic_vo_rmask(BIT(intr)); 349 } 350 spin_unlock_irqrestore(&gic_lock, flags); 351 } 352 353 static void gic_unmask_local_irq_all_vpes(struct irq_data *d) 354 { 355 int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq); 356 int i; 357 unsigned long flags; 358 359 spin_lock_irqsave(&gic_lock, flags); 360 for (i = 0; i < gic_vpes; i++) { 361 write_gic_vl_other(mips_cm_vp_id(i)); 362 write_gic_vo_smask(BIT(intr)); 363 } 364 spin_unlock_irqrestore(&gic_lock, flags); 365 } 366 367 static struct irq_chip gic_all_vpes_local_irq_controller = { 368 .name = "MIPS GIC Local", 369 .irq_mask = gic_mask_local_irq_all_vpes, 370 .irq_unmask = gic_unmask_local_irq_all_vpes, 371 }; 372 373 static void __gic_irq_dispatch(void) 374 { 375 gic_handle_local_int(false); 376 gic_handle_shared_int(false); 377 } 378 379 static void gic_irq_dispatch(struct irq_desc *desc) 380 { 381 gic_handle_local_int(true); 382 gic_handle_shared_int(true); 383 } 384 385 static int gic_local_irq_domain_map(struct irq_domain *d, unsigned int virq, 386 irq_hw_number_t hw) 387 { 388 int intr = GIC_HWIRQ_TO_LOCAL(hw); 389 int i; 390 unsigned long flags; 391 u32 val; 392 393 if (!gic_local_irq_is_routable(intr)) 394 return -EPERM; 395 396 if (intr > GIC_LOCAL_INT_FDC) { 397 pr_err("Invalid local IRQ %d\n", intr); 398 return -EINVAL; 399 } 400 401 if (intr == GIC_LOCAL_INT_TIMER) { 402 /* CONFIG_MIPS_CMP workaround (see __gic_init) */ 403 val = GIC_MAP_PIN_MAP_TO_PIN | timer_cpu_pin; 404 } else { 405 val = GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin; 406 } 407 408 spin_lock_irqsave(&gic_lock, flags); 409 for (i = 0; i < gic_vpes; i++) { 410 write_gic_vl_other(mips_cm_vp_id(i)); 411 write_gic_vo_map(intr, val); 412 } 413 spin_unlock_irqrestore(&gic_lock, flags); 414 415 return 0; 416 } 417 418 static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq, 419 irq_hw_number_t hw, unsigned int cpu) 420 { 421 int intr = GIC_HWIRQ_TO_SHARED(hw); 422 unsigned long flags; 423 424 spin_lock_irqsave(&gic_lock, flags); 425 write_gic_map_pin(intr, GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin); 426 write_gic_map_vp(intr, BIT(mips_cm_vp_id(cpu))); 427 gic_clear_pcpu_masks(intr); 428 set_bit(intr, per_cpu_ptr(pcpu_masks, cpu)); 429 spin_unlock_irqrestore(&gic_lock, flags); 430 431 return 0; 432 } 433 434 static int gic_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr, 435 const u32 *intspec, unsigned int intsize, 436 irq_hw_number_t *out_hwirq, 437 unsigned int *out_type) 438 { 439 if (intsize != 3) 440 return -EINVAL; 441 442 if (intspec[0] == GIC_SHARED) 443 *out_hwirq = GIC_SHARED_TO_HWIRQ(intspec[1]); 444 else if (intspec[0] == GIC_LOCAL) 445 *out_hwirq = GIC_LOCAL_TO_HWIRQ(intspec[1]); 446 else 447 return -EINVAL; 448 *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK; 449 450 return 0; 451 } 452 453 static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq, 454 irq_hw_number_t hwirq) 455 { 456 int err; 457 458 if (hwirq >= GIC_SHARED_HWIRQ_BASE) { 459 /* verify that shared irqs don't conflict with an IPI irq */ 460 if (test_bit(GIC_HWIRQ_TO_SHARED(hwirq), ipi_resrv)) 461 return -EBUSY; 462 463 err = irq_domain_set_hwirq_and_chip(d, virq, hwirq, 464 &gic_level_irq_controller, 465 NULL); 466 if (err) 467 return err; 468 469 return gic_shared_irq_domain_map(d, virq, hwirq, 0); 470 } 471 472 switch (GIC_HWIRQ_TO_LOCAL(hwirq)) { 473 case GIC_LOCAL_INT_TIMER: 474 case GIC_LOCAL_INT_PERFCTR: 475 case GIC_LOCAL_INT_FDC: 476 /* 477 * HACK: These are all really percpu interrupts, but 478 * the rest of the MIPS kernel code does not use the 479 * percpu IRQ API for them. 480 */ 481 err = irq_domain_set_hwirq_and_chip(d, virq, hwirq, 482 &gic_all_vpes_local_irq_controller, 483 NULL); 484 if (err) 485 return err; 486 487 irq_set_handler(virq, handle_percpu_irq); 488 break; 489 490 default: 491 err = irq_domain_set_hwirq_and_chip(d, virq, hwirq, 492 &gic_local_irq_controller, 493 NULL); 494 if (err) 495 return err; 496 497 irq_set_handler(virq, handle_percpu_devid_irq); 498 irq_set_percpu_devid(virq); 499 break; 500 } 501 502 return gic_local_irq_domain_map(d, virq, hwirq); 503 } 504 505 static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq, 506 unsigned int nr_irqs, void *arg) 507 { 508 struct irq_fwspec *fwspec = arg; 509 irq_hw_number_t hwirq; 510 511 if (fwspec->param[0] == GIC_SHARED) 512 hwirq = GIC_SHARED_TO_HWIRQ(fwspec->param[1]); 513 else 514 hwirq = GIC_LOCAL_TO_HWIRQ(fwspec->param[1]); 515 516 return gic_irq_domain_map(d, virq, hwirq); 517 } 518 519 void gic_irq_domain_free(struct irq_domain *d, unsigned int virq, 520 unsigned int nr_irqs) 521 { 522 } 523 524 static const struct irq_domain_ops gic_irq_domain_ops = { 525 .xlate = gic_irq_domain_xlate, 526 .alloc = gic_irq_domain_alloc, 527 .free = gic_irq_domain_free, 528 .map = gic_irq_domain_map, 529 }; 530 531 static int gic_ipi_domain_xlate(struct irq_domain *d, struct device_node *ctrlr, 532 const u32 *intspec, unsigned int intsize, 533 irq_hw_number_t *out_hwirq, 534 unsigned int *out_type) 535 { 536 /* 537 * There's nothing to translate here. hwirq is dynamically allocated and 538 * the irq type is always edge triggered. 539 * */ 540 *out_hwirq = 0; 541 *out_type = IRQ_TYPE_EDGE_RISING; 542 543 return 0; 544 } 545 546 static int gic_ipi_domain_alloc(struct irq_domain *d, unsigned int virq, 547 unsigned int nr_irqs, void *arg) 548 { 549 struct cpumask *ipimask = arg; 550 irq_hw_number_t hwirq, base_hwirq; 551 int cpu, ret, i; 552 553 base_hwirq = find_first_bit(ipi_available, gic_shared_intrs); 554 if (base_hwirq == gic_shared_intrs) 555 return -ENOMEM; 556 557 /* check that we have enough space */ 558 for (i = base_hwirq; i < nr_irqs; i++) { 559 if (!test_bit(i, ipi_available)) 560 return -EBUSY; 561 } 562 bitmap_clear(ipi_available, base_hwirq, nr_irqs); 563 564 /* map the hwirq for each cpu consecutively */ 565 i = 0; 566 for_each_cpu(cpu, ipimask) { 567 hwirq = GIC_SHARED_TO_HWIRQ(base_hwirq + i); 568 569 ret = irq_domain_set_hwirq_and_chip(d, virq + i, hwirq, 570 &gic_edge_irq_controller, 571 NULL); 572 if (ret) 573 goto error; 574 575 ret = irq_domain_set_hwirq_and_chip(d->parent, virq + i, hwirq, 576 &gic_edge_irq_controller, 577 NULL); 578 if (ret) 579 goto error; 580 581 ret = irq_set_irq_type(virq + i, IRQ_TYPE_EDGE_RISING); 582 if (ret) 583 goto error; 584 585 ret = gic_shared_irq_domain_map(d, virq + i, hwirq, cpu); 586 if (ret) 587 goto error; 588 589 i++; 590 } 591 592 return 0; 593 error: 594 bitmap_set(ipi_available, base_hwirq, nr_irqs); 595 return ret; 596 } 597 598 void gic_ipi_domain_free(struct irq_domain *d, unsigned int virq, 599 unsigned int nr_irqs) 600 { 601 irq_hw_number_t base_hwirq; 602 struct irq_data *data; 603 604 data = irq_get_irq_data(virq); 605 if (!data) 606 return; 607 608 base_hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(data)); 609 bitmap_set(ipi_available, base_hwirq, nr_irqs); 610 } 611 612 int gic_ipi_domain_match(struct irq_domain *d, struct device_node *node, 613 enum irq_domain_bus_token bus_token) 614 { 615 bool is_ipi; 616 617 switch (bus_token) { 618 case DOMAIN_BUS_IPI: 619 is_ipi = d->bus_token == bus_token; 620 return (!node || to_of_node(d->fwnode) == node) && is_ipi; 621 break; 622 default: 623 return 0; 624 } 625 } 626 627 static const struct irq_domain_ops gic_ipi_domain_ops = { 628 .xlate = gic_ipi_domain_xlate, 629 .alloc = gic_ipi_domain_alloc, 630 .free = gic_ipi_domain_free, 631 .match = gic_ipi_domain_match, 632 }; 633 634 635 static int __init gic_of_init(struct device_node *node, 636 struct device_node *parent) 637 { 638 unsigned int cpu_vec, i, j, gicconfig, cpu, v[2]; 639 unsigned long reserved; 640 phys_addr_t gic_base; 641 struct resource res; 642 size_t gic_len; 643 644 /* Find the first available CPU vector. */ 645 i = 0; 646 reserved = (C_SW0 | C_SW1) >> __fls(C_SW0); 647 while (!of_property_read_u32_index(node, "mti,reserved-cpu-vectors", 648 i++, &cpu_vec)) 649 reserved |= BIT(cpu_vec); 650 651 cpu_vec = find_first_zero_bit(&reserved, hweight_long(ST0_IM)); 652 if (cpu_vec == hweight_long(ST0_IM)) { 653 pr_err("No CPU vectors available for GIC\n"); 654 return -ENODEV; 655 } 656 657 if (of_address_to_resource(node, 0, &res)) { 658 /* 659 * Probe the CM for the GIC base address if not specified 660 * in the device-tree. 661 */ 662 if (mips_cm_present()) { 663 gic_base = read_gcr_gic_base() & 664 ~CM_GCR_GIC_BASE_GICEN; 665 gic_len = 0x20000; 666 } else { 667 pr_err("Failed to get GIC memory range\n"); 668 return -ENODEV; 669 } 670 } else { 671 gic_base = res.start; 672 gic_len = resource_size(&res); 673 } 674 675 if (mips_cm_present()) { 676 write_gcr_gic_base(gic_base | CM_GCR_GIC_BASE_GICEN); 677 /* Ensure GIC region is enabled before trying to access it */ 678 __sync(); 679 } 680 681 mips_gic_base = ioremap_nocache(gic_base, gic_len); 682 683 gicconfig = read_gic_config(); 684 gic_shared_intrs = gicconfig & GIC_CONFIG_NUMINTERRUPTS; 685 gic_shared_intrs >>= __fls(GIC_CONFIG_NUMINTERRUPTS); 686 gic_shared_intrs = (gic_shared_intrs + 1) * 8; 687 688 gic_vpes = gicconfig & GIC_CONFIG_PVPS; 689 gic_vpes >>= __fls(GIC_CONFIG_PVPS); 690 gic_vpes = gic_vpes + 1; 691 692 if (cpu_has_veic) { 693 /* Set EIC mode for all VPEs */ 694 for_each_present_cpu(cpu) { 695 write_gic_vl_other(mips_cm_vp_id(cpu)); 696 write_gic_vo_ctl(GIC_VX_CTL_EIC); 697 } 698 699 /* Always use vector 1 in EIC mode */ 700 gic_cpu_pin = 0; 701 timer_cpu_pin = gic_cpu_pin; 702 set_vi_handler(gic_cpu_pin + GIC_PIN_TO_VEC_OFFSET, 703 __gic_irq_dispatch); 704 } else { 705 gic_cpu_pin = cpu_vec - GIC_CPU_PIN_OFFSET; 706 irq_set_chained_handler(MIPS_CPU_IRQ_BASE + cpu_vec, 707 gic_irq_dispatch); 708 /* 709 * With the CMP implementation of SMP (deprecated), other CPUs 710 * are started by the bootloader and put into a timer based 711 * waiting poll loop. We must not re-route those CPU's local 712 * timer interrupts as the wait instruction will never finish, 713 * so just handle whatever CPU interrupt it is routed to by 714 * default. 715 * 716 * This workaround should be removed when CMP support is 717 * dropped. 718 */ 719 if (IS_ENABLED(CONFIG_MIPS_CMP) && 720 gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER)) { 721 timer_cpu_pin = read_gic_vl_timer_map() & GIC_MAP_PIN_MAP; 722 irq_set_chained_handler(MIPS_CPU_IRQ_BASE + 723 GIC_CPU_PIN_OFFSET + 724 timer_cpu_pin, 725 gic_irq_dispatch); 726 } else { 727 timer_cpu_pin = gic_cpu_pin; 728 } 729 } 730 731 gic_irq_domain = irq_domain_add_simple(node, GIC_NUM_LOCAL_INTRS + 732 gic_shared_intrs, 0, 733 &gic_irq_domain_ops, NULL); 734 if (!gic_irq_domain) { 735 pr_err("Failed to add GIC IRQ domain"); 736 return -ENXIO; 737 } 738 739 gic_ipi_domain = irq_domain_add_hierarchy(gic_irq_domain, 740 IRQ_DOMAIN_FLAG_IPI_PER_CPU, 741 GIC_NUM_LOCAL_INTRS + gic_shared_intrs, 742 node, &gic_ipi_domain_ops, NULL); 743 if (!gic_ipi_domain) { 744 pr_err("Failed to add GIC IPI domain"); 745 return -ENXIO; 746 } 747 748 irq_domain_update_bus_token(gic_ipi_domain, DOMAIN_BUS_IPI); 749 750 if (node && 751 !of_property_read_u32_array(node, "mti,reserved-ipi-vectors", v, 2)) { 752 bitmap_set(ipi_resrv, v[0], v[1]); 753 } else { 754 /* Make the last 2 * gic_vpes available for IPIs */ 755 bitmap_set(ipi_resrv, 756 gic_shared_intrs - 2 * gic_vpes, 757 2 * gic_vpes); 758 } 759 760 bitmap_copy(ipi_available, ipi_resrv, GIC_MAX_INTRS); 761 762 board_bind_eic_interrupt = &gic_bind_eic_interrupt; 763 764 /* Setup defaults */ 765 for (i = 0; i < gic_shared_intrs; i++) { 766 change_gic_pol(i, GIC_POL_ACTIVE_HIGH); 767 change_gic_trig(i, GIC_TRIG_LEVEL); 768 write_gic_rmask(BIT(i)); 769 } 770 771 for (i = 0; i < gic_vpes; i++) { 772 write_gic_vl_other(mips_cm_vp_id(i)); 773 for (j = 0; j < GIC_NUM_LOCAL_INTRS; j++) { 774 if (!gic_local_irq_is_routable(j)) 775 continue; 776 write_gic_vo_rmask(BIT(j)); 777 } 778 } 779 780 return 0; 781 } 782 IRQCHIP_DECLARE(mips_gic, "mti,gic", gic_of_init); 783