/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 */
#include <linux/bitmap.h>
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip/mips-gic.h>
#include <linux/of_address.h>
#include <linux/sched.h>
#include <linux/smp.h>

#include <asm/mips-cm.h>
#include <asm/setup.h>
#include <asm/traps.h>

#include <dt-bindings/interrupt-controller/mips-gic.h>

#include "irqchip.h"

unsigned int gic_present;

struct gic_pcpu_mask {
        DECLARE_BITMAP(pcpu_mask, GIC_MAX_INTRS);
};

static void __iomem *gic_base;
static struct gic_pcpu_mask pcpu_masks[NR_CPUS];
static DEFINE_SPINLOCK(gic_lock);
static struct irq_domain *gic_irq_domain;
static int gic_shared_intrs;
static int gic_vpes;
static unsigned int gic_cpu_pin;
static unsigned int timer_cpu_pin;
static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller;

static void __gic_irq_dispatch(void);

static inline unsigned int gic_read(unsigned int reg)
{
        return __raw_readl(gic_base + reg);
}

static inline void gic_write(unsigned int reg, unsigned int val)
{
        __raw_writel(val, gic_base + reg);
}

static inline void gic_update_bits(unsigned int reg, unsigned int mask,
                                   unsigned int val)
{
        unsigned int regval;

        regval = gic_read(reg);
        regval &= ~mask;
        regval |= val;
        gic_write(reg, regval);
}

static inline void gic_reset_mask(unsigned int intr)
{
        gic_write(GIC_REG(SHARED, GIC_SH_RMASK) + GIC_INTR_OFS(intr),
                  1 << GIC_INTR_BIT(intr));
}

static inline void gic_set_mask(unsigned int intr)
{
        gic_write(GIC_REG(SHARED, GIC_SH_SMASK) + GIC_INTR_OFS(intr),
                  1 << GIC_INTR_BIT(intr));
}

static inline void gic_set_polarity(unsigned int intr, unsigned int pol)
{
        gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_POLARITY) +
                        GIC_INTR_OFS(intr), 1 << GIC_INTR_BIT(intr),
                        pol << GIC_INTR_BIT(intr));
}

static inline void gic_set_trigger(unsigned int intr, unsigned int trig)
{
        gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_TRIGGER) +
                        GIC_INTR_OFS(intr), 1 << GIC_INTR_BIT(intr),
                        trig << GIC_INTR_BIT(intr));
}

static inline void gic_set_dual_edge(unsigned int intr, unsigned int dual)
{
        gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_DUAL) + GIC_INTR_OFS(intr),
                        1 << GIC_INTR_BIT(intr),
                        dual << GIC_INTR_BIT(intr));
}

static inline void gic_map_to_pin(unsigned int intr, unsigned int pin)
{
        gic_write(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_PIN_BASE) +
                  GIC_SH_MAP_TO_PIN(intr), GIC_MAP_TO_PIN_MSK | pin);
}

static inline void gic_map_to_vpe(unsigned int intr, unsigned int vpe)
{
        gic_write(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_VPE_BASE) +
                  GIC_SH_MAP_TO_VPE_REG_OFF(intr, vpe),
                  GIC_SH_MAP_TO_VPE_REG_BIT(vpe));
}

#ifdef CONFIG_CLKSRC_MIPS_GIC
cycle_t gic_read_count(void)
{
        unsigned int hi, hi2, lo;

        do {
                hi = gic_read(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
                lo = gic_read(GIC_REG(SHARED, GIC_SH_COUNTER_31_00));
                hi2 = gic_read(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
        } while (hi2 != hi);

        return (((cycle_t) hi) << 32) + lo;
}
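
/*
 * Note: gic_read_count() above assembles a 64-bit value from two 32-bit
 * registers, so it re-reads the high word until it is stable in order to
 * avoid a torn read when the low word wraps between accesses. The idiom,
 * in the abstract (read_hi()/read_lo() are hypothetical stand-ins for the
 * GIC counter registers):
 *
 *        do {
 *                hi = read_hi();
 *                lo = read_lo();
 *                hi2 = read_hi();
 *        } while (hi2 != hi);
 *        value = ((u64)hi << 32) | lo;
 */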

unsigned int gic_get_count_width(void)
{
        unsigned int bits, config;

        config = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
        bits = 32 + 4 * ((config & GIC_SH_CONFIG_COUNTBITS_MSK) >>
                         GIC_SH_CONFIG_COUNTBITS_SHF);

        return bits;
}

void gic_write_compare(cycle_t cnt)
{
        gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI),
                  (int)(cnt >> 32));
        gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO),
                  (int)(cnt & 0xffffffff));
}

void gic_write_cpu_compare(cycle_t cnt, int cpu)
{
        unsigned long flags;

        local_irq_save(flags);

        gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), cpu);
        gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_HI),
                  (int)(cnt >> 32));
        gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_LO),
                  (int)(cnt & 0xffffffff));

        local_irq_restore(flags);
}

cycle_t gic_read_compare(void)
{
        unsigned int hi, lo;

        hi = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI));
        lo = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO));

        return (((cycle_t) hi) << 32) + lo;
}

void gic_start_count(void)
{
        u32 gicconfig;

        /* Start the counter */
        gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
        gicconfig &= ~(1 << GIC_SH_CONFIG_COUNTSTOP_SHF);
        gic_write(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig);
}

void gic_stop_count(void)
{
        u32 gicconfig;

        /* Stop the counter */
        gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
        gicconfig |= 1 << GIC_SH_CONFIG_COUNTSTOP_SHF;
        gic_write(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig);
}

#endif

static bool gic_local_irq_is_routable(int intr)
{
        u32 vpe_ctl;

        /* All local interrupts are routable in EIC mode. */
        if (cpu_has_veic)
                return true;

        vpe_ctl = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_CTL));
        switch (intr) {
        case GIC_LOCAL_INT_TIMER:
                return vpe_ctl & GIC_VPE_CTL_TIMER_RTBL_MSK;
        case GIC_LOCAL_INT_PERFCTR:
                return vpe_ctl & GIC_VPE_CTL_PERFCNT_RTBL_MSK;
        case GIC_LOCAL_INT_FDC:
                return vpe_ctl & GIC_VPE_CTL_FDC_RTBL_MSK;
        case GIC_LOCAL_INT_SWINT0:
        case GIC_LOCAL_INT_SWINT1:
                return vpe_ctl & GIC_VPE_CTL_SWINT_RTBL_MSK;
        default:
                return true;
        }
}

static void gic_bind_eic_interrupt(int irq, int set)
{
        /* Convert irq vector # to hw int # */
        irq -= GIC_PIN_TO_VEC_OFFSET;

        /* Set irq to use shadow set */
        gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_EIC_SHADOW_SET_BASE) +
                  GIC_VPE_EIC_SS(irq), set);
}

void gic_send_ipi(unsigned int intr)
{
        gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), GIC_SH_WEDGE_SET(intr));
}

int gic_get_c0_compare_int(void)
{
        if (!gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER))
                return MIPS_CPU_IRQ_BASE + cp0_compare_irq;
        return irq_create_mapping(gic_irq_domain,
                                  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_TIMER));
}

int gic_get_c0_perfcount_int(void)
{
        if (!gic_local_irq_is_routable(GIC_LOCAL_INT_PERFCTR)) {
                /* Is the performance counter shared with the timer? */
                if (cp0_perfcount_irq < 0)
                        return -1;
                return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
        }
        return irq_create_mapping(gic_irq_domain,
                                  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_PERFCTR));
}
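
/*
 * The gic_get_c0_*_int() helpers share a pattern: when the GIC can route
 * the local interrupt, return a virq mapped through gic_irq_domain;
 * otherwise fall back to the raw CP0 line (MIPS_CPU_IRQ_BASE + cp0_*_irq).
 * A caller might consume the result roughly like this (a sketch only;
 * timer_irqaction is a hypothetical struct irqaction):
 *
 *        int irq = gic_get_c0_compare_int();
 *        setup_irq(irq, &timer_irqaction);
 *
 * gic_get_c0_perfcount_int() can also return -1, meaning the performance
 * counter has no usable interrupt of its own.
 */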

int gic_get_c0_fdc_int(void)
{
        if (!gic_local_irq_is_routable(GIC_LOCAL_INT_FDC)) {
                /* Is the FDC IRQ even present? */
                if (cp0_fdc_irq < 0)
                        return -1;
                return MIPS_CPU_IRQ_BASE + cp0_fdc_irq;
        }

        /*
         * Some cores claim the FDC is routable but it doesn't actually seem
         * to be connected.
         */
        switch (current_cpu_type()) {
        case CPU_INTERAPTIV:
        case CPU_PROAPTIV:
                return -1;
        }

        return irq_create_mapping(gic_irq_domain,
                                  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_FDC));
}

static void gic_handle_shared_int(void)
{
        unsigned int i, intr, virq;
        unsigned long *pcpu_mask;
        unsigned long pending_reg, intrmask_reg;
        DECLARE_BITMAP(pending, GIC_MAX_INTRS);
        DECLARE_BITMAP(intrmask, GIC_MAX_INTRS);

        /* Get per-cpu bitmaps */
        pcpu_mask = pcpu_masks[smp_processor_id()].pcpu_mask;

        pending_reg = GIC_REG(SHARED, GIC_SH_PEND);
        intrmask_reg = GIC_REG(SHARED, GIC_SH_MASK);

        for (i = 0; i < BITS_TO_LONGS(gic_shared_intrs); i++) {
                pending[i] = gic_read(pending_reg);
                intrmask[i] = gic_read(intrmask_reg);
                pending_reg += 0x4;
                intrmask_reg += 0x4;
        }

        bitmap_and(pending, pending, intrmask, gic_shared_intrs);
        bitmap_and(pending, pending, pcpu_mask, gic_shared_intrs);

        intr = find_first_bit(pending, gic_shared_intrs);
        while (intr != gic_shared_intrs) {
                virq = irq_linear_revmap(gic_irq_domain,
                                         GIC_SHARED_TO_HWIRQ(intr));
                do_IRQ(virq);

                /* go to next pending bit */
                bitmap_clear(pending, intr, 1);
                intr = find_first_bit(pending, gic_shared_intrs);
        }
}

static void gic_mask_irq(struct irq_data *d)
{
        gic_reset_mask(GIC_HWIRQ_TO_SHARED(d->hwirq));
}

static void gic_unmask_irq(struct irq_data *d)
{
        gic_set_mask(GIC_HWIRQ_TO_SHARED(d->hwirq));
}

static void gic_ack_irq(struct irq_data *d)
{
        unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);

        gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), GIC_SH_WEDGE_CLR(irq));
}

static int gic_set_type(struct irq_data *d, unsigned int type)
{
        unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
        unsigned long flags;
        bool is_edge;

        spin_lock_irqsave(&gic_lock, flags);
        switch (type & IRQ_TYPE_SENSE_MASK) {
        case IRQ_TYPE_EDGE_FALLING:
                gic_set_polarity(irq, GIC_POL_NEG);
                gic_set_trigger(irq, GIC_TRIG_EDGE);
                gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
                is_edge = true;
                break;
        case IRQ_TYPE_EDGE_RISING:
                gic_set_polarity(irq, GIC_POL_POS);
                gic_set_trigger(irq, GIC_TRIG_EDGE);
                gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
                is_edge = true;
                break;
        case IRQ_TYPE_EDGE_BOTH:
                /* polarity is irrelevant in this case */
                gic_set_trigger(irq, GIC_TRIG_EDGE);
                gic_set_dual_edge(irq, GIC_TRIG_DUAL_ENABLE);
                is_edge = true;
                break;
        case IRQ_TYPE_LEVEL_LOW:
                gic_set_polarity(irq, GIC_POL_NEG);
                gic_set_trigger(irq, GIC_TRIG_LEVEL);
                gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
                is_edge = false;
                break;
        case IRQ_TYPE_LEVEL_HIGH:
        default:
                gic_set_polarity(irq, GIC_POL_POS);
                gic_set_trigger(irq, GIC_TRIG_LEVEL);
                gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
                is_edge = false;
                break;
        }

        if (is_edge) {
                __irq_set_chip_handler_name_locked(d->irq,
                                                   &gic_edge_irq_controller,
                                                   handle_edge_irq, NULL);
        } else {
                __irq_set_chip_handler_name_locked(d->irq,
                                                   &gic_level_irq_controller,
                                                   handle_level_irq, NULL);
        }
        spin_unlock_irqrestore(&gic_lock, flags);

        return 0;
}
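
/*
 * For reference, gic_set_type() programs the shared interrupt as follows:
 *
 *        IRQ_TYPE_EDGE_FALLING:  polarity NEG,  trigger EDGE,  dual-edge off
 *        IRQ_TYPE_EDGE_RISING:   polarity POS,  trigger EDGE,  dual-edge off
 *        IRQ_TYPE_EDGE_BOTH:     polarity n/a,  trigger EDGE,  dual-edge on
 *        IRQ_TYPE_LEVEL_LOW:     polarity NEG,  trigger LEVEL, dual-edge off
 *        IRQ_TYPE_LEVEL_HIGH:    polarity POS,  trigger LEVEL, dual-edge off
 *
 * Edge types are then driven through gic_edge_irq_controller, whose
 * irq_ack clears the interrupt via the WEDGE register; level types use
 * gic_level_irq_controller, which needs no ack step.
 */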

#ifdef CONFIG_SMP
static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
                            bool force)
{
        unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
        cpumask_t tmp = CPU_MASK_NONE;
        unsigned long flags;
        int i;

        cpumask_and(&tmp, cpumask, cpu_online_mask);
        if (cpumask_empty(&tmp))
                return -EINVAL;

        /* Assumption: cpumask refers to a single CPU */
        spin_lock_irqsave(&gic_lock, flags);

        /* Re-route this IRQ */
        gic_map_to_vpe(irq, cpumask_first(&tmp));

        /* Update the pcpu_masks */
        for (i = 0; i < NR_CPUS; i++)
                clear_bit(irq, pcpu_masks[i].pcpu_mask);
        set_bit(irq, pcpu_masks[cpumask_first(&tmp)].pcpu_mask);

        cpumask_copy(d->affinity, cpumask);
        spin_unlock_irqrestore(&gic_lock, flags);

        return IRQ_SET_MASK_OK_NOCOPY;
}
#endif

static struct irq_chip gic_level_irq_controller = {
        .name = "MIPS GIC",
        .irq_mask = gic_mask_irq,
        .irq_unmask = gic_unmask_irq,
        .irq_set_type = gic_set_type,
#ifdef CONFIG_SMP
        .irq_set_affinity = gic_set_affinity,
#endif
};

static struct irq_chip gic_edge_irq_controller = {
        .name = "MIPS GIC",
        .irq_ack = gic_ack_irq,
        .irq_mask = gic_mask_irq,
        .irq_unmask = gic_unmask_irq,
        .irq_set_type = gic_set_type,
#ifdef CONFIG_SMP
        .irq_set_affinity = gic_set_affinity,
#endif
};

static void gic_handle_local_int(void)
{
        unsigned long pending, masked;
        unsigned int intr, virq;

        pending = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_PEND));
        masked = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_MASK));

        bitmap_and(&pending, &pending, &masked, GIC_NUM_LOCAL_INTRS);

        intr = find_first_bit(&pending, GIC_NUM_LOCAL_INTRS);
        while (intr != GIC_NUM_LOCAL_INTRS) {
                virq = irq_linear_revmap(gic_irq_domain,
                                         GIC_LOCAL_TO_HWIRQ(intr));
                do_IRQ(virq);

                /* go to next pending bit */
                bitmap_clear(&pending, intr, 1);
                intr = find_first_bit(&pending, GIC_NUM_LOCAL_INTRS);
        }
}

static void gic_mask_local_irq(struct irq_data *d)
{
        int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);

        gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_RMASK), 1 << intr);
}

static void gic_unmask_local_irq(struct irq_data *d)
{
        int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);

        gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_SMASK), 1 << intr);
}

static struct irq_chip gic_local_irq_controller = {
        .name = "MIPS GIC Local",
        .irq_mask = gic_mask_local_irq,
        .irq_unmask = gic_unmask_local_irq,
};

static void gic_mask_local_irq_all_vpes(struct irq_data *d)
{
        int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
        int i;
        unsigned long flags;

        spin_lock_irqsave(&gic_lock, flags);
        for (i = 0; i < gic_vpes; i++) {
                gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
                gic_write(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << intr);
        }
        spin_unlock_irqrestore(&gic_lock, flags);
}

static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
{
        int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
        int i;
        unsigned long flags;

        spin_lock_irqsave(&gic_lock, flags);
        for (i = 0; i < gic_vpes; i++) {
                gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
                gic_write(GIC_REG(VPE_OTHER, GIC_VPE_SMASK), 1 << intr);
        }
        spin_unlock_irqrestore(&gic_lock, flags);
}

static struct irq_chip gic_all_vpes_local_irq_controller = {
        .name = "MIPS GIC Local",
        .irq_mask = gic_mask_local_irq_all_vpes,
        .irq_unmask = gic_unmask_local_irq_all_vpes,
};
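
/*
 * The *_all_vpes variants above use the GIC's redirection window: writing
 * a VPE number to the local GIC_VPE_OTHER_ADDR register makes the
 * VPE_OTHER register block alias that VPE's local registers, e.g.:
 *
 *        gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), vpe);
 *        gic_write(GIC_REG(VPE_OTHER, GIC_VPE_SMASK), 1 << intr);
 *
 * The two writes must not be interleaved with another user of the window,
 * which is why these loops run under gic_lock with interrupts disabled.
 */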

static void __gic_irq_dispatch(void)
{
        gic_handle_local_int();
        gic_handle_shared_int();
}

static void gic_irq_dispatch(unsigned int irq, struct irq_desc *desc)
{
        __gic_irq_dispatch();
}

#ifdef CONFIG_MIPS_GIC_IPI
static int gic_resched_int_base;
static int gic_call_int_base;

unsigned int plat_ipi_resched_int_xlate(unsigned int cpu)
{
        return gic_resched_int_base + cpu;
}

unsigned int plat_ipi_call_int_xlate(unsigned int cpu)
{
        return gic_call_int_base + cpu;
}

static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
{
        scheduler_ipi();

        return IRQ_HANDLED;
}

static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
{
        smp_call_function_interrupt();

        return IRQ_HANDLED;
}

static struct irqaction irq_resched = {
        .handler = ipi_resched_interrupt,
        .flags = IRQF_PERCPU,
        .name = "IPI resched"
};

static struct irqaction irq_call = {
        .handler = ipi_call_interrupt,
        .flags = IRQF_PERCPU,
        .name = "IPI call"
};

static __init void gic_ipi_init_one(unsigned int intr, int cpu,
                                    struct irqaction *action)
{
        int virq = irq_create_mapping(gic_irq_domain,
                                      GIC_SHARED_TO_HWIRQ(intr));
        int i;

        gic_map_to_vpe(intr, cpu);
        for (i = 0; i < NR_CPUS; i++)
                clear_bit(intr, pcpu_masks[i].pcpu_mask);
        set_bit(intr, pcpu_masks[cpu].pcpu_mask);

        irq_set_irq_type(virq, IRQ_TYPE_EDGE_RISING);

        irq_set_handler(virq, handle_percpu_irq);
        setup_irq(virq, action);
}

static __init void gic_ipi_init(void)
{
        int i;

        /* Use the last 2 * nr_cpu_ids interrupts as IPIs */
        gic_resched_int_base = gic_shared_intrs - nr_cpu_ids;
        gic_call_int_base = gic_resched_int_base - nr_cpu_ids;

        for (i = 0; i < nr_cpu_ids; i++) {
                gic_ipi_init_one(gic_call_int_base + i, i, &irq_call);
                gic_ipi_init_one(gic_resched_int_base + i, i, &irq_resched);
        }
}
#else
static inline void gic_ipi_init(void)
{
}
#endif
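
/*
 * gic_ipi_init() carves the IPIs out of the top of the shared interrupt
 * range. As a worked example, on a hypothetical system with 256 shared
 * interrupts and nr_cpu_ids == 4:
 *
 *        gic_resched_int_base = 256 - 4 = 252;   resched IPIs are 252..255
 *        gic_call_int_base    = 252 - 4 = 248;   call IPIs are 248..251
 *
 * plat_ipi_resched_int_xlate(cpu) and plat_ipi_call_int_xlate(cpu) then
 * pick the per-CPU interrupt within each block.
 */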

static void __init gic_basic_init(void)
{
        unsigned int i;

        board_bind_eic_interrupt = &gic_bind_eic_interrupt;

        /* Set up defaults */
        for (i = 0; i < gic_shared_intrs; i++) {
                gic_set_polarity(i, GIC_POL_POS);
                gic_set_trigger(i, GIC_TRIG_LEVEL);
                gic_reset_mask(i);
        }

        for (i = 0; i < gic_vpes; i++) {
                unsigned int j;

                gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
                for (j = 0; j < GIC_NUM_LOCAL_INTRS; j++) {
                        if (!gic_local_irq_is_routable(j))
                                continue;
                        gic_write(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << j);
                }
        }
}

static int gic_local_irq_domain_map(struct irq_domain *d, unsigned int virq,
                                    irq_hw_number_t hw)
{
        int intr = GIC_HWIRQ_TO_LOCAL(hw);
        int ret = 0;
        int i;
        unsigned long flags;

        if (!gic_local_irq_is_routable(intr))
                return -EPERM;

        /*
         * HACK: These are all really percpu interrupts, but the rest
         * of the MIPS kernel code does not use the percpu IRQ API for
         * the CP0 timer and performance counter interrupts.
         */
        switch (intr) {
        case GIC_LOCAL_INT_TIMER:
        case GIC_LOCAL_INT_PERFCTR:
        case GIC_LOCAL_INT_FDC:
                irq_set_chip_and_handler(virq,
                                         &gic_all_vpes_local_irq_controller,
                                         handle_percpu_irq);
                break;
        default:
                irq_set_chip_and_handler(virq,
                                         &gic_local_irq_controller,
                                         handle_percpu_devid_irq);
                irq_set_percpu_devid(virq);
                break;
        }

        spin_lock_irqsave(&gic_lock, flags);
        for (i = 0; i < gic_vpes; i++) {
                u32 val = GIC_MAP_TO_PIN_MSK | gic_cpu_pin;

                gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);

                switch (intr) {
                case GIC_LOCAL_INT_WD:
                        gic_write(GIC_REG(VPE_OTHER, GIC_VPE_WD_MAP), val);
                        break;
                case GIC_LOCAL_INT_COMPARE:
                        gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_MAP), val);
                        break;
                case GIC_LOCAL_INT_TIMER:
                        /* CONFIG_MIPS_CMP workaround (see __gic_init) */
                        val = GIC_MAP_TO_PIN_MSK | timer_cpu_pin;
                        gic_write(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP), val);
                        break;
                case GIC_LOCAL_INT_PERFCTR:
                        gic_write(GIC_REG(VPE_OTHER, GIC_VPE_PERFCTR_MAP), val);
                        break;
                case GIC_LOCAL_INT_SWINT0:
                        gic_write(GIC_REG(VPE_OTHER, GIC_VPE_SWINT0_MAP), val);
                        break;
                case GIC_LOCAL_INT_SWINT1:
                        gic_write(GIC_REG(VPE_OTHER, GIC_VPE_SWINT1_MAP), val);
                        break;
                case GIC_LOCAL_INT_FDC:
                        gic_write(GIC_REG(VPE_OTHER, GIC_VPE_FDC_MAP), val);
                        break;
                default:
                        pr_err("Invalid local IRQ %d\n", intr);
                        ret = -EINVAL;
                        break;
                }
        }
        spin_unlock_irqrestore(&gic_lock, flags);

        return ret;
}

static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
                                     irq_hw_number_t hw)
{
        int intr = GIC_HWIRQ_TO_SHARED(hw);
        unsigned long flags;

        irq_set_chip_and_handler(virq, &gic_level_irq_controller,
                                 handle_level_irq);

        spin_lock_irqsave(&gic_lock, flags);
        gic_map_to_pin(intr, gic_cpu_pin);
        /* Map to VPE 0 by default */
        gic_map_to_vpe(intr, 0);
        set_bit(intr, pcpu_masks[0].pcpu_mask);
        spin_unlock_irqrestore(&gic_lock, flags);

        return 0;
}

static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
                              irq_hw_number_t hw)
{
        if (GIC_HWIRQ_TO_LOCAL(hw) < GIC_NUM_LOCAL_INTRS)
                return gic_local_irq_domain_map(d, virq, hw);
        return gic_shared_irq_domain_map(d, virq, hw);
}

static int gic_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
                                const u32 *intspec, unsigned int intsize,
                                irq_hw_number_t *out_hwirq,
                                unsigned int *out_type)
{
        if (intsize != 3)
                return -EINVAL;

        if (intspec[0] == GIC_SHARED)
                *out_hwirq = GIC_SHARED_TO_HWIRQ(intspec[1]);
        else if (intspec[0] == GIC_LOCAL)
                *out_hwirq = GIC_LOCAL_TO_HWIRQ(intspec[1]);
        else
                return -EINVAL;
        *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;

        return 0;
}

static struct irq_domain_ops gic_irq_domain_ops = {
        .map = gic_irq_domain_map,
        .xlate = gic_irq_domain_xlate,
};
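
/*
 * gic_irq_domain_xlate() expects three cells per interrupt specifier:
 * <class hwirq type>, where class is GIC_SHARED or GIC_LOCAL from
 * dt-bindings/interrupt-controller/mips-gic.h. A device node might
 * therefore contain (hwirq 29 here is purely illustrative):
 *
 *        interrupt-parent = <&gic>;
 *        interrupts = <GIC_SHARED 29 IRQ_TYPE_LEVEL_HIGH>;
 */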

static void __init __gic_init(unsigned long gic_base_addr,
                              unsigned long gic_addrspace_size,
                              unsigned int cpu_vec, unsigned int irqbase,
                              struct device_node *node)
{
        unsigned int gicconfig;

        gic_base = ioremap_nocache(gic_base_addr, gic_addrspace_size);

        gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
        gic_shared_intrs = (gicconfig & GIC_SH_CONFIG_NUMINTRS_MSK) >>
                           GIC_SH_CONFIG_NUMINTRS_SHF;
        gic_shared_intrs = ((gic_shared_intrs + 1) * 8);

        gic_vpes = (gicconfig & GIC_SH_CONFIG_NUMVPES_MSK) >>
                   GIC_SH_CONFIG_NUMVPES_SHF;
        gic_vpes = gic_vpes + 1;

        if (cpu_has_veic) {
                /* Always use vector 1 in EIC mode */
                gic_cpu_pin = 0;
                timer_cpu_pin = gic_cpu_pin;
                set_vi_handler(gic_cpu_pin + GIC_PIN_TO_VEC_OFFSET,
                               __gic_irq_dispatch);
        } else {
                gic_cpu_pin = cpu_vec - GIC_CPU_PIN_OFFSET;
                irq_set_chained_handler(MIPS_CPU_IRQ_BASE + cpu_vec,
                                        gic_irq_dispatch);
                /*
                 * With the CMP implementation of SMP (deprecated), other CPUs
                 * are started by the bootloader and put into a timer based
                 * waiting poll loop. We must not re-route those CPUs' local
                 * timer interrupts as the wait instruction will never finish,
                 * so just handle whatever CPU interrupt it is routed to by
                 * default.
                 *
                 * This workaround should be removed when CMP support is
                 * dropped.
                 */
                if (IS_ENABLED(CONFIG_MIPS_CMP) &&
                    gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER)) {
                        timer_cpu_pin = gic_read(GIC_REG(VPE_LOCAL,
                                                         GIC_VPE_TIMER_MAP)) &
                                        GIC_MAP_MSK;
                        irq_set_chained_handler(MIPS_CPU_IRQ_BASE +
                                                GIC_CPU_PIN_OFFSET +
                                                timer_cpu_pin,
                                                gic_irq_dispatch);
                } else {
                        timer_cpu_pin = gic_cpu_pin;
                }
        }

        gic_irq_domain = irq_domain_add_simple(node, GIC_NUM_LOCAL_INTRS +
                                               gic_shared_intrs, irqbase,
                                               &gic_irq_domain_ops, NULL);
        if (!gic_irq_domain)
                panic("Failed to add GIC IRQ domain");

        gic_basic_init();

        gic_ipi_init();
}

void __init gic_init(unsigned long gic_base_addr,
                     unsigned long gic_addrspace_size,
                     unsigned int cpu_vec, unsigned int irqbase)
{
        __gic_init(gic_base_addr, gic_addrspace_size, cpu_vec, irqbase, NULL);
}

static int __init gic_of_init(struct device_node *node,
                              struct device_node *parent)
{
        struct resource res;
        unsigned int cpu_vec, i = 0, reserved = 0;
        phys_addr_t gic_base;
        size_t gic_len;

        /* Find the first available CPU vector. */
        while (!of_property_read_u32_index(node, "mti,reserved-cpu-vectors",
                                           i++, &cpu_vec))
                reserved |= BIT(cpu_vec);
        for (cpu_vec = 2; cpu_vec < 8; cpu_vec++) {
                if (!(reserved & BIT(cpu_vec)))
                        break;
        }
        if (cpu_vec == 8) {
                pr_err("No CPU vectors available for GIC\n");
                return -ENODEV;
        }

        if (of_address_to_resource(node, 0, &res)) {
                /*
                 * Probe the CM for the GIC base address if not specified
                 * in the device-tree.
                 */
                if (mips_cm_present()) {
                        gic_base = read_gcr_gic_base() &
                                   ~CM_GCR_GIC_BASE_GICEN_MSK;
                        gic_len = 0x20000;
                } else {
                        pr_err("Failed to get GIC memory range\n");
                        return -ENODEV;
                }
        } else {
                gic_base = res.start;
                gic_len = resource_size(&res);
        }

        if (mips_cm_present())
                write_gcr_gic_base(gic_base | CM_GCR_GIC_BASE_GICEN_MSK);
        gic_present = true;

        __gic_init(gic_base, gic_len, cpu_vec, 0, node);

        return 0;
}
IRQCHIP_DECLARE(mips_gic, "mti,gic", gic_of_init);
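
/*
 * Illustrative device-tree node for this driver (a sketch; the unit
 * address and reserved vector below are hypothetical, and "reg" may be
 * omitted entirely when a Coherence Manager can supply the GIC base, as
 * handled in gic_of_init() above):
 *
 *        gic: interrupt-controller@1bdc0000 {
 *                compatible = "mti,gic";
 *                reg = <0x1bdc0000 0x20000>;
 *                interrupt-controller;
 *                #interrupt-cells = <3>;
 *                mti,reserved-cpu-vectors = <7>;
 *        };
 */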