/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 *
 * Driver for the MIPS Global Interrupt Controller (GIC): routes "shared"
 * external interrupts and per-VPE "local" interrupts to CPU interrupt pins,
 * and exposes the GIC's global counter/compare registers.
 */
#include <linux/bitmap.h>
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip/mips-gic.h>
#include <linux/of_address.h>
#include <linux/sched.h>
#include <linux/smp.h>

#include <asm/mips-cm.h>
#include <asm/setup.h>
#include <asm/traps.h>

#include <dt-bindings/interrupt-controller/mips-gic.h>

#include "irqchip.h"

unsigned int gic_present;

/* Per-CPU bitmap of which shared interrupts are routed to that CPU. */
struct gic_pcpu_mask {
	DECLARE_BITMAP(pcpu_mask, GIC_MAX_INTRS);
};

static void __iomem *gic_base;
static struct gic_pcpu_mask pcpu_masks[NR_CPUS];
static DEFINE_SPINLOCK(gic_lock);
static struct irq_domain *gic_irq_domain;
static int gic_shared_intrs;	/* number of shared interrupts (from GIC config) */
static int gic_vpes;		/* number of VPEs the GIC serves (from GIC config) */
static unsigned int gic_cpu_pin;
static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller;

static void __gic_irq_dispatch(void);

/* 32-bit MMIO accessors for the GIC register block. */
static inline unsigned int gic_read(unsigned int reg)
{
	return __raw_readl(gic_base + reg);
}

static inline void gic_write(unsigned int reg, unsigned int val)
{
	__raw_writel(val, gic_base + reg);
}

/*
 * Non-atomic read-modify-write of a GIC register.  Callers that can race
 * serialize via gic_lock.
 */
static inline void gic_update_bits(unsigned int reg, unsigned int mask,
				   unsigned int val)
{
	unsigned int regval;

	regval = gic_read(reg);
	regval &= ~mask;
	regval |= val;
	gic_write(reg, regval);
}

/* Mask (disable) a shared interrupt via the reset-mask register. */
static inline void gic_reset_mask(unsigned int intr)
{
	gic_write(GIC_REG(SHARED, GIC_SH_RMASK) + GIC_INTR_OFS(intr),
		  1 << GIC_INTR_BIT(intr));
}

/* Unmask (enable) a shared interrupt via the set-mask register. */
static inline void gic_set_mask(unsigned int intr)
{
	gic_write(GIC_REG(SHARED, GIC_SH_SMASK) + GIC_INTR_OFS(intr),
		  1 << GIC_INTR_BIT(intr));
}

static inline void gic_set_polarity(unsigned int intr, unsigned int pol)
{
	gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_POLARITY) +
			GIC_INTR_OFS(intr), 1 << GIC_INTR_BIT(intr),
			pol << GIC_INTR_BIT(intr));
}

static inline void gic_set_trigger(unsigned int intr, unsigned int trig)
{
	gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_TRIGGER) +
			GIC_INTR_OFS(intr), 1 << GIC_INTR_BIT(intr),
			trig << GIC_INTR_BIT(intr));
}

static inline void gic_set_dual_edge(unsigned int intr, unsigned int dual)
{
	gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_DUAL) + GIC_INTR_OFS(intr),
			1 << GIC_INTR_BIT(intr),
			dual << GIC_INTR_BIT(intr));
}

/* Route a shared interrupt to a CPU interrupt pin. */
static inline void gic_map_to_pin(unsigned int intr, unsigned int pin)
{
	gic_write(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_PIN_BASE) +
		  GIC_SH_MAP_TO_PIN(intr), GIC_MAP_TO_PIN_MSK | pin);
}

/* Route a shared interrupt to a VPE (virtual processing element). */
static inline void gic_map_to_vpe(unsigned int intr, unsigned int vpe)
{
	gic_write(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_VPE_BASE) +
		  GIC_SH_MAP_TO_VPE_REG_OFF(intr, vpe),
		  GIC_SH_MAP_TO_VPE_REG_BIT(vpe));
}

#ifdef CONFIG_CLKSRC_MIPS_GIC
/*
 * Read the GIC's free-running 64-bit counter from its two 32-bit halves,
 * retrying if the high word rolled over between the two reads.
 */
cycle_t gic_read_count(void)
{
	unsigned int hi, hi2, lo;

	do {
		hi = gic_read(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
		lo = gic_read(GIC_REG(SHARED, GIC_SH_COUNTER_31_00));
		hi2 = gic_read(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
	} while (hi2 != hi);

	return (((cycle_t) hi) << 32) + lo;
}

/* Counter width in bits, decoded from the GIC config register (32 + 4*N). */
unsigned int gic_get_count_width(void)
{
	unsigned int bits, config;

	config = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
	bits = 32 + 4 * ((config & GIC_SH_CONFIG_COUNTBITS_MSK) >>
			 GIC_SH_CONFIG_COUNTBITS_SHF);

	return bits;
}

/* Program the local VPE's 64-bit compare value. */
void gic_write_compare(cycle_t cnt)
{
	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI),
		  (int)(cnt >> 32));
	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO),
		  (int)(cnt & 0xffffffff));
}

/*
 * Program another CPU's compare value through the VPE_OTHER window.
 * Interrupts are disabled so the OTHER_ADDR selection and the two compare
 * writes are not interleaved with another user of the window on this CPU.
 */
void gic_write_cpu_compare(cycle_t cnt, int cpu)
{
	unsigned long flags;

	local_irq_save(flags);

	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), cpu);
	gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_HI),
		  (int)(cnt >> 32));
	gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_LO),
		  (int)(cnt & 0xffffffff));

	local_irq_restore(flags);
}

/* Read back the local VPE's 64-bit compare value. */
cycle_t gic_read_compare(void)
{
	unsigned int hi, lo;

	hi = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI));
	lo = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO));

	return (((cycle_t) hi) << 32) + lo;
}
#endif

/*
 * Whether a given local interrupt can be routed through the GIC, per the
 * VPE control register's routability bits.
 */
static bool gic_local_irq_is_routable(int intr)
{
	u32 vpe_ctl;

	/* All local interrupts are routable in EIC mode. */
	if (cpu_has_veic)
		return true;

	vpe_ctl = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_CTL));
	switch (intr) {
	case GIC_LOCAL_INT_TIMER:
		return vpe_ctl & GIC_VPE_CTL_TIMER_RTBL_MSK;
	case GIC_LOCAL_INT_PERFCTR:
		return vpe_ctl & GIC_VPE_CTL_PERFCNT_RTBL_MSK;
	case GIC_LOCAL_INT_FDC:
		return vpe_ctl & GIC_VPE_CTL_FDC_RTBL_MSK;
	case GIC_LOCAL_INT_SWINT0:
	case GIC_LOCAL_INT_SWINT1:
		return vpe_ctl & GIC_VPE_CTL_SWINT_RTBL_MSK;
	default:
		return true;
	}
}

/* Non-zero if the local GIC timer interrupt is pending on this VPE. */
unsigned int gic_get_timer_pending(void)
{
	unsigned int vpe_pending;

	vpe_pending = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_PEND));
	return vpe_pending & GIC_VPE_PEND_TIMER_MSK;
}

/* EIC mode: select whether an interrupt vector uses the shadow register set. */
static void gic_bind_eic_interrupt(int irq, int set)
{
	/* Convert irq vector # to hw int # */
	irq -= GIC_PIN_TO_VEC_OFFSET;

	/* Set irq to use shadow set */
	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_EIC_SHADOW_SET_BASE) +
		  GIC_VPE_EIC_SS(irq), set);
}

/* Trigger an IPI by setting the edge-wedge bit of the given shared intr. */
void gic_send_ipi(unsigned int intr)
{
	gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), GIC_SH_WEDGE_SET(intr));
}

/*
 * Linux IRQ number for the CP0 compare (timer) interrupt: the GIC-routed
 * local interrupt if routable, otherwise the raw CPU interrupt line.
 */
int gic_get_c0_compare_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER))
		return MIPS_CPU_IRQ_BASE + cp0_compare_irq;
	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_TIMER));
}

/* As above, for the performance counter interrupt; -1 if unavailable. */
int gic_get_c0_perfcount_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_PERFCTR)) {
		/* Is the performance counter shared with the timer? */
		if (cp0_perfcount_irq < 0)
			return -1;
		return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
	}
	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_PERFCTR));
}

/*
 * First shared interrupt that is pending, unmasked, and routed to this CPU;
 * returns gic_shared_intrs when none is found.
 */
static unsigned int gic_get_int(void)
{
	unsigned int i;
	unsigned long *pcpu_mask;
	unsigned long pending_reg, intrmask_reg;
	DECLARE_BITMAP(pending, GIC_MAX_INTRS);
	DECLARE_BITMAP(intrmask, GIC_MAX_INTRS);

	/* Get per-cpu bitmaps */
	pcpu_mask = pcpu_masks[smp_processor_id()].pcpu_mask;

	pending_reg = GIC_REG(SHARED, GIC_SH_PEND);
	intrmask_reg = GIC_REG(SHARED, GIC_SH_MASK);

	for (i = 0; i < BITS_TO_LONGS(gic_shared_intrs); i++) {
		pending[i] = gic_read(pending_reg);
		intrmask[i] = gic_read(intrmask_reg);
		pending_reg += 0x4;
		intrmask_reg += 0x4;
	}

	bitmap_and(pending, pending, intrmask, gic_shared_intrs);
	bitmap_and(pending, pending, pcpu_mask, gic_shared_intrs);

	return find_first_bit(pending, gic_shared_intrs);
}

static void gic_mask_irq(struct irq_data *d)
{
	gic_reset_mask(GIC_HWIRQ_TO_SHARED(d->hwirq));
}

static void gic_unmask_irq(struct irq_data *d)
{
	gic_set_mask(GIC_HWIRQ_TO_SHARED(d->hwirq));
}

/* Ack an edge interrupt by clearing its wedge bit. */
static void gic_ack_irq(struct irq_data *d)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);

	gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), GIC_SH_WEDGE_CLR(irq));
}

/*
 * Program polarity/trigger/dual-edge for the requested IRQ type and switch
 * the irq_chip and flow handler between the edge and level variants.
 */
static int gic_set_type(struct irq_data *d, unsigned int type)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
	unsigned long flags;
	bool is_edge;

	spin_lock_irqsave(&gic_lock, flags);
	switch (type & IRQ_TYPE_SENSE_MASK) {
	case IRQ_TYPE_EDGE_FALLING:
		gic_set_polarity(irq, GIC_POL_NEG);
		gic_set_trigger(irq, GIC_TRIG_EDGE);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = true;
		break;
	case IRQ_TYPE_EDGE_RISING:
		gic_set_polarity(irq, GIC_POL_POS);
		gic_set_trigger(irq, GIC_TRIG_EDGE);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = true;
		break;
	case IRQ_TYPE_EDGE_BOTH:
		/* polarity is irrelevant in this case */
		gic_set_trigger(irq, GIC_TRIG_EDGE);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_ENABLE);
		is_edge = true;
		break;
	case IRQ_TYPE_LEVEL_LOW:
		gic_set_polarity(irq, GIC_POL_NEG);
		gic_set_trigger(irq, GIC_TRIG_LEVEL);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = false;
		break;
	case IRQ_TYPE_LEVEL_HIGH:
	default:
		gic_set_polarity(irq, GIC_POL_POS);
		gic_set_trigger(irq, GIC_TRIG_LEVEL);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = false;
		break;
	}

	if (is_edge) {
		__irq_set_chip_handler_name_locked(d->irq,
						   &gic_edge_irq_controller,
						   handle_edge_irq, NULL);
	} else {
		__irq_set_chip_handler_name_locked(d->irq,
						   &gic_level_irq_controller,
						   handle_level_irq, NULL);
	}
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}

#ifdef CONFIG_SMP
/*
 * Re-route a shared interrupt to the first online CPU in @cpumask and
 * update the per-CPU dispatch masks to match.
 */
static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
			    bool force)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
	cpumask_t tmp = CPU_MASK_NONE;
	unsigned long flags;
	int i;

	cpumask_and(&tmp, cpumask, cpu_online_mask);
	if (cpus_empty(tmp))
		return -EINVAL;

	/* Assumption : cpumask refers to a single CPU */
	spin_lock_irqsave(&gic_lock, flags);

	/* Re-route this IRQ */
	gic_map_to_vpe(irq, first_cpu(tmp));

	/* Update the pcpu_masks */
	for (i = 0; i < NR_CPUS; i++)
		clear_bit(irq, pcpu_masks[i].pcpu_mask);
	set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask);

	cpumask_copy(d->affinity, cpumask);
	spin_unlock_irqrestore(&gic_lock, flags);

	return IRQ_SET_MASK_OK_NOCOPY;
}
#endif

static struct irq_chip gic_level_irq_controller = {
	.name			=	"MIPS GIC",
	.irq_mask		=	gic_mask_irq,
	.irq_unmask		=	gic_unmask_irq,
	.irq_set_type		=	gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	=	gic_set_affinity,
#endif
};

static struct irq_chip gic_edge_irq_controller = {
	.name			=	"MIPS GIC",
	.irq_ack		=	gic_ack_irq,
	.irq_mask		=	gic_mask_irq,
	.irq_unmask		=	gic_unmask_irq,
	.irq_set_type		=	gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	=	gic_set_affinity,
#endif
};

/*
 * First local interrupt pending and unmasked on this VPE, or
 * GIC_NUM_LOCAL_INTRS when none.
 */
static unsigned int gic_get_local_int(void)
{
	unsigned long pending, masked;

	pending = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_PEND));
	masked = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_MASK));

	bitmap_and(&pending, &pending, &masked, GIC_NUM_LOCAL_INTRS);

	return find_first_bit(&pending, GIC_NUM_LOCAL_INTRS);
}

static void gic_mask_local_irq(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);

	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_RMASK), 1 << intr);
}

static void gic_unmask_local_irq(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);

	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_SMASK), 1 << intr);
}

static struct irq_chip gic_local_irq_controller = {
	.name			=	"MIPS GIC Local",
	.irq_mask		=	gic_mask_local_irq,
	.irq_unmask		=	gic_unmask_local_irq,
};

/*
 * Mask a local interrupt on every VPE via the VPE_OTHER window; gic_lock
 * serializes use of the OTHER_ADDR selector.
 */
static void gic_mask_local_irq_all_vpes(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	int i;
	unsigned long flags;

	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
		gic_write(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << intr);
	}
	spin_unlock_irqrestore(&gic_lock, flags);
}

/* Unmask a local interrupt on every VPE; see gic_mask_local_irq_all_vpes. */
static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	int i;
	unsigned long flags;

	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
		gic_write(GIC_REG(VPE_OTHER, GIC_VPE_SMASK), 1 << intr);
	}
	spin_unlock_irqrestore(&gic_lock, flags);
}

static struct irq_chip gic_all_vpes_local_irq_controller = {
	.name			=	"MIPS GIC Local",
	.irq_mask		=	gic_mask_local_irq_all_vpes,
	.irq_unmask		=	gic_unmask_local_irq_all_vpes,
};

/* Drain all pending local then shared interrupts for this CPU. */
static void __gic_irq_dispatch(void)
{
	unsigned int intr, virq;

	while ((intr = gic_get_local_int()) != GIC_NUM_LOCAL_INTRS) {
		virq = irq_linear_revmap(gic_irq_domain,
					 GIC_LOCAL_TO_HWIRQ(intr));
		do_IRQ(virq);
	}

	while ((intr = gic_get_int()) != gic_shared_intrs) {
		virq = irq_linear_revmap(gic_irq_domain,
					 GIC_SHARED_TO_HWIRQ(intr));
		do_IRQ(virq);
	}
}

/* Chained-handler entry point used in non-EIC mode. */
static void gic_irq_dispatch(unsigned int irq, struct irq_desc *desc)
{
	__gic_irq_dispatch();
}

#ifdef CONFIG_MIPS_GIC_IPI
/* Base shared-interrupt numbers for the per-CPU resched/call IPIs. */
static int gic_resched_int_base;
static int gic_call_int_base;

unsigned int plat_ipi_resched_int_xlate(unsigned int cpu)
{
	return gic_resched_int_base + cpu;
}

unsigned int plat_ipi_call_int_xlate(unsigned int cpu)
{
	return gic_call_int_base + cpu;
}

static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
{
	scheduler_ipi();

	return IRQ_HANDLED;
}

static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
{
	smp_call_function_interrupt();

	return IRQ_HANDLED;
}

static struct irqaction irq_resched = {
	.handler	= ipi_resched_interrupt,
	.flags		= IRQF_PERCPU,
	.name		= "IPI resched"
};

static struct irqaction irq_call = {
	.handler	= ipi_call_interrupt,
	.flags		= IRQF_PERCPU,
	.name		= "IPI call"
};

/* Map one IPI shared interrupt to @cpu and install its handler. */
static __init void gic_ipi_init_one(unsigned int intr, int cpu,
				    struct irqaction *action)
{
	int virq = irq_create_mapping(gic_irq_domain,
				      GIC_SHARED_TO_HWIRQ(intr));
	int i;

	gic_map_to_vpe(intr, cpu);
	for (i = 0; i < NR_CPUS; i++)
		clear_bit(intr, pcpu_masks[i].pcpu_mask);
	set_bit(intr, pcpu_masks[cpu].pcpu_mask);

	irq_set_irq_type(virq, IRQ_TYPE_EDGE_RISING);

	irq_set_handler(virq, handle_percpu_irq);
	setup_irq(virq, action);
}

static __init void gic_ipi_init(void)
{
	int i;

	/* Use last 2 * NR_CPUS interrupts as IPIs */
	gic_resched_int_base = gic_shared_intrs - nr_cpu_ids;
	gic_call_int_base = gic_resched_int_base - nr_cpu_ids;

	for (i = 0; i < nr_cpu_ids; i++) {
		gic_ipi_init_one(gic_call_int_base + i, i, &irq_call);
		gic_ipi_init_one(gic_resched_int_base + i, i, &irq_resched);
	}
}
#else
static inline void gic_ipi_init(void)
{
}
#endif

/* Put all shared and routable local interrupts into a known, masked state. */
static void __init gic_basic_init(void)
{
	unsigned int i;

	board_bind_eic_interrupt = &gic_bind_eic_interrupt;

	/* Setup defaults */
	for (i = 0; i < gic_shared_intrs; i++) {
		gic_set_polarity(i, GIC_POL_POS);
		gic_set_trigger(i, GIC_TRIG_LEVEL);
		gic_reset_mask(i);
	}

	for (i = 0; i < gic_vpes; i++) {
		unsigned int j;

		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
		for (j = 0; j < GIC_NUM_LOCAL_INTRS; j++) {
			if (!gic_local_irq_is_routable(j))
				continue;
			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << j);
		}
	}
}

/*
 * irq_domain .map callback for local interrupts: pick the irq_chip/handler
 * and route the interrupt to the chosen CPU pin on every VPE.
 */
static int gic_local_irq_domain_map(struct irq_domain *d, unsigned int virq,
				    irq_hw_number_t hw)
{
	int intr = GIC_HWIRQ_TO_LOCAL(hw);
	int ret = 0;
	int i;
	unsigned long flags;

	if (!gic_local_irq_is_routable(intr))
		return -EPERM;

	/*
	 * HACK: These are all really percpu interrupts, but the rest
	 * of the MIPS kernel code does not use the percpu IRQ API for
	 * the CP0 timer and performance counter interrupts.
	 */
	if (intr != GIC_LOCAL_INT_TIMER && intr != GIC_LOCAL_INT_PERFCTR) {
		irq_set_chip_and_handler(virq,
					 &gic_local_irq_controller,
					 handle_percpu_devid_irq);
		irq_set_percpu_devid(virq);
	} else {
		irq_set_chip_and_handler(virq,
					 &gic_all_vpes_local_irq_controller,
					 handle_percpu_irq);
	}

	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		u32 val = GIC_MAP_TO_PIN_MSK | gic_cpu_pin;

		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);

		switch (intr) {
		case GIC_LOCAL_INT_WD:
			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_WD_MAP), val);
			break;
		case GIC_LOCAL_INT_COMPARE:
			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_MAP), val);
			break;
		case GIC_LOCAL_INT_TIMER:
			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP), val);
			break;
		case GIC_LOCAL_INT_PERFCTR:
			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_PERFCTR_MAP), val);
			break;
		case GIC_LOCAL_INT_SWINT0:
			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_SWINT0_MAP), val);
			break;
		case GIC_LOCAL_INT_SWINT1:
			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_SWINT1_MAP), val);
			break;
		case GIC_LOCAL_INT_FDC:
			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_FDC_MAP), val);
			break;
		default:
			pr_err("Invalid local IRQ %d\n", intr);
			ret = -EINVAL;
			break;
		}
	}
	spin_unlock_irqrestore(&gic_lock, flags);

	return ret;
}

/*
 * irq_domain .map callback for shared interrupts: level-triggered by
 * default, routed to the GIC CPU pin on VPE 0.
 */
static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
				     irq_hw_number_t hw)
{
	int intr = GIC_HWIRQ_TO_SHARED(hw);
	unsigned long flags;

	irq_set_chip_and_handler(virq, &gic_level_irq_controller,
				 handle_level_irq);

	spin_lock_irqsave(&gic_lock, flags);
	gic_map_to_pin(intr, gic_cpu_pin);
	/* Map to VPE 0 by default */
	gic_map_to_vpe(intr, 0);
	set_bit(intr, pcpu_masks[0].pcpu_mask);
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}

static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
			      irq_hw_number_t hw)
{
	if (GIC_HWIRQ_TO_LOCAL(hw) < GIC_NUM_LOCAL_INTRS)
		return gic_local_irq_domain_map(d, virq, hw);
	return gic_shared_irq_domain_map(d, virq, hw);
}

/*
 * Translate a 3-cell DT interrupt specifier:
 * <GIC_SHARED|GIC_LOCAL, intr-number, irq-type>.
 */
static int gic_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
				const u32 *intspec, unsigned int intsize,
				irq_hw_number_t *out_hwirq,
				unsigned int *out_type)
{
	if (intsize != 3)
		return -EINVAL;

	if (intspec[0] == GIC_SHARED)
		*out_hwirq = GIC_SHARED_TO_HWIRQ(intspec[1]);
	else if (intspec[0] == GIC_LOCAL)
		*out_hwirq = GIC_LOCAL_TO_HWIRQ(intspec[1]);
	else
		return -EINVAL;
	*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;

	return 0;
}

static struct irq_domain_ops gic_irq_domain_ops = {
	.map = gic_irq_domain_map,
	.xlate = gic_irq_domain_xlate,
};

/*
 * Common init: map the register block, read the GIC configuration, hook up
 * dispatch (EIC vector or chained CPU-pin handler), create the irq_domain,
 * and set up defaults and IPIs.
 */
static void __init __gic_init(unsigned long gic_base_addr,
			      unsigned long gic_addrspace_size,
			      unsigned int cpu_vec, unsigned int irqbase,
			      struct device_node *node)
{
	unsigned int gicconfig;

	gic_base = ioremap_nocache(gic_base_addr, gic_addrspace_size);

	gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
	gic_shared_intrs = (gicconfig & GIC_SH_CONFIG_NUMINTRS_MSK) >>
			   GIC_SH_CONFIG_NUMINTRS_SHF;
	/* NUMINTRS field encodes the count in units of 8, minus one. */
	gic_shared_intrs = ((gic_shared_intrs + 1) * 8);

	gic_vpes = (gicconfig & GIC_SH_CONFIG_NUMVPES_MSK) >>
		   GIC_SH_CONFIG_NUMVPES_SHF;
	gic_vpes = gic_vpes + 1;

	if (cpu_has_veic) {
		/* Always use vector 1 in EIC mode */
		gic_cpu_pin = 0;
		set_vi_handler(gic_cpu_pin + GIC_PIN_TO_VEC_OFFSET,
			       __gic_irq_dispatch);
	} else {
		gic_cpu_pin = cpu_vec - GIC_CPU_PIN_OFFSET;
		irq_set_chained_handler(MIPS_CPU_IRQ_BASE + cpu_vec,
					gic_irq_dispatch);
	}

	gic_irq_domain = irq_domain_add_simple(node, GIC_NUM_LOCAL_INTRS +
					       gic_shared_intrs, irqbase,
					       &gic_irq_domain_ops, NULL);
	if (!gic_irq_domain)
		panic("Failed to add GIC IRQ domain");

	gic_basic_init();

	gic_ipi_init();
}

/* Legacy (non-DT) entry point. */
void __init gic_init(unsigned long gic_base_addr,
		     unsigned long gic_addrspace_size,
		     unsigned int cpu_vec, unsigned int irqbase)
{
	__gic_init(gic_base_addr, gic_addrspace_size, cpu_vec, irqbase, NULL);
}

/*
 * Device-tree probe: choose a free CPU vector, locate the register block
 * (from the DT "reg" property or the Coherence Manager GCR), enable the GIC
 * and initialize it.
 */
static int __init gic_of_init(struct device_node *node,
			      struct device_node *parent)
{
	struct resource res;
	unsigned int cpu_vec, i = 0, reserved = 0;
	phys_addr_t gic_base;
	size_t gic_len;

	/* Find the first available CPU vector. */
	while (!of_property_read_u32_index(node, "mti,reserved-cpu-vectors",
					   i++, &cpu_vec))
		reserved |= BIT(cpu_vec);
	for (cpu_vec = 2; cpu_vec < 8; cpu_vec++) {
		if (!(reserved & BIT(cpu_vec)))
			break;
	}
	if (cpu_vec == 8) {
		pr_err("No CPU vectors available for GIC\n");
		return -ENODEV;
	}

	if (of_address_to_resource(node, 0, &res)) {
		/*
		 * Probe the CM for the GIC base address if not specified
		 * in the device-tree.
		 */
		if (mips_cm_present()) {
			gic_base = read_gcr_gic_base() &
				~CM_GCR_GIC_BASE_GICEN_MSK;
			gic_len = 0x20000;
		} else {
			pr_err("Failed to get GIC memory range\n");
			return -ENODEV;
		}
	} else {
		gic_base = res.start;
		gic_len = resource_size(&res);
	}

	if (mips_cm_present())
		write_gcr_gic_base(gic_base | CM_GCR_GIC_BASE_GICEN_MSK);
	gic_present = true;

	__gic_init(gic_base, gic_len, cpu_vec, 0, node);

	return 0;
}
IRQCHIP_DECLARE(mips_gic, "mti,gic", gic_of_init);