/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004-2012 Cavium, Inc.
 */

#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/bitops.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/of.h>

#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-ciu2-defs.h>

static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu0_en_mirror);
static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu1_en_mirror);
static DEFINE_PER_CPU(raw_spinlock_t, octeon_irq_ciu_spinlock);

static __read_mostly u8 octeon_irq_ciu_to_irq[8][64];

union octeon_ciu_chip_data {
	void *p;
	unsigned long l;
	struct {
		unsigned long line:6;
		unsigned long bit:6;
		unsigned long gpio_line:6;
	} s;
};

struct octeon_core_chip_data {
	struct mutex core_irq_mutex;
	bool current_en;
	bool desired_en;
	u8 bit;
};

#define MIPS_CORE_IRQ_LINES 8

static struct octeon_core_chip_data octeon_irq_core_chip_data[MIPS_CORE_IRQ_LINES];

static void octeon_irq_set_ciu_mapping(int irq, int line, int bit, int gpio_line,
				       struct irq_chip *chip,
				       irq_flow_handler_t handler)
{
	union octeon_ciu_chip_data cd;

	irq_set_chip_and_handler(irq, chip, handler);

	cd.l = 0;
	cd.s.line = line;
	cd.s.bit = bit;
	cd.s.gpio_line = gpio_line;

	irq_set_chip_data(irq, cd.p);
	octeon_irq_ciu_to_irq[line][bit] = irq;
}

static void octeon_irq_force_ciu_mapping(struct irq_domain *domain,
					 int irq, int line, int bit)
{
	irq_domain_associate(domain, irq, line << 6 | bit);
}

static int octeon_coreid_for_cpu(int cpu)
{
#ifdef CONFIG_SMP
	return cpu_logical_map(cpu);
#else
	return cvmx_get_core_num();
#endif
}

static int octeon_cpu_for_coreid(int coreid)
{
#ifdef CONFIG_SMP
	return cpu_number_map(coreid);
#else
	return smp_processor_id();
#endif
}

static void octeon_irq_core_ack(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
	unsigned int bit = cd->bit;

	/*
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	clear_c0_status(0x100 << bit);
	/* The two user interrupts must be cleared manually. */
	if (bit < 2)
		clear_c0_cause(0x100 << bit);
}

static void octeon_irq_core_eoi(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);

	/*
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	set_c0_status(0x100 << cd->bit);
}

static void octeon_irq_core_set_enable_local(void *arg)
{
	struct irq_data *data = arg;
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
	unsigned int mask = 0x100 << cd->bit;

	/*
	 * Interrupts are already disabled, so these are atomic.
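	 * (octeon_irq_core_bus_sync_unlock() invokes this via on_each_cpu().)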
	 */
	if (cd->desired_en)
		set_c0_status(mask);
	else
		clear_c0_status(mask);

}

static void octeon_irq_core_disable(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
	cd->desired_en = false;
}

static void octeon_irq_core_enable(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
	cd->desired_en = true;
}

static void octeon_irq_core_bus_lock(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);

	mutex_lock(&cd->core_irq_mutex);
}

static void octeon_irq_core_bus_sync_unlock(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);

	if (cd->desired_en != cd->current_en) {
		on_each_cpu(octeon_irq_core_set_enable_local, data, 1);

		cd->current_en = cd->desired_en;
	}

	mutex_unlock(&cd->core_irq_mutex);
}

static struct irq_chip octeon_irq_chip_core = {
	.name = "Core",
	.irq_enable = octeon_irq_core_enable,
	.irq_disable = octeon_irq_core_disable,
	.irq_ack = octeon_irq_core_ack,
	.irq_eoi = octeon_irq_core_eoi,
	.irq_bus_lock = octeon_irq_core_bus_lock,
	.irq_bus_sync_unlock = octeon_irq_core_bus_sync_unlock,

	.irq_cpu_online = octeon_irq_core_eoi,
	.irq_cpu_offline = octeon_irq_core_ack,
	.flags = IRQCHIP_ONOFFLINE_ENABLED,
};

static void __init octeon_irq_init_core(void)
{
	int i;
	int irq;
	struct octeon_core_chip_data *cd;

	for (i = 0; i < MIPS_CORE_IRQ_LINES; i++) {
		cd = &octeon_irq_core_chip_data[i];
		cd->current_en = false;
		cd->desired_en = false;
		cd->bit = i;
		mutex_init(&cd->core_irq_mutex);

		irq = OCTEON_IRQ_SW0 + i;
		irq_set_chip_data(irq, cd);
		irq_set_chip_and_handler(irq, &octeon_irq_chip_core,
					 handle_percpu_irq);
	}
}

static int next_cpu_for_irq(struct irq_data *data)
{

#ifdef CONFIG_SMP
	int cpu;
	int weight = cpumask_weight(data->affinity);

	if (weight > 1) {
		cpu = smp_processor_id();
		for (;;) {
			cpu = cpumask_next(cpu, data->affinity);
			if (cpu >= nr_cpu_ids) {
				cpu = -1;
				continue;
			} else if (cpumask_test_cpu(cpu, cpu_online_mask)) {
				break;
			}
		}
	} else if (weight == 1) {
		cpu = cpumask_first(data->affinity);
	} else {
		cpu = smp_processor_id();
	}
	return cpu;
#else
	return smp_processor_id();
#endif
}

static void octeon_irq_ciu_enable(struct irq_data *data)
{
	int cpu = next_cpu_for_irq(data);
	int coreid = octeon_coreid_for_cpu(cpu);
	unsigned long *pen;
	unsigned long flags;
	union octeon_ciu_chip_data cd;
	raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);

	cd.p = irq_data_get_irq_chip_data(data);

	raw_spin_lock_irqsave(lock, flags);
	if (cd.s.line == 0) {
		pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
		__set_bit(cd.s.bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
	} else {
		pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
		__set_bit(cd.s.bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
	}
	raw_spin_unlock_irqrestore(lock, flags);
}

static void octeon_irq_ciu_enable_local(struct irq_data *data)
{
	unsigned long *pen;
	unsigned long flags;
	union octeon_ciu_chip_data cd;
	raw_spinlock_t *lock = &__get_cpu_var(octeon_irq_ciu_spinlock);

	cd.p = irq_data_get_irq_chip_data(data);

	raw_spin_lock_irqsave(lock, flags);
	if (cd.s.line == 0) {
		pen = &__get_cpu_var(octeon_irq_ciu0_en_mirror);
		__set_bit(cd.s.bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen);
	} else {
		pen = &__get_cpu_var(octeon_irq_ciu1_en_mirror);
		__set_bit(cd.s.bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1), *pen);
	}
	raw_spin_unlock_irqrestore(lock, flags);
}

static void octeon_irq_ciu_disable_local(struct irq_data *data)
{
	unsigned long *pen;
	unsigned long flags;
	union octeon_ciu_chip_data cd;
	raw_spinlock_t *lock = &__get_cpu_var(octeon_irq_ciu_spinlock);

	cd.p = irq_data_get_irq_chip_data(data);

	raw_spin_lock_irqsave(lock, flags);
	if (cd.s.line == 0) {
		pen = &__get_cpu_var(octeon_irq_ciu0_en_mirror);
		__clear_bit(cd.s.bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * disabling the irq.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen);
	} else {
		pen = &__get_cpu_var(octeon_irq_ciu1_en_mirror);
		__clear_bit(cd.s.bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * disabling the irq.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1), *pen);
	}
	raw_spin_unlock_irqrestore(lock, flags);
}

static void octeon_irq_ciu_disable_all(struct irq_data *data)
{
	unsigned long flags;
	unsigned long *pen;
	int cpu;
	union octeon_ciu_chip_data cd;
	raw_spinlock_t *lock;

	cd.p = irq_data_get_irq_chip_data(data);

	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
		if (cd.s.line == 0)
			pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
		else
			pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);

		raw_spin_lock_irqsave(lock, flags);
		__clear_bit(cd.s.bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * disabling the irq.
		 */
		wmb();
		if (cd.s.line == 0)
			cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
		else
			cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
		raw_spin_unlock_irqrestore(lock, flags);
	}
}

static void octeon_irq_ciu_enable_all(struct irq_data *data)
{
	unsigned long flags;
	unsigned long *pen;
	int cpu;
	union octeon_ciu_chip_data cd;
	raw_spinlock_t *lock;

	cd.p = irq_data_get_irq_chip_data(data);

	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
		if (cd.s.line == 0)
			pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
		else
			pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);

		raw_spin_lock_irqsave(lock, flags);
		__set_bit(cd.s.bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		if (cd.s.line == 0)
			cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
		else
			cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
		raw_spin_unlock_irqrestore(lock, flags);
	}
}

/*
 * Enable the irq on the next core in the affinity set for chips that
 * have the EN*_W1{S,C} registers.
 */
static void octeon_irq_ciu_enable_v2(struct irq_data *data)
{
	u64 mask;
	int cpu = next_cpu_for_irq(data);
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd.s.bit);

	/*
	 * Called under the desc lock, so these should never get out
	 * of sync.
	 */
	if (cd.s.line == 0) {
		int index = octeon_coreid_for_cpu(cpu) * 2;
		set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
	} else {
		int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
		set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
	}
}

/*
 * Enable the irq on the current CPU for chips that
 * have the EN*_W1{S,C} registers.
 */
static void octeon_irq_ciu_enable_local_v2(struct irq_data *data)
{
	u64 mask;
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd.s.bit);

	if (cd.s.line == 0) {
		int index = cvmx_get_core_num() * 2;
		set_bit(cd.s.bit, &__get_cpu_var(octeon_irq_ciu0_en_mirror));
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
	} else {
		int index = cvmx_get_core_num() * 2 + 1;
		set_bit(cd.s.bit, &__get_cpu_var(octeon_irq_ciu1_en_mirror));
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
	}
}

static void octeon_irq_ciu_disable_local_v2(struct irq_data *data)
{
	u64 mask;
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd.s.bit);

	if (cd.s.line == 0) {
		int index = cvmx_get_core_num() * 2;
		clear_bit(cd.s.bit, &__get_cpu_var(octeon_irq_ciu0_en_mirror));
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
	} else {
		int index = cvmx_get_core_num() * 2 + 1;
		clear_bit(cd.s.bit, &__get_cpu_var(octeon_irq_ciu1_en_mirror));
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
	}
}

/*
 * Write to the W1C bit in CVMX_CIU_INTX_SUM0 to clear the irq.
 */
static void octeon_irq_ciu_ack(struct irq_data *data)
{
	u64 mask;
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd.s.bit);

	if (cd.s.line == 0) {
		int index = cvmx_get_core_num() * 2;
		cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask);
	} else {
		cvmx_write_csr(CVMX_CIU_INT_SUM1, mask);
	}
}

/*
 * Disable the irq on the all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu_disable_all_v2(struct irq_data *data)
{
	int cpu;
	u64 mask;
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd.s.bit);

	if (cd.s.line == 0) {
		for_each_online_cpu(cpu) {
			int index = octeon_coreid_for_cpu(cpu) * 2;
			clear_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
		}
	} else {
		for_each_online_cpu(cpu) {
			int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
			clear_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
		}
	}
}

/*
 * Enable the irq on the all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu_enable_all_v2(struct irq_data *data)
{
	int cpu;
	u64 mask;
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd.s.bit);

	if (cd.s.line == 0) {
		for_each_online_cpu(cpu) {
			int index = octeon_coreid_for_cpu(cpu) * 2;
			set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
		}
	} else {
		for_each_online_cpu(cpu) {
			int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
			set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
		}
	}
}

static void octeon_irq_gpio_setup(struct irq_data *data)
{
	union cvmx_gpio_bit_cfgx cfg;
	union octeon_ciu_chip_data cd;
	u32 t = irqd_get_trigger_type(data);

	cd.p = irq_data_get_irq_chip_data(data);

	cfg.u64 = 0;
	cfg.s.int_en = 1;
	cfg.s.int_type = (t & IRQ_TYPE_EDGE_BOTH) != 0;
	cfg.s.rx_xor = (t & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING)) != 0;

	/* 140 ns glitch filter */
	cfg.s.fil_cnt = 7;
	cfg.s.fil_sel = 3;

	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), cfg.u64);
}

static void octeon_irq_ciu_enable_gpio_v2(struct irq_data *data)
{
	octeon_irq_gpio_setup(data);
	octeon_irq_ciu_enable_v2(data);
}

static void octeon_irq_ciu_enable_gpio(struct irq_data *data)
{
	octeon_irq_gpio_setup(data);
	octeon_irq_ciu_enable(data);
}

static int octeon_irq_ciu_gpio_set_type(struct irq_data *data, unsigned int t)
{
	irqd_set_trigger_type(data, t);
	octeon_irq_gpio_setup(data);

	return IRQ_SET_MASK_OK;
}

static void octeon_irq_ciu_disable_gpio_v2(struct irq_data *data)
{
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);
	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), 0);

	octeon_irq_ciu_disable_all_v2(data);
}

static void octeon_irq_ciu_disable_gpio(struct irq_data *data)
{
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);
	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), 0);

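	/* The pin no longer generates interrupts; also clear its enable bit on every core. */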
	octeon_irq_ciu_disable_all(data);
}

static void octeon_irq_ciu_gpio_ack(struct irq_data *data)
{
	union octeon_ciu_chip_data cd;
	u64 mask;

	cd.p = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd.s.gpio_line);

	cvmx_write_csr(CVMX_GPIO_INT_CLR, mask);
}

static void octeon_irq_handle_gpio(unsigned int irq, struct irq_desc *desc)
{
	if (irqd_get_trigger_type(irq_desc_get_irq_data(desc)) & IRQ_TYPE_EDGE_BOTH)
		handle_edge_irq(irq, desc);
	else
		handle_level_irq(irq, desc);
}

#ifdef CONFIG_SMP

static void octeon_irq_cpu_offline_ciu(struct irq_data *data)
{
	int cpu = smp_processor_id();
	cpumask_t new_affinity;

	if (!cpumask_test_cpu(cpu, data->affinity))
		return;

	if (cpumask_weight(data->affinity) > 1) {
		/*
		 * It has multi-CPU affinity, just remove this CPU
		 * from the affinity set.
		 */
		cpumask_copy(&new_affinity, data->affinity);
		cpumask_clear_cpu(cpu, &new_affinity);
	} else {
		/* Otherwise, put it on the lowest numbered online CPU. */
		cpumask_clear(&new_affinity);
		cpumask_set_cpu(cpumask_first(cpu_online_mask), &new_affinity);
	}
	__irq_set_affinity_locked(data, &new_affinity);
}

static int octeon_irq_ciu_set_affinity(struct irq_data *data,
				       const struct cpumask *dest, bool force)
{
	int cpu;
	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
	unsigned long flags;
	union octeon_ciu_chip_data cd;
	unsigned long *pen;
	raw_spinlock_t *lock;

	cd.p = irq_data_get_irq_chip_data(data);

	/*
	 * For non-v2 CIU, we will allow only single CPU affinity.
	 * This removes the need to do locking in the .ack/.eoi
	 * functions.
	 */
	if (cpumask_weight(dest) != 1)
		return -EINVAL;

	if (!enable_one)
		return 0;


	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);

		lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
		raw_spin_lock_irqsave(lock, flags);

		if (cd.s.line == 0)
			pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
		else
			pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);

		if (cpumask_test_cpu(cpu, dest) && enable_one) {
			enable_one = 0;
			__set_bit(cd.s.bit, pen);
		} else {
			__clear_bit(cd.s.bit, pen);
		}
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();

		if (cd.s.line == 0)
			cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
		else
			cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);

		raw_spin_unlock_irqrestore(lock, flags);
	}
	return 0;
}

/*
 * Set affinity for the irq for chips that have the EN*_W1{S,C}
 * registers.
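 * The per-bit W1S/W1C writes are atomic, so no per-cpu mirror lock is taken here.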
 */
static int octeon_irq_ciu_set_affinity_v2(struct irq_data *data,
					  const struct cpumask *dest,
					  bool force)
{
	int cpu;
	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
	u64 mask;
	union octeon_ciu_chip_data cd;

	if (!enable_one)
		return 0;

	cd.p = irq_data_get_irq_chip_data(data);
	mask = 1ull << cd.s.bit;

	if (cd.s.line == 0) {
		for_each_online_cpu(cpu) {
			unsigned long *pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
			int index = octeon_coreid_for_cpu(cpu) * 2;
			if (cpumask_test_cpu(cpu, dest) && enable_one) {
				enable_one = false;
				set_bit(cd.s.bit, pen);
				cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
			} else {
				clear_bit(cd.s.bit, pen);
				cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
			}
		}
	} else {
		for_each_online_cpu(cpu) {
			unsigned long *pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
			int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
			if (cpumask_test_cpu(cpu, dest) && enable_one) {
				enable_one = false;
				set_bit(cd.s.bit, pen);
				cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
			} else {
				clear_bit(cd.s.bit, pen);
				cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
			}
		}
	}
	return 0;
}
#endif

/*
 * Newer octeon chips have support for lockless CIU operation.
 */
static struct irq_chip octeon_irq_chip_ciu_v2 = {
	.name = "CIU",
	.irq_enable = octeon_irq_ciu_enable_v2,
	.irq_disable = octeon_irq_ciu_disable_all_v2,
	.irq_ack = octeon_irq_ciu_ack,
	.irq_mask = octeon_irq_ciu_disable_local_v2,
	.irq_unmask = octeon_irq_ciu_enable_v2,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

static struct irq_chip octeon_irq_chip_ciu = {
	.name = "CIU",
	.irq_enable = octeon_irq_ciu_enable,
	.irq_disable = octeon_irq_ciu_disable_all,
	.irq_ack = octeon_irq_ciu_ack,
	.irq_mask = octeon_irq_ciu_disable_local,
	.irq_unmask = octeon_irq_ciu_enable,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

/* The mbox versions don't do any affinity or round-robin. */
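/* Enable/disable broadcast to every online core; .irq_ack masks and .irq_eoi re-enables only the local core. */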
static struct irq_chip octeon_irq_chip_ciu_mbox_v2 = {
	.name = "CIU-M",
	.irq_enable = octeon_irq_ciu_enable_all_v2,
	.irq_disable = octeon_irq_ciu_disable_all_v2,
	.irq_ack = octeon_irq_ciu_disable_local_v2,
	.irq_eoi = octeon_irq_ciu_enable_local_v2,

	.irq_cpu_online = octeon_irq_ciu_enable_local_v2,
	.irq_cpu_offline = octeon_irq_ciu_disable_local_v2,
	.flags = IRQCHIP_ONOFFLINE_ENABLED,
};

static struct irq_chip octeon_irq_chip_ciu_mbox = {
	.name = "CIU-M",
	.irq_enable = octeon_irq_ciu_enable_all,
	.irq_disable = octeon_irq_ciu_disable_all,
	.irq_ack = octeon_irq_ciu_disable_local,
	.irq_eoi = octeon_irq_ciu_enable_local,

	.irq_cpu_online = octeon_irq_ciu_enable_local,
	.irq_cpu_offline = octeon_irq_ciu_disable_local,
	.flags = IRQCHIP_ONOFFLINE_ENABLED,
};

static struct irq_chip octeon_irq_chip_ciu_gpio_v2 = {
	.name = "CIU-GPIO",
	.irq_enable = octeon_irq_ciu_enable_gpio_v2,
	.irq_disable = octeon_irq_ciu_disable_gpio_v2,
	.irq_ack = octeon_irq_ciu_gpio_ack,
	.irq_mask = octeon_irq_ciu_disable_local_v2,
	.irq_unmask = octeon_irq_ciu_enable_v2,
	.irq_set_type = octeon_irq_ciu_gpio_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
#endif
	.flags = IRQCHIP_SET_TYPE_MASKED,
};

static struct irq_chip octeon_irq_chip_ciu_gpio = {
	.name = "CIU-GPIO",
	.irq_enable = octeon_irq_ciu_enable_gpio,
	.irq_disable = octeon_irq_ciu_disable_gpio,
	.irq_mask = octeon_irq_ciu_disable_local,
	.irq_unmask = octeon_irq_ciu_enable,
	.irq_ack = octeon_irq_ciu_gpio_ack,
	.irq_set_type = octeon_irq_ciu_gpio_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity,
#endif
	.flags = IRQCHIP_SET_TYPE_MASKED,
};

/*
 * Watchdog interrupts are special. They are associated with a single
 * core, so we hardwire the affinity to that core.
 */
static void octeon_irq_ciu_wd_enable(struct irq_data *data)
{
	unsigned long flags;
	unsigned long *pen;
	int coreid = data->irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */
	int cpu = octeon_cpu_for_coreid(coreid);
	raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);

	raw_spin_lock_irqsave(lock, flags);
	pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
	__set_bit(coreid, pen);
	/*
	 * Must be visible to octeon_irq_ip{2,3}_ciu() before enabling
	 * the irq.
	 */
	wmb();
	cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
	raw_spin_unlock_irqrestore(lock, flags);
}

/*
 * Watchdog interrupts are special. They are associated with a single
 * core, so we hardwire the affinity to that core.
 */
static void octeon_irq_ciu1_wd_enable_v2(struct irq_data *data)
{
	int coreid = data->irq - OCTEON_IRQ_WDOG0;
	int cpu = octeon_cpu_for_coreid(coreid);

	set_bit(coreid, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
	cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(coreid * 2 + 1), 1ull << coreid);
}


static struct irq_chip octeon_irq_chip_ciu_wd_v2 = {
	.name = "CIU-W",
	.irq_enable = octeon_irq_ciu1_wd_enable_v2,
	.irq_disable = octeon_irq_ciu_disable_all_v2,
	.irq_mask = octeon_irq_ciu_disable_local_v2,
	.irq_unmask = octeon_irq_ciu_enable_local_v2,
};

static struct irq_chip octeon_irq_chip_ciu_wd = {
	.name = "CIU-W",
	.irq_enable = octeon_irq_ciu_wd_enable,
	.irq_disable = octeon_irq_ciu_disable_all,
	.irq_mask = octeon_irq_ciu_disable_local,
	.irq_unmask = octeon_irq_ciu_enable_local,
};

static bool octeon_irq_ciu_is_edge(unsigned int line, unsigned int bit)
{
	bool edge = false;

	if (line == 0)
		switch (bit) {
		case 48 ... 49:	/* GMX DRP */
		case 50:	/* IPD_DRP */
		case 52 ... 55:	/* Timers */
		case 58:	/* MPI */
			edge = true;
			break;
		default:
			break;
		}
	else /* line == 1 */
		switch (bit) {
		case 47:	/* PTP */
			edge = true;
			break;
		default:
			break;
		}
	return edge;
}

struct octeon_irq_gpio_domain_data {
	unsigned int base_hwirq;
};

static int octeon_irq_gpio_xlat(struct irq_domain *d,
				struct device_node *node,
				const u32 *intspec,
				unsigned int intsize,
				unsigned long *out_hwirq,
				unsigned int *out_type)
{
	unsigned int type;
	unsigned int pin;
	unsigned int trigger;

	if (d->of_node != node)
		return -EINVAL;

	if (intsize < 2)
		return -EINVAL;

	pin = intspec[0];
	if (pin >= 16)
		return -EINVAL;

	trigger = intspec[1];

	switch (trigger) {
	case 1:
		type = IRQ_TYPE_EDGE_RISING;
		break;
	case 2:
		type = IRQ_TYPE_EDGE_FALLING;
		break;
	case 4:
		type = IRQ_TYPE_LEVEL_HIGH;
		break;
	case 8:
		type = IRQ_TYPE_LEVEL_LOW;
		break;
	default:
		pr_err("Error: (%s) Invalid irq trigger specification: %x\n",
		       node->name,
		       trigger);
		type = IRQ_TYPE_LEVEL_LOW;
		break;
	}
	*out_type = type;
	*out_hwirq = pin;

	return 0;
}

static int octeon_irq_ciu_xlat(struct irq_domain *d,
			       struct device_node *node,
			       const u32 *intspec,
			       unsigned int intsize,
			       unsigned long *out_hwirq,
			       unsigned int *out_type)
{
	unsigned int ciu, bit;

	ciu = intspec[0];
	bit = intspec[1];

	if (ciu > 1 || bit > 63)
		return -EINVAL;

	/* These are the GPIO lines */
	if (ciu == 0 && bit >= 16 && bit < 32)
		return -EINVAL;

	*out_hwirq = (ciu << 6) | bit;
	*out_type = 0;

	return 0;
}

static struct irq_chip *octeon_irq_ciu_chip;
static struct irq_chip *octeon_irq_gpio_chip;

static bool octeon_irq_virq_in_range(unsigned int virq)
{
	/* We cannot let it overflow the mapping array. */
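	/* octeon_irq_ciu_to_irq[][] stores the virq as a u8, so only virqs below 256 can be mapped. */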
	if (virq < (1ul << 8 * sizeof(octeon_irq_ciu_to_irq[0][0])))
		return true;

	WARN_ONCE(true, "virq out of range %u.\n", virq);
	return false;
}

static int octeon_irq_ciu_map(struct irq_domain *d,
			      unsigned int virq, irq_hw_number_t hw)
{
	unsigned int line = hw >> 6;
	unsigned int bit = hw & 63;

	if (!octeon_irq_virq_in_range(virq))
		return -EINVAL;

	if (line > 1 || octeon_irq_ciu_to_irq[line][bit] != 0)
		return -EINVAL;

	if (octeon_irq_ciu_is_edge(line, bit))
		octeon_irq_set_ciu_mapping(virq, line, bit, 0,
					   octeon_irq_ciu_chip,
					   handle_edge_irq);
	else
		octeon_irq_set_ciu_mapping(virq, line, bit, 0,
					   octeon_irq_ciu_chip,
					   handle_level_irq);

	return 0;
}

static int octeon_irq_gpio_map_common(struct irq_domain *d,
				      unsigned int virq, irq_hw_number_t hw,
				      int line_limit, struct irq_chip *chip)
{
	struct octeon_irq_gpio_domain_data *gpiod = d->host_data;
	unsigned int line, bit;

	if (!octeon_irq_virq_in_range(virq))
		return -EINVAL;

	hw += gpiod->base_hwirq;
	line = hw >> 6;
	bit = hw & 63;
	if (line > line_limit || octeon_irq_ciu_to_irq[line][bit] != 0)
		return -EINVAL;

	octeon_irq_set_ciu_mapping(virq, line, bit, hw,
				   chip, octeon_irq_handle_gpio);
	return 0;
}

static int octeon_irq_gpio_map(struct irq_domain *d,
			       unsigned int virq, irq_hw_number_t hw)
{
	return octeon_irq_gpio_map_common(d, virq, hw, 1, octeon_irq_gpio_chip);
}

static struct irq_domain_ops octeon_irq_domain_ciu_ops = {
	.map = octeon_irq_ciu_map,
	.xlate = octeon_irq_ciu_xlat,
};

static struct irq_domain_ops octeon_irq_domain_gpio_ops = {
	.map = octeon_irq_gpio_map,
	.xlate = octeon_irq_gpio_xlat,
};

static void octeon_irq_ip2_ciu(void)
{
	const unsigned long core_id = cvmx_get_core_num();
	u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INTX_SUM0(core_id * 2));

	ciu_sum &= __get_cpu_var(octeon_irq_ciu0_en_mirror);
	if (likely(ciu_sum)) {
		int bit = fls64(ciu_sum) - 1;
		int irq = octeon_irq_ciu_to_irq[0][bit];
		if (likely(irq))
			do_IRQ(irq);
		else
			spurious_interrupt();
	} else {
		spurious_interrupt();
	}
}

static void octeon_irq_ip3_ciu(void)
{
	u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INT_SUM1);

	ciu_sum &= __get_cpu_var(octeon_irq_ciu1_en_mirror);
	if (likely(ciu_sum)) {
		int bit = fls64(ciu_sum) - 1;
		int irq = octeon_irq_ciu_to_irq[1][bit];
		if (likely(irq))
			do_IRQ(irq);
		else
			spurious_interrupt();
	} else {
		spurious_interrupt();
	}
}

static bool octeon_irq_use_ip4;

static void __cpuinit octeon_irq_local_enable_ip4(void *arg)
{
	set_c0_status(STATUSF_IP4);
}

static void octeon_irq_ip4_mask(void)
{
	clear_c0_status(STATUSF_IP4);
	spurious_interrupt();
}

static void (*octeon_irq_ip2)(void);
static void (*octeon_irq_ip3)(void);
static void (*octeon_irq_ip4)(void);

void __cpuinitdata (*octeon_irq_setup_secondary)(void);

void __cpuinit octeon_irq_set_ip4_handler(octeon_irq_ip4_handler_t h)
{
	octeon_irq_ip4 = h;
	octeon_irq_use_ip4 = true;
	on_each_cpu(octeon_irq_local_enable_ip4, NULL, 1);
}

static void __cpuinit octeon_irq_percpu_enable(void)
{
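	/* Run each irq_chip's .irq_cpu_online hook (mbox/core re-enable) on this CPU. */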
	irq_cpu_online();
}

static void __cpuinit octeon_irq_init_ciu_percpu(void)
{
	int coreid = cvmx_get_core_num();


	__get_cpu_var(octeon_irq_ciu0_en_mirror) = 0;
	__get_cpu_var(octeon_irq_ciu1_en_mirror) = 0;
	wmb();
	raw_spin_lock_init(&__get_cpu_var(octeon_irq_ciu_spinlock));
	/*
	 * Disable All CIU Interrupts. The ones we need will be
	 * enabled later. Read the SUM register so we know the write
	 * completed.
	 */
	cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2)), 0);
	cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2 + 1)), 0);
	cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2)), 0);
	cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2 + 1)), 0);
	cvmx_read_csr(CVMX_CIU_INTX_SUM0((coreid * 2)));
}

static void octeon_irq_init_ciu2_percpu(void)
{
	u64 regx, ipx;
	int coreid = cvmx_get_core_num();
	u64 base = CVMX_CIU2_EN_PPX_IP2_WRKQ(coreid);

	/*
	 * Disable All CIU2 Interrupts. The ones we need will be
	 * enabled later. Read the SUM register so we know the write
	 * completed.
	 *
	 * There are 9 registers and 3 IPX levels with strides 0x1000
	 * and 0x200 respectively. Use loops to clear them.
	 */
	for (regx = 0; regx <= 0x8000; regx += 0x1000) {
		for (ipx = 0; ipx <= 0x400; ipx += 0x200)
			cvmx_write_csr(base + regx + ipx, 0);
	}

	cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP2(coreid));
}

static void __cpuinit octeon_irq_setup_secondary_ciu(void)
{
	octeon_irq_init_ciu_percpu();
	octeon_irq_percpu_enable();

	/* Enable the CIU lines */
	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
	clear_c0_status(STATUSF_IP4);
}

static void octeon_irq_setup_secondary_ciu2(void)
{
	octeon_irq_init_ciu2_percpu();
	octeon_irq_percpu_enable();

	/* Enable the CIU lines */
	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
	if (octeon_irq_use_ip4)
		set_c0_status(STATUSF_IP4);
	else
		clear_c0_status(STATUSF_IP4);
}

static void __init octeon_irq_init_ciu(void)
{
	unsigned int i;
	struct irq_chip *chip;
	struct irq_chip *chip_mbox;
	struct irq_chip *chip_wd;
	struct device_node *gpio_node;
	struct device_node *ciu_node;
	struct irq_domain *ciu_domain = NULL;

	octeon_irq_init_ciu_percpu();
	octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu;

	octeon_irq_ip2 = octeon_irq_ip2_ciu;
	octeon_irq_ip3 = octeon_irq_ip3_ciu;
	if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) ||
	    OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) ||
	    OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X) ||
	    OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
		chip = &octeon_irq_chip_ciu_v2;
		chip_mbox = &octeon_irq_chip_ciu_mbox_v2;
		chip_wd = &octeon_irq_chip_ciu_wd_v2;
		octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio_v2;
	} else {
		chip = &octeon_irq_chip_ciu;
		chip_mbox = &octeon_irq_chip_ciu_mbox;
		chip_wd = &octeon_irq_chip_ciu_wd;
		octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio;
	}
	octeon_irq_ciu_chip = chip;
	octeon_irq_ip4 = octeon_irq_ip4_mask;

	/* Mips internal */
	octeon_irq_init_core();

	gpio_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-gpio");
	if (gpio_node) {
		struct octeon_irq_gpio_domain_data *gpiod;

		gpiod = kzalloc(sizeof(*gpiod), GFP_KERNEL);
		if (gpiod) {
			/* gpio domain host_data is the base hwirq number. */
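			/* On the CIU, the 16 GPIO pins occupy bits 16-31 of line 0, so GPIO hwirq 0 starts at 16. */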
			gpiod->base_hwirq = 16;
			irq_domain_add_linear(gpio_node, 16, &octeon_irq_domain_gpio_ops, gpiod);
			of_node_put(gpio_node);
		} else
			pr_warn("Cannot allocate memory for GPIO irq_domain.\n");
	} else
		pr_warn("Cannot find device node for cavium,octeon-3860-gpio.\n");

	ciu_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-ciu");
	if (ciu_node) {
		ciu_domain = irq_domain_add_tree(ciu_node, &octeon_irq_domain_ciu_ops, NULL);
		irq_set_default_host(ciu_domain);
		of_node_put(ciu_node);
	} else
		panic("Cannot find device node for cavium,octeon-3860-ciu.");

	/* CIU_0 */
	for (i = 0; i < 16; i++)
		octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i + 0);

	octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX0, 0, 32, 0, chip_mbox, handle_percpu_irq);
	octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX1, 0, 33, 0, chip_mbox, handle_percpu_irq);

	for (i = 0; i < 4; i++)
		octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_INT0, 0, i + 36);
	for (i = 0; i < 4; i++)
		octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 0, i + 40);

	octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_RML, 0, 46);
	for (i = 0; i < 4; i++)
		octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_TIMER0, 0, i + 52);

	octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 0, 56);
	octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_BOOTDMA, 0, 63);

	/* CIU_1 */
	for (i = 0; i < 16; i++)
		octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i + 0, 0, chip_wd, handle_level_irq);

	octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB1, 1, 17);

	/* Enable the CIU lines */
	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
	clear_c0_status(STATUSF_IP4);
}

/*
 * Watchdog interrupts are special. They are associated with a single
 * core, so we hardwire the affinity to that core.
 */
static void octeon_irq_ciu2_wd_enable(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int coreid = data->irq - OCTEON_IRQ_WDOG0;
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd.s.bit);

	en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + (0x1000ull * cd.s.line);
	cvmx_write_csr(en_addr, mask);

}

static void octeon_irq_ciu2_enable(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int cpu = next_cpu_for_irq(data);
	int coreid = octeon_coreid_for_cpu(cpu);
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd.s.bit);

	en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + (0x1000ull * cd.s.line);
	cvmx_write_csr(en_addr, mask);
}

static void octeon_irq_ciu2_enable_local(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int coreid = cvmx_get_core_num();
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd.s.bit);

	en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + (0x1000ull * cd.s.line);
	cvmx_write_csr(en_addr, mask);

}

static void octeon_irq_ciu2_disable_local(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int coreid = cvmx_get_core_num();
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd.s.bit);

	en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(coreid) + (0x1000ull * cd.s.line);
	cvmx_write_csr(en_addr, mask);

}

static void octeon_irq_ciu2_ack(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int coreid = cvmx_get_core_num();
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd.s.bit);

	en_addr = CVMX_CIU2_RAW_PPX_IP2_WRKQ(coreid) + (0x1000ull * cd.s.line);
	cvmx_write_csr(en_addr, mask);

}

static void octeon_irq_ciu2_disable_all(struct irq_data *data)
{
	int cpu;
	u64 mask;
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd.s.bit);

	for_each_online_cpu(cpu) {
		u64 en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd.s.line);
		cvmx_write_csr(en_addr, mask);
	}
}

static void octeon_irq_ciu2_mbox_enable_all(struct irq_data *data)
{
	int cpu;
	u64 mask;

	mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);

	for_each_online_cpu(cpu) {
		u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1S(octeon_coreid_for_cpu(cpu));
		cvmx_write_csr(en_addr, mask);
	}
}

static void octeon_irq_ciu2_mbox_disable_all(struct irq_data *data)
{
	int cpu;
	u64 mask;

	mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);

	for_each_online_cpu(cpu) {
		u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1C(octeon_coreid_for_cpu(cpu));
		cvmx_write_csr(en_addr, mask);
	}
}

static void octeon_irq_ciu2_mbox_enable_local(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int coreid = cvmx_get_core_num();

	mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);
	en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1S(coreid);
	cvmx_write_csr(en_addr, mask);
}

static void octeon_irq_ciu2_mbox_disable_local(struct irq_data *data)
{
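	/* Mask this CPU's mailbox bit via the per-core IP3 mailbox W1C register. */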
	u64 mask;
	u64 en_addr;
	int coreid = cvmx_get_core_num();

	mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);
	en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1C(coreid);
	cvmx_write_csr(en_addr, mask);
}

#ifdef CONFIG_SMP
static int octeon_irq_ciu2_set_affinity(struct irq_data *data,
					const struct cpumask *dest, bool force)
{
	int cpu;
	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
	u64 mask;
	union octeon_ciu_chip_data cd;

	if (!enable_one)
		return 0;

	cd.p = irq_data_get_irq_chip_data(data);
	mask = 1ull << cd.s.bit;

	for_each_online_cpu(cpu) {
		u64 en_addr;
		if (cpumask_test_cpu(cpu, dest) && enable_one) {
			enable_one = false;
			en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd.s.line);
		} else {
			en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd.s.line);
		}
		cvmx_write_csr(en_addr, mask);
	}

	return 0;
}
#endif

static void octeon_irq_ciu2_enable_gpio(struct irq_data *data)
{
	octeon_irq_gpio_setup(data);
	octeon_irq_ciu2_enable(data);
}

static void octeon_irq_ciu2_disable_gpio(struct irq_data *data)
{
	union octeon_ciu_chip_data cd;
	cd.p = irq_data_get_irq_chip_data(data);

	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), 0);

	octeon_irq_ciu2_disable_all(data);
}

static struct irq_chip octeon_irq_chip_ciu2 = {
	.name = "CIU2-E",
	.irq_enable = octeon_irq_ciu2_enable,
	.irq_disable = octeon_irq_ciu2_disable_all,
	.irq_ack = octeon_irq_ciu2_ack,
	.irq_mask = octeon_irq_ciu2_disable_local,
	.irq_unmask = octeon_irq_ciu2_enable,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu2_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

static struct irq_chip octeon_irq_chip_ciu2_mbox = {
	.name = "CIU2-M",
	.irq_enable = octeon_irq_ciu2_mbox_enable_all,
	.irq_disable = octeon_irq_ciu2_mbox_disable_all,
	.irq_ack = octeon_irq_ciu2_mbox_disable_local,
	.irq_eoi = octeon_irq_ciu2_mbox_enable_local,

	.irq_cpu_online = octeon_irq_ciu2_mbox_enable_local,
	.irq_cpu_offline = octeon_irq_ciu2_mbox_disable_local,
	.flags = IRQCHIP_ONOFFLINE_ENABLED,
};

static struct irq_chip octeon_irq_chip_ciu2_wd = {
	.name = "CIU2-W",
	.irq_enable = octeon_irq_ciu2_wd_enable,
	.irq_disable = octeon_irq_ciu2_disable_all,
	.irq_mask = octeon_irq_ciu2_disable_local,
	.irq_unmask = octeon_irq_ciu2_enable_local,
};

static struct irq_chip octeon_irq_chip_ciu2_gpio = {
	.name = "CIU-GPIO",
	.irq_enable = octeon_irq_ciu2_enable_gpio,
	.irq_disable = octeon_irq_ciu2_disable_gpio,
	.irq_ack = octeon_irq_ciu_gpio_ack,
	.irq_mask = octeon_irq_ciu2_disable_local,
	.irq_unmask = octeon_irq_ciu2_enable,
	.irq_set_type = octeon_irq_ciu_gpio_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu2_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
	.flags = IRQCHIP_SET_TYPE_MASKED,
};

static int octeon_irq_ciu2_xlat(struct irq_domain *d,
				struct device_node *node,
				const u32 *intspec,
				unsigned int intsize,
				unsigned long *out_hwirq,
				unsigned int *out_type)
{
	unsigned int ciu, bit;

	ciu = intspec[0];
	bit = intspec[1];

	/* Line 7 holds the GPIO lines */
	if (ciu > 6 || bit > 63)
		return -EINVAL;

	*out_hwirq = (ciu << 6) | bit;
	*out_type = 0;

	return 0;
}

static bool octeon_irq_ciu2_is_edge(unsigned int line, unsigned int bit)
{
	bool edge = false;

	if (line == 3) /* MIO */
		switch (bit) {
		case 2:		/* IPD_DRP */
		case 8 ... 11:	/* Timers */
		case 48:	/* PTP */
			edge = true;
			break;
		default:
			break;
		}
	else if (line == 6) /* PKT */
		switch (bit) {
		case 52 ... 53:	/* ILK_DRP */
		case 8 ... 12:	/* GMX_DRP */
			edge = true;
			break;
		default:
			break;
		}
	return edge;
}

static int octeon_irq_ciu2_map(struct irq_domain *d,
			       unsigned int virq, irq_hw_number_t hw)
{
	unsigned int line = hw >> 6;
	unsigned int bit = hw & 63;

	if (!octeon_irq_virq_in_range(virq))
		return -EINVAL;

	/* Line 7 holds the GPIO lines */
	if (line > 6 || octeon_irq_ciu_to_irq[line][bit] != 0)
		return -EINVAL;

	if (octeon_irq_ciu2_is_edge(line, bit))
		octeon_irq_set_ciu_mapping(virq, line, bit, 0,
					   &octeon_irq_chip_ciu2,
					   handle_edge_irq);
	else
		octeon_irq_set_ciu_mapping(virq, line, bit, 0,
					   &octeon_irq_chip_ciu2,
					   handle_level_irq);

	return 0;
}
static int octeon_irq_ciu2_gpio_map(struct irq_domain *d,
				    unsigned int virq, irq_hw_number_t hw)
{
	return octeon_irq_gpio_map_common(d, virq, hw, 7, &octeon_irq_chip_ciu2_gpio);
}

static struct irq_domain_ops octeon_irq_domain_ciu2_ops = {
	.map = octeon_irq_ciu2_map,
	.xlate = octeon_irq_ciu2_xlat,
};

static struct irq_domain_ops octeon_irq_domain_ciu2_gpio_ops = {
	.map = octeon_irq_ciu2_gpio_map,
	.xlate = octeon_irq_gpio_xlat,
};

static void octeon_irq_ciu2(void)
{
	int line;
	int bit;
	int irq;
	u64 src_reg, src, sum;
	const unsigned long core_id = cvmx_get_core_num();

	sum = cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP2(core_id)) & 0xfful;

	if (unlikely(!sum))
		goto spurious;

	line = fls64(sum) - 1;
	src_reg = CVMX_CIU2_SRC_PPX_IP2_WRKQ(core_id) + (0x1000 * line);
	src = cvmx_read_csr(src_reg);

	if (unlikely(!src))
		goto spurious;

	bit = fls64(src) - 1;
	irq = octeon_irq_ciu_to_irq[line][bit];
	if (unlikely(!irq))
		goto spurious;

	do_IRQ(irq);
	goto out;

spurious:
	spurious_interrupt();
out:
	/* CN68XX pass 1.x has an erratum where accessing the ACK registers
	   can stop interrupts from propagating */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
		cvmx_read_csr(CVMX_CIU2_INTR_CIU_READY);
	else
		cvmx_read_csr(CVMX_CIU2_ACK_PPX_IP2(core_id));
	return;
}

static void octeon_irq_ciu2_mbox(void)
{
	int line;

	const unsigned long core_id = cvmx_get_core_num();
	u64 sum = cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP3(core_id)) >> 60;

	if (unlikely(!sum))
		goto spurious;

	line = fls64(sum) - 1;

	do_IRQ(OCTEON_IRQ_MBOX0 + line);
	goto out;

spurious:
	spurious_interrupt();
out:
	/* CN68XX pass 1.x has an erratum where accessing the ACK registers
	   can stop interrupts from propagating */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
		cvmx_read_csr(CVMX_CIU2_INTR_CIU_READY);
	else
		cvmx_read_csr(CVMX_CIU2_ACK_PPX_IP3(core_id));
	return;
}

static void __init octeon_irq_init_ciu2(void)
{
	unsigned int i;
	struct device_node *gpio_node;
	struct device_node *ciu_node;
	struct irq_domain *ciu_domain = NULL;

	octeon_irq_init_ciu2_percpu();
	octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu2;

	octeon_irq_ip2 = octeon_irq_ciu2;
	octeon_irq_ip3 = octeon_irq_ciu2_mbox;
	octeon_irq_ip4 = octeon_irq_ip4_mask;

	/* Mips internal */
	octeon_irq_init_core();

	gpio_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-gpio");
	if (gpio_node) {
		struct octeon_irq_gpio_domain_data *gpiod;

		gpiod = kzalloc(sizeof(*gpiod), GFP_KERNEL);
		if (gpiod) {
			/* gpio domain host_data is the base hwirq number. */
			gpiod->base_hwirq = 7 << 6;
			irq_domain_add_linear(gpio_node, 16, &octeon_irq_domain_ciu2_gpio_ops, gpiod);
			of_node_put(gpio_node);
		} else
			pr_warn("Cannot allocate memory for GPIO irq_domain.\n");
	} else
		pr_warn("Cannot find device node for cavium,octeon-3860-gpio.\n");

	ciu_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-6880-ciu2");
	if (ciu_node) {
		ciu_domain = irq_domain_add_tree(ciu_node, &octeon_irq_domain_ciu2_ops, NULL);
		irq_set_default_host(ciu_domain);
		of_node_put(ciu_node);
	} else
		panic("Cannot find device node for cavium,octeon-6880-ciu2.");

	/* CIU2 */
	for (i = 0; i < 64; i++)
		octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i);

	for (i = 0; i < 32; i++)
		octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i, 0,
					   &octeon_irq_chip_ciu2_wd, handle_level_irq);

	for (i = 0; i < 4; i++)
		octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_TIMER0, 3, i + 8);

	octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 3, 44);

	for (i = 0; i < 4; i++)
		octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_INT0, 4, i);

	for (i = 0; i < 4; i++)
		octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 4, i + 8);

	irq_set_chip_and_handler(OCTEON_IRQ_MBOX0, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
	irq_set_chip_and_handler(OCTEON_IRQ_MBOX1, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
	irq_set_chip_and_handler(OCTEON_IRQ_MBOX2, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
	irq_set_chip_and_handler(OCTEON_IRQ_MBOX3, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);

	/* Enable the CIU lines */
	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
	clear_c0_status(STATUSF_IP4);
}

void __init arch_init_irq(void)
{
#ifdef CONFIG_SMP
	/* Set the default affinity to the boot cpu. */
	cpumask_clear(irq_default_affinity);
	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
#endif
	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
		octeon_irq_init_ciu2();
	else
		octeon_irq_init_ciu();
}

asmlinkage void plat_irq_dispatch(void)
{
	unsigned long cop0_cause;
	unsigned long cop0_status;

	while (1) {
		cop0_cause = read_c0_cause();
		cop0_status = read_c0_status();
		cop0_cause &= cop0_status;
		cop0_cause &= ST0_IM;

		if (unlikely(cop0_cause & STATUSF_IP2))
			octeon_irq_ip2();
		else if (unlikely(cop0_cause & STATUSF_IP3))
			octeon_irq_ip3();
		else if (unlikely(cop0_cause & STATUSF_IP4))
			octeon_irq_ip4();
		else if (likely(cop0_cause))
			do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE);
		else
			break;
	}
}

#ifdef CONFIG_HOTPLUG_CPU

void fixup_irqs(void)
{
	irq_cpu_offline();
}

#endif /* CONFIG_HOTPLUG_CPU */