/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004-2014 Cavium, Inc.
 */

#include <linux/of_address.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/bitops.h>
#include <linux/of_irq.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/of.h>

#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-ciu2-defs.h>

static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu0_en_mirror);
static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu1_en_mirror);
static DEFINE_PER_CPU(raw_spinlock_t, octeon_irq_ciu_spinlock);

struct octeon_irq_ciu_domain_data {
	int num_sum;	/* number of sum registers (2 or 3). */
};

static __read_mostly u8 octeon_irq_ciu_to_irq[8][64];

struct octeon_ciu_chip_data {
	union {
		struct {		/* only used for ciu3 */
			u64 ciu3_addr;
			unsigned int intsn;
		};
		struct {		/* only used for ciu/ciu2 */
			u8 line;
			u8 bit;
			u8 gpio_line;
		};
	};
	int current_cpu;	/* Next CPU expected to take this irq */
};

struct octeon_core_chip_data {
	struct mutex core_irq_mutex;
	bool current_en;
	bool desired_en;
	u8 bit;
};

#define MIPS_CORE_IRQ_LINES 8

static struct octeon_core_chip_data octeon_irq_core_chip_data[MIPS_CORE_IRQ_LINES];

static int octeon_irq_set_ciu_mapping(int irq, int line, int bit, int gpio_line,
				      struct irq_chip *chip,
				      irq_flow_handler_t handler)
{
	struct octeon_ciu_chip_data *cd;

	cd = kzalloc(sizeof(*cd), GFP_KERNEL);
	if (!cd)
		return -ENOMEM;

	irq_set_chip_and_handler(irq, chip, handler);

	cd->line = line;
	cd->bit = bit;
	cd->gpio_line = gpio_line;

	irq_set_chip_data(irq, cd);
	octeon_irq_ciu_to_irq[line][bit] = irq;
	return 0;
}

static void octeon_irq_free_cd(struct irq_domain *d, unsigned int irq)
{
	struct irq_data *data = irq_get_irq_data(irq);
	struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data);

	irq_set_chip_data(irq, NULL);
	kfree(cd);
}

static int octeon_irq_force_ciu_mapping(struct irq_domain *domain,
					int irq, int line, int bit)
{
	return irq_domain_associate(domain, irq, line << 6 | bit);
}
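/*
 * CIU hwirq numbers pack the sum line into bits 7:6 and the bit
 * position into bits 5:0, i.e. hwirq = (line << 6) | bit.  For
 * example, line 1 bit 17 (USB1) is hwirq 81.  The .map callbacks
 * below decode with line = hw >> 6 and bit = hw & 63.
 */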
static int octeon_coreid_for_cpu(int cpu)
{
#ifdef CONFIG_SMP
	return cpu_logical_map(cpu);
#else
	return cvmx_get_core_num();
#endif
}

static int octeon_cpu_for_coreid(int coreid)
{
#ifdef CONFIG_SMP
	return cpu_number_map(coreid);
#else
	return smp_processor_id();
#endif
}

static void octeon_irq_core_ack(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
	unsigned int bit = cd->bit;

	/*
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	clear_c0_status(0x100 << bit);
	/* The two user interrupts must be cleared manually. */
	if (bit < 2)
		clear_c0_cause(0x100 << bit);
}

static void octeon_irq_core_eoi(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);

	/*
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	set_c0_status(0x100 << cd->bit);
}

static void octeon_irq_core_set_enable_local(void *arg)
{
	struct irq_data *data = arg;
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
	unsigned int mask = 0x100 << cd->bit;

	/*
	 * Interrupts are already disabled, so these are atomic.
	 */
	if (cd->desired_en)
		set_c0_status(mask);
	else
		clear_c0_status(mask);
}

static void octeon_irq_core_disable(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
	cd->desired_en = false;
}

static void octeon_irq_core_enable(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
	cd->desired_en = true;
}

static void octeon_irq_core_bus_lock(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);

	mutex_lock(&cd->core_irq_mutex);
}

static void octeon_irq_core_bus_sync_unlock(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);

	if (cd->desired_en != cd->current_en) {
		on_each_cpu(octeon_irq_core_set_enable_local, data, 1);

		cd->current_en = cd->desired_en;
	}

	mutex_unlock(&cd->core_irq_mutex);
}

static struct irq_chip octeon_irq_chip_core = {
	.name = "Core",
	.irq_enable = octeon_irq_core_enable,
	.irq_disable = octeon_irq_core_disable,
	.irq_ack = octeon_irq_core_ack,
	.irq_eoi = octeon_irq_core_eoi,
	.irq_bus_lock = octeon_irq_core_bus_lock,
	.irq_bus_sync_unlock = octeon_irq_core_bus_sync_unlock,

	.irq_cpu_online = octeon_irq_core_eoi,
	.irq_cpu_offline = octeon_irq_core_ack,
	.flags = IRQCHIP_ONOFFLINE_ENABLED,
};

static void __init octeon_irq_init_core(void)
{
	int i;
	int irq;
	struct octeon_core_chip_data *cd;

	for (i = 0; i < MIPS_CORE_IRQ_LINES; i++) {
		cd = &octeon_irq_core_chip_data[i];
		cd->current_en = false;
		cd->desired_en = false;
		cd->bit = i;
		mutex_init(&cd->core_irq_mutex);

		irq = OCTEON_IRQ_SW0 + i;
		irq_set_chip_data(irq, cd);
		irq_set_chip_and_handler(irq, &octeon_irq_chip_core,
					 handle_percpu_irq);
	}
}

static int next_cpu_for_irq(struct irq_data *data)
{
#ifdef CONFIG_SMP
	int cpu;
	int weight = cpumask_weight(data->affinity);
	struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data);

	if (weight > 1) {
		cpu = cd->current_cpu;
		for (;;) {
			cpu = cpumask_next(cpu, data->affinity);
			if (cpu >= nr_cpu_ids) {
				cpu = -1;
				continue;
			} else if (cpumask_test_cpu(cpu, cpu_online_mask)) {
				break;
			}
		}
	} else if (weight == 1) {
		cpu = cpumask_first(data->affinity);
	} else {
		cpu = smp_processor_id();
	}
	cd->current_cpu = cpu;
	return cpu;
#else
	return smp_processor_id();
#endif
}
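/*
 * Example (assuming all CPUs in the mask are online): with affinity
 * {0, 2, 3} and current_cpu == 2, successive calls return 3, 0, 2,
 * 3, ... so each re-enable rotates the irq to the next CPU in the
 * affinity set.
 */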
static void octeon_irq_ciu_enable(struct irq_data *data)
{
	int cpu = next_cpu_for_irq(data);
	int coreid = octeon_coreid_for_cpu(cpu);
	unsigned long *pen;
	unsigned long flags;
	struct octeon_ciu_chip_data *cd;
	raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);

	cd = irq_data_get_irq_chip_data(data);

	raw_spin_lock_irqsave(lock, flags);
	if (cd->line == 0) {
		pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
		__set_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
	} else {
		pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
		__set_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
	}
	raw_spin_unlock_irqrestore(lock, flags);
}

static void octeon_irq_ciu_enable_local(struct irq_data *data)
{
	unsigned long *pen;
	unsigned long flags;
	struct octeon_ciu_chip_data *cd;
	raw_spinlock_t *lock = this_cpu_ptr(&octeon_irq_ciu_spinlock);

	cd = irq_data_get_irq_chip_data(data);

	raw_spin_lock_irqsave(lock, flags);
	if (cd->line == 0) {
		pen = this_cpu_ptr(&octeon_irq_ciu0_en_mirror);
		__set_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen);
	} else {
		pen = this_cpu_ptr(&octeon_irq_ciu1_en_mirror);
		__set_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1), *pen);
	}
	raw_spin_unlock_irqrestore(lock, flags);
}

static void octeon_irq_ciu_disable_local(struct irq_data *data)
{
	unsigned long *pen;
	unsigned long flags;
	struct octeon_ciu_chip_data *cd;
	raw_spinlock_t *lock = this_cpu_ptr(&octeon_irq_ciu_spinlock);

	cd = irq_data_get_irq_chip_data(data);

	raw_spin_lock_irqsave(lock, flags);
	if (cd->line == 0) {
		pen = this_cpu_ptr(&octeon_irq_ciu0_en_mirror);
		__clear_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen);
	} else {
		pen = this_cpu_ptr(&octeon_irq_ciu1_en_mirror);
		__clear_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1), *pen);
	}
	raw_spin_unlock_irqrestore(lock, flags);
}

static void octeon_irq_ciu_disable_all(struct irq_data *data)
{
	unsigned long flags;
	unsigned long *pen;
	int cpu;
	struct octeon_ciu_chip_data *cd;
	raw_spinlock_t *lock;

	cd = irq_data_get_irq_chip_data(data);

	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);

		lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
		if (cd->line == 0)
			pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
		else
			pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);

		raw_spin_lock_irqsave(lock, flags);
		__clear_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		if (cd->line == 0)
			cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
		else
			cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
		raw_spin_unlock_irqrestore(lock, flags);
	}
}
static void octeon_irq_ciu_enable_all(struct irq_data *data)
{
	unsigned long flags;
	unsigned long *pen;
	int cpu;
	struct octeon_ciu_chip_data *cd;
	raw_spinlock_t *lock;

	cd = irq_data_get_irq_chip_data(data);

	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);

		lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
		if (cd->line == 0)
			pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
		else
			pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);

		raw_spin_lock_irqsave(lock, flags);
		__set_bit(cd->bit, pen);
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();
		if (cd->line == 0)
			cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
		else
			cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
		raw_spin_unlock_irqrestore(lock, flags);
	}
}

/*
 * Enable the irq on the next core in the affinity set for chips that
 * have the EN*_W1{S,C} registers.
 */
static void octeon_irq_ciu_enable_v2(struct irq_data *data)
{
	u64 mask;
	int cpu = next_cpu_for_irq(data);
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	/*
	 * Called under the desc lock, so these should never get out
	 * of sync.
	 */
	if (cd->line == 0) {
		int index = octeon_coreid_for_cpu(cpu) * 2;
		set_bit(cd->bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
	} else {
		int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
		set_bit(cd->bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
	}
}

/*
 * Enable the irq in the sum2 registers.
 */
static void octeon_irq_ciu_enable_sum2(struct irq_data *data)
{
	u64 mask;
	int cpu = next_cpu_for_irq(data);
	int index = octeon_coreid_for_cpu(cpu);
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1S(index), mask);
}
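/*
 * The *_W1S/*_W1C register pairs are "write one to set" / "write one
 * to clear": writing a mask updates only the bits set in it, so an
 * enable bit can be flipped atomically without a read-modify-write
 * cycle or a lock.
 */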
/*
 * Disable the irq in the sum2 registers.
 */
static void octeon_irq_ciu_disable_local_sum2(struct irq_data *data)
{
	u64 mask;
	int cpu = next_cpu_for_irq(data);
	int index = octeon_coreid_for_cpu(cpu);
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(index), mask);
}

static void octeon_irq_ciu_ack_sum2(struct irq_data *data)
{
	u64 mask;
	int cpu = next_cpu_for_irq(data);
	int index = octeon_coreid_for_cpu(cpu);
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	cvmx_write_csr(CVMX_CIU_SUM2_PPX_IP4(index), mask);
}

static void octeon_irq_ciu_disable_all_sum2(struct irq_data *data)
{
	int cpu;
	struct octeon_ciu_chip_data *cd;
	u64 mask;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);

		cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(coreid), mask);
	}
}

/*
 * Enable the irq on the current CPU for chips that
 * have the EN*_W1{S,C} registers.
 */
static void octeon_irq_ciu_enable_local_v2(struct irq_data *data)
{
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	if (cd->line == 0) {
		int index = cvmx_get_core_num() * 2;
		set_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror));
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
	} else {
		int index = cvmx_get_core_num() * 2 + 1;
		set_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror));
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
	}
}

static void octeon_irq_ciu_disable_local_v2(struct irq_data *data)
{
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	if (cd->line == 0) {
		int index = cvmx_get_core_num() * 2;
		clear_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror));
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
	} else {
		int index = cvmx_get_core_num() * 2 + 1;
		clear_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror));
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
	}
}

/*
 * Write to the W1C bit in CVMX_CIU_INTX_SUM0 to clear the irq.
 */
static void octeon_irq_ciu_ack(struct irq_data *data)
{
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	if (cd->line == 0) {
		int index = cvmx_get_core_num() * 2;
		cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask);
	} else {
		cvmx_write_csr(CVMX_CIU_INT_SUM1, mask);
	}
}
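/*
 * Edge-triggered CIU sources latch in the SUM register until the bit
 * is written back, so the edge irq_chip variants below install
 * octeon_irq_ciu_ack (or the sum2 variant) as .irq_ack for
 * handle_edge_irq() to call before running the handler.
 */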
/*
 * Disable the irq on all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu_disable_all_v2(struct irq_data *data)
{
	int cpu;
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	if (cd->line == 0) {
		for_each_online_cpu(cpu) {
			int index = octeon_coreid_for_cpu(cpu) * 2;
			clear_bit(cd->bit,
				  &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
		}
	} else {
		for_each_online_cpu(cpu) {
			int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
			clear_bit(cd->bit,
				  &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
		}
	}
}

/*
 * Enable the irq on all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu_enable_all_v2(struct irq_data *data)
{
	int cpu;
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	if (cd->line == 0) {
		for_each_online_cpu(cpu) {
			int index = octeon_coreid_for_cpu(cpu) * 2;
			set_bit(cd->bit,
				&per_cpu(octeon_irq_ciu0_en_mirror, cpu));
			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
		}
	} else {
		for_each_online_cpu(cpu) {
			int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
			set_bit(cd->bit,
				&per_cpu(octeon_irq_ciu1_en_mirror, cpu));
			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
		}
	}
}

static void octeon_irq_gpio_setup(struct irq_data *data)
{
	union cvmx_gpio_bit_cfgx cfg;
	struct octeon_ciu_chip_data *cd;
	u32 t = irqd_get_trigger_type(data);

	cd = irq_data_get_irq_chip_data(data);

	cfg.u64 = 0;
	cfg.s.int_en = 1;
	cfg.s.int_type = (t & IRQ_TYPE_EDGE_BOTH) != 0;
	cfg.s.rx_xor = (t & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING)) != 0;

	/* 140 ns glitch filter */
	cfg.s.fil_cnt = 7;
	cfg.s.fil_sel = 3;

	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), cfg.u64);
}
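/*
 * Trigger decode used above: int_type selects edge (1) vs. level (0)
 * sensing and rx_xor inverts the pin, so:
 *
 *	IRQ_TYPE_LEVEL_HIGH	-> int_type = 0, rx_xor = 0
 *	IRQ_TYPE_LEVEL_LOW	-> int_type = 0, rx_xor = 1
 *	IRQ_TYPE_EDGE_RISING	-> int_type = 1, rx_xor = 0
 *	IRQ_TYPE_EDGE_FALLING	-> int_type = 1, rx_xor = 1
 */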
static void octeon_irq_ciu_enable_gpio_v2(struct irq_data *data)
{
	octeon_irq_gpio_setup(data);
	octeon_irq_ciu_enable_v2(data);
}

static void octeon_irq_ciu_enable_gpio(struct irq_data *data)
{
	octeon_irq_gpio_setup(data);
	octeon_irq_ciu_enable(data);
}

static int octeon_irq_ciu_gpio_set_type(struct irq_data *data, unsigned int t)
{
	irqd_set_trigger_type(data, t);
	octeon_irq_gpio_setup(data);

	return IRQ_SET_MASK_OK;
}

static void octeon_irq_ciu_disable_gpio_v2(struct irq_data *data)
{
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0);

	octeon_irq_ciu_disable_all_v2(data);
}

static void octeon_irq_ciu_disable_gpio(struct irq_data *data)
{
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0);

	octeon_irq_ciu_disable_all(data);
}

static void octeon_irq_ciu_gpio_ack(struct irq_data *data)
{
	struct octeon_ciu_chip_data *cd;
	u64 mask;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->gpio_line);

	cvmx_write_csr(CVMX_GPIO_INT_CLR, mask);
}

static void octeon_irq_handle_trigger(unsigned int irq, struct irq_desc *desc)
{
	if (irq_get_trigger_type(irq) & IRQ_TYPE_EDGE_BOTH)
		handle_edge_irq(irq, desc);
	else
		handle_level_irq(irq, desc);
}

#ifdef CONFIG_SMP

static void octeon_irq_cpu_offline_ciu(struct irq_data *data)
{
	int cpu = smp_processor_id();
	cpumask_t new_affinity;

	if (!cpumask_test_cpu(cpu, data->affinity))
		return;

	if (cpumask_weight(data->affinity) > 1) {
		/*
		 * It has multi-CPU affinity, just remove this CPU
		 * from the affinity set.
		 */
		cpumask_copy(&new_affinity, data->affinity);
		cpumask_clear_cpu(cpu, &new_affinity);
	} else {
		/* Otherwise, put it on the lowest numbered online CPU. */
		cpumask_clear(&new_affinity);
		cpumask_set_cpu(cpumask_first(cpu_online_mask), &new_affinity);
	}
	irq_set_affinity_locked(data, &new_affinity, false);
}

static int octeon_irq_ciu_set_affinity(struct irq_data *data,
				       const struct cpumask *dest, bool force)
{
	int cpu;
	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
	unsigned long flags;
	struct octeon_ciu_chip_data *cd;
	unsigned long *pen;
	raw_spinlock_t *lock;

	cd = irq_data_get_irq_chip_data(data);

	/*
	 * For non-v2 CIU, we will allow only single CPU affinity.
	 * This removes the need to do locking in the .ack/.eoi
	 * functions.
	 */
	if (cpumask_weight(dest) != 1)
		return -EINVAL;

	if (!enable_one)
		return 0;

	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);

		lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
		raw_spin_lock_irqsave(lock, flags);

		if (cd->line == 0)
			pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
		else
			pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);

		if (cpumask_test_cpu(cpu, dest) && enable_one) {
			enable_one = false;
			__set_bit(cd->bit, pen);
		} else {
			__clear_bit(cd->bit, pen);
		}
		/*
		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
		 * enabling the irq.
		 */
		wmb();

		if (cd->line == 0)
			cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
		else
			cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);

		raw_spin_unlock_irqrestore(lock, flags);
	}
	return 0;
}
/*
 * Set affinity for the irq for chips that have the EN*_W1{S,C}
 * registers.
 */
static int octeon_irq_ciu_set_affinity_v2(struct irq_data *data,
					  const struct cpumask *dest,
					  bool force)
{
	int cpu;
	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	if (!enable_one)
		return 0;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << cd->bit;

	if (cd->line == 0) {
		for_each_online_cpu(cpu) {
			unsigned long *pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
			int index = octeon_coreid_for_cpu(cpu) * 2;

			if (cpumask_test_cpu(cpu, dest) && enable_one) {
				enable_one = false;
				set_bit(cd->bit, pen);
				cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
			} else {
				clear_bit(cd->bit, pen);
				cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
			}
		}
	} else {
		for_each_online_cpu(cpu) {
			unsigned long *pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
			int index = octeon_coreid_for_cpu(cpu) * 2 + 1;

			if (cpumask_test_cpu(cpu, dest) && enable_one) {
				enable_one = false;
				set_bit(cd->bit, pen);
				cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
			} else {
				clear_bit(cd->bit, pen);
				cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
			}
		}
	}
	return 0;
}

static int octeon_irq_ciu_set_affinity_sum2(struct irq_data *data,
					    const struct cpumask *dest,
					    bool force)
{
	int cpu;
	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	if (!enable_one)
		return 0;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << cd->bit;

	for_each_online_cpu(cpu) {
		int index = octeon_coreid_for_cpu(cpu);

		if (cpumask_test_cpu(cpu, dest) && enable_one) {
			enable_one = false;
			cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1S(index), mask);
		} else {
			cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(index), mask);
		}
	}
	return 0;
}
#endif
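/*
 * Note the enable_one logic in the set_affinity handlers above: the
 * irq is left enabled on exactly one CPU of the destination mask and
 * masked everywhere else, matching the CIU's single-CPU delivery of
 * each interrupt source.
 */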
/*
 * Newer octeon chips have support for lockless CIU operation.
 */
static struct irq_chip octeon_irq_chip_ciu_v2 = {
	.name = "CIU",
	.irq_enable = octeon_irq_ciu_enable_v2,
	.irq_disable = octeon_irq_ciu_disable_all_v2,
	.irq_mask = octeon_irq_ciu_disable_local_v2,
	.irq_unmask = octeon_irq_ciu_enable_v2,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

static struct irq_chip octeon_irq_chip_ciu_v2_edge = {
	.name = "CIU",
	.irq_enable = octeon_irq_ciu_enable_v2,
	.irq_disable = octeon_irq_ciu_disable_all_v2,
	.irq_ack = octeon_irq_ciu_ack,
	.irq_mask = octeon_irq_ciu_disable_local_v2,
	.irq_unmask = octeon_irq_ciu_enable_v2,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

/*
 * Newer octeon chips have support for lockless CIU operation.
 */
static struct irq_chip octeon_irq_chip_ciu_sum2 = {
	.name = "CIU",
	.irq_enable = octeon_irq_ciu_enable_sum2,
	.irq_disable = octeon_irq_ciu_disable_all_sum2,
	.irq_mask = octeon_irq_ciu_disable_local_sum2,
	.irq_unmask = octeon_irq_ciu_enable_sum2,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity_sum2,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

static struct irq_chip octeon_irq_chip_ciu_sum2_edge = {
	.name = "CIU",
	.irq_enable = octeon_irq_ciu_enable_sum2,
	.irq_disable = octeon_irq_ciu_disable_all_sum2,
	.irq_ack = octeon_irq_ciu_ack_sum2,
	.irq_mask = octeon_irq_ciu_disable_local_sum2,
	.irq_unmask = octeon_irq_ciu_enable_sum2,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity_sum2,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

static struct irq_chip octeon_irq_chip_ciu = {
	.name = "CIU",
	.irq_enable = octeon_irq_ciu_enable,
	.irq_disable = octeon_irq_ciu_disable_all,
	.irq_mask = octeon_irq_ciu_disable_local,
	.irq_unmask = octeon_irq_ciu_enable,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

static struct irq_chip octeon_irq_chip_ciu_edge = {
	.name = "CIU",
	.irq_enable = octeon_irq_ciu_enable,
	.irq_disable = octeon_irq_ciu_disable_all,
	.irq_ack = octeon_irq_ciu_ack,
	.irq_mask = octeon_irq_ciu_disable_local,
	.irq_unmask = octeon_irq_ciu_enable,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};
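/*
 * The _edge chip variants above differ from the level variants only
 * in providing .irq_ack: handle_edge_irq() must clear the latched SUM
 * bit before invoking the handler, whereas handle_level_irq() needs
 * no ack.
 */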
/* The mbox versions don't do any affinity or round-robin. */
static struct irq_chip octeon_irq_chip_ciu_mbox_v2 = {
	.name = "CIU-M",
	.irq_enable = octeon_irq_ciu_enable_all_v2,
	.irq_disable = octeon_irq_ciu_disable_all_v2,
	.irq_ack = octeon_irq_ciu_disable_local_v2,
	.irq_eoi = octeon_irq_ciu_enable_local_v2,

	.irq_cpu_online = octeon_irq_ciu_enable_local_v2,
	.irq_cpu_offline = octeon_irq_ciu_disable_local_v2,
	.flags = IRQCHIP_ONOFFLINE_ENABLED,
};

static struct irq_chip octeon_irq_chip_ciu_mbox = {
	.name = "CIU-M",
	.irq_enable = octeon_irq_ciu_enable_all,
	.irq_disable = octeon_irq_ciu_disable_all,
	.irq_ack = octeon_irq_ciu_disable_local,
	.irq_eoi = octeon_irq_ciu_enable_local,

	.irq_cpu_online = octeon_irq_ciu_enable_local,
	.irq_cpu_offline = octeon_irq_ciu_disable_local,
	.flags = IRQCHIP_ONOFFLINE_ENABLED,
};

static struct irq_chip octeon_irq_chip_ciu_gpio_v2 = {
	.name = "CIU-GPIO",
	.irq_enable = octeon_irq_ciu_enable_gpio_v2,
	.irq_disable = octeon_irq_ciu_disable_gpio_v2,
	.irq_ack = octeon_irq_ciu_gpio_ack,
	.irq_mask = octeon_irq_ciu_disable_local_v2,
	.irq_unmask = octeon_irq_ciu_enable_v2,
	.irq_set_type = octeon_irq_ciu_gpio_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
	.flags = IRQCHIP_SET_TYPE_MASKED,
};

static struct irq_chip octeon_irq_chip_ciu_gpio = {
	.name = "CIU-GPIO",
	.irq_enable = octeon_irq_ciu_enable_gpio,
	.irq_disable = octeon_irq_ciu_disable_gpio,
	.irq_mask = octeon_irq_ciu_disable_local,
	.irq_unmask = octeon_irq_ciu_enable,
	.irq_ack = octeon_irq_ciu_gpio_ack,
	.irq_set_type = octeon_irq_ciu_gpio_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
	.flags = IRQCHIP_SET_TYPE_MASKED,
};

/*
 * Watchdog interrupts are special. They are associated with a single
 * core, so we hardwire the affinity to that core.
 */
static void octeon_irq_ciu_wd_enable(struct irq_data *data)
{
	unsigned long flags;
	unsigned long *pen;
	int coreid = data->irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */
	int cpu = octeon_cpu_for_coreid(coreid);
	raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);

	raw_spin_lock_irqsave(lock, flags);
	pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
	__set_bit(coreid, pen);
	/*
	 * Must be visible to octeon_irq_ip{2,3}_ciu() before enabling
	 * the irq.
	 */
	wmb();
	cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
	raw_spin_unlock_irqrestore(lock, flags);
}
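/*
 * WDOGn is wired to bit n of sum line 1, so the coreid recovered from
 * the irq number above doubles as both the EN1 bit index and the
 * target core: affinity is effectively hardwired.
 */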
/*
 * Watchdog interrupts are special. They are associated with a single
 * core, so we hardwire the affinity to that core.
 */
static void octeon_irq_ciu1_wd_enable_v2(struct irq_data *data)
{
	int coreid = data->irq - OCTEON_IRQ_WDOG0;
	int cpu = octeon_cpu_for_coreid(coreid);

	set_bit(coreid, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
	cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(coreid * 2 + 1), 1ull << coreid);
}

static struct irq_chip octeon_irq_chip_ciu_wd_v2 = {
	.name = "CIU-W",
	.irq_enable = octeon_irq_ciu1_wd_enable_v2,
	.irq_disable = octeon_irq_ciu_disable_all_v2,
	.irq_mask = octeon_irq_ciu_disable_local_v2,
	.irq_unmask = octeon_irq_ciu_enable_local_v2,
};

static struct irq_chip octeon_irq_chip_ciu_wd = {
	.name = "CIU-W",
	.irq_enable = octeon_irq_ciu_wd_enable,
	.irq_disable = octeon_irq_ciu_disable_all,
	.irq_mask = octeon_irq_ciu_disable_local,
	.irq_unmask = octeon_irq_ciu_enable_local,
};

static bool octeon_irq_ciu_is_edge(unsigned int line, unsigned int bit)
{
	bool edge = false;

	if (line == 0)
		switch (bit) {
		case 48 ... 49:	/* GMX DRP */
		case 50:	/* IPD_DRP */
		case 52 ... 55:	/* Timers */
		case 58:	/* MPI */
			edge = true;
			break;
		default:
			break;
		}
	else /* line == 1 */
		switch (bit) {
		case 47:	/* PTP */
			edge = true;
			break;
		default:
			break;
		}
	return edge;
}

struct octeon_irq_gpio_domain_data {
	unsigned int base_hwirq;
};

static int octeon_irq_gpio_xlat(struct irq_domain *d,
				struct device_node *node,
				const u32 *intspec,
				unsigned int intsize,
				unsigned long *out_hwirq,
				unsigned int *out_type)
{
	unsigned int type;
	unsigned int pin;
	unsigned int trigger;

	if (d->of_node != node)
		return -EINVAL;

	if (intsize < 2)
		return -EINVAL;

	pin = intspec[0];
	if (pin >= 16)
		return -EINVAL;

	trigger = intspec[1];

	switch (trigger) {
	case 1:
		type = IRQ_TYPE_EDGE_RISING;
		break;
	case 2:
		type = IRQ_TYPE_EDGE_FALLING;
		break;
	case 4:
		type = IRQ_TYPE_LEVEL_HIGH;
		break;
	case 8:
		type = IRQ_TYPE_LEVEL_LOW;
		break;
	default:
		pr_err("Error: (%s) Invalid irq trigger specification: %x\n",
		       node->name,
		       trigger);
		type = IRQ_TYPE_LEVEL_LOW;
		break;
	}
	*out_type = type;
	*out_hwirq = pin;

	return 0;
}

static int octeon_irq_ciu_xlat(struct irq_domain *d,
			       struct device_node *node,
			       const u32 *intspec,
			       unsigned int intsize,
			       unsigned long *out_hwirq,
			       unsigned int *out_type)
{
	unsigned int ciu, bit;
	struct octeon_irq_ciu_domain_data *dd = d->host_data;

	ciu = intspec[0];
	bit = intspec[1];

	if (ciu >= dd->num_sum || bit > 63)
		return -EINVAL;

	*out_hwirq = (ciu << 6) | bit;
	*out_type = 0;

	return 0;
}
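/*
 * A device tree consumer selects a CIU input with a two-cell
 * specifier, e.g. interrupts = <1 17> for sum line 1, bit 17, which
 * the xlate above folds into hwirq (1 << 6) | 17 = 81.
 */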
static struct irq_chip *octeon_irq_ciu_chip;
static struct irq_chip *octeon_irq_ciu_chip_edge;
static struct irq_chip *octeon_irq_gpio_chip;

static bool octeon_irq_virq_in_range(unsigned int virq)
{
	/* We cannot let it overflow the mapping array. */
	if (virq < (1ul << 8 * sizeof(octeon_irq_ciu_to_irq[0][0])))
		return true;

	WARN_ONCE(true, "virq out of range %u.\n", virq);
	return false;
}

static int octeon_irq_ciu_map(struct irq_domain *d,
			      unsigned int virq, irq_hw_number_t hw)
{
	int rv;
	unsigned int line = hw >> 6;
	unsigned int bit = hw & 63;
	struct octeon_irq_ciu_domain_data *dd = d->host_data;

	if (!octeon_irq_virq_in_range(virq))
		return -EINVAL;

	/* Don't map irq if it is reserved for GPIO. */
	if (line == 0 && bit >= 16 && bit < 32)
		return 0;

	if (line >= dd->num_sum || octeon_irq_ciu_to_irq[line][bit] != 0)
		return -EINVAL;

	if (line == 2) {
		if (octeon_irq_ciu_is_edge(line, bit))
			rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
				&octeon_irq_chip_ciu_sum2_edge,
				handle_edge_irq);
		else
			rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
				&octeon_irq_chip_ciu_sum2,
				handle_level_irq);
	} else {
		if (octeon_irq_ciu_is_edge(line, bit))
			rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
				octeon_irq_ciu_chip_edge,
				handle_edge_irq);
		else
			rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
				octeon_irq_ciu_chip,
				handle_level_irq);
	}
	return rv;
}

static int octeon_irq_gpio_map(struct irq_domain *d,
			       unsigned int virq, irq_hw_number_t hw)
{
	struct octeon_irq_gpio_domain_data *gpiod = d->host_data;
	unsigned int line, bit;
	int r;

	if (!octeon_irq_virq_in_range(virq))
		return -EINVAL;

	line = (hw + gpiod->base_hwirq) >> 6;
	bit = (hw + gpiod->base_hwirq) & 63;
	if (line >= ARRAY_SIZE(octeon_irq_ciu_to_irq) ||
	    octeon_irq_ciu_to_irq[line][bit] != 0)
		return -EINVAL;

	r = octeon_irq_set_ciu_mapping(virq, line, bit, hw,
				       octeon_irq_gpio_chip,
				       octeon_irq_handle_trigger);
	return r;
}

static struct irq_domain_ops octeon_irq_domain_ciu_ops = {
	.map = octeon_irq_ciu_map,
	.unmap = octeon_irq_free_cd,
	.xlate = octeon_irq_ciu_xlat,
};

static struct irq_domain_ops octeon_irq_domain_gpio_ops = {
	.map = octeon_irq_gpio_map,
	.unmap = octeon_irq_free_cd,
	.xlate = octeon_irq_gpio_xlat,
};
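/*
 * The IP2/IP3/IP4 dispatchers below AND the hardware sum against the
 * enabled set and service the highest set bit first (fls64), so
 * higher-numbered CIU bits win when several sources are pending.
 */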
static void octeon_irq_ip2_ciu(void)
{
	const unsigned long core_id = cvmx_get_core_num();
	u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INTX_SUM0(core_id * 2));

	ciu_sum &= __this_cpu_read(octeon_irq_ciu0_en_mirror);
	if (likely(ciu_sum)) {
		int bit = fls64(ciu_sum) - 1;
		int irq = octeon_irq_ciu_to_irq[0][bit];

		if (likely(irq))
			do_IRQ(irq);
		else
			spurious_interrupt();
	} else {
		spurious_interrupt();
	}
}

static void octeon_irq_ip3_ciu(void)
{
	u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INT_SUM1);

	ciu_sum &= __this_cpu_read(octeon_irq_ciu1_en_mirror);
	if (likely(ciu_sum)) {
		int bit = fls64(ciu_sum) - 1;
		int irq = octeon_irq_ciu_to_irq[1][bit];

		if (likely(irq))
			do_IRQ(irq);
		else
			spurious_interrupt();
	} else {
		spurious_interrupt();
	}
}

static void octeon_irq_ip4_ciu(void)
{
	int coreid = cvmx_get_core_num();
	u64 ciu_sum = cvmx_read_csr(CVMX_CIU_SUM2_PPX_IP4(coreid));
	u64 ciu_en = cvmx_read_csr(CVMX_CIU_EN2_PPX_IP4(coreid));

	ciu_sum &= ciu_en;
	if (likely(ciu_sum)) {
		int bit = fls64(ciu_sum) - 1;
		int irq = octeon_irq_ciu_to_irq[2][bit];

		if (likely(irq))
			do_IRQ(irq);
		else
			spurious_interrupt();
	} else {
		spurious_interrupt();
	}
}

static bool octeon_irq_use_ip4;

static void octeon_irq_local_enable_ip4(void *arg)
{
	set_c0_status(STATUSF_IP4);
}

static void octeon_irq_ip4_mask(void)
{
	clear_c0_status(STATUSF_IP4);
	spurious_interrupt();
}

static void (*octeon_irq_ip2)(void);
static void (*octeon_irq_ip3)(void);
static void (*octeon_irq_ip4)(void);

void (*octeon_irq_setup_secondary)(void);

void octeon_irq_set_ip4_handler(octeon_irq_ip4_handler_t h)
{
	octeon_irq_ip4 = h;
	octeon_irq_use_ip4 = true;
	on_each_cpu(octeon_irq_local_enable_ip4, NULL, 1);
}

static void octeon_irq_percpu_enable(void)
{
	irq_cpu_online();
}

static void octeon_irq_init_ciu_percpu(void)
{
	int coreid = cvmx_get_core_num();

	__this_cpu_write(octeon_irq_ciu0_en_mirror, 0);
	__this_cpu_write(octeon_irq_ciu1_en_mirror, 0);
	wmb();
	raw_spin_lock_init(this_cpu_ptr(&octeon_irq_ciu_spinlock));
	/*
	 * Disable All CIU Interrupts. The ones we need will be
	 * enabled later. Read the SUM register so we know the write
	 * completed.
	 */
	cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2)), 0);
	cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2 + 1)), 0);
	cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2)), 0);
	cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2 + 1)), 0);
	cvmx_read_csr(CVMX_CIU_INTX_SUM0((coreid * 2)));
}
static void octeon_irq_init_ciu2_percpu(void)
{
	u64 regx, ipx;
	int coreid = cvmx_get_core_num();
	u64 base = CVMX_CIU2_EN_PPX_IP2_WRKQ(coreid);

	/*
	 * Disable All CIU2 Interrupts. The ones we need will be
	 * enabled later. Read the SUM register so we know the write
	 * completed.
	 *
	 * There are 9 registers and 3 IPX levels with strides 0x1000
	 * and 0x200 respectively. Use loops to clear them.
	 */
	for (regx = 0; regx <= 0x8000; regx += 0x1000) {
		for (ipx = 0; ipx <= 0x400; ipx += 0x200)
			cvmx_write_csr(base + regx + ipx, 0);
	}

	cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP2(coreid));
}

static void octeon_irq_setup_secondary_ciu(void)
{
	octeon_irq_init_ciu_percpu();
	octeon_irq_percpu_enable();

	/* Enable the CIU lines */
	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
	if (octeon_irq_use_ip4)
		set_c0_status(STATUSF_IP4);
	else
		clear_c0_status(STATUSF_IP4);
}

static void octeon_irq_setup_secondary_ciu2(void)
{
	octeon_irq_init_ciu2_percpu();
	octeon_irq_percpu_enable();

	/* Enable the CIU lines */
	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
	if (octeon_irq_use_ip4)
		set_c0_status(STATUSF_IP4);
	else
		clear_c0_status(STATUSF_IP4);
}
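/*
 * STATUSF_IP2..IP4 gate the CP0 hardware interrupt lines the CIU
 * feeds: for the CIU, IP2 carries sum0 and IP3 sum1; for the CIU2,
 * IP3 carries the mailboxes.  IP4 is opened only when a sum2/IP4
 * handler is actually installed (octeon_irq_use_ip4).
 */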
static int __init octeon_irq_init_ciu(
	struct device_node *ciu_node, struct device_node *parent)
{
	unsigned int i, r;
	struct irq_chip *chip;
	struct irq_chip *chip_edge;
	struct irq_chip *chip_mbox;
	struct irq_chip *chip_wd;
	struct irq_domain *ciu_domain = NULL;
	struct octeon_irq_ciu_domain_data *dd;

	dd = kzalloc(sizeof(*dd), GFP_KERNEL);
	if (!dd)
		return -ENOMEM;

	octeon_irq_init_ciu_percpu();
	octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu;

	octeon_irq_ip2 = octeon_irq_ip2_ciu;
	octeon_irq_ip3 = octeon_irq_ip3_ciu;
	if ((OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3())
	    && !OCTEON_IS_MODEL(OCTEON_CN63XX)) {
		octeon_irq_ip4 = octeon_irq_ip4_ciu;
		dd->num_sum = 3;
		octeon_irq_use_ip4 = true;
	} else {
		octeon_irq_ip4 = octeon_irq_ip4_mask;
		dd->num_sum = 2;
		octeon_irq_use_ip4 = false;
	}
	if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) ||
	    OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) ||
	    OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X) ||
	    OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3()) {
		chip = &octeon_irq_chip_ciu_v2;
		chip_edge = &octeon_irq_chip_ciu_v2_edge;
		chip_mbox = &octeon_irq_chip_ciu_mbox_v2;
		chip_wd = &octeon_irq_chip_ciu_wd_v2;
		octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio_v2;
	} else {
		chip = &octeon_irq_chip_ciu;
		chip_edge = &octeon_irq_chip_ciu_edge;
		chip_mbox = &octeon_irq_chip_ciu_mbox;
		chip_wd = &octeon_irq_chip_ciu_wd;
		octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio;
	}
	octeon_irq_ciu_chip = chip;
	octeon_irq_ciu_chip_edge = chip_edge;

	/* Mips internal */
	octeon_irq_init_core();

	ciu_domain = irq_domain_add_tree(
		ciu_node, &octeon_irq_domain_ciu_ops, dd);
	irq_set_default_host(ciu_domain);

	/* CIU_0 */
	for (i = 0; i < 16; i++) {
		r = octeon_irq_force_ciu_mapping(
			ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i + 0);
		if (r)
			goto err;
	}

	r = octeon_irq_set_ciu_mapping(
		OCTEON_IRQ_MBOX0, 0, 32, 0, chip_mbox, handle_percpu_irq);
	if (r)
		goto err;
	r = octeon_irq_set_ciu_mapping(
		OCTEON_IRQ_MBOX1, 0, 33, 0, chip_mbox, handle_percpu_irq);
	if (r)
		goto err;

	for (i = 0; i < 4; i++) {
		r = octeon_irq_force_ciu_mapping(
			ciu_domain, i + OCTEON_IRQ_PCI_INT0, 0, i + 36);
		if (r)
			goto err;
	}
	for (i = 0; i < 4; i++) {
		r = octeon_irq_force_ciu_mapping(
			ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 0, i + 40);
		if (r)
			goto err;
	}

	r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI, 0, 45);
	if (r)
		goto err;

	r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_RML, 0, 46);
	if (r)
		goto err;

	for (i = 0; i < 4; i++) {
		r = octeon_irq_force_ciu_mapping(
			ciu_domain, i + OCTEON_IRQ_TIMER0, 0, i + 52);
		if (r)
			goto err;
	}

	r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 0, 56);
	if (r)
		goto err;

	r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI2, 0, 59);
	if (r)
		goto err;

	/* CIU_1 */
	for (i = 0; i < 16; i++) {
		r = octeon_irq_set_ciu_mapping(
			i + OCTEON_IRQ_WDOG0, 1, i + 0, 0, chip_wd,
			handle_level_irq);
		if (r)
			goto err;
	}

	r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB1, 1, 17);
	if (r)
		goto err;

	/* Enable the CIU lines */
	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
	if (octeon_irq_use_ip4)
		set_c0_status(STATUSF_IP4);
	else
		clear_c0_status(STATUSF_IP4);

	return 0;
err:
	return r;
}

static int __init octeon_irq_init_gpio(
	struct device_node *gpio_node, struct device_node *parent)
{
	struct octeon_irq_gpio_domain_data *gpiod;
	u32 interrupt_cells;
	unsigned int base_hwirq;
	int r;

	r = of_property_read_u32(parent, "#interrupt-cells", &interrupt_cells);
	if (r)
		return r;

	if (interrupt_cells == 1) {
		u32 v;

		r = of_property_read_u32_index(gpio_node, "interrupts", 0, &v);
		if (r) {
			pr_warn("No \"interrupts\" property.\n");
			return r;
		}
		base_hwirq = v;
	} else if (interrupt_cells == 2) {
		u32 v0, v1;

		r = of_property_read_u32_index(gpio_node, "interrupts", 0, &v0);
		if (r) {
			pr_warn("No \"interrupts\" property.\n");
			return r;
		}
		r = of_property_read_u32_index(gpio_node, "interrupts", 1, &v1);
		if (r) {
			pr_warn("No \"interrupts\" property.\n");
			return r;
		}
		base_hwirq = (v0 << 6) | v1;
	} else {
		pr_warn("Bad \"#interrupt-cells\" property: %u\n",
			interrupt_cells);
		return -EINVAL;
	}

	gpiod = kzalloc(sizeof(*gpiod), GFP_KERNEL);
	if (gpiod) {
		/* gpio domain host_data is the base hwirq number. */
		gpiod->base_hwirq = base_hwirq;
		irq_domain_add_linear(
			gpio_node, 16, &octeon_irq_domain_gpio_ops, gpiod);
	} else {
		pr_warn("Cannot allocate memory for GPIO irq_domain.\n");
		return -ENOMEM;
	}

	return 0;
}
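/*
 * With two interrupt cells the GPIO node's "interrupts" property
 * names the parent CIU line and bit, so base_hwirq above is the same
 * (line << 6) | bit encoding the CIU domain uses; the 16 GPIO pins
 * then occupy parent bits base_hwirq .. base_hwirq + 15.
 */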
/*
 * Watchdog interrupts are special. They are associated with a single
 * core, so we hardwire the affinity to that core.
 */
static void octeon_irq_ciu2_wd_enable(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int coreid = data->irq - OCTEON_IRQ_WDOG0;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) +
		(0x1000ull * cd->line);
	cvmx_write_csr(en_addr, mask);
}

static void octeon_irq_ciu2_enable(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int cpu = next_cpu_for_irq(data);
	int coreid = octeon_coreid_for_cpu(cpu);
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) +
		(0x1000ull * cd->line);
	cvmx_write_csr(en_addr, mask);
}

static void octeon_irq_ciu2_enable_local(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int coreid = cvmx_get_core_num();
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) +
		(0x1000ull * cd->line);
	cvmx_write_csr(en_addr, mask);
}

static void octeon_irq_ciu2_disable_local(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int coreid = cvmx_get_core_num();
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(coreid) +
		(0x1000ull * cd->line);
	cvmx_write_csr(en_addr, mask);
}

static void octeon_irq_ciu2_ack(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int coreid = cvmx_get_core_num();
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	en_addr = CVMX_CIU2_RAW_PPX_IP2_WRKQ(coreid) + (0x1000ull * cd->line);
	cvmx_write_csr(en_addr, mask);
}

static void octeon_irq_ciu2_disable_all(struct irq_data *data)
{
	int cpu;
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd->bit);

	for_each_online_cpu(cpu) {
		u64 en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(
			octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd->line);
		cvmx_write_csr(en_addr, mask);
	}
}

static void octeon_irq_ciu2_mbox_enable_all(struct irq_data *data)
{
	int cpu;
	u64 mask;

	mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);

	for_each_online_cpu(cpu) {
		u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1S(
			octeon_coreid_for_cpu(cpu));
		cvmx_write_csr(en_addr, mask);
	}
}

static void octeon_irq_ciu2_mbox_disable_all(struct irq_data *data)
{
	int cpu;
	u64 mask;

	mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);

	for_each_online_cpu(cpu) {
		u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1C(
			octeon_coreid_for_cpu(cpu));
		cvmx_write_csr(en_addr, mask);
	}
}

static void octeon_irq_ciu2_mbox_enable_local(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int coreid = cvmx_get_core_num();

	mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);
	en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1S(coreid);
	cvmx_write_csr(en_addr, mask);
}
static void octeon_irq_ciu2_mbox_disable_local(struct irq_data *data)
{
	u64 mask;
	u64 en_addr;
	int coreid = cvmx_get_core_num();

	mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);
	en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1C(coreid);
	cvmx_write_csr(en_addr, mask);
}

#ifdef CONFIG_SMP
static int octeon_irq_ciu2_set_affinity(struct irq_data *data,
					const struct cpumask *dest, bool force)
{
	int cpu;
	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
	u64 mask;
	struct octeon_ciu_chip_data *cd;

	if (!enable_one)
		return 0;

	cd = irq_data_get_irq_chip_data(data);
	mask = 1ull << cd->bit;

	for_each_online_cpu(cpu) {
		u64 en_addr;

		if (cpumask_test_cpu(cpu, dest) && enable_one) {
			enable_one = false;
			en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(
				octeon_coreid_for_cpu(cpu)) +
				(0x1000ull * cd->line);
		} else {
			en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(
				octeon_coreid_for_cpu(cpu)) +
				(0x1000ull * cd->line);
		}
		cvmx_write_csr(en_addr, mask);
	}

	return 0;
}
#endif

static void octeon_irq_ciu2_enable_gpio(struct irq_data *data)
{
	octeon_irq_gpio_setup(data);
	octeon_irq_ciu2_enable(data);
}

static void octeon_irq_ciu2_disable_gpio(struct irq_data *data)
{
	struct octeon_ciu_chip_data *cd;

	cd = irq_data_get_irq_chip_data(data);

	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0);

	octeon_irq_ciu2_disable_all(data);
}

static struct irq_chip octeon_irq_chip_ciu2 = {
	.name = "CIU2-E",
	.irq_enable = octeon_irq_ciu2_enable,
	.irq_disable = octeon_irq_ciu2_disable_all,
	.irq_mask = octeon_irq_ciu2_disable_local,
	.irq_unmask = octeon_irq_ciu2_enable,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu2_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

static struct irq_chip octeon_irq_chip_ciu2_edge = {
	.name = "CIU2-E",
	.irq_enable = octeon_irq_ciu2_enable,
	.irq_disable = octeon_irq_ciu2_disable_all,
	.irq_ack = octeon_irq_ciu2_ack,
	.irq_mask = octeon_irq_ciu2_disable_local,
	.irq_unmask = octeon_irq_ciu2_enable,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu2_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

static struct irq_chip octeon_irq_chip_ciu2_mbox = {
	.name = "CIU2-M",
	.irq_enable = octeon_irq_ciu2_mbox_enable_all,
	.irq_disable = octeon_irq_ciu2_mbox_disable_all,
	.irq_ack = octeon_irq_ciu2_mbox_disable_local,
	.irq_eoi = octeon_irq_ciu2_mbox_enable_local,

	.irq_cpu_online = octeon_irq_ciu2_mbox_enable_local,
	.irq_cpu_offline = octeon_irq_ciu2_mbox_disable_local,
	.flags = IRQCHIP_ONOFFLINE_ENABLED,
};

static struct irq_chip octeon_irq_chip_ciu2_wd = {
	.name = "CIU2-W",
	.irq_enable = octeon_irq_ciu2_wd_enable,
	.irq_disable = octeon_irq_ciu2_disable_all,
	.irq_mask = octeon_irq_ciu2_disable_local,
	.irq_unmask = octeon_irq_ciu2_enable_local,
};
static struct irq_chip octeon_irq_chip_ciu2_gpio = {
	.name = "CIU-GPIO",
	.irq_enable = octeon_irq_ciu2_enable_gpio,
	.irq_disable = octeon_irq_ciu2_disable_gpio,
	.irq_ack = octeon_irq_ciu_gpio_ack,
	.irq_mask = octeon_irq_ciu2_disable_local,
	.irq_unmask = octeon_irq_ciu2_enable,
	.irq_set_type = octeon_irq_ciu_gpio_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu2_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
	.flags = IRQCHIP_SET_TYPE_MASKED,
};

static int octeon_irq_ciu2_xlat(struct irq_domain *d,
				struct device_node *node,
				const u32 *intspec,
				unsigned int intsize,
				unsigned long *out_hwirq,
				unsigned int *out_type)
{
	unsigned int ciu, bit;

	ciu = intspec[0];
	bit = intspec[1];

	*out_hwirq = (ciu << 6) | bit;
	*out_type = 0;

	return 0;
}

static bool octeon_irq_ciu2_is_edge(unsigned int line, unsigned int bit)
{
	bool edge = false;

	if (line == 3) /* MIO */
		switch (bit) {
		case 2:		/* IPD_DRP */
		case 8 ... 11:	/* Timers */
		case 48:	/* PTP */
			edge = true;
			break;
		default:
			break;
		}
	else if (line == 6) /* PKT */
		switch (bit) {
		case 52 ... 53:	/* ILK_DRP */
		case 8 ... 12:	/* GMX_DRP */
			edge = true;
			break;
		default:
			break;
		}
	return edge;
}

static int octeon_irq_ciu2_map(struct irq_domain *d,
			       unsigned int virq, irq_hw_number_t hw)
{
	unsigned int line = hw >> 6;
	unsigned int bit = hw & 63;

	if (!octeon_irq_virq_in_range(virq))
		return -EINVAL;

	/*
	 * Don't map irq if it is reserved for GPIO.
	 * (Line 7 are the GPIO lines.)
	 */
	if (line == 7)
		return 0;

	if (line > 7 || octeon_irq_ciu_to_irq[line][bit] != 0)
		return -EINVAL;

	if (octeon_irq_ciu2_is_edge(line, bit))
		octeon_irq_set_ciu_mapping(virq, line, bit, 0,
					   &octeon_irq_chip_ciu2_edge,
					   handle_edge_irq);
	else
		octeon_irq_set_ciu_mapping(virq, line, bit, 0,
					   &octeon_irq_chip_ciu2,
					   handle_level_irq);

	return 0;
}

static struct irq_domain_ops octeon_irq_domain_ciu2_ops = {
	.map = octeon_irq_ciu2_map,
	.unmap = octeon_irq_free_cd,
	.xlate = octeon_irq_ciu2_xlat,
};

static void octeon_irq_ciu2(void)
{
	int line;
	int bit;
	int irq;
	u64 src_reg, src, sum;
	const unsigned long core_id = cvmx_get_core_num();

	sum = cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP2(core_id)) & 0xfful;

	if (unlikely(!sum))
		goto spurious;

	line = fls64(sum) - 1;
	src_reg = CVMX_CIU2_SRC_PPX_IP2_WRKQ(core_id) + (0x1000 * line);
	src = cvmx_read_csr(src_reg);

	if (unlikely(!src))
		goto spurious;

	bit = fls64(src) - 1;
	irq = octeon_irq_ciu_to_irq[line][bit];
	if (unlikely(!irq))
		goto spurious;

	do_IRQ(irq);
	goto out;

spurious:
	spurious_interrupt();
out:
	/*
	 * CN68XX pass 1.x has an erratum: accessing the ACK registers
	 * can stop interrupts from propagating.
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
		cvmx_read_csr(CVMX_CIU2_INTR_CIU_READY);
	else
		cvmx_read_csr(CVMX_CIU2_ACK_PPX_IP2(core_id));
	return;
}
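/*
 * CIU2 dispatch above is two-level: the IP2 sum register gives the
 * active line (the per-line SRC/EN/RAW registers sit 0x1000 apart),
 * and that line's SRC register gives the bit within the line.
 */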
static void octeon_irq_ciu2_mbox(void)
{
	int line;

	const unsigned long core_id = cvmx_get_core_num();
	u64 sum = cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP3(core_id)) >> 60;

	if (unlikely(!sum))
		goto spurious;

	line = fls64(sum) - 1;

	do_IRQ(OCTEON_IRQ_MBOX0 + line);
	goto out;

spurious:
	spurious_interrupt();
out:
	/*
	 * CN68XX pass 1.x has an erratum: accessing the ACK registers
	 * can stop interrupts from propagating.
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
		cvmx_read_csr(CVMX_CIU2_INTR_CIU_READY);
	else
		cvmx_read_csr(CVMX_CIU2_ACK_PPX_IP3(core_id));
	return;
}

static int __init octeon_irq_init_ciu2(
	struct device_node *ciu_node, struct device_node *parent)
{
	unsigned int i, r;
	struct irq_domain *ciu_domain = NULL;

	octeon_irq_init_ciu2_percpu();
	octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu2;

	octeon_irq_gpio_chip = &octeon_irq_chip_ciu2_gpio;
	octeon_irq_ip2 = octeon_irq_ciu2;
	octeon_irq_ip3 = octeon_irq_ciu2_mbox;
	octeon_irq_ip4 = octeon_irq_ip4_mask;

	/* Mips internal */
	octeon_irq_init_core();

	ciu_domain = irq_domain_add_tree(
		ciu_node, &octeon_irq_domain_ciu2_ops, NULL);
	irq_set_default_host(ciu_domain);

	/* CIU2 */
	for (i = 0; i < 64; i++) {
		r = octeon_irq_force_ciu_mapping(
			ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i);
		if (r)
			goto err;
	}

	for (i = 0; i < 32; i++) {
		r = octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i, 0,
			&octeon_irq_chip_ciu2_wd, handle_level_irq);
		if (r)
			goto err;
	}

	for (i = 0; i < 4; i++) {
		r = octeon_irq_force_ciu_mapping(
			ciu_domain, i + OCTEON_IRQ_TIMER0, 3, i + 8);
		if (r)
			goto err;
	}

	r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 3, 44);
	if (r)
		goto err;

	for (i = 0; i < 4; i++) {
		r = octeon_irq_force_ciu_mapping(
			ciu_domain, i + OCTEON_IRQ_PCI_INT0, 4, i);
		if (r)
			goto err;
	}

	for (i = 0; i < 4; i++) {
		r = octeon_irq_force_ciu_mapping(
			ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 4, i + 8);
		if (r)
			goto err;
	}

	irq_set_chip_and_handler(OCTEON_IRQ_MBOX0, &octeon_irq_chip_ciu2_mbox,
				 handle_percpu_irq);
	irq_set_chip_and_handler(OCTEON_IRQ_MBOX1, &octeon_irq_chip_ciu2_mbox,
				 handle_percpu_irq);
	irq_set_chip_and_handler(OCTEON_IRQ_MBOX2, &octeon_irq_chip_ciu2_mbox,
				 handle_percpu_irq);
	irq_set_chip_and_handler(OCTEON_IRQ_MBOX3, &octeon_irq_chip_ciu2_mbox,
				 handle_percpu_irq);

	/* Enable the CIU lines */
	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
	clear_c0_status(STATUSF_IP4);
	return 0;
err:
	return r;
}

struct octeon_irq_cib_host_data {
	raw_spinlock_t lock;
	u64 raw_reg;
	u64 en_reg;
	int max_bits;
};

struct octeon_irq_cib_chip_data {
	struct octeon_irq_cib_host_data *host_data;
	int bit;
};

static void octeon_irq_cib_enable(struct irq_data *data)
{
	unsigned long flags;
	u64 en;
	struct octeon_irq_cib_chip_data *cd = irq_data_get_irq_chip_data(data);
	struct octeon_irq_cib_host_data *host_data = cd->host_data;

	raw_spin_lock_irqsave(&host_data->lock, flags);
	en = cvmx_read_csr(host_data->en_reg);
	en |= 1ull << cd->bit;
	cvmx_write_csr(host_data->en_reg, en);
	raw_spin_unlock_irqrestore(&host_data->lock, flags);
}
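/*
 * Unlike the CIU/CIU2 W1S/W1C banks, the CIB enable register is a
 * plain read-modify-write register, hence the raw spinlock in the
 * host data guarding every enable/disable above and below.
 */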
struct octeon_irq_cib_host_data {
	raw_spinlock_t lock;
	u64 raw_reg;
	u64 en_reg;
	int max_bits;
};

struct octeon_irq_cib_chip_data {
	struct octeon_irq_cib_host_data *host_data;
	int bit;
};

static void octeon_irq_cib_enable(struct irq_data *data)
{
	unsigned long flags;
	u64 en;
	struct octeon_irq_cib_chip_data *cd = irq_data_get_irq_chip_data(data);
	struct octeon_irq_cib_host_data *host_data = cd->host_data;

	raw_spin_lock_irqsave(&host_data->lock, flags);
	en = cvmx_read_csr(host_data->en_reg);
	en |= 1ull << cd->bit;
	cvmx_write_csr(host_data->en_reg, en);
	raw_spin_unlock_irqrestore(&host_data->lock, flags);
}

static void octeon_irq_cib_disable(struct irq_data *data)
{
	unsigned long flags;
	u64 en;
	struct octeon_irq_cib_chip_data *cd = irq_data_get_irq_chip_data(data);
	struct octeon_irq_cib_host_data *host_data = cd->host_data;

	raw_spin_lock_irqsave(&host_data->lock, flags);
	en = cvmx_read_csr(host_data->en_reg);
	en &= ~(1ull << cd->bit);
	cvmx_write_csr(host_data->en_reg, en);
	raw_spin_unlock_irqrestore(&host_data->lock, flags);
}

static int octeon_irq_cib_set_type(struct irq_data *data, unsigned int t)
{
	irqd_set_trigger_type(data, t);
	return IRQ_SET_MASK_OK;
}

static struct irq_chip octeon_irq_chip_cib = {
	.name = "CIB",
	.irq_enable = octeon_irq_cib_enable,
	.irq_disable = octeon_irq_cib_disable,
	.irq_mask = octeon_irq_cib_disable,
	.irq_unmask = octeon_irq_cib_enable,
	.irq_set_type = octeon_irq_cib_set_type,
};

static int octeon_irq_cib_xlat(struct irq_domain *d,
			       struct device_node *node,
			       const u32 *intspec,
			       unsigned int intsize,
			       unsigned long *out_hwirq,
			       unsigned int *out_type)
{
	unsigned int type = 0;

	if (intsize == 2)
		type = intspec[1];

	switch (type) {
	case 0: /* unofficial value, but we might as well let it work. */
	case 4: /* official value for level triggering. */
		*out_type = IRQ_TYPE_LEVEL_HIGH;
		break;
	case 1: /* official value for edge triggering. */
		*out_type = IRQ_TYPE_EDGE_RISING;
		break;
	default: /* Nothing else is acceptable. */
		return -EINVAL;
	}

	*out_hwirq = intspec[0];

	return 0;
}

static int octeon_irq_cib_map(struct irq_domain *d,
			      unsigned int virq, irq_hw_number_t hw)
{
	struct octeon_irq_cib_host_data *host_data = d->host_data;
	struct octeon_irq_cib_chip_data *cd;

	if (hw >= host_data->max_bits) {
		pr_err("ERROR: %s mapping %u is too big!\n",
		       d->of_node->name, (unsigned)hw);
		return -EINVAL;
	}

	cd = kzalloc(sizeof(*cd), GFP_KERNEL);
	if (!cd)
		return -ENOMEM;
	cd->host_data = host_data;
	cd->bit = hw;

	irq_set_chip_and_handler(virq, &octeon_irq_chip_cib,
				 handle_simple_irq);
	irq_set_chip_data(virq, cd);
	return 0;
}

static struct irq_domain_ops octeon_irq_domain_cib_ops = {
	.map = octeon_irq_cib_map,
	.unmap = octeon_irq_free_cd,
	.xlate = octeon_irq_cib_xlat,
};
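/*
 * From a consumer's point of view the CIB is an ordinary irqchip.  A
 * hypothetical leaf driver whose node carries
 * "interrupt-parent = <&cib>; interrupts = <5 4>;" (bit 5, type 4 ==
 * level, per octeon_irq_cib_xlat() above) would just do:
 *
 *	int irq = irq_of_parse_and_map(node, 0);
 *
 *	if (irq)
 *		err = request_irq(irq, my_handler, 0, "mydev", mydev);
 *
 * leaving translation, masking and dispatch to the callbacks above and
 * the chained handler below.
 */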
/* Chain to the real handler. */
static irqreturn_t octeon_irq_cib_handler(int my_irq, void *data)
{
	u64 en;
	u64 raw;
	u64 bits;
	int i;
	int irq;
	struct irq_domain *cib_domain = data;
	struct octeon_irq_cib_host_data *host_data = cib_domain->host_data;

	en = cvmx_read_csr(host_data->en_reg);
	raw = cvmx_read_csr(host_data->raw_reg);

	bits = en & raw;

	for (i = 0; i < host_data->max_bits; i++) {
		if ((bits & 1ull << i) == 0)
			continue;
		irq = irq_find_mapping(cib_domain, i);
		if (!irq) {
			unsigned long flags;

			pr_err("ERROR: CIB bit %d@%llx IRQ unhandled, disabling\n",
			       i, host_data->raw_reg);
			raw_spin_lock_irqsave(&host_data->lock, flags);
			en = cvmx_read_csr(host_data->en_reg);
			en &= ~(1ull << i);
			cvmx_write_csr(host_data->en_reg, en);
			cvmx_write_csr(host_data->raw_reg, 1ull << i);
			raw_spin_unlock_irqrestore(&host_data->lock, flags);
		} else {
			struct irq_desc *desc = irq_to_desc(irq);
			struct irq_data *irq_data = irq_desc_get_irq_data(desc);

			/*
			 * If edge triggered, ack the bit before
			 * dispatching so that a new edge arriving while
			 * the handler runs is not lost.
			 */
			if (irqd_get_trigger_type(irq_data) &
			    IRQ_TYPE_EDGE_BOTH)
				cvmx_write_csr(host_data->raw_reg, 1ull << i);
			generic_handle_irq_desc(irq, desc);
		}
	}

	return IRQ_HANDLED;
}

static int __init octeon_irq_init_cib(struct device_node *ciu_node,
				      struct device_node *parent)
{
	const __be32 *addr;
	u32 val;
	struct octeon_irq_cib_host_data *host_data;
	int parent_irq;
	int r;
	struct irq_domain *cib_domain;

	parent_irq = irq_of_parse_and_map(ciu_node, 0);
	if (!parent_irq) {
		pr_err("ERROR: Couldn't acquire parent_irq for %s.\n",
		       ciu_node->name);
		return -EINVAL;
	}

	host_data = kzalloc(sizeof(*host_data), GFP_KERNEL);
	if (!host_data)
		return -ENOMEM;
	raw_spin_lock_init(&host_data->lock);

	addr = of_get_address(ciu_node, 0, NULL, NULL);
	if (!addr) {
		pr_err("ERROR: Couldn't acquire reg(0) %s.\n", ciu_node->name);
		return -EINVAL;
	}
	host_data->raw_reg = (u64)phys_to_virt(
		of_translate_address(ciu_node, addr));

	addr = of_get_address(ciu_node, 1, NULL, NULL);
	if (!addr) {
		pr_err("ERROR: Couldn't acquire reg(1) %s.\n", ciu_node->name);
		return -EINVAL;
	}
	host_data->en_reg = (u64)phys_to_virt(
		of_translate_address(ciu_node, addr));

	r = of_property_read_u32(ciu_node, "cavium,max-bits", &val);
	if (r) {
		pr_err("ERROR: Couldn't read cavium,max-bits from %s.\n",
		       ciu_node->name);
		return r;
	}
	host_data->max_bits = val;

	cib_domain = irq_domain_add_linear(ciu_node, host_data->max_bits,
					   &octeon_irq_domain_cib_ops,
					   host_data);
	if (!cib_domain) {
		pr_err("ERROR: Couldn't irq_domain_add_linear().\n");
		return -ENOMEM;
	}

	cvmx_write_csr(host_data->en_reg, 0); /* disable all IRQs */
	cvmx_write_csr(host_data->raw_reg, ~0); /* ack any outstanding */

	r = request_irq(parent_irq, octeon_irq_cib_handler,
			IRQF_NO_THREAD, "cib", cib_domain);
	if (r) {
		pr_err("request_irq cib failed %d\n", r);
		return r;
	}
	pr_info("CIB interrupt controller probed: %llx %d\n",
		host_data->raw_reg, host_data->max_bits);
	return 0;
}

static struct of_device_id ciu_types[] __initdata = {
	{.compatible = "cavium,octeon-3860-ciu", .data = octeon_irq_init_ciu},
	{.compatible = "cavium,octeon-3860-gpio", .data = octeon_irq_init_gpio},
	{.compatible = "cavium,octeon-6880-ciu2", .data = octeon_irq_init_ciu2},
	{.compatible = "cavium,octeon-7130-cib", .data = octeon_irq_init_cib},
	{}
};
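/*
 * of_irq_init() below initializes matching controllers parent-first,
 * so the root CIU/CIU2 node is set up before any CIB that chains off
 * it.  For reference, a minimal root node might look like (sketch
 * only, values illustrative):
 *
 *	ciu: interrupt-controller@1070000000000 {
 *		compatible = "cavium,octeon-3860-ciu";
 *		interrupt-controller;
 *		#interrupt-cells = <2>;
 *		reg = <0x10700 0x00000000 0x0 0x7000>;
 *	};
 */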
void __init arch_init_irq(void)
{
#ifdef CONFIG_SMP
	/* Set the default affinity to the boot cpu. */
	cpumask_clear(irq_default_affinity);
	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
#endif
	of_irq_init(ciu_types);
}

asmlinkage void plat_irq_dispatch(void)
{
	unsigned long cop0_cause;
	unsigned long cop0_status;

	while (1) {
		cop0_cause = read_c0_cause();
		cop0_status = read_c0_status();
		cop0_cause &= cop0_status;
		cop0_cause &= ST0_IM;

		if (cop0_cause & STATUSF_IP2)
			octeon_irq_ip2();
		else if (cop0_cause & STATUSF_IP3)
			octeon_irq_ip3();
		else if (cop0_cause & STATUSF_IP4)
			octeon_irq_ip4();
		else if (cop0_cause)
			/*
			 * The IP bits occupy Cause[15:8]; fls() is
			 * 1-based, so this maps the highest pending bit
			 * to core irq line 0..7.
			 */
			do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE);
		else
			break;
	}
}

#ifdef CONFIG_HOTPLUG_CPU

void octeon_fixup_irqs(void)
{
	irq_cpu_offline();
}

#endif /* CONFIG_HOTPLUG_CPU */