/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004-2012 Cavium, Inc.
 */

#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/bitops.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/of.h>

#include <asm/octeon/octeon.h>

static DEFINE_RAW_SPINLOCK(octeon_irq_ciu0_lock);
static DEFINE_RAW_SPINLOCK(octeon_irq_ciu1_lock);

static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu0_en_mirror);
static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu1_en_mirror);

/* Linux irq number for each bit of each CIU line; 0 means "not mapped". */
static __read_mostly u8 octeon_irq_ciu_to_irq[8][64];

/*
 * The (line, bit) pair identifying a CIU source is packed directly
 * into the irq chip_data pointer, so no separate allocation is needed.
 */
union octeon_ciu_chip_data {
	void *p;
	unsigned long l;
	struct {
		unsigned int line:6;
		unsigned int bit:6;
	} s;
};

struct octeon_core_chip_data {
	struct mutex core_irq_mutex;
	bool current_en;
	bool desired_en;
	u8 bit;
};

#define MIPS_CORE_IRQ_LINES 8

static struct octeon_core_chip_data octeon_irq_core_chip_data[MIPS_CORE_IRQ_LINES];

static void octeon_irq_set_ciu_mapping(int irq, int line, int bit,
				       struct irq_chip *chip,
				       irq_flow_handler_t handler)
{
	union octeon_ciu_chip_data cd;

	irq_set_chip_and_handler(irq, chip, handler);

	cd.l = 0;
	cd.s.line = line;
	cd.s.bit = bit;

	irq_set_chip_data(irq, cd.p);
	octeon_irq_ciu_to_irq[line][bit] = irq;
}

static void octeon_irq_force_ciu_mapping(struct irq_domain *domain,
					 int irq, int line, int bit)
{
	irq_domain_associate(domain, irq, line << 6 | bit);
}

static int octeon_coreid_for_cpu(int cpu)
{
#ifdef CONFIG_SMP
	return cpu_logical_map(cpu);
#else
	return cvmx_get_core_num();
#endif
}

static int octeon_cpu_for_coreid(int coreid)
{
#ifdef CONFIG_SMP
	return cpu_number_map(coreid);
#else
	return smp_processor_id();
#endif
}

static void octeon_irq_core_ack(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
	unsigned int bit = cd->bit;

	/*
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	clear_c0_status(0x100 << bit);
	/* The two user interrupts must be cleared manually. */
	if (bit < 2)
		clear_c0_cause(0x100 << bit);
}

static void octeon_irq_core_eoi(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);

	/*
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	set_c0_status(0x100 << cd->bit);
}
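
/*
 * Enable/disable of the core interrupt lines is deferred:
 * .irq_enable and .irq_disable below only record the desired state,
 * and the irq_bus_lock/irq_bus_sync_unlock pair pushes it out.  This
 * helper applies the recorded state to the local CP0 Status register;
 * it is run on every CPU via on_each_cpu().
 */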
static void octeon_irq_core_set_enable_local(void *arg)
{
	struct irq_data *data = arg;
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
	unsigned int mask = 0x100 << cd->bit;

	/*
	 * Interrupts are already disabled, so these are atomic.
	 */
	if (cd->desired_en)
		set_c0_status(mask);
	else
		clear_c0_status(mask);
}

static void octeon_irq_core_disable(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
	cd->desired_en = false;
}

static void octeon_irq_core_enable(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
	cd->desired_en = true;
}

static void octeon_irq_core_bus_lock(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);

	mutex_lock(&cd->core_irq_mutex);
}

static void octeon_irq_core_bus_sync_unlock(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);

	if (cd->desired_en != cd->current_en) {
		on_each_cpu(octeon_irq_core_set_enable_local, data, 1);

		cd->current_en = cd->desired_en;
	}

	mutex_unlock(&cd->core_irq_mutex);
}

static struct irq_chip octeon_irq_chip_core = {
	.name = "Core",
	.irq_enable = octeon_irq_core_enable,
	.irq_disable = octeon_irq_core_disable,
	.irq_ack = octeon_irq_core_ack,
	.irq_eoi = octeon_irq_core_eoi,
	.irq_bus_lock = octeon_irq_core_bus_lock,
	.irq_bus_sync_unlock = octeon_irq_core_bus_sync_unlock,

	.irq_cpu_online = octeon_irq_core_eoi,
	.irq_cpu_offline = octeon_irq_core_ack,
	.flags = IRQCHIP_ONOFFLINE_ENABLED,
};

static void __init octeon_irq_init_core(void)
{
	int i;
	int irq;
	struct octeon_core_chip_data *cd;

	for (i = 0; i < MIPS_CORE_IRQ_LINES; i++) {
		cd = &octeon_irq_core_chip_data[i];
		cd->current_en = false;
		cd->desired_en = false;
		cd->bit = i;
		mutex_init(&cd->core_irq_mutex);

		irq = OCTEON_IRQ_SW0 + i;
		irq_set_chip_data(irq, cd);
		irq_set_chip_and_handler(irq, &octeon_irq_chip_core,
					 handle_percpu_irq);
	}
}

static int next_cpu_for_irq(struct irq_data *data)
{

#ifdef CONFIG_SMP
	int cpu;
	int weight = cpumask_weight(data->affinity);

	if (weight > 1) {
		cpu = smp_processor_id();
		for (;;) {
			cpu = cpumask_next(cpu, data->affinity);
			if (cpu >= nr_cpu_ids) {
				cpu = -1;
				continue;
			} else if (cpumask_test_cpu(cpu, cpu_online_mask)) {
				break;
			}
		}
	} else if (weight == 1) {
		cpu = cpumask_first(data->affinity);
	} else {
		cpu = smp_processor_id();
	}
	return cpu;
#else
	return smp_processor_id();
#endif
}

static void octeon_irq_ciu_enable(struct irq_data *data)
{
	int cpu = next_cpu_for_irq(data);
	int coreid = octeon_coreid_for_cpu(cpu);
	unsigned long *pen;
	unsigned long flags;
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);

	if (cd.s.line == 0) {
		raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
		pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
		set_bit(cd.s.bit, pen);
		cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
		raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
	} else {
		raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
		pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
		set_bit(cd.s.bit, pen);
		cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
		raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
	}
}
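
/*
 * Variants of the above that only touch the current CPU's CIU enable
 * register; the mailbox irq_chip below uses these as its
 * irq_cpu_online/irq_cpu_offline hooks.
 */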
static void octeon_irq_ciu_enable_local(struct irq_data *data)
{
	unsigned long *pen;
	unsigned long flags;
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);

	if (cd.s.line == 0) {
		raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
		pen = &__get_cpu_var(octeon_irq_ciu0_en_mirror);
		set_bit(cd.s.bit, pen);
		cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen);
		raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
	} else {
		raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
		pen = &__get_cpu_var(octeon_irq_ciu1_en_mirror);
		set_bit(cd.s.bit, pen);
		cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1), *pen);
		raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
	}
}

static void octeon_irq_ciu_disable_local(struct irq_data *data)
{
	unsigned long *pen;
	unsigned long flags;
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);

	if (cd.s.line == 0) {
		raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
		pen = &__get_cpu_var(octeon_irq_ciu0_en_mirror);
		clear_bit(cd.s.bit, pen);
		cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen);
		raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
	} else {
		raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
		pen = &__get_cpu_var(octeon_irq_ciu1_en_mirror);
		clear_bit(cd.s.bit, pen);
		cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1), *pen);
		raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
	}
}

static void octeon_irq_ciu_disable_all(struct irq_data *data)
{
	unsigned long flags;
	unsigned long *pen;
	int cpu;
	union octeon_ciu_chip_data cd;

	wmb(); /* Make sure flag changes arrive before register updates. */

	cd.p = irq_data_get_irq_chip_data(data);

	if (cd.s.line == 0) {
		raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
		for_each_online_cpu(cpu) {
			int coreid = octeon_coreid_for_cpu(cpu);
			pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
			clear_bit(cd.s.bit, pen);
			cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
		}
		raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
	} else {
		raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
		for_each_online_cpu(cpu) {
			int coreid = octeon_coreid_for_cpu(cpu);
			pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
			clear_bit(cd.s.bit, pen);
			cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
		}
		raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
	}
}

static void octeon_irq_ciu_enable_all(struct irq_data *data)
{
	unsigned long flags;
	unsigned long *pen;
	int cpu;
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);

	if (cd.s.line == 0) {
		raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
		for_each_online_cpu(cpu) {
			int coreid = octeon_coreid_for_cpu(cpu);
			pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
			set_bit(cd.s.bit, pen);
			cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
		}
		raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
	} else {
		raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
		for_each_online_cpu(cpu) {
			int coreid = octeon_coreid_for_cpu(cpu);
			pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
			set_bit(cd.s.bit, pen);
			cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
		}
		raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
	}
}
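
/*
 * On newer chips every CIU enable register has W1S ("write one to
 * set") and W1C ("write one to clear") aliases, so a single bit can
 * be changed with one atomic write.  The _v2 handlers below therefore
 * need neither the spinlocks nor the read-modify-write of the code
 * above; the per-CPU mirrors are still maintained for the IP2/IP3
 * dispatch path.
 */
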
/*
 * Enable the irq on the next core in the affinity set for chips that
 * have the EN*_W1{S,C} registers.
 */
static void octeon_irq_ciu_enable_v2(struct irq_data *data)
{
	u64 mask;
	int cpu = next_cpu_for_irq(data);
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd.s.bit);

	/*
	 * Called under the desc lock, so these should never get out
	 * of sync.
	 */
	if (cd.s.line == 0) {
		int index = octeon_coreid_for_cpu(cpu) * 2;
		set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
	} else {
		int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
		set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
	}
}

/*
 * Enable the irq on the current CPU for chips that
 * have the EN*_W1{S,C} registers.
 */
static void octeon_irq_ciu_enable_local_v2(struct irq_data *data)
{
	u64 mask;
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd.s.bit);

	if (cd.s.line == 0) {
		int index = cvmx_get_core_num() * 2;
		set_bit(cd.s.bit, &__get_cpu_var(octeon_irq_ciu0_en_mirror));
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
	} else {
		int index = cvmx_get_core_num() * 2 + 1;
		set_bit(cd.s.bit, &__get_cpu_var(octeon_irq_ciu1_en_mirror));
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
	}
}

static void octeon_irq_ciu_disable_local_v2(struct irq_data *data)
{
	u64 mask;
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd.s.bit);

	if (cd.s.line == 0) {
		int index = cvmx_get_core_num() * 2;
		clear_bit(cd.s.bit, &__get_cpu_var(octeon_irq_ciu0_en_mirror));
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
	} else {
		int index = cvmx_get_core_num() * 2 + 1;
		clear_bit(cd.s.bit, &__get_cpu_var(octeon_irq_ciu1_en_mirror));
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
	}
}

/*
 * Write to the W1C bit in CVMX_CIU_INTX_SUM0 to clear the irq.
 */
static void octeon_irq_ciu_ack(struct irq_data *data)
{
	u64 mask;
	union octeon_ciu_chip_data cd;

	cd.p = data->chip_data;
	mask = 1ull << (cd.s.bit);

	if (cd.s.line == 0) {
		int index = cvmx_get_core_num() * 2;
		cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask);
	} else {
		cvmx_write_csr(CVMX_CIU_INT_SUM1, mask);
	}
}

/*
 * Disable the irq on all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu_disable_all_v2(struct irq_data *data)
{
	int cpu;
	u64 mask;
	union octeon_ciu_chip_data cd;

	wmb(); /* Make sure flag changes arrive before register updates. */

	cd.p = data->chip_data;
	mask = 1ull << (cd.s.bit);

	if (cd.s.line == 0) {
		for_each_online_cpu(cpu) {
			int index = octeon_coreid_for_cpu(cpu) * 2;
			clear_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
		}
	} else {
		for_each_online_cpu(cpu) {
			int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
			clear_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
		}
	}
}

/*
 * Enable the irq on all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu_enable_all_v2(struct irq_data *data)
{
	int cpu;
	u64 mask;
	union octeon_ciu_chip_data cd;

	cd.p = data->chip_data;
	mask = 1ull << (cd.s.bit);

	if (cd.s.line == 0) {
		for_each_online_cpu(cpu) {
			int index = octeon_coreid_for_cpu(cpu) * 2;
			set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
		}
	} else {
		for_each_online_cpu(cpu) {
			int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
			set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
		}
	}
}

/*
 * GPIO pins show up as CIU line 0, bits 16..31; the GPIO_BIT_CFG
 * registers are indexed by pin, hence the "bit - 16" below.
 */
static void octeon_irq_gpio_setup(struct irq_data *data)
{
	union cvmx_gpio_bit_cfgx cfg;
	union octeon_ciu_chip_data cd;
	u32 t = irqd_get_trigger_type(data);

	cd.p = irq_data_get_irq_chip_data(data);

	cfg.u64 = 0;
	cfg.s.int_en = 1;
	cfg.s.int_type = (t & IRQ_TYPE_EDGE_BOTH) != 0;
	cfg.s.rx_xor = (t & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING)) != 0;

	/* 140 ns glitch filter */
	cfg.s.fil_cnt = 7;
	cfg.s.fil_sel = 3;

	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.bit - 16), cfg.u64);
}

static void octeon_irq_ciu_enable_gpio_v2(struct irq_data *data)
{
	octeon_irq_gpio_setup(data);
	octeon_irq_ciu_enable_v2(data);
}

static void octeon_irq_ciu_enable_gpio(struct irq_data *data)
{
	octeon_irq_gpio_setup(data);
	octeon_irq_ciu_enable(data);
}

static int octeon_irq_ciu_gpio_set_type(struct irq_data *data, unsigned int t)
{
	irqd_set_trigger_type(data, t);
	octeon_irq_gpio_setup(data);

	return IRQ_SET_MASK_OK;
}

static void octeon_irq_ciu_disable_gpio_v2(struct irq_data *data)
{
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);
	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.bit - 16), 0);

	octeon_irq_ciu_disable_all_v2(data);
}

static void octeon_irq_ciu_disable_gpio(struct irq_data *data)
{
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);
	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.bit - 16), 0);

	octeon_irq_ciu_disable_all(data);
}

static void octeon_irq_ciu_gpio_ack(struct irq_data *data)
{
	union octeon_ciu_chip_data cd;
	u64 mask;

	cd.p = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd.s.bit - 16);

	cvmx_write_csr(CVMX_GPIO_INT_CLR, mask);
}

static void octeon_irq_handle_gpio(unsigned int irq, struct irq_desc *desc)
{
	if (irqd_get_trigger_type(irq_desc_get_irq_data(desc)) & IRQ_TYPE_EDGE_BOTH)
		handle_edge_irq(irq, desc);
	else
		handle_level_irq(irq, desc);
}
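
/*
 * With CONFIG_SMP the CIU irqs support affinity: when a CPU goes
 * offline its irqs are migrated to another CPU in the affinity set
 * (or to the lowest numbered online CPU), and the set_affinity
 * handlers rewrite the per-core enable masks so that exactly one
 * core sees each irq.
 */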
#ifdef CONFIG_SMP

static void octeon_irq_cpu_offline_ciu(struct irq_data *data)
{
	int cpu = smp_processor_id();
	cpumask_t new_affinity;

	if (!cpumask_test_cpu(cpu, data->affinity))
		return;

	if (cpumask_weight(data->affinity) > 1) {
		/*
		 * It has multi-CPU affinity, so just remove this CPU
		 * from the affinity set.
		 */
		cpumask_copy(&new_affinity, data->affinity);
		cpumask_clear_cpu(cpu, &new_affinity);
	} else {
		/* Otherwise, put it on the lowest numbered online CPU. */
		cpumask_clear(&new_affinity);
		cpumask_set_cpu(cpumask_first(cpu_online_mask), &new_affinity);
	}
	__irq_set_affinity_locked(data, &new_affinity);
}

static int octeon_irq_ciu_set_affinity(struct irq_data *data,
				       const struct cpumask *dest, bool force)
{
	int cpu;
	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
	unsigned long flags;
	union octeon_ciu_chip_data cd;

	cd.p = data->chip_data;

	/*
	 * For non-v2 CIU, we will allow only single CPU affinity.
	 * This removes the need to do locking in the .ack/.eoi
	 * functions.
	 */
	if (cpumask_weight(dest) != 1)
		return -EINVAL;

	if (!enable_one)
		return 0;

	if (cd.s.line == 0) {
		raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
		for_each_online_cpu(cpu) {
			int coreid = octeon_coreid_for_cpu(cpu);
			unsigned long *pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);

			if (cpumask_test_cpu(cpu, dest) && enable_one) {
				enable_one = false;
				set_bit(cd.s.bit, pen);
			} else {
				clear_bit(cd.s.bit, pen);
			}
			cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
		}
		raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
	} else {
		raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
		for_each_online_cpu(cpu) {
			int coreid = octeon_coreid_for_cpu(cpu);
			unsigned long *pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);

			if (cpumask_test_cpu(cpu, dest) && enable_one) {
				enable_one = false;
				set_bit(cd.s.bit, pen);
			} else {
				clear_bit(cd.s.bit, pen);
			}
			cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
		}
		raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
	}
	return 0;
}

/*
 * Set affinity for the irq for chips that have the EN*_W1{S,C}
 * registers.
 */
static int octeon_irq_ciu_set_affinity_v2(struct irq_data *data,
					  const struct cpumask *dest,
					  bool force)
{
	int cpu;
	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
	u64 mask;
	union octeon_ciu_chip_data cd;

	if (!enable_one)
		return 0;

	cd.p = data->chip_data;
	mask = 1ull << cd.s.bit;

	if (cd.s.line == 0) {
		for_each_online_cpu(cpu) {
			unsigned long *pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
			int index = octeon_coreid_for_cpu(cpu) * 2;
			if (cpumask_test_cpu(cpu, dest) && enable_one) {
				enable_one = false;
				set_bit(cd.s.bit, pen);
				cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
			} else {
				clear_bit(cd.s.bit, pen);
				cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
			}
		}
	} else {
		for_each_online_cpu(cpu) {
			unsigned long *pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
			int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
			if (cpumask_test_cpu(cpu, dest) && enable_one) {
				enable_one = false;
				set_bit(cd.s.bit, pen);
				cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
			} else {
				clear_bit(cd.s.bit, pen);
				cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
			}
		}
	}
	return 0;
}
#endif

/*
 * The v1 CIU code already masks things, so supply a dummy version to
 * the core chip code.
 */
static void octeon_irq_dummy_mask(struct irq_data *data)
{
}
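
/*
 * Two parallel sets of irq_chip structures follow.  The _v2 variants
 * rely on the atomic W1S/W1C enable registers; the plain variants
 * fall back to the spinlock-protected read-modify-write paths above.
 * octeon_irq_init_ciu() picks the right set once at boot, based on
 * the chip model.
 */
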
/*
 * Newer octeon chips have support for lockless CIU operation.
 */
static struct irq_chip octeon_irq_chip_ciu_v2 = {
	.name = "CIU",
	.irq_enable = octeon_irq_ciu_enable_v2,
	.irq_disable = octeon_irq_ciu_disable_all_v2,
	.irq_ack = octeon_irq_ciu_ack,
	.irq_mask = octeon_irq_ciu_disable_local_v2,
	.irq_unmask = octeon_irq_ciu_enable_v2,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

static struct irq_chip octeon_irq_chip_ciu = {
	.name = "CIU",
	.irq_enable = octeon_irq_ciu_enable,
	.irq_disable = octeon_irq_ciu_disable_all,
	.irq_ack = octeon_irq_ciu_ack,
	.irq_mask = octeon_irq_dummy_mask,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

/* The mbox versions don't do any affinity or round-robin. */
static struct irq_chip octeon_irq_chip_ciu_mbox_v2 = {
	.name = "CIU-M",
	.irq_enable = octeon_irq_ciu_enable_all_v2,
	.irq_disable = octeon_irq_ciu_disable_all_v2,
	.irq_ack = octeon_irq_ciu_disable_local_v2,
	.irq_eoi = octeon_irq_ciu_enable_local_v2,

	.irq_cpu_online = octeon_irq_ciu_enable_local_v2,
	.irq_cpu_offline = octeon_irq_ciu_disable_local_v2,
	.flags = IRQCHIP_ONOFFLINE_ENABLED,
};

static struct irq_chip octeon_irq_chip_ciu_mbox = {
	.name = "CIU-M",
	.irq_enable = octeon_irq_ciu_enable_all,
	.irq_disable = octeon_irq_ciu_disable_all,

	.irq_cpu_online = octeon_irq_ciu_enable_local,
	.irq_cpu_offline = octeon_irq_ciu_disable_local,
	.flags = IRQCHIP_ONOFFLINE_ENABLED,
};

static struct irq_chip octeon_irq_chip_ciu_gpio_v2 = {
	.name = "CIU-GPIO",
	.irq_enable = octeon_irq_ciu_enable_gpio_v2,
	.irq_disable = octeon_irq_ciu_disable_gpio_v2,
	.irq_ack = octeon_irq_ciu_gpio_ack,
	.irq_mask = octeon_irq_ciu_disable_local_v2,
	.irq_unmask = octeon_irq_ciu_enable_v2,
	.irq_set_type = octeon_irq_ciu_gpio_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
#endif
	.flags = IRQCHIP_SET_TYPE_MASKED,
};

static struct irq_chip octeon_irq_chip_ciu_gpio = {
	.name = "CIU-GPIO",
	.irq_enable = octeon_irq_ciu_enable_gpio,
	.irq_disable = octeon_irq_ciu_disable_gpio,
	.irq_mask = octeon_irq_dummy_mask,
	.irq_ack = octeon_irq_ciu_gpio_ack,
	.irq_set_type = octeon_irq_ciu_gpio_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity,
#endif
	.flags = IRQCHIP_SET_TYPE_MASKED,
};

/*
 * Watchdog interrupts are special.  They are associated with a single
 * core, so we hardwire the affinity to that core.
 */
static void octeon_irq_ciu_wd_enable(struct irq_data *data)
{
	unsigned long flags;
	unsigned long *pen;
	int coreid = data->irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */
	int cpu = octeon_cpu_for_coreid(coreid);

	raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
	pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
	set_bit(coreid, pen);
	cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
	raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
}

/*
 * Watchdog interrupts are special.  They are associated with a single
 * core, so we hardwire the affinity to that core.
 */
static void octeon_irq_ciu1_wd_enable_v2(struct irq_data *data)
{
	int coreid = data->irq - OCTEON_IRQ_WDOG0;
	int cpu = octeon_cpu_for_coreid(coreid);

	set_bit(coreid, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
	cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(coreid * 2 + 1), 1ull << coreid);
}

static struct irq_chip octeon_irq_chip_ciu_wd_v2 = {
	.name = "CIU-W",
	.irq_enable = octeon_irq_ciu1_wd_enable_v2,
	.irq_disable = octeon_irq_ciu_disable_all_v2,
	.irq_mask = octeon_irq_ciu_disable_local_v2,
	.irq_unmask = octeon_irq_ciu_enable_local_v2,
};

static struct irq_chip octeon_irq_chip_ciu_wd = {
	.name = "CIU-W",
	.irq_enable = octeon_irq_ciu_wd_enable,
	.irq_disable = octeon_irq_ciu_disable_all,
	.irq_mask = octeon_irq_dummy_mask,
};

static bool octeon_irq_ciu_is_edge(unsigned int line, unsigned int bit)
{
	bool edge = false;

	if (line == 0)
		switch (bit) {
		case 48 ... 49:	/* GMX DRP */
		case 50:	/* IPD_DRP */
		case 52 ... 55:	/* Timers */
		case 58:	/* MPI */
			edge = true;
			break;
		default:
			break;
		}
	else /* line == 1 */
		switch (bit) {
		case 47:	/* PTP */
			edge = true;
			break;
		default:
			break;
		}
	return edge;
}

struct octeon_irq_gpio_domain_data {
	unsigned int base_hwirq;
};
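
/*
 * Translate a device tree interrupt specifier into a (pin, type)
 * pair.  A consumer node would, for example, request GPIO pin 4 with
 * a falling-edge trigger like this (illustrative sketch, not taken
 * from a real .dts):
 *
 *	interrupt-parent = <&gpio>;
 *	interrupts = <4 2>;
 *
 * The second cell encodes the trigger: 1 = rising edge, 2 = falling
 * edge, 4 = level high, 8 = level low.
 */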
static int octeon_irq_gpio_xlat(struct irq_domain *d,
				struct device_node *node,
				const u32 *intspec,
				unsigned int intsize,
				unsigned long *out_hwirq,
				unsigned int *out_type)
{
	unsigned int type;
	unsigned int pin;
	unsigned int trigger;

	if (d->of_node != node)
		return -EINVAL;

	if (intsize < 2)
		return -EINVAL;

	pin = intspec[0];
	if (pin >= 16)
		return -EINVAL;

	trigger = intspec[1];

	switch (trigger) {
	case 1:
		type = IRQ_TYPE_EDGE_RISING;
		break;
	case 2:
		type = IRQ_TYPE_EDGE_FALLING;
		break;
	case 4:
		type = IRQ_TYPE_LEVEL_HIGH;
		break;
	case 8:
		type = IRQ_TYPE_LEVEL_LOW;
		break;
	default:
		pr_err("Error: (%s) Invalid irq trigger specification: %x\n",
		       node->name,
		       trigger);
		type = IRQ_TYPE_LEVEL_LOW;
		break;
	}
	*out_type = type;
	*out_hwirq = pin;

	return 0;
}

static int octeon_irq_ciu_xlat(struct irq_domain *d,
			       struct device_node *node,
			       const u32 *intspec,
			       unsigned int intsize,
			       unsigned long *out_hwirq,
			       unsigned int *out_type)
{
	unsigned int ciu, bit;

	ciu = intspec[0];
	bit = intspec[1];

	if (ciu > 1 || bit > 63)
		return -EINVAL;

	/* These are the GPIO lines */
	if (ciu == 0 && bit >= 16 && bit < 32)
		return -EINVAL;

	*out_hwirq = (ciu << 6) | bit;
	*out_type = 0;

	return 0;
}

static struct irq_chip *octeon_irq_ciu_chip;
static struct irq_chip *octeon_irq_gpio_chip;

static bool octeon_irq_virq_in_range(unsigned int virq)
{
	/* We cannot let it overflow the mapping array. */
	if (virq < (1ul << 8 * sizeof(octeon_irq_ciu_to_irq[0][0])))
		return true;

	WARN_ONCE(true, "virq out of range %u.\n", virq);
	return false;
}
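
/*
 * irq_domain .map callbacks: decode the hwirq (line << 6 | bit) back
 * into its (line, bit) pair, reject duplicate mappings, and install
 * the appropriate chip and flow handler.
 */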
static int octeon_irq_ciu_map(struct irq_domain *d,
			      unsigned int virq, irq_hw_number_t hw)
{
	unsigned int line = hw >> 6;
	unsigned int bit = hw & 63;

	if (!octeon_irq_virq_in_range(virq))
		return -EINVAL;

	if (line > 1 || octeon_irq_ciu_to_irq[line][bit] != 0)
		return -EINVAL;

	if (octeon_irq_ciu_is_edge(line, bit))
		octeon_irq_set_ciu_mapping(virq, line, bit,
					   octeon_irq_ciu_chip,
					   handle_edge_irq);
	else
		octeon_irq_set_ciu_mapping(virq, line, bit,
					   octeon_irq_ciu_chip,
					   handle_level_irq);

	return 0;
}

static int octeon_irq_gpio_map(struct irq_domain *d,
			       unsigned int virq, irq_hw_number_t hw)
{
	struct octeon_irq_gpio_domain_data *gpiod = d->host_data;
	unsigned int line, bit;

	if (!octeon_irq_virq_in_range(virq))
		return -EINVAL;

	hw += gpiod->base_hwirq;
	line = hw >> 6;
	bit = hw & 63;
	if (line > 1 || octeon_irq_ciu_to_irq[line][bit] != 0)
		return -EINVAL;

	octeon_irq_set_ciu_mapping(virq, line, bit,
				   octeon_irq_gpio_chip,
				   octeon_irq_handle_gpio);
	return 0;
}

static struct irq_domain_ops octeon_irq_domain_ciu_ops = {
	.map = octeon_irq_ciu_map,
	.xlate = octeon_irq_ciu_xlat,
};

static struct irq_domain_ops octeon_irq_domain_gpio_ops = {
	.map = octeon_irq_gpio_map,
	.xlate = octeon_irq_gpio_xlat,
};
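
/*
 * Low-level dispatch for the IP2 (CIU0) and IP3 (CIU1) interrupt
 * lines.  The _v1 variants mask the line in the CP0 Status register
 * while the irq is handled; the _v2 variants, selected on chips with
 * the W1S/W1C registers, can skip that step.
 */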
static void octeon_irq_ip2_v1(void)
{
	const unsigned long core_id = cvmx_get_core_num();
	u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INTX_SUM0(core_id * 2));

	ciu_sum &= __get_cpu_var(octeon_irq_ciu0_en_mirror);
	clear_c0_status(STATUSF_IP2);
	if (likely(ciu_sum)) {
		int bit = fls64(ciu_sum) - 1;
		int irq = octeon_irq_ciu_to_irq[0][bit];
		if (likely(irq))
			do_IRQ(irq);
		else
			spurious_interrupt();
	} else {
		spurious_interrupt();
	}
	set_c0_status(STATUSF_IP2);
}

static void octeon_irq_ip2_v2(void)
{
	const unsigned long core_id = cvmx_get_core_num();
	u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INTX_SUM0(core_id * 2));

	ciu_sum &= __get_cpu_var(octeon_irq_ciu0_en_mirror);
	if (likely(ciu_sum)) {
		int bit = fls64(ciu_sum) - 1;
		int irq = octeon_irq_ciu_to_irq[0][bit];
		if (likely(irq))
			do_IRQ(irq);
		else
			spurious_interrupt();
	} else {
		spurious_interrupt();
	}
}

static void octeon_irq_ip3_v1(void)
{
	u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INT_SUM1);

	ciu_sum &= __get_cpu_var(octeon_irq_ciu1_en_mirror);
	clear_c0_status(STATUSF_IP3);
	if (likely(ciu_sum)) {
		int bit = fls64(ciu_sum) - 1;
		int irq = octeon_irq_ciu_to_irq[1][bit];
		if (likely(irq))
			do_IRQ(irq);
		else
			spurious_interrupt();
	} else {
		spurious_interrupt();
	}
	set_c0_status(STATUSF_IP3);
}

static void octeon_irq_ip3_v2(void)
{
	u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INT_SUM1);

	ciu_sum &= __get_cpu_var(octeon_irq_ciu1_en_mirror);
	if (likely(ciu_sum)) {
		int bit = fls64(ciu_sum) - 1;
		int irq = octeon_irq_ciu_to_irq[1][bit];
		if (likely(irq))
			do_IRQ(irq);
		else
			spurious_interrupt();
	} else {
		spurious_interrupt();
	}
}

static void octeon_irq_ip4_mask(void)
{
	clear_c0_status(STATUSF_IP4);
	spurious_interrupt();
}

static void (*octeon_irq_ip2)(void);
static void (*octeon_irq_ip3)(void);
static void (*octeon_irq_ip4)(void);

void __cpuinitdata (*octeon_irq_setup_secondary)(void);

static void __cpuinit octeon_irq_percpu_enable(void)
{
	irq_cpu_online();
}

static void __cpuinit octeon_irq_init_ciu_percpu(void)
{
	int coreid = cvmx_get_core_num();
	/*
	 * Disable all CIU interrupts.  The ones we need will be
	 * enabled later.  Read the SUM register so we know the write
	 * completed.
	 */
	cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2)), 0);
	cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2 + 1)), 0);
	cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2)), 0);
	cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2 + 1)), 0);
	cvmx_read_csr(CVMX_CIU_INTX_SUM0((coreid * 2)));
}

static void __cpuinit octeon_irq_setup_secondary_ciu(void)
{

	__get_cpu_var(octeon_irq_ciu0_en_mirror) = 0;
	__get_cpu_var(octeon_irq_ciu1_en_mirror) = 0;

	octeon_irq_init_ciu_percpu();
	octeon_irq_percpu_enable();

	/* Enable the CIU lines */
	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
	clear_c0_status(STATUSF_IP4);
}

static void __init octeon_irq_init_ciu(void)
{
	unsigned int i;
	struct irq_chip *chip;
	struct irq_chip *chip_mbox;
	struct irq_chip *chip_wd;
	struct device_node *gpio_node;
	struct device_node *ciu_node;
	struct irq_domain *ciu_domain = NULL;

	octeon_irq_init_ciu_percpu();
	octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu;

	if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) ||
	    OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) ||
	    OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X) ||
	    OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
		octeon_irq_ip2 = octeon_irq_ip2_v2;
		octeon_irq_ip3 = octeon_irq_ip3_v2;
		chip = &octeon_irq_chip_ciu_v2;
		chip_mbox = &octeon_irq_chip_ciu_mbox_v2;
		chip_wd = &octeon_irq_chip_ciu_wd_v2;
		octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio_v2;
	} else {
		octeon_irq_ip2 = octeon_irq_ip2_v1;
		octeon_irq_ip3 = octeon_irq_ip3_v1;
		chip = &octeon_irq_chip_ciu;
		chip_mbox = &octeon_irq_chip_ciu_mbox;
		chip_wd = &octeon_irq_chip_ciu_wd;
		octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio;
	}
	octeon_irq_ciu_chip = chip;
	octeon_irq_ip4 = octeon_irq_ip4_mask;

	/* MIPS internal */
	octeon_irq_init_core();
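
	/*
	 * Look up the interrupt controllers in the device tree.  The
	 * CIU node is mandatory; the GPIO node is optional.
	 */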
	gpio_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-gpio");
	if (gpio_node) {
		struct octeon_irq_gpio_domain_data *gpiod;

		gpiod = kzalloc(sizeof(*gpiod), GFP_KERNEL);
		if (gpiod) {
			/* gpio domain host_data is the base hwirq number. */
			gpiod->base_hwirq = 16;
			irq_domain_add_linear(gpio_node, 16, &octeon_irq_domain_gpio_ops, gpiod);
			of_node_put(gpio_node);
		} else
			pr_warn("Cannot allocate memory for GPIO irq_domain.\n");
	} else
		pr_warn("Cannot find device node for cavium,octeon-3860-gpio.\n");

	ciu_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-ciu");
	if (ciu_node) {
		ciu_domain = irq_domain_add_tree(ciu_node, &octeon_irq_domain_ciu_ops, NULL);
		of_node_put(ciu_node);
	} else
		panic("Cannot find device node for cavium,octeon-3860-ciu.");

	/* CIU_0 */
	for (i = 0; i < 16; i++)
		octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i + 0);

	octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX0, 0, 32, chip_mbox, handle_percpu_irq);
	octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX1, 0, 33, chip_mbox, handle_percpu_irq);

	for (i = 0; i < 4; i++)
		octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_INT0, 0, i + 36);
	for (i = 0; i < 4; i++)
		octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 0, i + 40);

	octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_RML, 0, 46);
	for (i = 0; i < 4; i++)
		octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_TIMER0, 0, i + 52);

	octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 0, 56);
	octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_BOOTDMA, 0, 63);

	/* CIU_1 */
	for (i = 0; i < 16; i++)
		octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i + 0, chip_wd, handle_level_irq);

	octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB1, 1, 17);

	/* Enable the CIU lines */
	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
	clear_c0_status(STATUSF_IP4);
}

void __init arch_init_irq(void)
{
#ifdef CONFIG_SMP
	/* Set the default affinity to the boot cpu. */
	cpumask_clear(irq_default_affinity);
	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
#endif
	octeon_irq_init_ciu();
}

asmlinkage void plat_irq_dispatch(void)
{
	unsigned long cop0_cause;
	unsigned long cop0_status;

	while (1) {
		cop0_cause = read_c0_cause();
		cop0_status = read_c0_status();
		cop0_cause &= cop0_status;
		cop0_cause &= ST0_IM;

		if (unlikely(cop0_cause & STATUSF_IP2))
			octeon_irq_ip2();
		else if (unlikely(cop0_cause & STATUSF_IP3))
			octeon_irq_ip3();
		else if (unlikely(cop0_cause & STATUSF_IP4))
			octeon_irq_ip4();
		else if (likely(cop0_cause))
			do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE);
		else
			break;
	}
}

#ifdef CONFIG_HOTPLUG_CPU

void fixup_irqs(void)
{
	irq_cpu_offline();
}

#endif /* CONFIG_HOTPLUG_CPU */