1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Copyright 2016,2017 IBM Corporation. 4 */ 5 6 #define pr_fmt(fmt) "xive: " fmt 7 8 #include <linux/types.h> 9 #include <linux/threads.h> 10 #include <linux/kernel.h> 11 #include <linux/irq.h> 12 #include <linux/debugfs.h> 13 #include <linux/smp.h> 14 #include <linux/interrupt.h> 15 #include <linux/seq_file.h> 16 #include <linux/init.h> 17 #include <linux/cpu.h> 18 #include <linux/of.h> 19 #include <linux/slab.h> 20 #include <linux/spinlock.h> 21 #include <linux/msi.h> 22 #include <linux/vmalloc.h> 23 24 #include <asm/prom.h> 25 #include <asm/io.h> 26 #include <asm/smp.h> 27 #include <asm/machdep.h> 28 #include <asm/irq.h> 29 #include <asm/errno.h> 30 #include <asm/xive.h> 31 #include <asm/xive-regs.h> 32 #include <asm/xmon.h> 33 34 #include "xive-internal.h" 35 36 #undef DEBUG_FLUSH 37 #undef DEBUG_ALL 38 39 #ifdef DEBUG_ALL 40 #define DBG_VERBOSE(fmt, ...) pr_devel("cpu %d - " fmt, \ 41 smp_processor_id(), ## __VA_ARGS__) 42 #else 43 #define DBG_VERBOSE(fmt...) do { } while(0) 44 #endif 45 46 bool __xive_enabled; 47 EXPORT_SYMBOL_GPL(__xive_enabled); 48 bool xive_cmdline_disabled; 49 50 /* We use only one priority for now */ 51 static u8 xive_irq_priority; 52 53 /* TIMA exported to KVM */ 54 void __iomem *xive_tima; 55 EXPORT_SYMBOL_GPL(xive_tima); 56 u32 xive_tima_offset; 57 58 /* Backend ops */ 59 static const struct xive_ops *xive_ops; 60 61 /* Our global interrupt domain */ 62 static struct irq_domain *xive_irq_domain; 63 64 #ifdef CONFIG_SMP 65 /* The IPIs use the same logical irq number when on the same chip */ 66 static struct xive_ipi_desc { 67 unsigned int irq; 68 char name[16]; 69 } *xive_ipis; 70 71 /* 72 * Use early_cpu_to_node() for hot-plugged CPUs 73 */ 74 static unsigned int xive_ipi_cpu_to_irq(unsigned int cpu) 75 { 76 return xive_ipis[early_cpu_to_node(cpu)].irq; 77 } 78 #endif 79 80 /* Xive state for each CPU */ 81 static DEFINE_PER_CPU(struct xive_cpu *, xive_cpu); 82 83 /* An invalid CPU target */ 84 #define XIVE_INVALID_TARGET (-1) 85 86 /* 87 * Read the next entry in a queue, return its content if it's valid 88 * or 0 if there is no new entry. 89 * 90 * The queue pointer is moved forward unless "just_peek" is set 91 */ 92 static u32 xive_read_eq(struct xive_q *q, bool just_peek) 93 { 94 u32 cur; 95 96 if (!q->qpage) 97 return 0; 98 cur = be32_to_cpup(q->qpage + q->idx); 99 100 /* Check valid bit (31) vs current toggle polarity */ 101 if ((cur >> 31) == q->toggle) 102 return 0; 103 104 /* If consuming from the queue ... */ 105 if (!just_peek) { 106 /* Next entry */ 107 q->idx = (q->idx + 1) & q->msk; 108 109 /* Wrap around: flip valid toggle */ 110 if (q->idx == 0) 111 q->toggle ^= 1; 112 } 113 /* Mask out the valid bit (31) */ 114 return cur & 0x7fffffff; 115 } 116 117 /* 118 * Scans all the queue that may have interrupts in them 119 * (based on "pending_prio") in priority order until an 120 * interrupt is found or all the queues are empty. 121 * 122 * Then updates the CPPR (Current Processor Priority 123 * Register) based on the most favored interrupt found 124 * (0xff if none) and return what was found (0 if none). 125 * 126 * If just_peek is set, return the most favored pending 127 * interrupt if any but don't update the queue pointers. 128 * 129 * Note: This function can operate generically on any number 130 * of queues (up to 8). The current implementation of the XIVE 131 * driver only uses a single queue however. 132 * 133 * Note2: This will also "flush" "the pending_count" of a queue 134 * into the "count" when that queue is observed to be empty. 135 * This is used to keep track of the amount of interrupts 136 * targetting a queue. When an interrupt is moved away from 137 * a queue, we only decrement that queue count once the queue 138 * has been observed empty to avoid races. 139 */ 140 static u32 xive_scan_interrupts(struct xive_cpu *xc, bool just_peek) 141 { 142 u32 irq = 0; 143 u8 prio = 0; 144 145 /* Find highest pending priority */ 146 while (xc->pending_prio != 0) { 147 struct xive_q *q; 148 149 prio = ffs(xc->pending_prio) - 1; 150 DBG_VERBOSE("scan_irq: trying prio %d\n", prio); 151 152 /* Try to fetch */ 153 irq = xive_read_eq(&xc->queue[prio], just_peek); 154 155 /* Found something ? That's it */ 156 if (irq) { 157 if (just_peek || irq_to_desc(irq)) 158 break; 159 /* 160 * We should never get here; if we do then we must 161 * have failed to synchronize the interrupt properly 162 * when shutting it down. 163 */ 164 pr_crit("xive: got interrupt %d without descriptor, dropping\n", 165 irq); 166 WARN_ON(1); 167 continue; 168 } 169 170 /* Clear pending bits */ 171 xc->pending_prio &= ~(1 << prio); 172 173 /* 174 * Check if the queue count needs adjusting due to 175 * interrupts being moved away. See description of 176 * xive_dec_target_count() 177 */ 178 q = &xc->queue[prio]; 179 if (atomic_read(&q->pending_count)) { 180 int p = atomic_xchg(&q->pending_count, 0); 181 if (p) { 182 WARN_ON(p > atomic_read(&q->count)); 183 atomic_sub(p, &q->count); 184 } 185 } 186 } 187 188 /* If nothing was found, set CPPR to 0xff */ 189 if (irq == 0) 190 prio = 0xff; 191 192 /* Update HW CPPR to match if necessary */ 193 if (prio != xc->cppr) { 194 DBG_VERBOSE("scan_irq: adjusting CPPR to %d\n", prio); 195 xc->cppr = prio; 196 out_8(xive_tima + xive_tima_offset + TM_CPPR, prio); 197 } 198 199 return irq; 200 } 201 202 /* 203 * This is used to perform the magic loads from an ESB 204 * described in xive-regs.h 205 */ 206 static notrace u8 xive_esb_read(struct xive_irq_data *xd, u32 offset) 207 { 208 u64 val; 209 210 if (offset == XIVE_ESB_SET_PQ_10 && xd->flags & XIVE_IRQ_FLAG_STORE_EOI) 211 offset |= XIVE_ESB_LD_ST_MO; 212 213 if ((xd->flags & XIVE_IRQ_FLAG_H_INT_ESB) && xive_ops->esb_rw) 214 val = xive_ops->esb_rw(xd->hw_irq, offset, 0, 0); 215 else 216 val = in_be64(xd->eoi_mmio + offset); 217 218 return (u8)val; 219 } 220 221 static void xive_esb_write(struct xive_irq_data *xd, u32 offset, u64 data) 222 { 223 if ((xd->flags & XIVE_IRQ_FLAG_H_INT_ESB) && xive_ops->esb_rw) 224 xive_ops->esb_rw(xd->hw_irq, offset, data, 1); 225 else 226 out_be64(xd->eoi_mmio + offset, data); 227 } 228 229 #ifdef CONFIG_XMON 230 static notrace void xive_dump_eq(const char *name, struct xive_q *q) 231 { 232 u32 i0, i1, idx; 233 234 if (!q->qpage) 235 return; 236 idx = q->idx; 237 i0 = be32_to_cpup(q->qpage + idx); 238 idx = (idx + 1) & q->msk; 239 i1 = be32_to_cpup(q->qpage + idx); 240 xmon_printf("%s idx=%d T=%d %08x %08x ...", name, 241 q->idx, q->toggle, i0, i1); 242 } 243 244 notrace void xmon_xive_do_dump(int cpu) 245 { 246 struct xive_cpu *xc = per_cpu(xive_cpu, cpu); 247 248 xmon_printf("CPU %d:", cpu); 249 if (xc) { 250 xmon_printf("pp=%02x CPPR=%02x ", xc->pending_prio, xc->cppr); 251 252 #ifdef CONFIG_SMP 253 { 254 u64 val = xive_esb_read(&xc->ipi_data, XIVE_ESB_GET); 255 256 xmon_printf("IPI=0x%08x PQ=%c%c ", xc->hw_ipi, 257 val & XIVE_ESB_VAL_P ? 'P' : '-', 258 val & XIVE_ESB_VAL_Q ? 'Q' : '-'); 259 } 260 #endif 261 xive_dump_eq("EQ", &xc->queue[xive_irq_priority]); 262 } 263 xmon_printf("\n"); 264 } 265 266 static struct irq_data *xive_get_irq_data(u32 hw_irq) 267 { 268 unsigned int irq = irq_find_mapping(xive_irq_domain, hw_irq); 269 270 return irq ? irq_get_irq_data(irq) : NULL; 271 } 272 273 int xmon_xive_get_irq_config(u32 hw_irq, struct irq_data *d) 274 { 275 int rc; 276 u32 target; 277 u8 prio; 278 u32 lirq; 279 280 rc = xive_ops->get_irq_config(hw_irq, &target, &prio, &lirq); 281 if (rc) { 282 xmon_printf("IRQ 0x%08x : no config rc=%d\n", hw_irq, rc); 283 return rc; 284 } 285 286 xmon_printf("IRQ 0x%08x : target=0x%x prio=%02x lirq=0x%x ", 287 hw_irq, target, prio, lirq); 288 289 if (!d) 290 d = xive_get_irq_data(hw_irq); 291 292 if (d) { 293 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); 294 u64 val = xive_esb_read(xd, XIVE_ESB_GET); 295 296 xmon_printf("flags=%c%c%c PQ=%c%c", 297 xd->flags & XIVE_IRQ_FLAG_STORE_EOI ? 'S' : ' ', 298 xd->flags & XIVE_IRQ_FLAG_LSI ? 'L' : ' ', 299 xd->flags & XIVE_IRQ_FLAG_H_INT_ESB ? 'H' : ' ', 300 val & XIVE_ESB_VAL_P ? 'P' : '-', 301 val & XIVE_ESB_VAL_Q ? 'Q' : '-'); 302 } 303 304 xmon_printf("\n"); 305 return 0; 306 } 307 308 void xmon_xive_get_irq_all(void) 309 { 310 unsigned int i; 311 struct irq_desc *desc; 312 313 for_each_irq_desc(i, desc) { 314 struct irq_data *d = irq_domain_get_irq_data(xive_irq_domain, i); 315 316 if (d) 317 xmon_xive_get_irq_config(irqd_to_hwirq(d), d); 318 } 319 } 320 321 #endif /* CONFIG_XMON */ 322 323 static unsigned int xive_get_irq(void) 324 { 325 struct xive_cpu *xc = __this_cpu_read(xive_cpu); 326 u32 irq; 327 328 /* 329 * This can be called either as a result of a HW interrupt or 330 * as a "replay" because EOI decided there was still something 331 * in one of the queues. 332 * 333 * First we perform an ACK cycle in order to update our mask 334 * of pending priorities. This will also have the effect of 335 * updating the CPPR to the most favored pending interrupts. 336 * 337 * In the future, if we have a way to differentiate a first 338 * entry (on HW interrupt) from a replay triggered by EOI, 339 * we could skip this on replays unless we soft-mask tells us 340 * that a new HW interrupt occurred. 341 */ 342 xive_ops->update_pending(xc); 343 344 DBG_VERBOSE("get_irq: pending=%02x\n", xc->pending_prio); 345 346 /* Scan our queue(s) for interrupts */ 347 irq = xive_scan_interrupts(xc, false); 348 349 DBG_VERBOSE("get_irq: got irq 0x%x, new pending=0x%02x\n", 350 irq, xc->pending_prio); 351 352 /* Return pending interrupt if any */ 353 if (irq == XIVE_BAD_IRQ) 354 return 0; 355 return irq; 356 } 357 358 /* 359 * After EOI'ing an interrupt, we need to re-check the queue 360 * to see if another interrupt is pending since multiple 361 * interrupts can coalesce into a single notification to the 362 * CPU. 363 * 364 * If we find that there is indeed more in there, we call 365 * force_external_irq_replay() to make Linux synthetize an 366 * external interrupt on the next call to local_irq_restore(). 367 */ 368 static void xive_do_queue_eoi(struct xive_cpu *xc) 369 { 370 if (xive_scan_interrupts(xc, true) != 0) { 371 DBG_VERBOSE("eoi: pending=0x%02x\n", xc->pending_prio); 372 force_external_irq_replay(); 373 } 374 } 375 376 /* 377 * EOI an interrupt at the source. There are several methods 378 * to do this depending on the HW version and source type 379 */ 380 static void xive_do_source_eoi(struct xive_irq_data *xd) 381 { 382 u8 eoi_val; 383 384 xd->stale_p = false; 385 386 /* If the XIVE supports the new "store EOI facility, use it */ 387 if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI) { 388 xive_esb_write(xd, XIVE_ESB_STORE_EOI, 0); 389 return; 390 } 391 392 /* 393 * For LSIs, we use the "EOI cycle" special load rather than 394 * PQ bits, as they are automatically re-triggered in HW when 395 * still pending. 396 */ 397 if (xd->flags & XIVE_IRQ_FLAG_LSI) { 398 xive_esb_read(xd, XIVE_ESB_LOAD_EOI); 399 return; 400 } 401 402 /* 403 * Otherwise, we use the special MMIO that does a clear of 404 * both P and Q and returns the old Q. This allows us to then 405 * do a re-trigger if Q was set rather than synthesizing an 406 * interrupt in software 407 */ 408 eoi_val = xive_esb_read(xd, XIVE_ESB_SET_PQ_00); 409 DBG_VERBOSE("eoi_val=%x\n", eoi_val); 410 411 /* Re-trigger if needed */ 412 if ((eoi_val & XIVE_ESB_VAL_Q) && xd->trig_mmio) 413 out_be64(xd->trig_mmio, 0); 414 } 415 416 /* irq_chip eoi callback, called with irq descriptor lock held */ 417 static void xive_irq_eoi(struct irq_data *d) 418 { 419 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); 420 struct xive_cpu *xc = __this_cpu_read(xive_cpu); 421 422 DBG_VERBOSE("eoi_irq: irq=%d [0x%lx] pending=%02x\n", 423 d->irq, irqd_to_hwirq(d), xc->pending_prio); 424 425 /* 426 * EOI the source if it hasn't been disabled and hasn't 427 * been passed-through to a KVM guest 428 */ 429 if (!irqd_irq_disabled(d) && !irqd_is_forwarded_to_vcpu(d) && 430 !(xd->flags & XIVE_IRQ_FLAG_NO_EOI)) 431 xive_do_source_eoi(xd); 432 else 433 xd->stale_p = true; 434 435 /* 436 * Clear saved_p to indicate that it's no longer occupying 437 * a queue slot on the target queue 438 */ 439 xd->saved_p = false; 440 441 /* Check for more work in the queue */ 442 xive_do_queue_eoi(xc); 443 } 444 445 /* 446 * Helper used to mask and unmask an interrupt source. 447 */ 448 static void xive_do_source_set_mask(struct xive_irq_data *xd, 449 bool mask) 450 { 451 u64 val; 452 453 /* 454 * If the interrupt had P set, it may be in a queue. 455 * 456 * We need to make sure we don't re-enable it until it 457 * has been fetched from that queue and EOId. We keep 458 * a copy of that P state and use it to restore the 459 * ESB accordingly on unmask. 460 */ 461 if (mask) { 462 val = xive_esb_read(xd, XIVE_ESB_SET_PQ_01); 463 if (!xd->stale_p && !!(val & XIVE_ESB_VAL_P)) 464 xd->saved_p = true; 465 xd->stale_p = false; 466 } else if (xd->saved_p) { 467 xive_esb_read(xd, XIVE_ESB_SET_PQ_10); 468 xd->saved_p = false; 469 } else { 470 xive_esb_read(xd, XIVE_ESB_SET_PQ_00); 471 xd->stale_p = false; 472 } 473 } 474 475 /* 476 * Try to chose "cpu" as a new interrupt target. Increments 477 * the queue accounting for that target if it's not already 478 * full. 479 */ 480 static bool xive_try_pick_target(int cpu) 481 { 482 struct xive_cpu *xc = per_cpu(xive_cpu, cpu); 483 struct xive_q *q = &xc->queue[xive_irq_priority]; 484 int max; 485 486 /* 487 * Calculate max number of interrupts in that queue. 488 * 489 * We leave a gap of 1 just in case... 490 */ 491 max = (q->msk + 1) - 1; 492 return !!atomic_add_unless(&q->count, 1, max); 493 } 494 495 /* 496 * Un-account an interrupt for a target CPU. We don't directly 497 * decrement q->count since the interrupt might still be present 498 * in the queue. 499 * 500 * Instead increment a separate counter "pending_count" which 501 * will be substracted from "count" later when that CPU observes 502 * the queue to be empty. 503 */ 504 static void xive_dec_target_count(int cpu) 505 { 506 struct xive_cpu *xc = per_cpu(xive_cpu, cpu); 507 struct xive_q *q = &xc->queue[xive_irq_priority]; 508 509 if (WARN_ON(cpu < 0 || !xc)) { 510 pr_err("%s: cpu=%d xc=%p\n", __func__, cpu, xc); 511 return; 512 } 513 514 /* 515 * We increment the "pending count" which will be used 516 * to decrement the target queue count whenever it's next 517 * processed and found empty. This ensure that we don't 518 * decrement while we still have the interrupt there 519 * occupying a slot. 520 */ 521 atomic_inc(&q->pending_count); 522 } 523 524 /* Find a tentative CPU target in a CPU mask */ 525 static int xive_find_target_in_mask(const struct cpumask *mask, 526 unsigned int fuzz) 527 { 528 int cpu, first, num, i; 529 530 /* Pick up a starting point CPU in the mask based on fuzz */ 531 num = min_t(int, cpumask_weight(mask), nr_cpu_ids); 532 first = fuzz % num; 533 534 /* Locate it */ 535 cpu = cpumask_first(mask); 536 for (i = 0; i < first && cpu < nr_cpu_ids; i++) 537 cpu = cpumask_next(cpu, mask); 538 539 /* Sanity check */ 540 if (WARN_ON(cpu >= nr_cpu_ids)) 541 cpu = cpumask_first(cpu_online_mask); 542 543 /* Remember first one to handle wrap-around */ 544 first = cpu; 545 546 /* 547 * Now go through the entire mask until we find a valid 548 * target. 549 */ 550 do { 551 /* 552 * We re-check online as the fallback case passes us 553 * an untested affinity mask 554 */ 555 if (cpu_online(cpu) && xive_try_pick_target(cpu)) 556 return cpu; 557 cpu = cpumask_next(cpu, mask); 558 /* Wrap around */ 559 if (cpu >= nr_cpu_ids) 560 cpu = cpumask_first(mask); 561 } while (cpu != first); 562 563 return -1; 564 } 565 566 /* 567 * Pick a target CPU for an interrupt. This is done at 568 * startup or if the affinity is changed in a way that 569 * invalidates the current target. 570 */ 571 static int xive_pick_irq_target(struct irq_data *d, 572 const struct cpumask *affinity) 573 { 574 static unsigned int fuzz; 575 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); 576 cpumask_var_t mask; 577 int cpu = -1; 578 579 /* 580 * If we have chip IDs, first we try to build a mask of 581 * CPUs matching the CPU and find a target in there 582 */ 583 if (xd->src_chip != XIVE_INVALID_CHIP_ID && 584 zalloc_cpumask_var(&mask, GFP_ATOMIC)) { 585 /* Build a mask of matching chip IDs */ 586 for_each_cpu_and(cpu, affinity, cpu_online_mask) { 587 struct xive_cpu *xc = per_cpu(xive_cpu, cpu); 588 if (xc->chip_id == xd->src_chip) 589 cpumask_set_cpu(cpu, mask); 590 } 591 /* Try to find a target */ 592 if (cpumask_empty(mask)) 593 cpu = -1; 594 else 595 cpu = xive_find_target_in_mask(mask, fuzz++); 596 free_cpumask_var(mask); 597 if (cpu >= 0) 598 return cpu; 599 fuzz--; 600 } 601 602 /* No chip IDs, fallback to using the affinity mask */ 603 return xive_find_target_in_mask(affinity, fuzz++); 604 } 605 606 static unsigned int xive_irq_startup(struct irq_data *d) 607 { 608 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); 609 unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d); 610 int target, rc; 611 612 xd->saved_p = false; 613 xd->stale_p = false; 614 pr_devel("xive_irq_startup: irq %d [0x%x] data @%p\n", 615 d->irq, hw_irq, d); 616 617 /* Pick a target */ 618 target = xive_pick_irq_target(d, irq_data_get_affinity_mask(d)); 619 if (target == XIVE_INVALID_TARGET) { 620 /* Try again breaking affinity */ 621 target = xive_pick_irq_target(d, cpu_online_mask); 622 if (target == XIVE_INVALID_TARGET) 623 return -ENXIO; 624 pr_warn("irq %d started with broken affinity\n", d->irq); 625 } 626 627 /* Sanity check */ 628 if (WARN_ON(target == XIVE_INVALID_TARGET || 629 target >= nr_cpu_ids)) 630 target = smp_processor_id(); 631 632 xd->target = target; 633 634 /* 635 * Configure the logical number to be the Linux IRQ number 636 * and set the target queue 637 */ 638 rc = xive_ops->configure_irq(hw_irq, 639 get_hard_smp_processor_id(target), 640 xive_irq_priority, d->irq); 641 if (rc) 642 return rc; 643 644 /* Unmask the ESB */ 645 xive_do_source_set_mask(xd, false); 646 647 return 0; 648 } 649 650 /* called with irq descriptor lock held */ 651 static void xive_irq_shutdown(struct irq_data *d) 652 { 653 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); 654 unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d); 655 656 pr_devel("xive_irq_shutdown: irq %d [0x%x] data @%p\n", 657 d->irq, hw_irq, d); 658 659 if (WARN_ON(xd->target == XIVE_INVALID_TARGET)) 660 return; 661 662 /* Mask the interrupt at the source */ 663 xive_do_source_set_mask(xd, true); 664 665 /* 666 * Mask the interrupt in HW in the IVT/EAS and set the number 667 * to be the "bad" IRQ number 668 */ 669 xive_ops->configure_irq(hw_irq, 670 get_hard_smp_processor_id(xd->target), 671 0xff, XIVE_BAD_IRQ); 672 673 xive_dec_target_count(xd->target); 674 xd->target = XIVE_INVALID_TARGET; 675 } 676 677 static void xive_irq_unmask(struct irq_data *d) 678 { 679 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); 680 681 pr_devel("xive_irq_unmask: irq %d data @%p\n", d->irq, xd); 682 683 xive_do_source_set_mask(xd, false); 684 } 685 686 static void xive_irq_mask(struct irq_data *d) 687 { 688 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); 689 690 pr_devel("xive_irq_mask: irq %d data @%p\n", d->irq, xd); 691 692 xive_do_source_set_mask(xd, true); 693 } 694 695 static int xive_irq_set_affinity(struct irq_data *d, 696 const struct cpumask *cpumask, 697 bool force) 698 { 699 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); 700 unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d); 701 u32 target, old_target; 702 int rc = 0; 703 704 pr_debug("%s: irq %d/%x\n", __func__, d->irq, hw_irq); 705 706 /* Is this valid ? */ 707 if (cpumask_any_and(cpumask, cpu_online_mask) >= nr_cpu_ids) 708 return -EINVAL; 709 710 /* 711 * If existing target is already in the new mask, and is 712 * online then do nothing. 713 */ 714 if (xd->target != XIVE_INVALID_TARGET && 715 cpu_online(xd->target) && 716 cpumask_test_cpu(xd->target, cpumask)) 717 return IRQ_SET_MASK_OK; 718 719 /* Pick a new target */ 720 target = xive_pick_irq_target(d, cpumask); 721 722 /* No target found */ 723 if (target == XIVE_INVALID_TARGET) 724 return -ENXIO; 725 726 /* Sanity check */ 727 if (WARN_ON(target >= nr_cpu_ids)) 728 target = smp_processor_id(); 729 730 old_target = xd->target; 731 732 /* 733 * Only configure the irq if it's not currently passed-through to 734 * a KVM guest 735 */ 736 if (!irqd_is_forwarded_to_vcpu(d)) 737 rc = xive_ops->configure_irq(hw_irq, 738 get_hard_smp_processor_id(target), 739 xive_irq_priority, d->irq); 740 if (rc < 0) { 741 pr_err("Error %d reconfiguring irq %d\n", rc, d->irq); 742 return rc; 743 } 744 745 pr_debug(" target: 0x%x\n", target); 746 xd->target = target; 747 748 /* Give up previous target */ 749 if (old_target != XIVE_INVALID_TARGET) 750 xive_dec_target_count(old_target); 751 752 return IRQ_SET_MASK_OK; 753 } 754 755 static int xive_irq_set_type(struct irq_data *d, unsigned int flow_type) 756 { 757 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); 758 759 /* 760 * We only support these. This has really no effect other than setting 761 * the corresponding descriptor bits mind you but those will in turn 762 * affect the resend function when re-enabling an edge interrupt. 763 * 764 * Set set the default to edge as explained in map(). 765 */ 766 if (flow_type == IRQ_TYPE_DEFAULT || flow_type == IRQ_TYPE_NONE) 767 flow_type = IRQ_TYPE_EDGE_RISING; 768 769 if (flow_type != IRQ_TYPE_EDGE_RISING && 770 flow_type != IRQ_TYPE_LEVEL_LOW) 771 return -EINVAL; 772 773 irqd_set_trigger_type(d, flow_type); 774 775 /* 776 * Double check it matches what the FW thinks 777 * 778 * NOTE: We don't know yet if the PAPR interface will provide 779 * the LSI vs MSI information apart from the device-tree so 780 * this check might have to move into an optional backend call 781 * that is specific to the native backend 782 */ 783 if ((flow_type == IRQ_TYPE_LEVEL_LOW) != 784 !!(xd->flags & XIVE_IRQ_FLAG_LSI)) { 785 pr_warn("Interrupt %d (HW 0x%x) type mismatch, Linux says %s, FW says %s\n", 786 d->irq, (u32)irqd_to_hwirq(d), 787 (flow_type == IRQ_TYPE_LEVEL_LOW) ? "Level" : "Edge", 788 (xd->flags & XIVE_IRQ_FLAG_LSI) ? "Level" : "Edge"); 789 } 790 791 return IRQ_SET_MASK_OK_NOCOPY; 792 } 793 794 static int xive_irq_retrigger(struct irq_data *d) 795 { 796 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); 797 798 /* This should be only for MSIs */ 799 if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI)) 800 return 0; 801 802 /* 803 * To perform a retrigger, we first set the PQ bits to 804 * 11, then perform an EOI. 805 */ 806 xive_esb_read(xd, XIVE_ESB_SET_PQ_11); 807 xive_do_source_eoi(xd); 808 809 return 1; 810 } 811 812 /* 813 * Caller holds the irq descriptor lock, so this won't be called 814 * concurrently with xive_get_irqchip_state on the same interrupt. 815 */ 816 static int xive_irq_set_vcpu_affinity(struct irq_data *d, void *state) 817 { 818 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); 819 unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d); 820 int rc; 821 u8 pq; 822 823 /* 824 * This is called by KVM with state non-NULL for enabling 825 * pass-through or NULL for disabling it 826 */ 827 if (state) { 828 irqd_set_forwarded_to_vcpu(d); 829 830 /* Set it to PQ=10 state to prevent further sends */ 831 pq = xive_esb_read(xd, XIVE_ESB_SET_PQ_10); 832 if (!xd->stale_p) { 833 xd->saved_p = !!(pq & XIVE_ESB_VAL_P); 834 xd->stale_p = !xd->saved_p; 835 } 836 837 /* No target ? nothing to do */ 838 if (xd->target == XIVE_INVALID_TARGET) { 839 /* 840 * An untargetted interrupt should have been 841 * also masked at the source 842 */ 843 WARN_ON(xd->saved_p); 844 845 return 0; 846 } 847 848 /* 849 * If P was set, adjust state to PQ=11 to indicate 850 * that a resend is needed for the interrupt to reach 851 * the guest. Also remember the value of P. 852 * 853 * This also tells us that it's in flight to a host queue 854 * or has already been fetched but hasn't been EOIed yet 855 * by the host. This it's potentially using up a host 856 * queue slot. This is important to know because as long 857 * as this is the case, we must not hard-unmask it when 858 * "returning" that interrupt to the host. 859 * 860 * This saved_p is cleared by the host EOI, when we know 861 * for sure the queue slot is no longer in use. 862 */ 863 if (xd->saved_p) { 864 xive_esb_read(xd, XIVE_ESB_SET_PQ_11); 865 866 /* 867 * Sync the XIVE source HW to ensure the interrupt 868 * has gone through the EAS before we change its 869 * target to the guest. That should guarantee us 870 * that we *will* eventually get an EOI for it on 871 * the host. Otherwise there would be a small window 872 * for P to be seen here but the interrupt going 873 * to the guest queue. 874 */ 875 if (xive_ops->sync_source) 876 xive_ops->sync_source(hw_irq); 877 } 878 } else { 879 irqd_clr_forwarded_to_vcpu(d); 880 881 /* No host target ? hard mask and return */ 882 if (xd->target == XIVE_INVALID_TARGET) { 883 xive_do_source_set_mask(xd, true); 884 return 0; 885 } 886 887 /* 888 * Sync the XIVE source HW to ensure the interrupt 889 * has gone through the EAS before we change its 890 * target to the host. 891 */ 892 if (xive_ops->sync_source) 893 xive_ops->sync_source(hw_irq); 894 895 /* 896 * By convention we are called with the interrupt in 897 * a PQ=10 or PQ=11 state, ie, it won't fire and will 898 * have latched in Q whether there's a pending HW 899 * interrupt or not. 900 * 901 * First reconfigure the target. 902 */ 903 rc = xive_ops->configure_irq(hw_irq, 904 get_hard_smp_processor_id(xd->target), 905 xive_irq_priority, d->irq); 906 if (rc) 907 return rc; 908 909 /* 910 * Then if saved_p is not set, effectively re-enable the 911 * interrupt with an EOI. If it is set, we know there is 912 * still a message in a host queue somewhere that will be 913 * EOId eventually. 914 * 915 * Note: We don't check irqd_irq_disabled(). Effectively, 916 * we *will* let the irq get through even if masked if the 917 * HW is still firing it in order to deal with the whole 918 * saved_p business properly. If the interrupt triggers 919 * while masked, the generic code will re-mask it anyway. 920 */ 921 if (!xd->saved_p) 922 xive_do_source_eoi(xd); 923 924 } 925 return 0; 926 } 927 928 /* Called with irq descriptor lock held. */ 929 static int xive_get_irqchip_state(struct irq_data *data, 930 enum irqchip_irq_state which, bool *state) 931 { 932 struct xive_irq_data *xd = irq_data_get_irq_handler_data(data); 933 u8 pq; 934 935 switch (which) { 936 case IRQCHIP_STATE_ACTIVE: 937 pq = xive_esb_read(xd, XIVE_ESB_GET); 938 939 /* 940 * The esb value being all 1's means we couldn't get 941 * the PQ state of the interrupt through mmio. It may 942 * happen, for example when querying a PHB interrupt 943 * while the PHB is in an error state. We consider the 944 * interrupt to be inactive in that case. 945 */ 946 *state = (pq != XIVE_ESB_INVALID) && !xd->stale_p && 947 (xd->saved_p || !!(pq & XIVE_ESB_VAL_P)); 948 return 0; 949 default: 950 return -EINVAL; 951 } 952 } 953 954 static struct irq_chip xive_irq_chip = { 955 .name = "XIVE-IRQ", 956 .irq_startup = xive_irq_startup, 957 .irq_shutdown = xive_irq_shutdown, 958 .irq_eoi = xive_irq_eoi, 959 .irq_mask = xive_irq_mask, 960 .irq_unmask = xive_irq_unmask, 961 .irq_set_affinity = xive_irq_set_affinity, 962 .irq_set_type = xive_irq_set_type, 963 .irq_retrigger = xive_irq_retrigger, 964 .irq_set_vcpu_affinity = xive_irq_set_vcpu_affinity, 965 .irq_get_irqchip_state = xive_get_irqchip_state, 966 }; 967 968 bool is_xive_irq(struct irq_chip *chip) 969 { 970 return chip == &xive_irq_chip; 971 } 972 EXPORT_SYMBOL_GPL(is_xive_irq); 973 974 void xive_cleanup_irq_data(struct xive_irq_data *xd) 975 { 976 pr_debug("%s for HW %x\n", __func__, xd->hw_irq); 977 978 if (xd->eoi_mmio) { 979 iounmap(xd->eoi_mmio); 980 if (xd->eoi_mmio == xd->trig_mmio) 981 xd->trig_mmio = NULL; 982 xd->eoi_mmio = NULL; 983 } 984 if (xd->trig_mmio) { 985 iounmap(xd->trig_mmio); 986 xd->trig_mmio = NULL; 987 } 988 } 989 EXPORT_SYMBOL_GPL(xive_cleanup_irq_data); 990 991 static int xive_irq_alloc_data(unsigned int virq, irq_hw_number_t hw) 992 { 993 struct xive_irq_data *xd; 994 int rc; 995 996 xd = kzalloc(sizeof(struct xive_irq_data), GFP_KERNEL); 997 if (!xd) 998 return -ENOMEM; 999 rc = xive_ops->populate_irq_data(hw, xd); 1000 if (rc) { 1001 kfree(xd); 1002 return rc; 1003 } 1004 xd->target = XIVE_INVALID_TARGET; 1005 irq_set_handler_data(virq, xd); 1006 1007 /* 1008 * Turn OFF by default the interrupt being mapped. A side 1009 * effect of this check is the mapping the ESB page of the 1010 * interrupt in the Linux address space. This prevents page 1011 * fault issues in the crash handler which masks all 1012 * interrupts. 1013 */ 1014 xive_esb_read(xd, XIVE_ESB_SET_PQ_01); 1015 1016 return 0; 1017 } 1018 1019 void xive_irq_free_data(unsigned int virq) 1020 { 1021 struct xive_irq_data *xd = irq_get_handler_data(virq); 1022 1023 if (!xd) 1024 return; 1025 irq_set_handler_data(virq, NULL); 1026 xive_cleanup_irq_data(xd); 1027 kfree(xd); 1028 } 1029 EXPORT_SYMBOL_GPL(xive_irq_free_data); 1030 1031 #ifdef CONFIG_SMP 1032 1033 static void xive_cause_ipi(int cpu) 1034 { 1035 struct xive_cpu *xc; 1036 struct xive_irq_data *xd; 1037 1038 xc = per_cpu(xive_cpu, cpu); 1039 1040 DBG_VERBOSE("IPI CPU %d -> %d (HW IRQ 0x%x)\n", 1041 smp_processor_id(), cpu, xc->hw_ipi); 1042 1043 xd = &xc->ipi_data; 1044 if (WARN_ON(!xd->trig_mmio)) 1045 return; 1046 out_be64(xd->trig_mmio, 0); 1047 } 1048 1049 static irqreturn_t xive_muxed_ipi_action(int irq, void *dev_id) 1050 { 1051 return smp_ipi_demux(); 1052 } 1053 1054 static void xive_ipi_eoi(struct irq_data *d) 1055 { 1056 struct xive_cpu *xc = __this_cpu_read(xive_cpu); 1057 1058 /* Handle possible race with unplug and drop stale IPIs */ 1059 if (!xc) 1060 return; 1061 1062 DBG_VERBOSE("IPI eoi: irq=%d [0x%lx] (HW IRQ 0x%x) pending=%02x\n", 1063 d->irq, irqd_to_hwirq(d), xc->hw_ipi, xc->pending_prio); 1064 1065 xive_do_source_eoi(&xc->ipi_data); 1066 xive_do_queue_eoi(xc); 1067 } 1068 1069 static void xive_ipi_do_nothing(struct irq_data *d) 1070 { 1071 /* 1072 * Nothing to do, we never mask/unmask IPIs, but the callback 1073 * has to exist for the struct irq_chip. 1074 */ 1075 } 1076 1077 static struct irq_chip xive_ipi_chip = { 1078 .name = "XIVE-IPI", 1079 .irq_eoi = xive_ipi_eoi, 1080 .irq_mask = xive_ipi_do_nothing, 1081 .irq_unmask = xive_ipi_do_nothing, 1082 }; 1083 1084 /* 1085 * IPIs are marked per-cpu. We use separate HW interrupts under the 1086 * hood but associated with the same "linux" interrupt 1087 */ 1088 struct xive_ipi_alloc_info { 1089 irq_hw_number_t hwirq; 1090 }; 1091 1092 static int xive_ipi_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, 1093 unsigned int nr_irqs, void *arg) 1094 { 1095 struct xive_ipi_alloc_info *info = arg; 1096 int i; 1097 1098 for (i = 0; i < nr_irqs; i++) { 1099 irq_domain_set_info(domain, virq + i, info->hwirq + i, &xive_ipi_chip, 1100 domain->host_data, handle_percpu_irq, 1101 NULL, NULL); 1102 } 1103 return 0; 1104 } 1105 1106 static const struct irq_domain_ops xive_ipi_irq_domain_ops = { 1107 .alloc = xive_ipi_irq_domain_alloc, 1108 }; 1109 1110 static int __init xive_request_ipi(void) 1111 { 1112 struct fwnode_handle *fwnode; 1113 struct irq_domain *ipi_domain; 1114 unsigned int node; 1115 int ret = -ENOMEM; 1116 1117 fwnode = irq_domain_alloc_named_fwnode("XIVE-IPI"); 1118 if (!fwnode) 1119 goto out; 1120 1121 ipi_domain = irq_domain_create_linear(fwnode, nr_node_ids, 1122 &xive_ipi_irq_domain_ops, NULL); 1123 if (!ipi_domain) 1124 goto out_free_fwnode; 1125 1126 xive_ipis = kcalloc(nr_node_ids, sizeof(*xive_ipis), GFP_KERNEL | __GFP_NOFAIL); 1127 if (!xive_ipis) 1128 goto out_free_domain; 1129 1130 for_each_node(node) { 1131 struct xive_ipi_desc *xid = &xive_ipis[node]; 1132 struct xive_ipi_alloc_info info = { node }; 1133 1134 /* Skip nodes without CPUs */ 1135 if (cpumask_empty(cpumask_of_node(node))) 1136 continue; 1137 1138 /* 1139 * Map one IPI interrupt per node for all cpus of that node. 1140 * Since the HW interrupt number doesn't have any meaning, 1141 * simply use the node number. 1142 */ 1143 ret = irq_domain_alloc_irqs(ipi_domain, 1, node, &info); 1144 if (ret < 0) 1145 goto out_free_xive_ipis; 1146 xid->irq = ret; 1147 1148 snprintf(xid->name, sizeof(xid->name), "IPI-%d", node); 1149 1150 ret = request_irq(xid->irq, xive_muxed_ipi_action, 1151 IRQF_NO_DEBUG | IRQF_PERCPU | IRQF_NO_THREAD, 1152 xid->name, NULL); 1153 1154 WARN(ret < 0, "Failed to request IPI %d: %d\n", xid->irq, ret); 1155 } 1156 1157 return ret; 1158 1159 out_free_xive_ipis: 1160 kfree(xive_ipis); 1161 out_free_domain: 1162 irq_domain_remove(ipi_domain); 1163 out_free_fwnode: 1164 irq_domain_free_fwnode(fwnode); 1165 out: 1166 return ret; 1167 } 1168 1169 static int xive_setup_cpu_ipi(unsigned int cpu) 1170 { 1171 unsigned int xive_ipi_irq = xive_ipi_cpu_to_irq(cpu); 1172 struct xive_cpu *xc; 1173 int rc; 1174 1175 pr_debug("Setting up IPI for CPU %d\n", cpu); 1176 1177 xc = per_cpu(xive_cpu, cpu); 1178 1179 /* Check if we are already setup */ 1180 if (xc->hw_ipi != XIVE_BAD_IRQ) 1181 return 0; 1182 1183 /* Grab an IPI from the backend, this will populate xc->hw_ipi */ 1184 if (xive_ops->get_ipi(cpu, xc)) 1185 return -EIO; 1186 1187 /* 1188 * Populate the IRQ data in the xive_cpu structure and 1189 * configure the HW / enable the IPIs. 1190 */ 1191 rc = xive_ops->populate_irq_data(xc->hw_ipi, &xc->ipi_data); 1192 if (rc) { 1193 pr_err("Failed to populate IPI data on CPU %d\n", cpu); 1194 return -EIO; 1195 } 1196 rc = xive_ops->configure_irq(xc->hw_ipi, 1197 get_hard_smp_processor_id(cpu), 1198 xive_irq_priority, xive_ipi_irq); 1199 if (rc) { 1200 pr_err("Failed to map IPI CPU %d\n", cpu); 1201 return -EIO; 1202 } 1203 pr_devel("CPU %d HW IPI %x, virq %d, trig_mmio=%p\n", cpu, 1204 xc->hw_ipi, xive_ipi_irq, xc->ipi_data.trig_mmio); 1205 1206 /* Unmask it */ 1207 xive_do_source_set_mask(&xc->ipi_data, false); 1208 1209 return 0; 1210 } 1211 1212 static void xive_cleanup_cpu_ipi(unsigned int cpu, struct xive_cpu *xc) 1213 { 1214 unsigned int xive_ipi_irq = xive_ipi_cpu_to_irq(cpu); 1215 1216 /* Disable the IPI and free the IRQ data */ 1217 1218 /* Already cleaned up ? */ 1219 if (xc->hw_ipi == XIVE_BAD_IRQ) 1220 return; 1221 1222 /* Mask the IPI */ 1223 xive_do_source_set_mask(&xc->ipi_data, true); 1224 1225 /* 1226 * Note: We don't call xive_cleanup_irq_data() to free 1227 * the mappings as this is called from an IPI on kexec 1228 * which is not a safe environment to call iounmap() 1229 */ 1230 1231 /* Deconfigure/mask in the backend */ 1232 xive_ops->configure_irq(xc->hw_ipi, hard_smp_processor_id(), 1233 0xff, xive_ipi_irq); 1234 1235 /* Free the IPIs in the backend */ 1236 xive_ops->put_ipi(cpu, xc); 1237 } 1238 1239 void __init xive_smp_probe(void) 1240 { 1241 smp_ops->cause_ipi = xive_cause_ipi; 1242 1243 /* Register the IPI */ 1244 xive_request_ipi(); 1245 1246 /* Allocate and setup IPI for the boot CPU */ 1247 xive_setup_cpu_ipi(smp_processor_id()); 1248 } 1249 1250 #endif /* CONFIG_SMP */ 1251 1252 static int xive_irq_domain_map(struct irq_domain *h, unsigned int virq, 1253 irq_hw_number_t hw) 1254 { 1255 int rc; 1256 1257 /* 1258 * Mark interrupts as edge sensitive by default so that resend 1259 * actually works. Will fix that up below if needed. 1260 */ 1261 irq_clear_status_flags(virq, IRQ_LEVEL); 1262 1263 rc = xive_irq_alloc_data(virq, hw); 1264 if (rc) 1265 return rc; 1266 1267 irq_set_chip_and_handler(virq, &xive_irq_chip, handle_fasteoi_irq); 1268 1269 return 0; 1270 } 1271 1272 static void xive_irq_domain_unmap(struct irq_domain *d, unsigned int virq) 1273 { 1274 xive_irq_free_data(virq); 1275 } 1276 1277 static int xive_irq_domain_xlate(struct irq_domain *h, struct device_node *ct, 1278 const u32 *intspec, unsigned int intsize, 1279 irq_hw_number_t *out_hwirq, unsigned int *out_flags) 1280 1281 { 1282 *out_hwirq = intspec[0]; 1283 1284 /* 1285 * If intsize is at least 2, we look for the type in the second cell, 1286 * we assume the LSB indicates a level interrupt. 1287 */ 1288 if (intsize > 1) { 1289 if (intspec[1] & 1) 1290 *out_flags = IRQ_TYPE_LEVEL_LOW; 1291 else 1292 *out_flags = IRQ_TYPE_EDGE_RISING; 1293 } else 1294 *out_flags = IRQ_TYPE_LEVEL_LOW; 1295 1296 return 0; 1297 } 1298 1299 static int xive_irq_domain_match(struct irq_domain *h, struct device_node *node, 1300 enum irq_domain_bus_token bus_token) 1301 { 1302 return xive_ops->match(node); 1303 } 1304 1305 #ifdef CONFIG_GENERIC_IRQ_DEBUGFS 1306 static const char * const esb_names[] = { "RESET", "OFF", "PENDING", "QUEUED" }; 1307 1308 static const struct { 1309 u64 mask; 1310 char *name; 1311 } xive_irq_flags[] = { 1312 { XIVE_IRQ_FLAG_STORE_EOI, "STORE_EOI" }, 1313 { XIVE_IRQ_FLAG_LSI, "LSI" }, 1314 { XIVE_IRQ_FLAG_H_INT_ESB, "H_INT_ESB" }, 1315 { XIVE_IRQ_FLAG_NO_EOI, "NO_EOI" }, 1316 }; 1317 1318 static void xive_irq_domain_debug_show(struct seq_file *m, struct irq_domain *d, 1319 struct irq_data *irqd, int ind) 1320 { 1321 struct xive_irq_data *xd; 1322 u64 val; 1323 int i; 1324 1325 /* No IRQ domain level information. To be done */ 1326 if (!irqd) 1327 return; 1328 1329 if (!is_xive_irq(irq_data_get_irq_chip(irqd))) 1330 return; 1331 1332 seq_printf(m, "%*sXIVE:\n", ind, ""); 1333 ind++; 1334 1335 xd = irq_data_get_irq_handler_data(irqd); 1336 if (!xd) { 1337 seq_printf(m, "%*snot assigned\n", ind, ""); 1338 return; 1339 } 1340 1341 val = xive_esb_read(xd, XIVE_ESB_GET); 1342 seq_printf(m, "%*sESB: %s\n", ind, "", esb_names[val & 0x3]); 1343 seq_printf(m, "%*sPstate: %s %s\n", ind, "", xd->stale_p ? "stale" : "", 1344 xd->saved_p ? "saved" : ""); 1345 seq_printf(m, "%*sTarget: %d\n", ind, "", xd->target); 1346 seq_printf(m, "%*sChip: %d\n", ind, "", xd->src_chip); 1347 seq_printf(m, "%*sTrigger: 0x%016llx\n", ind, "", xd->trig_page); 1348 seq_printf(m, "%*sEOI: 0x%016llx\n", ind, "", xd->eoi_page); 1349 seq_printf(m, "%*sFlags: 0x%llx\n", ind, "", xd->flags); 1350 for (i = 0; i < ARRAY_SIZE(xive_irq_flags); i++) { 1351 if (xd->flags & xive_irq_flags[i].mask) 1352 seq_printf(m, "%*s%s\n", ind + 12, "", xive_irq_flags[i].name); 1353 } 1354 } 1355 #endif 1356 1357 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY 1358 static int xive_irq_domain_translate(struct irq_domain *d, 1359 struct irq_fwspec *fwspec, 1360 unsigned long *hwirq, 1361 unsigned int *type) 1362 { 1363 return xive_irq_domain_xlate(d, to_of_node(fwspec->fwnode), 1364 fwspec->param, fwspec->param_count, 1365 hwirq, type); 1366 } 1367 1368 static int xive_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, 1369 unsigned int nr_irqs, void *arg) 1370 { 1371 struct irq_fwspec *fwspec = arg; 1372 irq_hw_number_t hwirq; 1373 unsigned int type = IRQ_TYPE_NONE; 1374 int i, rc; 1375 1376 rc = xive_irq_domain_translate(domain, fwspec, &hwirq, &type); 1377 if (rc) 1378 return rc; 1379 1380 pr_debug("%s %d/%lx #%d\n", __func__, virq, hwirq, nr_irqs); 1381 1382 for (i = 0; i < nr_irqs; i++) { 1383 /* TODO: call xive_irq_domain_map() */ 1384 1385 /* 1386 * Mark interrupts as edge sensitive by default so that resend 1387 * actually works. Will fix that up below if needed. 1388 */ 1389 irq_clear_status_flags(virq, IRQ_LEVEL); 1390 1391 /* allocates and sets handler data */ 1392 rc = xive_irq_alloc_data(virq + i, hwirq + i); 1393 if (rc) 1394 return rc; 1395 1396 irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i, 1397 &xive_irq_chip, domain->host_data); 1398 irq_set_handler(virq + i, handle_fasteoi_irq); 1399 } 1400 1401 return 0; 1402 } 1403 1404 static void xive_irq_domain_free(struct irq_domain *domain, 1405 unsigned int virq, unsigned int nr_irqs) 1406 { 1407 int i; 1408 1409 pr_debug("%s %d #%d\n", __func__, virq, nr_irqs); 1410 1411 for (i = 0; i < nr_irqs; i++) 1412 xive_irq_free_data(virq + i); 1413 } 1414 #endif 1415 1416 static const struct irq_domain_ops xive_irq_domain_ops = { 1417 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY 1418 .alloc = xive_irq_domain_alloc, 1419 .free = xive_irq_domain_free, 1420 .translate = xive_irq_domain_translate, 1421 #endif 1422 .match = xive_irq_domain_match, 1423 .map = xive_irq_domain_map, 1424 .unmap = xive_irq_domain_unmap, 1425 .xlate = xive_irq_domain_xlate, 1426 #ifdef CONFIG_GENERIC_IRQ_DEBUGFS 1427 .debug_show = xive_irq_domain_debug_show, 1428 #endif 1429 }; 1430 1431 static void __init xive_init_host(struct device_node *np) 1432 { 1433 xive_irq_domain = irq_domain_add_nomap(np, XIVE_MAX_IRQ, 1434 &xive_irq_domain_ops, NULL); 1435 if (WARN_ON(xive_irq_domain == NULL)) 1436 return; 1437 irq_set_default_host(xive_irq_domain); 1438 } 1439 1440 static void xive_cleanup_cpu_queues(unsigned int cpu, struct xive_cpu *xc) 1441 { 1442 if (xc->queue[xive_irq_priority].qpage) 1443 xive_ops->cleanup_queue(cpu, xc, xive_irq_priority); 1444 } 1445 1446 static int xive_setup_cpu_queues(unsigned int cpu, struct xive_cpu *xc) 1447 { 1448 int rc = 0; 1449 1450 /* We setup 1 queues for now with a 64k page */ 1451 if (!xc->queue[xive_irq_priority].qpage) 1452 rc = xive_ops->setup_queue(cpu, xc, xive_irq_priority); 1453 1454 return rc; 1455 } 1456 1457 static int xive_prepare_cpu(unsigned int cpu) 1458 { 1459 struct xive_cpu *xc; 1460 1461 xc = per_cpu(xive_cpu, cpu); 1462 if (!xc) { 1463 xc = kzalloc_node(sizeof(struct xive_cpu), 1464 GFP_KERNEL, cpu_to_node(cpu)); 1465 if (!xc) 1466 return -ENOMEM; 1467 xc->hw_ipi = XIVE_BAD_IRQ; 1468 xc->chip_id = XIVE_INVALID_CHIP_ID; 1469 if (xive_ops->prepare_cpu) 1470 xive_ops->prepare_cpu(cpu, xc); 1471 1472 per_cpu(xive_cpu, cpu) = xc; 1473 } 1474 1475 /* Setup EQs if not already */ 1476 return xive_setup_cpu_queues(cpu, xc); 1477 } 1478 1479 static void xive_setup_cpu(void) 1480 { 1481 struct xive_cpu *xc = __this_cpu_read(xive_cpu); 1482 1483 /* The backend might have additional things to do */ 1484 if (xive_ops->setup_cpu) 1485 xive_ops->setup_cpu(smp_processor_id(), xc); 1486 1487 /* Set CPPR to 0xff to enable flow of interrupts */ 1488 xc->cppr = 0xff; 1489 out_8(xive_tima + xive_tima_offset + TM_CPPR, 0xff); 1490 } 1491 1492 #ifdef CONFIG_SMP 1493 void xive_smp_setup_cpu(void) 1494 { 1495 pr_devel("SMP setup CPU %d\n", smp_processor_id()); 1496 1497 /* This will have already been done on the boot CPU */ 1498 if (smp_processor_id() != boot_cpuid) 1499 xive_setup_cpu(); 1500 1501 } 1502 1503 int xive_smp_prepare_cpu(unsigned int cpu) 1504 { 1505 int rc; 1506 1507 /* Allocate per-CPU data and queues */ 1508 rc = xive_prepare_cpu(cpu); 1509 if (rc) 1510 return rc; 1511 1512 /* Allocate and setup IPI for the new CPU */ 1513 return xive_setup_cpu_ipi(cpu); 1514 } 1515 1516 #ifdef CONFIG_HOTPLUG_CPU 1517 static void xive_flush_cpu_queue(unsigned int cpu, struct xive_cpu *xc) 1518 { 1519 u32 irq; 1520 1521 /* We assume local irqs are disabled */ 1522 WARN_ON(!irqs_disabled()); 1523 1524 /* Check what's already in the CPU queue */ 1525 while ((irq = xive_scan_interrupts(xc, false)) != 0) { 1526 /* 1527 * We need to re-route that interrupt to its new destination. 1528 * First get and lock the descriptor 1529 */ 1530 struct irq_desc *desc = irq_to_desc(irq); 1531 struct irq_data *d = irq_desc_get_irq_data(desc); 1532 struct xive_irq_data *xd; 1533 1534 /* 1535 * Ignore anything that isn't a XIVE irq and ignore 1536 * IPIs, so can just be dropped. 1537 */ 1538 if (d->domain != xive_irq_domain) 1539 continue; 1540 1541 /* 1542 * The IRQ should have already been re-routed, it's just a 1543 * stale in the old queue, so re-trigger it in order to make 1544 * it reach is new destination. 1545 */ 1546 #ifdef DEBUG_FLUSH 1547 pr_info("CPU %d: Got irq %d while offline, re-sending...\n", 1548 cpu, irq); 1549 #endif 1550 raw_spin_lock(&desc->lock); 1551 xd = irq_desc_get_handler_data(desc); 1552 1553 /* 1554 * Clear saved_p to indicate that it's no longer pending 1555 */ 1556 xd->saved_p = false; 1557 1558 /* 1559 * For LSIs, we EOI, this will cause a resend if it's 1560 * still asserted. Otherwise do an MSI retrigger. 1561 */ 1562 if (xd->flags & XIVE_IRQ_FLAG_LSI) 1563 xive_do_source_eoi(xd); 1564 else 1565 xive_irq_retrigger(d); 1566 1567 raw_spin_unlock(&desc->lock); 1568 } 1569 } 1570 1571 void xive_smp_disable_cpu(void) 1572 { 1573 struct xive_cpu *xc = __this_cpu_read(xive_cpu); 1574 unsigned int cpu = smp_processor_id(); 1575 1576 /* Migrate interrupts away from the CPU */ 1577 irq_migrate_all_off_this_cpu(); 1578 1579 /* Set CPPR to 0 to disable flow of interrupts */ 1580 xc->cppr = 0; 1581 out_8(xive_tima + xive_tima_offset + TM_CPPR, 0); 1582 1583 /* Flush everything still in the queue */ 1584 xive_flush_cpu_queue(cpu, xc); 1585 1586 /* Re-enable CPPR */ 1587 xc->cppr = 0xff; 1588 out_8(xive_tima + xive_tima_offset + TM_CPPR, 0xff); 1589 } 1590 1591 void xive_flush_interrupt(void) 1592 { 1593 struct xive_cpu *xc = __this_cpu_read(xive_cpu); 1594 unsigned int cpu = smp_processor_id(); 1595 1596 /* Called if an interrupt occurs while the CPU is hot unplugged */ 1597 xive_flush_cpu_queue(cpu, xc); 1598 } 1599 1600 #endif /* CONFIG_HOTPLUG_CPU */ 1601 1602 #endif /* CONFIG_SMP */ 1603 1604 void xive_teardown_cpu(void) 1605 { 1606 struct xive_cpu *xc = __this_cpu_read(xive_cpu); 1607 unsigned int cpu = smp_processor_id(); 1608 1609 /* Set CPPR to 0 to disable flow of interrupts */ 1610 xc->cppr = 0; 1611 out_8(xive_tima + xive_tima_offset + TM_CPPR, 0); 1612 1613 if (xive_ops->teardown_cpu) 1614 xive_ops->teardown_cpu(cpu, xc); 1615 1616 #ifdef CONFIG_SMP 1617 /* Get rid of IPI */ 1618 xive_cleanup_cpu_ipi(cpu, xc); 1619 #endif 1620 1621 /* Disable and free the queues */ 1622 xive_cleanup_cpu_queues(cpu, xc); 1623 } 1624 1625 void xive_shutdown(void) 1626 { 1627 xive_ops->shutdown(); 1628 } 1629 1630 bool __init xive_core_init(struct device_node *np, const struct xive_ops *ops, 1631 void __iomem *area, u32 offset, u8 max_prio) 1632 { 1633 xive_tima = area; 1634 xive_tima_offset = offset; 1635 xive_ops = ops; 1636 xive_irq_priority = max_prio; 1637 1638 ppc_md.get_irq = xive_get_irq; 1639 __xive_enabled = true; 1640 1641 pr_devel("Initializing host..\n"); 1642 xive_init_host(np); 1643 1644 pr_devel("Initializing boot CPU..\n"); 1645 1646 /* Allocate per-CPU data and queues */ 1647 xive_prepare_cpu(smp_processor_id()); 1648 1649 /* Get ready for interrupts */ 1650 xive_setup_cpu(); 1651 1652 pr_info("Interrupt handling initialized with %s backend\n", 1653 xive_ops->name); 1654 pr_info("Using priority %d for all interrupts\n", max_prio); 1655 1656 return true; 1657 } 1658 1659 __be32 *xive_queue_page_alloc(unsigned int cpu, u32 queue_shift) 1660 { 1661 unsigned int alloc_order; 1662 struct page *pages; 1663 __be32 *qpage; 1664 1665 alloc_order = xive_alloc_order(queue_shift); 1666 pages = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, alloc_order); 1667 if (!pages) 1668 return ERR_PTR(-ENOMEM); 1669 qpage = (__be32 *)page_address(pages); 1670 memset(qpage, 0, 1 << queue_shift); 1671 1672 return qpage; 1673 } 1674 1675 static int __init xive_off(char *arg) 1676 { 1677 xive_cmdline_disabled = true; 1678 return 0; 1679 } 1680 __setup("xive=off", xive_off); 1681 1682 static void xive_debug_show_cpu(struct seq_file *m, int cpu) 1683 { 1684 struct xive_cpu *xc = per_cpu(xive_cpu, cpu); 1685 1686 seq_printf(m, "CPU %d:", cpu); 1687 if (xc) { 1688 seq_printf(m, "pp=%02x CPPR=%02x ", xc->pending_prio, xc->cppr); 1689 1690 #ifdef CONFIG_SMP 1691 { 1692 u64 val = xive_esb_read(&xc->ipi_data, XIVE_ESB_GET); 1693 1694 seq_printf(m, "IPI=0x%08x PQ=%c%c ", xc->hw_ipi, 1695 val & XIVE_ESB_VAL_P ? 'P' : '-', 1696 val & XIVE_ESB_VAL_Q ? 'Q' : '-'); 1697 } 1698 #endif 1699 { 1700 struct xive_q *q = &xc->queue[xive_irq_priority]; 1701 u32 i0, i1, idx; 1702 1703 if (q->qpage) { 1704 idx = q->idx; 1705 i0 = be32_to_cpup(q->qpage + idx); 1706 idx = (idx + 1) & q->msk; 1707 i1 = be32_to_cpup(q->qpage + idx); 1708 seq_printf(m, "EQ idx=%d T=%d %08x %08x ...", 1709 q->idx, q->toggle, i0, i1); 1710 } 1711 } 1712 } 1713 seq_puts(m, "\n"); 1714 } 1715 1716 static void xive_debug_show_irq(struct seq_file *m, struct irq_data *d) 1717 { 1718 unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d); 1719 int rc; 1720 u32 target; 1721 u8 prio; 1722 u32 lirq; 1723 struct xive_irq_data *xd; 1724 u64 val; 1725 1726 rc = xive_ops->get_irq_config(hw_irq, &target, &prio, &lirq); 1727 if (rc) { 1728 seq_printf(m, "IRQ 0x%08x : no config rc=%d\n", hw_irq, rc); 1729 return; 1730 } 1731 1732 seq_printf(m, "IRQ 0x%08x : target=0x%x prio=%02x lirq=0x%x ", 1733 hw_irq, target, prio, lirq); 1734 1735 xd = irq_data_get_irq_handler_data(d); 1736 val = xive_esb_read(xd, XIVE_ESB_GET); 1737 seq_printf(m, "flags=%c%c%c PQ=%c%c", 1738 xd->flags & XIVE_IRQ_FLAG_STORE_EOI ? 'S' : ' ', 1739 xd->flags & XIVE_IRQ_FLAG_LSI ? 'L' : ' ', 1740 xd->flags & XIVE_IRQ_FLAG_H_INT_ESB ? 'H' : ' ', 1741 val & XIVE_ESB_VAL_P ? 'P' : '-', 1742 val & XIVE_ESB_VAL_Q ? 'Q' : '-'); 1743 seq_puts(m, "\n"); 1744 } 1745 1746 static int xive_core_debug_show(struct seq_file *m, void *private) 1747 { 1748 unsigned int i; 1749 struct irq_desc *desc; 1750 int cpu; 1751 1752 if (xive_ops->debug_show) 1753 xive_ops->debug_show(m, private); 1754 1755 for_each_possible_cpu(cpu) 1756 xive_debug_show_cpu(m, cpu); 1757 1758 for_each_irq_desc(i, desc) { 1759 struct irq_data *d = irq_domain_get_irq_data(xive_irq_domain, i); 1760 1761 if (d) 1762 xive_debug_show_irq(m, d); 1763 } 1764 return 0; 1765 } 1766 DEFINE_SHOW_ATTRIBUTE(xive_core_debug); 1767 1768 int xive_core_debug_init(void) 1769 { 1770 if (xive_enabled()) 1771 debugfs_create_file("xive", 0400, arch_debugfs_dir, 1772 NULL, &xive_core_debug_fops); 1773 return 0; 1774 } 1775