// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2016,2017 IBM Corporation.
 */

#define pr_fmt(fmt) "xive: " fmt

#include <linux/types.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/debugfs.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/msi.h>
#include <linux/vmalloc.h>

#include <asm/debugfs.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/irq.h>
#include <asm/errno.h>
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include <asm/xmon.h>

#include "xive-internal.h"

#undef DEBUG_FLUSH
#undef DEBUG_ALL

#ifdef DEBUG_ALL
#define DBG_VERBOSE(fmt, ...)	pr_devel("cpu %d - " fmt, \
					 smp_processor_id(), ## __VA_ARGS__)
#else
#define DBG_VERBOSE(fmt...)	do { } while (0)
#endif

bool __xive_enabled;
EXPORT_SYMBOL_GPL(__xive_enabled);
bool xive_cmdline_disabled;

/* We use only one priority for now */
static u8 xive_irq_priority;

/* TIMA exported to KVM */
void __iomem *xive_tima;
EXPORT_SYMBOL_GPL(xive_tima);
u32 xive_tima_offset;

/* Backend ops */
static const struct xive_ops *xive_ops;

/* Our global interrupt domain */
static struct irq_domain *xive_irq_domain;

#ifdef CONFIG_SMP
/* The IPIs all use the same logical irq number */
static u32 xive_ipi_irq;
#endif

/* Xive state for each CPU */
static DEFINE_PER_CPU(struct xive_cpu *, xive_cpu);

/* An invalid CPU target */
#define XIVE_INVALID_TARGET	(-1)

/*
 * Read the next entry in a queue, return its content if it's valid
 * or 0 if there is no new entry.
 *
 * The queue pointer is moved forward unless "just_peek" is set
 */
static u32 xive_read_eq(struct xive_q *q, bool just_peek)
{
	u32 cur;

	if (!q->qpage)
		return 0;
	cur = be32_to_cpup(q->qpage + q->idx);

	/* Check valid bit (31) vs current toggle polarity */
	if ((cur >> 31) == q->toggle)
		return 0;

	/* If consuming from the queue ... */
	if (!just_peek) {
		/* Next entry */
		q->idx = (q->idx + 1) & q->msk;

		/* Wrap around: flip valid toggle */
		if (q->idx == 0)
			q->toggle ^= 1;
	}
	/* Mask out the valid bit (31) */
	return cur & 0x7fffffff;
}

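/*
 * A worked example of the toggle protocol above (illustrative values,
 * not live driver state): take a 4-entry queue (q->msk == 3) with
 * q->toggle == 0. Entries queued in this generation have bit 31 set,
 * so they compare as valid. Consuming the entry at idx 3 wraps idx to
 * 0 and flips toggle to 1; the old entries (bit 31 still set) now
 * compare equal to toggle and read as "empty" until the HW overwrites
 * them with bit 31 clear in the next generation.
 */
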
/*
 * Scans all the queues that may have interrupts in them
 * (based on "pending_prio") in priority order until an
 * interrupt is found or all the queues are empty.
 *
 * Then updates the CPPR (Current Processor Priority
 * Register) based on the most favored interrupt found
 * (0xff if none) and returns what was found (0 if none).
 *
 * If just_peek is set, return the most favored pending
 * interrupt if any but don't update the queue pointers.
 *
 * Note: This function can operate generically on any number
 * of queues (up to 8). The current implementation of the XIVE
 * driver only uses a single queue however.
 *
 * Note2: This will also "flush" the pending_count of a queue
 * into the "count" when that queue is observed to be empty.
 * This is used to keep track of the number of interrupts
 * targeting a queue. When an interrupt is moved away from
 * a queue, we only decrement that queue count once the queue
 * has been observed empty to avoid races.
 */
static u32 xive_scan_interrupts(struct xive_cpu *xc, bool just_peek)
{
	u32 irq = 0;
	u8 prio = 0;

	/* Find highest pending priority */
	while (xc->pending_prio != 0) {
		struct xive_q *q;

		prio = ffs(xc->pending_prio) - 1;
		DBG_VERBOSE("scan_irq: trying prio %d\n", prio);

		/* Try to fetch */
		irq = xive_read_eq(&xc->queue[prio], just_peek);

		/* Found something ? That's it */
		if (irq) {
			if (just_peek || irq_to_desc(irq))
				break;
			/*
			 * We should never get here; if we do then we must
			 * have failed to synchronize the interrupt properly
			 * when shutting it down.
			 */
			pr_crit("got interrupt %d without descriptor, dropping\n",
				irq);
			WARN_ON(1);
			continue;
		}

		/* Clear pending bits */
		xc->pending_prio &= ~(1 << prio);

		/*
		 * Check if the queue count needs adjusting due to
		 * interrupts being moved away. See description of
		 * xive_dec_target_count()
		 */
		q = &xc->queue[prio];
		if (atomic_read(&q->pending_count)) {
			int p = atomic_xchg(&q->pending_count, 0);

			if (p) {
				WARN_ON(p > atomic_read(&q->count));
				atomic_sub(p, &q->count);
			}
		}
	}

	/* If nothing was found, set CPPR to 0xff */
	if (irq == 0)
		prio = 0xff;

	/* Update HW CPPR to match if necessary */
	if (prio != xc->cppr) {
		DBG_VERBOSE("scan_irq: adjusting CPPR to %d\n", prio);
		xc->cppr = prio;
		out_8(xive_tima + xive_tima_offset + TM_CPPR, prio);
	}

	return irq;
}

/*
 * This is used to perform the magic loads from an ESB
 * described in xive-regs.h
 */
static notrace u8 xive_esb_read(struct xive_irq_data *xd, u32 offset)
{
	u64 val;

	if (offset == XIVE_ESB_SET_PQ_10 && xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
		offset |= XIVE_ESB_LD_ST_MO;

	if ((xd->flags & XIVE_IRQ_FLAG_H_INT_ESB) && xive_ops->esb_rw)
		val = xive_ops->esb_rw(xd->hw_irq, offset, 0, 0);
	else
		val = in_be64(xd->eoi_mmio + offset);

	return (u8)val;
}

static void xive_esb_write(struct xive_irq_data *xd, u32 offset, u64 data)
{
	if ((xd->flags & XIVE_IRQ_FLAG_H_INT_ESB) && xive_ops->esb_rw)
		xive_ops->esb_rw(xd->hw_irq, offset, data, 1);
	else
		out_be64(xd->eoi_mmio + offset, data);
}

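/*
 * For reference, the four PQ states driven by the ESB loads above, as
 * named by the esb_names[] table in the debugfs code further down:
 * PQ=00 "RESET" (enabled, the next trigger is forwarded), PQ=10
 * "PENDING" (an occurrence was forwarded, further triggers are
 * swallowed until EOI), PQ=11 "QUEUED" (pending with at least one
 * more trigger latched in Q), PQ=01 "OFF" (masked, triggers only
 * latch Q). A minimal sketch of how a caller could detect a trigger
 * latched while masked, assuming a valid xd (illustrative, not used
 * by the driver):
 *
 *	u8 pq = xive_esb_read(xd, XIVE_ESB_GET);
 *	bool latched = pq & XIVE_ESB_VAL_Q;
 */
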
#ifdef CONFIG_XMON
static notrace void xive_dump_eq(const char *name, struct xive_q *q)
{
	u32 i0, i1, idx;

	if (!q->qpage)
		return;
	idx = q->idx;
	i0 = be32_to_cpup(q->qpage + idx);
	idx = (idx + 1) & q->msk;
	i1 = be32_to_cpup(q->qpage + idx);
	xmon_printf("%s idx=%d T=%d %08x %08x ...", name,
		    q->idx, q->toggle, i0, i1);
}

notrace void xmon_xive_do_dump(int cpu)
{
	struct xive_cpu *xc = per_cpu(xive_cpu, cpu);

	xmon_printf("CPU %d:", cpu);
	if (xc) {
		xmon_printf("pp=%02x CPPR=%02x ", xc->pending_prio, xc->cppr);

#ifdef CONFIG_SMP
		{
			u64 val = xive_esb_read(&xc->ipi_data, XIVE_ESB_GET);

			xmon_printf("IPI=0x%08x PQ=%c%c ", xc->hw_ipi,
				    val & XIVE_ESB_VAL_P ? 'P' : '-',
				    val & XIVE_ESB_VAL_Q ? 'Q' : '-');
		}
#endif
		xive_dump_eq("EQ", &xc->queue[xive_irq_priority]);
	}
	xmon_printf("\n");
}

static struct irq_data *xive_get_irq_data(u32 hw_irq)
{
	unsigned int irq = irq_find_mapping(xive_irq_domain, hw_irq);

	return irq ? irq_get_irq_data(irq) : NULL;
}

int xmon_xive_get_irq_config(u32 hw_irq, struct irq_data *d)
{
	int rc;
	u32 target;
	u8 prio;
	u32 lirq;

	rc = xive_ops->get_irq_config(hw_irq, &target, &prio, &lirq);
	if (rc) {
		xmon_printf("IRQ 0x%08x : no config rc=%d\n", hw_irq, rc);
		return rc;
	}

	xmon_printf("IRQ 0x%08x : target=0x%x prio=%02x lirq=0x%x ",
		    hw_irq, target, prio, lirq);

	if (!d)
		d = xive_get_irq_data(hw_irq);

	if (d) {
		struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
		u64 val = xive_esb_read(xd, XIVE_ESB_GET);

		xmon_printf("flags=%c%c%c PQ=%c%c",
			    xd->flags & XIVE_IRQ_FLAG_STORE_EOI ? 'S' : ' ',
			    xd->flags & XIVE_IRQ_FLAG_LSI ? 'L' : ' ',
			    xd->flags & XIVE_IRQ_FLAG_H_INT_ESB ? 'H' : ' ',
			    val & XIVE_ESB_VAL_P ? 'P' : '-',
			    val & XIVE_ESB_VAL_Q ? 'Q' : '-');
	}

	xmon_printf("\n");
	return 0;
}

void xmon_xive_get_irq_all(void)
{
	unsigned int i;
	struct irq_desc *desc;

	for_each_irq_desc(i, desc) {
		struct irq_data *d = irq_desc_get_irq_data(desc);
		unsigned int hwirq = (unsigned int)irqd_to_hwirq(d);

		if (d->domain == xive_irq_domain)
			xmon_xive_get_irq_config(hwirq, d);
	}
}

#endif /* CONFIG_XMON */

static unsigned int xive_get_irq(void)
{
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
	u32 irq;

	/*
	 * This can be called either as a result of a HW interrupt or
	 * as a "replay" because EOI decided there was still something
	 * in one of the queues.
	 *
	 * First we perform an ACK cycle in order to update our mask
	 * of pending priorities. This will also have the effect of
	 * updating the CPPR to the most favored pending interrupt.
	 *
	 * In the future, if we have a way to differentiate a first
	 * entry (on HW interrupt) from a replay triggered by EOI,
	 * we could skip this on replays unless the soft-mask state
	 * tells us that a new HW interrupt occurred.
	 */
	xive_ops->update_pending(xc);

	DBG_VERBOSE("get_irq: pending=%02x\n", xc->pending_prio);

	/* Scan our queue(s) for interrupts */
	irq = xive_scan_interrupts(xc, false);

	DBG_VERBOSE("get_irq: got irq 0x%x, new pending=0x%02x\n",
		    irq, xc->pending_prio);

	/* Return pending interrupt if any */
	if (irq == XIVE_BAD_IRQ)
		return 0;
	return irq;
}

/*
 * After EOI'ing an interrupt, we need to re-check the queue
 * to see if another interrupt is pending since multiple
 * interrupts can coalesce into a single notification to the
 * CPU.
 *
 * If we find that there is indeed more in there, we call
 * force_external_irq_replay() to make Linux synthesize an
 * external interrupt on the next call to local_irq_restore().
 */
static void xive_do_queue_eoi(struct xive_cpu *xc)
{
	if (xive_scan_interrupts(xc, true) != 0) {
		DBG_VERBOSE("eoi: pending=0x%02x\n", xc->pending_prio);
		force_external_irq_replay();
	}
}

/*
 * EOI an interrupt at the source. There are several methods
 * to do this depending on the HW version and source type
 */
static void xive_do_source_eoi(struct xive_irq_data *xd)
{
	u8 eoi_val;

	xd->stale_p = false;

	/* If the XIVE supports the new "store EOI" facility, use it */
	if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI) {
		xive_esb_write(xd, XIVE_ESB_STORE_EOI, 0);
		return;
	}

	/*
	 * For LSIs, we use the "EOI cycle" special load rather than
	 * PQ bits, as they are automatically re-triggered in HW when
	 * still pending.
	 */
	if (xd->flags & XIVE_IRQ_FLAG_LSI) {
		xive_esb_read(xd, XIVE_ESB_LOAD_EOI);
		return;
	}

	/*
	 * Otherwise, we use the special MMIO that does a clear of
	 * both P and Q and returns the old Q. This allows us to then
	 * do a re-trigger if Q was set rather than synthesizing an
	 * interrupt in software
	 */
	eoi_val = xive_esb_read(xd, XIVE_ESB_SET_PQ_00);
	DBG_VERBOSE("eoi_val=%x\n", eoi_val);

	/* Re-trigger if needed */
	if ((eoi_val & XIVE_ESB_VAL_Q) && xd->trig_mmio)
		out_be64(xd->trig_mmio, 0);
}

/* irq_chip eoi callback, called with irq descriptor lock held */
static void xive_irq_eoi(struct irq_data *d)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);

	DBG_VERBOSE("eoi_irq: irq=%d [0x%lx] pending=%02x\n",
		    d->irq, irqd_to_hwirq(d), xc->pending_prio);

	/*
	 * EOI the source if it hasn't been disabled and hasn't
	 * been passed-through to a KVM guest
	 */
	if (!irqd_irq_disabled(d) && !irqd_is_forwarded_to_vcpu(d) &&
	    !(xd->flags & XIVE_IRQ_FLAG_NO_EOI))
		xive_do_source_eoi(xd);
	else
		xd->stale_p = true;

	/*
	 * Clear saved_p to indicate that it's no longer occupying
	 * a queue slot on the target queue
	 */
	xd->saved_p = false;

	/* Check for more work in the queue */
	xive_do_queue_eoi(xc);
}

/*
 * Helper used to mask and unmask an interrupt source.
 */
static void xive_do_source_set_mask(struct xive_irq_data *xd,
				    bool mask)
{
	u64 val;

	/*
	 * If the interrupt had P set, it may be in a queue.
	 *
	 * We need to make sure we don't re-enable it until it
	 * has been fetched from that queue and EOId. We keep
	 * a copy of that P state and use it to restore the
	 * ESB accordingly on unmask.
	 */
	if (mask) {
		val = xive_esb_read(xd, XIVE_ESB_SET_PQ_01);
		if (!xd->stale_p && !!(val & XIVE_ESB_VAL_P))
			xd->saved_p = true;
		xd->stale_p = false;
	} else if (xd->saved_p) {
		xive_esb_read(xd, XIVE_ESB_SET_PQ_10);
		xd->saved_p = false;
	} else {
		xive_esb_read(xd, XIVE_ESB_SET_PQ_00);
		xd->stale_p = false;
	}
}

/*
 * Try to choose "cpu" as a new interrupt target. Increments
 * the queue accounting for that target if it's not already
 * full.
 */
static bool xive_try_pick_target(int cpu)
{
	struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
	struct xive_q *q = &xc->queue[xive_irq_priority];
	int max;

	/*
	 * Calculate max number of interrupts in that queue.
	 *
	 * We leave a gap of 1 just in case...
	 */
	max = (q->msk + 1) - 1;
	return !!atomic_add_unless(&q->count, 1, max);
}

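/*
 * Example of the accounting above: with a 64k queue page of 4-byte
 * entries, q->msk is 0x3fff, so "max" is 16383 and one slot stays
 * deliberately unused. atomic_add_unless() tests and reserves the
 * slot in one step, so two CPUs racing to target the same queue
 * cannot oversubscribe it.
 */
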
/*
 * Un-account an interrupt for a target CPU. We don't directly
 * decrement q->count since the interrupt might still be present
 * in the queue.
 *
 * Instead increment a separate counter "pending_count" which
 * will be subtracted from "count" later when that CPU observes
 * the queue to be empty.
 */
static void xive_dec_target_count(int cpu)
{
	struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
	struct xive_q *q;

	if (WARN_ON(cpu < 0 || !xc)) {
		pr_err("%s: cpu=%d xc=%p\n", __func__, cpu, xc);
		return;
	}
	q = &xc->queue[xive_irq_priority];

	/*
	 * We increment the "pending count" which will be used
	 * to decrement the target queue count whenever it's next
	 * processed and found empty. This ensures that we don't
	 * decrement while we still have the interrupt there
	 * occupying a slot.
	 */
	atomic_inc(&q->pending_count);
}

/* Find a tentative CPU target in a CPU mask */
static int xive_find_target_in_mask(const struct cpumask *mask,
				    unsigned int fuzz)
{
	int cpu, first, num, i;

	/* Pick up a starting point CPU in the mask based on fuzz */
	num = min_t(int, cpumask_weight(mask), nr_cpu_ids);
	first = fuzz % num;

	/* Locate it */
	cpu = cpumask_first(mask);
	for (i = 0; i < first && cpu < nr_cpu_ids; i++)
		cpu = cpumask_next(cpu, mask);

	/* Sanity check */
	if (WARN_ON(cpu >= nr_cpu_ids))
		cpu = cpumask_first(cpu_online_mask);

	/* Remember first one to handle wrap-around */
	first = cpu;

	/*
	 * Now go through the entire mask until we find a valid
	 * target.
	 */
	do {
		/*
		 * We re-check online as the fallback case passes us
		 * an untested affinity mask
		 */
		if (cpu_online(cpu) && xive_try_pick_target(cpu))
			return cpu;
		cpu = cpumask_next(cpu, mask);
		/* Wrap around */
		if (cpu >= nr_cpu_ids)
			cpu = cpumask_first(mask);
	} while (cpu != first);

	return -1;
}

/*
 * Pick a target CPU for an interrupt. This is done at
 * startup or if the affinity is changed in a way that
 * invalidates the current target.
 */
static int xive_pick_irq_target(struct irq_data *d,
				const struct cpumask *affinity)
{
	static unsigned int fuzz;
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
	cpumask_var_t mask;
	int cpu = -1;

	/*
	 * If we have chip IDs, first we try to build a mask of
	 * CPUs matching the source chip and find a target in there
	 */
	if (xd->src_chip != XIVE_INVALID_CHIP_ID &&
	    zalloc_cpumask_var(&mask, GFP_ATOMIC)) {
		/* Build a mask of matching chip IDs */
		for_each_cpu_and(cpu, affinity, cpu_online_mask) {
			struct xive_cpu *xc = per_cpu(xive_cpu, cpu);

			if (xc->chip_id == xd->src_chip)
				cpumask_set_cpu(cpu, mask);
		}
		/* Try to find a target */
		if (cpumask_empty(mask))
			cpu = -1;
		else
			cpu = xive_find_target_in_mask(mask, fuzz++);
		free_cpumask_var(mask);
		if (cpu >= 0)
			return cpu;
		fuzz--;
	}

	/* No chip IDs, fallback to using the affinity mask */
	return xive_find_target_in_mask(affinity, fuzz++);
}

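/*
 * The static "fuzz" counter above acts as a cheap round-robin: with
 * an affinity mask of, say, CPUs {2, 5, 7}, successive allocations
 * start their search at CPU 2, 5, 7, 2, ... (fuzz % 3) and then walk
 * the mask with wrap-around until xive_try_pick_target() accepts a
 * CPU. The counter is intentionally not locked; a racy read only
 * skews the starting point, which is harmless.
 */
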
static unsigned int xive_irq_startup(struct irq_data *d)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
	int target, rc;

	xd->saved_p = false;
	xd->stale_p = false;
	pr_devel("xive_irq_startup: irq %d [0x%x] data @%p\n",
		 d->irq, hw_irq, d);

#ifdef CONFIG_PCI_MSI
	/*
	 * The generic MSI code returns with the interrupt disabled on the
	 * card, using the MSI mask bits. Firmware doesn't appear to unmask
	 * at that level, so we do it here by hand.
	 */
	if (irq_data_get_msi_desc(d))
		pci_msi_unmask_irq(d);
#endif

	/* Pick a target */
	target = xive_pick_irq_target(d, irq_data_get_affinity_mask(d));
	if (target == XIVE_INVALID_TARGET) {
		/* Try again breaking affinity */
		target = xive_pick_irq_target(d, cpu_online_mask);
		if (target == XIVE_INVALID_TARGET)
			return -ENXIO;
		pr_warn("irq %d started with broken affinity\n", d->irq);
	}

	/* Sanity check */
	if (WARN_ON(target == XIVE_INVALID_TARGET ||
		    target >= nr_cpu_ids))
		target = smp_processor_id();

	xd->target = target;

	/*
	 * Configure the logical number to be the Linux IRQ number
	 * and set the target queue
	 */
	rc = xive_ops->configure_irq(hw_irq,
				     get_hard_smp_processor_id(target),
				     xive_irq_priority, d->irq);
	if (rc)
		return rc;

	/* Unmask the ESB */
	xive_do_source_set_mask(xd, false);

	return 0;
}

/* called with irq descriptor lock held */
static void xive_irq_shutdown(struct irq_data *d)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);

	pr_devel("xive_irq_shutdown: irq %d [0x%x] data @%p\n",
		 d->irq, hw_irq, d);

	if (WARN_ON(xd->target == XIVE_INVALID_TARGET))
		return;

	/* Mask the interrupt at the source */
	xive_do_source_set_mask(xd, true);

	/*
	 * Mask the interrupt in HW in the IVT/EAS and set the number
	 * to be the "bad" IRQ number
	 */
	xive_ops->configure_irq(hw_irq,
				get_hard_smp_processor_id(xd->target),
				0xff, XIVE_BAD_IRQ);

	xive_dec_target_count(xd->target);
	xd->target = XIVE_INVALID_TARGET;
}

static void xive_irq_unmask(struct irq_data *d)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

	pr_devel("xive_irq_unmask: irq %d data @%p\n", d->irq, xd);

	xive_do_source_set_mask(xd, false);
}

static void xive_irq_mask(struct irq_data *d)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

	pr_devel("xive_irq_mask: irq %d data @%p\n", d->irq, xd);

	xive_do_source_set_mask(xd, true);
}

static int xive_irq_set_affinity(struct irq_data *d,
				 const struct cpumask *cpumask,
				 bool force)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
	u32 target, old_target;
	int rc = 0;

	pr_devel("xive_irq_set_affinity: irq %d\n", d->irq);

	/* Is this valid ? */
	if (cpumask_any_and(cpumask, cpu_online_mask) >= nr_cpu_ids)
		return -EINVAL;

	/* Don't do anything if the interrupt isn't started */
	if (!irqd_is_started(d))
		return IRQ_SET_MASK_OK;

	/*
	 * If the existing target is already in the new mask and is
	 * online then do nothing.
	 */
	if (xd->target != XIVE_INVALID_TARGET &&
	    cpu_online(xd->target) &&
	    cpumask_test_cpu(xd->target, cpumask))
		return IRQ_SET_MASK_OK;

	/* Pick a new target */
	target = xive_pick_irq_target(d, cpumask);

	/* No target found */
	if (target == XIVE_INVALID_TARGET)
		return -ENXIO;

	/* Sanity check */
	if (WARN_ON(target >= nr_cpu_ids))
		target = smp_processor_id();

	old_target = xd->target;

	/*
	 * Only configure the irq if it's not currently passed-through to
	 * a KVM guest
	 */
	if (!irqd_is_forwarded_to_vcpu(d))
		rc = xive_ops->configure_irq(hw_irq,
					     get_hard_smp_processor_id(target),
					     xive_irq_priority, d->irq);
	if (rc < 0) {
		pr_err("Error %d reconfiguring irq %d\n", rc, d->irq);
		return rc;
	}

	pr_devel("  target: 0x%x\n", target);
	xd->target = target;

	/* Give up previous target */
	if (old_target != XIVE_INVALID_TARGET)
		xive_dec_target_count(old_target);

	return IRQ_SET_MASK_OK;
}

static int xive_irq_set_type(struct irq_data *d, unsigned int flow_type)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

	/*
	 * We only support these. This has really no effect other than
	 * setting the corresponding descriptor bits, but those will in
	 * turn affect the resend function when re-enabling an edge
	 * interrupt.
	 *
	 * Set the default to edge as explained in map().
	 */
	if (flow_type == IRQ_TYPE_DEFAULT || flow_type == IRQ_TYPE_NONE)
		flow_type = IRQ_TYPE_EDGE_RISING;

	if (flow_type != IRQ_TYPE_EDGE_RISING &&
	    flow_type != IRQ_TYPE_LEVEL_LOW)
		return -EINVAL;

	irqd_set_trigger_type(d, flow_type);

	/*
	 * Double check it matches what the FW thinks
	 *
	 * NOTE: We don't know yet if the PAPR interface will provide
	 * the LSI vs MSI information apart from the device-tree so
	 * this check might have to move into an optional backend call
	 * that is specific to the native backend
	 */
	if ((flow_type == IRQ_TYPE_LEVEL_LOW) !=
	    !!(xd->flags & XIVE_IRQ_FLAG_LSI)) {
		pr_warn("Interrupt %d (HW 0x%x) type mismatch, Linux says %s, FW says %s\n",
			d->irq, (u32)irqd_to_hwirq(d),
			(flow_type == IRQ_TYPE_LEVEL_LOW) ? "Level" : "Edge",
			(xd->flags & XIVE_IRQ_FLAG_LSI) ? "Level" : "Edge");
	}

	return IRQ_SET_MASK_OK_NOCOPY;
}

static int xive_irq_retrigger(struct irq_data *d)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

	/* This should be only for MSIs */
	if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI))
		return 0;

	/*
	 * To perform a retrigger, we first set the PQ bits to
	 * 11, then perform an EOI.
	 */
	xive_esb_read(xd, XIVE_ESB_SET_PQ_11);
	xive_do_source_eoi(xd);

	return 1;
}

/*
 * Caller holds the irq descriptor lock, so this won't be called
 * concurrently with xive_get_irqchip_state on the same interrupt.
 */
static int xive_irq_set_vcpu_affinity(struct irq_data *d, void *state)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
	int rc;
	u8 pq;

	/*
	 * This is called by KVM with state non-NULL for enabling
	 * pass-through or NULL for disabling it
	 */
	if (state) {
		irqd_set_forwarded_to_vcpu(d);

		/* Set it to PQ=10 state to prevent further sends */
		pq = xive_esb_read(xd, XIVE_ESB_SET_PQ_10);
		if (!xd->stale_p) {
			xd->saved_p = !!(pq & XIVE_ESB_VAL_P);
			xd->stale_p = !xd->saved_p;
		}

		/* No target ? nothing to do */
		if (xd->target == XIVE_INVALID_TARGET) {
			/*
			 * An untargeted interrupt should have been
			 * also masked at the source
			 */
			WARN_ON(xd->saved_p);

			return 0;
		}

		/*
		 * If P was set, adjust state to PQ=11 to indicate
		 * that a resend is needed for the interrupt to reach
		 * the guest. Also remember the value of P.
		 *
		 * This also tells us that it's in flight to a host queue
		 * or has already been fetched but hasn't been EOIed yet
		 * by the host. Thus it's potentially using up a host
		 * queue slot. This is important to know because as long
		 * as this is the case, we must not hard-unmask it when
		 * "returning" that interrupt to the host.
		 *
		 * This saved_p is cleared by the host EOI, when we know
		 * for sure the queue slot is no longer in use.
		 */
		if (xd->saved_p) {
			xive_esb_read(xd, XIVE_ESB_SET_PQ_11);

			/*
			 * Sync the XIVE source HW to ensure the interrupt
			 * has gone through the EAS before we change its
			 * target to the guest. That should guarantee us
			 * that we *will* eventually get an EOI for it on
			 * the host. Otherwise there would be a small window
			 * for P to be seen here but the interrupt going
			 * to the guest queue.
			 */
			if (xive_ops->sync_source)
				xive_ops->sync_source(hw_irq);
		}
	} else {
		irqd_clr_forwarded_to_vcpu(d);

		/* No host target ? hard mask and return */
		if (xd->target == XIVE_INVALID_TARGET) {
			xive_do_source_set_mask(xd, true);
			return 0;
		}

		/*
		 * Sync the XIVE source HW to ensure the interrupt
		 * has gone through the EAS before we change its
		 * target to the host.
		 */
		if (xive_ops->sync_source)
			xive_ops->sync_source(hw_irq);

		/*
		 * By convention we are called with the interrupt in
		 * a PQ=10 or PQ=11 state, ie, it won't fire and will
		 * have latched in Q whether there's a pending HW
		 * interrupt or not.
		 *
		 * First reconfigure the target.
		 */
		rc = xive_ops->configure_irq(hw_irq,
					     get_hard_smp_processor_id(xd->target),
					     xive_irq_priority, d->irq);
		if (rc)
			return rc;

		/*
		 * Then if saved_p is not set, effectively re-enable the
		 * interrupt with an EOI. If it is set, we know there is
		 * still a message in a host queue somewhere that will be
		 * EOId eventually.
		 *
		 * Note: We don't check irqd_irq_disabled(). Effectively,
		 * we *will* let the irq get through even if masked if the
		 * HW is still firing it in order to deal with the whole
		 * saved_p business properly. If the interrupt triggers
		 * while masked, the generic code will re-mask it anyway.
		 */
		if (!xd->saved_p)
			xive_do_source_eoi(xd);
	}
	return 0;
}

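/*
 * Recap of the pass-through PQ handling implemented above:
 *
 *  enable (state != NULL):  set PQ=10 to stop new notifications and
 *	capture P into saved_p; if P was set, move to PQ=11 and sync
 *	the source so the in-flight occurrence is guaranteed to reach,
 *	and be EOIed from, a *host* queue.
 *  disable (state == NULL): sync the source, point the EAS back at
 *	the host target, then EOI to return to PQ=00, unless saved_p
 *	indicates a host queue slot is still in use.
 */
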
/* Called with irq descriptor lock held. */
static int xive_get_irqchip_state(struct irq_data *data,
				  enum irqchip_irq_state which, bool *state)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(data);
	u8 pq;

	switch (which) {
	case IRQCHIP_STATE_ACTIVE:
		pq = xive_esb_read(xd, XIVE_ESB_GET);

		/*
		 * The ESB value being all 1's means we couldn't get
		 * the PQ state of the interrupt through mmio. It may
		 * happen, for example when querying a PHB interrupt
		 * while the PHB is in an error state. We consider the
		 * interrupt to be inactive in that case.
		 */
		*state = (pq != XIVE_ESB_INVALID) && !xd->stale_p &&
			 (xd->saved_p || !!(pq & XIVE_ESB_VAL_P));
		return 0;
	default:
		return -EINVAL;
	}
}

static struct irq_chip xive_irq_chip = {
	.name = "XIVE-IRQ",
	.irq_startup = xive_irq_startup,
	.irq_shutdown = xive_irq_shutdown,
	.irq_eoi = xive_irq_eoi,
	.irq_mask = xive_irq_mask,
	.irq_unmask = xive_irq_unmask,
	.irq_set_affinity = xive_irq_set_affinity,
	.irq_set_type = xive_irq_set_type,
	.irq_retrigger = xive_irq_retrigger,
	.irq_set_vcpu_affinity = xive_irq_set_vcpu_affinity,
	.irq_get_irqchip_state = xive_get_irqchip_state,
};

bool is_xive_irq(struct irq_chip *chip)
{
	return chip == &xive_irq_chip;
}
EXPORT_SYMBOL_GPL(is_xive_irq);

void xive_cleanup_irq_data(struct xive_irq_data *xd)
{
	if (xd->eoi_mmio) {
		unmap_kernel_range((unsigned long)xd->eoi_mmio,
				   1u << xd->esb_shift);
		iounmap(xd->eoi_mmio);
		if (xd->eoi_mmio == xd->trig_mmio)
			xd->trig_mmio = NULL;
		xd->eoi_mmio = NULL;
	}
	if (xd->trig_mmio) {
		unmap_kernel_range((unsigned long)xd->trig_mmio,
				   1u << xd->esb_shift);
		iounmap(xd->trig_mmio);
		xd->trig_mmio = NULL;
	}
}
EXPORT_SYMBOL_GPL(xive_cleanup_irq_data);

static int xive_irq_alloc_data(unsigned int virq, irq_hw_number_t hw)
{
	struct xive_irq_data *xd;
	int rc;

	xd = kzalloc(sizeof(struct xive_irq_data), GFP_KERNEL);
	if (!xd)
		return -ENOMEM;
	rc = xive_ops->populate_irq_data(hw, xd);
	if (rc) {
		kfree(xd);
		return rc;
	}
	xd->target = XIVE_INVALID_TARGET;
	irq_set_handler_data(virq, xd);

	/*
	 * Turn OFF by default the interrupt being mapped. A side
	 * effect of this check is the mapping of the ESB page of the
	 * interrupt in the Linux address space. This prevents page
	 * fault issues in the crash handler which masks all
	 * interrupts.
	 */
	xive_esb_read(xd, XIVE_ESB_SET_PQ_01);

	return 0;
}

static void xive_irq_free_data(unsigned int virq)
{
	struct xive_irq_data *xd = irq_get_handler_data(virq);

	if (!xd)
		return;
	irq_set_handler_data(virq, NULL);
	xive_cleanup_irq_data(xd);
	kfree(xd);
}

#ifdef CONFIG_SMP

static void xive_cause_ipi(int cpu)
{
	struct xive_cpu *xc;
	struct xive_irq_data *xd;

	xc = per_cpu(xive_cpu, cpu);

	DBG_VERBOSE("IPI CPU %d -> %d (HW IRQ 0x%x)\n",
		    smp_processor_id(), cpu, xc->hw_ipi);

	xd = &xc->ipi_data;
	if (WARN_ON(!xd->trig_mmio))
		return;
	out_be64(xd->trig_mmio, 0);
}

static irqreturn_t xive_muxed_ipi_action(int irq, void *dev_id)
{
	return smp_ipi_demux();
}

static void xive_ipi_eoi(struct irq_data *d)
{
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);

	/* Handle possible race with unplug and drop stale IPIs */
	if (!xc)
		return;

	DBG_VERBOSE("IPI eoi: irq=%d [0x%lx] (HW IRQ 0x%x) pending=%02x\n",
		    d->irq, irqd_to_hwirq(d), xc->hw_ipi, xc->pending_prio);

	xive_do_source_eoi(&xc->ipi_data);
	xive_do_queue_eoi(xc);
}

static void xive_ipi_do_nothing(struct irq_data *d)
{
	/*
	 * Nothing to do, we never mask/unmask IPIs, but the callback
	 * has to exist for the struct irq_chip.
	 */
}

static struct irq_chip xive_ipi_chip = {
	.name = "XIVE-IPI",
	.irq_eoi = xive_ipi_eoi,
	.irq_mask = xive_ipi_do_nothing,
	.irq_unmask = xive_ipi_do_nothing,
};

/*
 * IPIs are marked per-cpu. We use separate HW interrupts under the
 * hood but associated with the same "linux" interrupt
 */
static int xive_ipi_irq_domain_map(struct irq_domain *h, unsigned int virq,
				   irq_hw_number_t hw)
{
	irq_set_chip_and_handler(virq, &xive_ipi_chip, handle_percpu_irq);
	return 0;
}

static const struct irq_domain_ops xive_ipi_irq_domain_ops = {
	.map = xive_ipi_irq_domain_map,
};

static int __init xive_request_ipi(void)
{
	struct fwnode_handle *fwnode;
	struct irq_domain *ipi_domain;
	unsigned int virq;
	int ret = -ENOMEM;

	fwnode = irq_domain_alloc_named_fwnode("XIVE-IPI");
	if (!fwnode)
		goto out;

	ipi_domain = irq_domain_create_linear(fwnode, 1,
					      &xive_ipi_irq_domain_ops, NULL);
	if (!ipi_domain)
		goto out_free_fwnode;

	/* Initialize it */
	virq = irq_create_mapping(ipi_domain, XIVE_IPI_HW_IRQ);
	if (!virq) {
		ret = -EINVAL;
		goto out_free_domain;
	}

	xive_ipi_irq = virq;

	ret = request_irq(virq, xive_muxed_ipi_action,
			  IRQF_PERCPU | IRQF_NO_THREAD, "IPI", NULL);

	WARN(ret < 0, "Failed to request IPI %d: %d\n", virq, ret);
	return ret;

out_free_domain:
	irq_domain_remove(ipi_domain);
out_free_fwnode:
	irq_domain_free_fwnode(fwnode);
out:
	return ret;
}

static int xive_setup_cpu_ipi(unsigned int cpu)
{
	struct xive_cpu *xc;
	int rc;

	pr_debug("Setting up IPI for CPU %d\n", cpu);

	xc = per_cpu(xive_cpu, cpu);

	/* Check if we are already setup */
	if (xc->hw_ipi != XIVE_BAD_IRQ)
		return 0;

	/* Grab an IPI from the backend, this will populate xc->hw_ipi */
	if (xive_ops->get_ipi(cpu, xc))
		return -EIO;

	/*
	 * Populate the IRQ data in the xive_cpu structure and
	 * configure the HW / enable the IPIs.
	 */
	rc = xive_ops->populate_irq_data(xc->hw_ipi, &xc->ipi_data);
	if (rc) {
		pr_err("Failed to populate IPI data on CPU %d\n", cpu);
		return -EIO;
	}
	rc = xive_ops->configure_irq(xc->hw_ipi,
				     get_hard_smp_processor_id(cpu),
				     xive_irq_priority, xive_ipi_irq);
	if (rc) {
		pr_err("Failed to map IPI for CPU %d\n", cpu);
		return -EIO;
	}
	pr_devel("CPU %d HW IPI %x, virq %d, trig_mmio=%p\n", cpu,
		 xc->hw_ipi, xive_ipi_irq, xc->ipi_data.trig_mmio);

	/* Unmask it */
	xive_do_source_set_mask(&xc->ipi_data, false);

	return 0;
}

static void xive_cleanup_cpu_ipi(unsigned int cpu, struct xive_cpu *xc)
{
	/* Disable the IPI and free the IRQ data */

	/* Already cleaned up ? */
	if (xc->hw_ipi == XIVE_BAD_IRQ)
		return;

	/* Mask the IPI */
	xive_do_source_set_mask(&xc->ipi_data, true);

	/*
	 * Note: We don't call xive_cleanup_irq_data() to free
	 * the mappings as this is called from an IPI on kexec
	 * which is not a safe environment to call iounmap()
	 */

	/* Deconfigure/mask in the backend */
	xive_ops->configure_irq(xc->hw_ipi, hard_smp_processor_id(),
				0xff, xive_ipi_irq);

	/* Free the IPI in the backend */
	xive_ops->put_ipi(cpu, xc);
}

void __init xive_smp_probe(void)
{
	smp_ops->cause_ipi = xive_cause_ipi;

	/* Register the IPI */
	xive_request_ipi();

	/* Allocate and setup IPI for the boot CPU */
	xive_setup_cpu_ipi(smp_processor_id());
}

#endif /* CONFIG_SMP */

static int xive_irq_domain_map(struct irq_domain *h, unsigned int virq,
			       irq_hw_number_t hw)
{
	int rc;

	/*
	 * Mark interrupts as edge sensitive by default so that resend
	 * actually works. The interrupt type is fixed up later in
	 * xive_irq_set_type() if needed.
	 */
	irq_clear_status_flags(virq, IRQ_LEVEL);

	rc = xive_irq_alloc_data(virq, hw);
	if (rc)
		return rc;

	irq_set_chip_and_handler(virq, &xive_irq_chip, handle_fasteoi_irq);

	return 0;
}

static void xive_irq_domain_unmap(struct irq_domain *d, unsigned int virq)
{
	xive_irq_free_data(virq);
}

static int xive_irq_domain_xlate(struct irq_domain *h, struct device_node *ct,
				 const u32 *intspec, unsigned int intsize,
				 irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
	*out_hwirq = intspec[0];

	/*
	 * If intsize is at least 2, we look for the type in the second
	 * cell; we assume the LSB indicates a level interrupt.
	 */
	if (intsize > 1) {
		if (intspec[1] & 1)
			*out_flags = IRQ_TYPE_LEVEL_LOW;
		else
			*out_flags = IRQ_TYPE_EDGE_RISING;
	} else
		*out_flags = IRQ_TYPE_LEVEL_LOW;

	return 0;
}

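/*
 * Example device-tree interrupt specifiers decoded by the xlate above
 * (illustrative nodes, not from a real device-tree):
 *
 *	interrupts = <0x1234 1>;  // hwirq 0x1234, LSB set: IRQ_TYPE_LEVEL_LOW
 *	interrupts = <0x1235 0>;  // hwirq 0x1235, LSB clear: IRQ_TYPE_EDGE_RISING
 *
 * A single-cell specifier defaults to level-low.
 */
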
static int xive_irq_domain_match(struct irq_domain *h, struct device_node *node,
				 enum irq_domain_bus_token bus_token)
{
	return xive_ops->match(node);
}

#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
static const char * const esb_names[] = { "RESET", "OFF", "PENDING", "QUEUED" };

static const struct {
	u64  mask;
	char *name;
} xive_irq_flags[] = {
	{ XIVE_IRQ_FLAG_STORE_EOI, "STORE_EOI" },
	{ XIVE_IRQ_FLAG_LSI,       "LSI" },
	{ XIVE_IRQ_FLAG_H_INT_ESB, "H_INT_ESB" },
	{ XIVE_IRQ_FLAG_NO_EOI,    "NO_EOI" },
};

static void xive_irq_domain_debug_show(struct seq_file *m, struct irq_domain *d,
				       struct irq_data *irqd, int ind)
{
	struct xive_irq_data *xd;
	u64 val;
	int i;

	/* No IRQ domain level information. To be done */
	if (!irqd)
		return;

	if (!is_xive_irq(irq_data_get_irq_chip(irqd)))
		return;

	seq_printf(m, "%*sXIVE:\n", ind, "");
	ind++;

	xd = irq_data_get_irq_handler_data(irqd);
	if (!xd) {
		seq_printf(m, "%*snot assigned\n", ind, "");
		return;
	}

	val = xive_esb_read(xd, XIVE_ESB_GET);
	seq_printf(m, "%*sESB: %s\n", ind, "", esb_names[val & 0x3]);
	seq_printf(m, "%*sPstate: %s %s\n", ind, "", xd->stale_p ? "stale" : "",
		   xd->saved_p ? "saved" : "");
	seq_printf(m, "%*sTarget: %d\n", ind, "", xd->target);
	seq_printf(m, "%*sChip: %d\n", ind, "", xd->src_chip);
	seq_printf(m, "%*sTrigger: 0x%016llx\n", ind, "", xd->trig_page);
	seq_printf(m, "%*sEOI: 0x%016llx\n", ind, "", xd->eoi_page);
	seq_printf(m, "%*sFlags: 0x%llx\n", ind, "", xd->flags);
	for (i = 0; i < ARRAY_SIZE(xive_irq_flags); i++) {
		if (xd->flags & xive_irq_flags[i].mask)
			seq_printf(m, "%*s%s\n", ind + 12, "", xive_irq_flags[i].name);
	}
}
#endif

static const struct irq_domain_ops xive_irq_domain_ops = {
	.match = xive_irq_domain_match,
	.map = xive_irq_domain_map,
	.unmap = xive_irq_domain_unmap,
	.xlate = xive_irq_domain_xlate,
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
	.debug_show = xive_irq_domain_debug_show,
#endif
};

static void __init xive_init_host(struct device_node *np)
{
	xive_irq_domain = irq_domain_add_nomap(np, XIVE_MAX_IRQ,
					       &xive_irq_domain_ops, NULL);
	if (WARN_ON(xive_irq_domain == NULL))
		return;
	irq_set_default_host(xive_irq_domain);
}

static void xive_cleanup_cpu_queues(unsigned int cpu, struct xive_cpu *xc)
{
	if (xc->queue[xive_irq_priority].qpage)
		xive_ops->cleanup_queue(cpu, xc, xive_irq_priority);
}

static int xive_setup_cpu_queues(unsigned int cpu, struct xive_cpu *xc)
{
	int rc = 0;

	/* We set up one queue for now, using a 64k page */
	if (!xc->queue[xive_irq_priority].qpage)
		rc = xive_ops->setup_queue(cpu, xc, xive_irq_priority);

	return rc;
}

static int xive_prepare_cpu(unsigned int cpu)
{
	struct xive_cpu *xc;

	xc = per_cpu(xive_cpu, cpu);
	if (!xc) {
		struct device_node *np;

		xc = kzalloc_node(sizeof(struct xive_cpu),
				  GFP_KERNEL, cpu_to_node(cpu));
		if (!xc)
			return -ENOMEM;
		np = of_get_cpu_node(cpu, NULL);
		if (np)
			xc->chip_id = of_get_ibm_chip_id(np);
		of_node_put(np);
		xc->hw_ipi = XIVE_BAD_IRQ;

		per_cpu(xive_cpu, cpu) = xc;
	}

	/* Setup EQs if not already */
	return xive_setup_cpu_queues(cpu, xc);
}

static void xive_setup_cpu(void)
{
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);

	/* The backend might have additional things to do */
	if (xive_ops->setup_cpu)
		xive_ops->setup_cpu(smp_processor_id(), xc);

	/* Set CPPR to 0xff to enable flow of interrupts */
	xc->cppr = 0xff;
	out_8(xive_tima + xive_tima_offset + TM_CPPR, 0xff);
}

#ifdef CONFIG_SMP
void xive_smp_setup_cpu(void)
{
	pr_devel("SMP setup CPU %d\n", smp_processor_id());

	/* This will have already been done on the boot CPU */
	if (smp_processor_id() != boot_cpuid)
		xive_setup_cpu();
}

int xive_smp_prepare_cpu(unsigned int cpu)
{
	int rc;

	/* Allocate per-CPU data and queues */
	rc = xive_prepare_cpu(cpu);
	if (rc)
		return rc;

	/* Allocate and setup IPI for the new CPU */
	return xive_setup_cpu_ipi(cpu);
}

#ifdef CONFIG_HOTPLUG_CPU
static void xive_flush_cpu_queue(unsigned int cpu, struct xive_cpu *xc)
{
	u32 irq;

	/* We assume local irqs are disabled */
	WARN_ON(!irqs_disabled());

	/* Check what's already in the CPU queue */
	while ((irq = xive_scan_interrupts(xc, false)) != 0) {
		/*
		 * We need to re-route that interrupt to its new destination.
		 * First get and lock the descriptor
		 */
		struct irq_desc *desc = irq_to_desc(irq);
		struct irq_data *d = irq_desc_get_irq_data(desc);
		struct xive_irq_data *xd;

		/*
		 * Ignore anything that isn't a XIVE irq. This also catches
		 * IPIs, which can simply be dropped here.
		 */
		if (d->domain != xive_irq_domain)
			continue;

		/*
		 * The IRQ should have already been re-routed, it's just a
		 * stale entry in the old queue, so re-trigger it in order
		 * to make it reach its new destination.
		 */
#ifdef DEBUG_FLUSH
		pr_info("CPU %d: Got irq %d while offline, re-sending...\n",
			cpu, irq);
#endif
		raw_spin_lock(&desc->lock);
		xd = irq_desc_get_handler_data(desc);

		/*
		 * Clear saved_p to indicate that it's no longer pending
		 */
		xd->saved_p = false;

		/*
		 * For LSIs, we EOI, this will cause a resend if it's
		 * still asserted. Otherwise do an MSI retrigger.
		 */
		if (xd->flags & XIVE_IRQ_FLAG_LSI)
			xive_do_source_eoi(xd);
		else
			xive_irq_retrigger(d);

		raw_spin_unlock(&desc->lock);
	}
}

void xive_smp_disable_cpu(void)
{
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
	unsigned int cpu = smp_processor_id();

	/* Migrate interrupts away from the CPU */
	irq_migrate_all_off_this_cpu();

	/* Set CPPR to 0 to disable flow of interrupts */
	xc->cppr = 0;
	out_8(xive_tima + xive_tima_offset + TM_CPPR, 0);

	/* Flush everything still in the queue */
	xive_flush_cpu_queue(cpu, xc);

	/* Re-enable CPPR */
	xc->cppr = 0xff;
	out_8(xive_tima + xive_tima_offset + TM_CPPR, 0xff);
}

void xive_flush_interrupt(void)
{
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
	unsigned int cpu = smp_processor_id();

	/* Called if an interrupt occurs while the CPU is hot unplugged */
	xive_flush_cpu_queue(cpu, xc);
}

#endif /* CONFIG_HOTPLUG_CPU */

#endif /* CONFIG_SMP */

void xive_teardown_cpu(void)
{
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
	unsigned int cpu = smp_processor_id();

	/* Set CPPR to 0 to disable flow of interrupts */
	xc->cppr = 0;
	out_8(xive_tima + xive_tima_offset + TM_CPPR, 0);

	if (xive_ops->teardown_cpu)
		xive_ops->teardown_cpu(cpu, xc);

#ifdef CONFIG_SMP
	/* Get rid of IPI */
	xive_cleanup_cpu_ipi(cpu, xc);
#endif

	/* Disable and free the queues */
	xive_cleanup_cpu_queues(cpu, xc);
}

void xive_shutdown(void)
{
	xive_ops->shutdown();
}

bool __init xive_core_init(struct device_node *np, const struct xive_ops *ops,
			   void __iomem *area, u32 offset, u8 max_prio)
{
	xive_tima = area;
	xive_tima_offset = offset;
	xive_ops = ops;
	xive_irq_priority = max_prio;

	ppc_md.get_irq = xive_get_irq;
	__xive_enabled = true;

	pr_devel("Initializing host...\n");
	xive_init_host(np);

	pr_devel("Initializing boot CPU...\n");

	/* Allocate per-CPU data and queues */
	xive_prepare_cpu(smp_processor_id());

	/* Get ready for interrupts */
	xive_setup_cpu();

	pr_info("Interrupt handling initialized with %s backend\n",
		xive_ops->name);
	pr_info("Using priority %d for all interrupts\n", max_prio);

	return true;
}

__be32 *xive_queue_page_alloc(unsigned int cpu, u32 queue_shift)
{
	unsigned int alloc_order;
	struct page *pages;
	__be32 *qpage;

	alloc_order = xive_alloc_order(queue_shift);
	pages = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, alloc_order);
	if (!pages)
		return ERR_PTR(-ENOMEM);
	qpage = (__be32 *)page_address(pages);
	memset(qpage, 0, 1 << queue_shift);

	return qpage;
}

static int __init xive_off(char *arg)
{
	xive_cmdline_disabled = true;
	return 0;
}
__setup("xive=off", xive_off);

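/*
 * Booting with "xive=off" on the kernel command line sets
 * xive_cmdline_disabled, which the platform setup code checks before
 * probing XIVE; the machine then falls back to the legacy XICS
 * interrupt controller where the firmware offers one.
 */
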
static void xive_debug_show_cpu(struct seq_file *m, int cpu)
{
	struct xive_cpu *xc = per_cpu(xive_cpu, cpu);

	seq_printf(m, "CPU %d:", cpu);
	if (xc) {
		seq_printf(m, "pp=%02x CPPR=%02x ", xc->pending_prio, xc->cppr);

#ifdef CONFIG_SMP
		{
			u64 val = xive_esb_read(&xc->ipi_data, XIVE_ESB_GET);

			seq_printf(m, "IPI=0x%08x PQ=%c%c ", xc->hw_ipi,
				   val & XIVE_ESB_VAL_P ? 'P' : '-',
				   val & XIVE_ESB_VAL_Q ? 'Q' : '-');
		}
#endif
		{
			struct xive_q *q = &xc->queue[xive_irq_priority];
			u32 i0, i1, idx;

			if (q->qpage) {
				idx = q->idx;
				i0 = be32_to_cpup(q->qpage + idx);
				idx = (idx + 1) & q->msk;
				i1 = be32_to_cpup(q->qpage + idx);
				seq_printf(m, "EQ idx=%d T=%d %08x %08x ...",
					   q->idx, q->toggle, i0, i1);
			}
		}
	}
	seq_puts(m, "\n");
}

static void xive_debug_show_irq(struct seq_file *m, struct irq_data *d)
{
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
	int rc;
	u32 target;
	u8 prio;
	u32 lirq;
	struct xive_irq_data *xd;
	u64 val;

	rc = xive_ops->get_irq_config(hw_irq, &target, &prio, &lirq);
	if (rc) {
		seq_printf(m, "IRQ 0x%08x : no config rc=%d\n", hw_irq, rc);
		return;
	}

	seq_printf(m, "IRQ 0x%08x : target=0x%x prio=%02x lirq=0x%x ",
		   hw_irq, target, prio, lirq);

	xd = irq_data_get_irq_handler_data(d);
	val = xive_esb_read(xd, XIVE_ESB_GET);
	seq_printf(m, "flags=%c%c%c PQ=%c%c",
		   xd->flags & XIVE_IRQ_FLAG_STORE_EOI ? 'S' : ' ',
		   xd->flags & XIVE_IRQ_FLAG_LSI ? 'L' : ' ',
		   xd->flags & XIVE_IRQ_FLAG_H_INT_ESB ? 'H' : ' ',
		   val & XIVE_ESB_VAL_P ? 'P' : '-',
		   val & XIVE_ESB_VAL_Q ? 'Q' : '-');
	seq_puts(m, "\n");
}

static int xive_core_debug_show(struct seq_file *m, void *private)
{
	unsigned int i;
	struct irq_desc *desc;
	int cpu;

	if (xive_ops->debug_show)
		xive_ops->debug_show(m, private);

	for_each_possible_cpu(cpu)
		xive_debug_show_cpu(m, cpu);

	for_each_irq_desc(i, desc) {
		struct irq_data *d = irq_desc_get_irq_data(desc);

		if (d->domain == xive_irq_domain)
			xive_debug_show_irq(m, d);
	}
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(xive_core_debug);

int xive_core_debug_init(void)
{
	if (xive_enabled())
		debugfs_create_file("xive", 0400, powerpc_debugfs_root,
				    NULL, &xive_core_debug_fops);
	return 0;
}
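
/*
 * With CONFIG_DEBUG_FS, the dump implemented above is exposed under
 * the powerpc debugfs directory, typically:
 *
 *	# cat /sys/kernel/debug/powerpc/xive
 */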