// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2016,2017 IBM Corporation.
 */

#define pr_fmt(fmt) "xive: " fmt

#include <linux/types.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/debugfs.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/msi.h>
#include <linux/vmalloc.h>

#include <asm/debugfs.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/irq.h>
#include <asm/errno.h>
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include <asm/xmon.h>

#include "xive-internal.h"

#undef DEBUG_FLUSH
#undef DEBUG_ALL

#ifdef DEBUG_ALL
#define DBG_VERBOSE(fmt, ...)	pr_devel("cpu %d - " fmt, \
					 smp_processor_id(), ## __VA_ARGS__)
#else
#define DBG_VERBOSE(fmt...)	do { } while(0)
#endif

bool __xive_enabled;
EXPORT_SYMBOL_GPL(__xive_enabled);
bool xive_cmdline_disabled;

/* We use only one priority for now */
static u8 xive_irq_priority;

/* TIMA exported to KVM */
void __iomem *xive_tima;
EXPORT_SYMBOL_GPL(xive_tima);
u32 xive_tima_offset;

/* Backend ops */
static const struct xive_ops *xive_ops;

/* Our global interrupt domain */
static struct irq_domain *xive_irq_domain;

#ifdef CONFIG_SMP
/* The IPIs all use the same logical irq number */
static u32 xive_ipi_irq;
#endif

/* Xive state for each CPU */
static DEFINE_PER_CPU(struct xive_cpu *, xive_cpu);

/* An invalid CPU target */
#define XIVE_INVALID_TARGET	(-1)

/*
 * Read the next entry in a queue, return its content if it's valid
 * or 0 if there is no new entry.
 *
 * The queue pointer is moved forward unless "just_peek" is set
 */
static u32 xive_read_eq(struct xive_q *q, bool just_peek)
{
	u32 cur;

	if (!q->qpage)
		return 0;
	cur = be32_to_cpup(q->qpage + q->idx);

	/* Check valid bit (31) vs current toggle polarity */
	if ((cur >> 31) == q->toggle)
		return 0;

	/* If consuming from the queue ... */
	if (!just_peek) {
		/* Next entry */
		q->idx = (q->idx + 1) & q->msk;

		/* Wrap around: flip valid toggle */
		if (q->idx == 0)
			q->toggle ^= 1;
	}
	/* Mask out the valid bit (31) */
	return cur & 0x7fffffff;
}

/*
 * Scans all the queues that may have interrupts in them
 * (based on "pending_prio") in priority order until an
 * interrupt is found or all the queues are empty.
 *
 * Then updates the CPPR (Current Processor Priority
 * Register) based on the most favored interrupt found
 * (0xff if none) and returns what was found (0 if none).
 *
 * If just_peek is set, return the most favored pending
 * interrupt if any but don't update the queue pointers.
 *
 * Note: This function can operate generically on any number
 * of queues (up to 8). The current implementation of the XIVE
 * driver only uses a single queue however.
 *
 * Note2: This will also "flush" the "pending_count" of a queue
 * into the "count" when that queue is observed to be empty.
 * This is used to keep track of the number of interrupts
 * targeting a queue.
When an interrupt is moved away from 127 * a queue, we only decrement that queue count once the queue 128 * has been observed empty to avoid races. 129 */ 130 static u32 xive_scan_interrupts(struct xive_cpu *xc, bool just_peek) 131 { 132 u32 irq = 0; 133 u8 prio = 0; 134 135 /* Find highest pending priority */ 136 while (xc->pending_prio != 0) { 137 struct xive_q *q; 138 139 prio = ffs(xc->pending_prio) - 1; 140 DBG_VERBOSE("scan_irq: trying prio %d\n", prio); 141 142 /* Try to fetch */ 143 irq = xive_read_eq(&xc->queue[prio], just_peek); 144 145 /* Found something ? That's it */ 146 if (irq) { 147 if (just_peek || irq_to_desc(irq)) 148 break; 149 /* 150 * We should never get here; if we do then we must 151 * have failed to synchronize the interrupt properly 152 * when shutting it down. 153 */ 154 pr_crit("xive: got interrupt %d without descriptor, dropping\n", 155 irq); 156 WARN_ON(1); 157 continue; 158 } 159 160 /* Clear pending bits */ 161 xc->pending_prio &= ~(1 << prio); 162 163 /* 164 * Check if the queue count needs adjusting due to 165 * interrupts being moved away. See description of 166 * xive_dec_target_count() 167 */ 168 q = &xc->queue[prio]; 169 if (atomic_read(&q->pending_count)) { 170 int p = atomic_xchg(&q->pending_count, 0); 171 if (p) { 172 WARN_ON(p > atomic_read(&q->count)); 173 atomic_sub(p, &q->count); 174 } 175 } 176 } 177 178 /* If nothing was found, set CPPR to 0xff */ 179 if (irq == 0) 180 prio = 0xff; 181 182 /* Update HW CPPR to match if necessary */ 183 if (prio != xc->cppr) { 184 DBG_VERBOSE("scan_irq: adjusting CPPR to %d\n", prio); 185 xc->cppr = prio; 186 out_8(xive_tima + xive_tima_offset + TM_CPPR, prio); 187 } 188 189 return irq; 190 } 191 192 /* 193 * This is used to perform the magic loads from an ESB 194 * described in xive-regs.h 195 */ 196 static notrace u8 xive_esb_read(struct xive_irq_data *xd, u32 offset) 197 { 198 u64 val; 199 200 if (offset == XIVE_ESB_SET_PQ_10 && xd->flags & XIVE_IRQ_FLAG_STORE_EOI) 201 offset |= XIVE_ESB_LD_ST_MO; 202 203 if ((xd->flags & XIVE_IRQ_FLAG_H_INT_ESB) && xive_ops->esb_rw) 204 val = xive_ops->esb_rw(xd->hw_irq, offset, 0, 0); 205 else 206 val = in_be64(xd->eoi_mmio + offset); 207 208 return (u8)val; 209 } 210 211 static void xive_esb_write(struct xive_irq_data *xd, u32 offset, u64 data) 212 { 213 if ((xd->flags & XIVE_IRQ_FLAG_H_INT_ESB) && xive_ops->esb_rw) 214 xive_ops->esb_rw(xd->hw_irq, offset, data, 1); 215 else 216 out_be64(xd->eoi_mmio + offset, data); 217 } 218 219 #ifdef CONFIG_XMON 220 static notrace void xive_dump_eq(const char *name, struct xive_q *q) 221 { 222 u32 i0, i1, idx; 223 224 if (!q->qpage) 225 return; 226 idx = q->idx; 227 i0 = be32_to_cpup(q->qpage + idx); 228 idx = (idx + 1) & q->msk; 229 i1 = be32_to_cpup(q->qpage + idx); 230 xmon_printf("%s idx=%d T=%d %08x %08x ...", name, 231 q->idx, q->toggle, i0, i1); 232 } 233 234 notrace void xmon_xive_do_dump(int cpu) 235 { 236 struct xive_cpu *xc = per_cpu(xive_cpu, cpu); 237 238 xmon_printf("CPU %d:", cpu); 239 if (xc) { 240 xmon_printf("pp=%02x CPPR=%02x ", xc->pending_prio, xc->cppr); 241 242 #ifdef CONFIG_SMP 243 { 244 u64 val = xive_esb_read(&xc->ipi_data, XIVE_ESB_GET); 245 246 xmon_printf("IPI=0x%08x PQ=%c%c ", xc->hw_ipi, 247 val & XIVE_ESB_VAL_P ? 'P' : '-', 248 val & XIVE_ESB_VAL_Q ? 
				    'Q' : '-');
		}
#endif
		xive_dump_eq("EQ", &xc->queue[xive_irq_priority]);
	}
	xmon_printf("\n");
}

int xmon_xive_get_irq_config(u32 hw_irq, struct irq_data *d)
{
	struct irq_chip *chip = irq_data_get_irq_chip(d);
	int rc;
	u32 target;
	u8 prio;
	u32 lirq;

	if (!is_xive_irq(chip))
		return -EINVAL;

	rc = xive_ops->get_irq_config(hw_irq, &target, &prio, &lirq);
	if (rc) {
		xmon_printf("IRQ 0x%08x : no config rc=%d\n", hw_irq, rc);
		return rc;
	}

	xmon_printf("IRQ 0x%08x : target=0x%x prio=%02x lirq=0x%x ",
		    hw_irq, target, prio, lirq);

	if (d) {
		struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
		u64 val = xive_esb_read(xd, XIVE_ESB_GET);

		xmon_printf("flags=%c%c%c PQ=%c%c",
			    xd->flags & XIVE_IRQ_FLAG_STORE_EOI ? 'S' : ' ',
			    xd->flags & XIVE_IRQ_FLAG_LSI ? 'L' : ' ',
			    xd->flags & XIVE_IRQ_FLAG_H_INT_ESB ? 'H' : ' ',
			    val & XIVE_ESB_VAL_P ? 'P' : '-',
			    val & XIVE_ESB_VAL_Q ? 'Q' : '-');
	}

	xmon_printf("\n");
	return 0;
}

#endif /* CONFIG_XMON */

static unsigned int xive_get_irq(void)
{
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
	u32 irq;

	/*
	 * This can be called either as a result of a HW interrupt or
	 * as a "replay" because EOI decided there was still something
	 * in one of the queues.
	 *
	 * First we perform an ACK cycle in order to update our mask
	 * of pending priorities. This will also have the effect of
	 * updating the CPPR to the most favored pending interrupts.
	 *
	 * In the future, if we have a way to differentiate a first
	 * entry (on HW interrupt) from a replay triggered by EOI,
	 * we could skip this on replays unless the soft-mask state
	 * tells us that a new HW interrupt occurred.
	 */
	xive_ops->update_pending(xc);

	DBG_VERBOSE("get_irq: pending=%02x\n", xc->pending_prio);

	/* Scan our queue(s) for interrupts */
	irq = xive_scan_interrupts(xc, false);

	DBG_VERBOSE("get_irq: got irq 0x%x, new pending=0x%02x\n",
		    irq, xc->pending_prio);

	/* Return pending interrupt if any */
	if (irq == XIVE_BAD_IRQ)
		return 0;
	return irq;
}

/*
 * After EOI'ing an interrupt, we need to re-check the queue
 * to see if another interrupt is pending since multiple
 * interrupts can coalesce into a single notification to the
 * CPU.
 *
 * If we find that there is indeed more in there, we call
 * force_external_irq_replay() to make Linux synthesize an
 * external interrupt on the next call to local_irq_restore().
 */
static void xive_do_queue_eoi(struct xive_cpu *xc)
{
	if (xive_scan_interrupts(xc, true) != 0) {
		DBG_VERBOSE("eoi: pending=0x%02x\n", xc->pending_prio);
		force_external_irq_replay();
	}
}

/*
 * EOI an interrupt at the source. There are several methods
 * to do this depending on the HW version and source type
 */
static void xive_do_source_eoi(u32 hw_irq, struct xive_irq_data *xd)
{
	xd->stale_p = false;
	/* If the XIVE supports the new "store EOI" facility, use it */
	if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
		xive_esb_write(xd, XIVE_ESB_STORE_EOI, 0);
	else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW) {
		/*
		 * The FW told us to call it. This happens for some
		 * interrupt sources that need additional HW whacking
		 * beyond the ESB manipulation. For example LPC interrupts
		 * on P9 DD1.0 needed a latch to be cleared in the LPC bridge
		 * itself. The Firmware will take care of it.
		 */
		if (WARN_ON_ONCE(!xive_ops->eoi))
			return;
		xive_ops->eoi(hw_irq);
	} else {
		u8 eoi_val;

		/*
		 * Otherwise, for EOI, we use the special MMIO that does
		 * a clear of both P and Q and returns the old Q,
		 * except for LSIs where we use the "EOI cycle" special
		 * load.
		 *
		 * This allows us to then do a re-trigger if Q was set
		 * rather than synthesizing an interrupt in software.
		 *
		 * For LSIs the HW EOI cycle is used rather than PQ bits,
		 * as they are automatically re-triggered in HW when still
		 * pending.
		 */
		if (xd->flags & XIVE_IRQ_FLAG_LSI)
			xive_esb_read(xd, XIVE_ESB_LOAD_EOI);
		else {
			eoi_val = xive_esb_read(xd, XIVE_ESB_SET_PQ_00);
			DBG_VERBOSE("eoi_val=%x\n", eoi_val);

			/* Re-trigger if needed */
			if ((eoi_val & XIVE_ESB_VAL_Q) && xd->trig_mmio)
				out_be64(xd->trig_mmio, 0);
		}
	}
}

/* irq_chip eoi callback, called with irq descriptor lock held */
static void xive_irq_eoi(struct irq_data *d)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);

	DBG_VERBOSE("eoi_irq: irq=%d [0x%lx] pending=%02x\n",
		    d->irq, irqd_to_hwirq(d), xc->pending_prio);

	/*
	 * EOI the source if it hasn't been disabled and hasn't
	 * been passed-through to a KVM guest
	 */
	if (!irqd_irq_disabled(d) && !irqd_is_forwarded_to_vcpu(d) &&
	    !(xd->flags & XIVE_IRQ_FLAG_NO_EOI))
		xive_do_source_eoi(irqd_to_hwirq(d), xd);
	else
		xd->stale_p = true;

	/*
	 * Clear saved_p to indicate that it's no longer occupying
	 * a queue slot on the target queue
	 */
	xd->saved_p = false;

	/* Check for more work in the queue */
	xive_do_queue_eoi(xc);
}

/*
 * Helper used to mask and unmask an interrupt source.
 */
static void xive_do_source_set_mask(struct xive_irq_data *xd,
				    bool mask)
{
	u64 val;

	/*
	 * If the interrupt had P set, it may be in a queue.
	 *
	 * We need to make sure we don't re-enable it until it
	 * has been fetched from that queue and EOId. We keep
	 * a copy of that P state and use it to restore the
	 * ESB accordingly on unmask.
	 */
	if (mask) {
		val = xive_esb_read(xd, XIVE_ESB_SET_PQ_01);
		if (!xd->stale_p && !!(val & XIVE_ESB_VAL_P))
			xd->saved_p = true;
		xd->stale_p = false;
	} else if (xd->saved_p) {
		xive_esb_read(xd, XIVE_ESB_SET_PQ_10);
		xd->saved_p = false;
	} else {
		xive_esb_read(xd, XIVE_ESB_SET_PQ_00);
		xd->stale_p = false;
	}
}

/*
 * Try to choose "cpu" as a new interrupt target. Increments
 * the queue accounting for that target if it's not already
 * full.
 */
static bool xive_try_pick_target(int cpu)
{
	struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
	struct xive_q *q = &xc->queue[xive_irq_priority];
	int max;

	/*
	 * Calculate max number of interrupts in that queue.
	 *
	 * We leave a gap of 1 just in case...
	 */
	max = (q->msk + 1) - 1;
	return !!atomic_add_unless(&q->count, 1, max);
}

/*
 * Un-account an interrupt for a target CPU. We don't directly
 * decrement q->count since the interrupt might still be present
 * in the queue.
 *
 * Instead increment a separate counter "pending_count" which
 * will be subtracted from "count" later when that CPU observes
 * the queue to be empty.
 */
static void xive_dec_target_count(int cpu)
{
	struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
	struct xive_q *q = &xc->queue[xive_irq_priority];

	if (WARN_ON(cpu < 0 || !xc)) {
		pr_err("%s: cpu=%d xc=%p\n", __func__, cpu, xc);
		return;
	}

	/*
	 * We increment the "pending count" which will be used
	 * to decrement the target queue count whenever it's next
	 * processed and found empty. This ensures that we don't
	 * decrement while we still have the interrupt there
	 * occupying a slot.
	 */
	atomic_inc(&q->pending_count);
}

/* Find a tentative CPU target in a CPU mask */
static int xive_find_target_in_mask(const struct cpumask *mask,
				    unsigned int fuzz)
{
	int cpu, first, num, i;

	/* Pick up a starting point CPU in the mask based on fuzz */
	num = min_t(int, cpumask_weight(mask), nr_cpu_ids);
	first = fuzz % num;

	/* Locate it */
	cpu = cpumask_first(mask);
	for (i = 0; i < first && cpu < nr_cpu_ids; i++)
		cpu = cpumask_next(cpu, mask);

	/* Sanity check */
	if (WARN_ON(cpu >= nr_cpu_ids))
		cpu = cpumask_first(cpu_online_mask);

	/* Remember first one to handle wrap-around */
	first = cpu;

	/*
	 * Now go through the entire mask until we find a valid
	 * target.
	 */
	do {
		/*
		 * We re-check online as the fallback case passes us
		 * an untested affinity mask
		 */
		if (cpu_online(cpu) && xive_try_pick_target(cpu))
			return cpu;
		cpu = cpumask_next(cpu, mask);
		/* Wrap around */
		if (cpu >= nr_cpu_ids)
			cpu = cpumask_first(mask);
	} while (cpu != first);

	return -1;
}

/*
 * Pick a target CPU for an interrupt. This is done at
 * startup or if the affinity is changed in a way that
 * invalidates the current target.
 */
static int xive_pick_irq_target(struct irq_data *d,
				const struct cpumask *affinity)
{
	static unsigned int fuzz;
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
	cpumask_var_t mask;
	int cpu = -1;

	/*
	 * If we have chip IDs, first we try to build a mask of
	 * CPUs matching the source chip ID and find a target in there
	 */
	if (xd->src_chip != XIVE_INVALID_CHIP_ID &&
	    zalloc_cpumask_var(&mask, GFP_ATOMIC)) {
		/* Build a mask of matching chip IDs */
		for_each_cpu_and(cpu, affinity, cpu_online_mask) {
			struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
			if (xc->chip_id == xd->src_chip)
				cpumask_set_cpu(cpu, mask);
		}
		/* Try to find a target */
		if (cpumask_empty(mask))
			cpu = -1;
		else
			cpu = xive_find_target_in_mask(mask, fuzz++);
		free_cpumask_var(mask);
		if (cpu >= 0)
			return cpu;
		fuzz--;
	}

	/* No chip IDs, fallback to using the affinity mask */
	return xive_find_target_in_mask(affinity, fuzz++);
}

static unsigned int xive_irq_startup(struct irq_data *d)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
	int target, rc;

	xd->saved_p = false;
	xd->stale_p = false;
	pr_devel("xive_irq_startup: irq %d [0x%x] data @%p\n",
		 d->irq, hw_irq, d);

#ifdef CONFIG_PCI_MSI
	/*
	 * The generic MSI code returns with the interrupt disabled on the
	 * card, using the MSI mask bits.
Firmware doesn't appear to unmask 602 * at that level, so we do it here by hand. 603 */ 604 if (irq_data_get_msi_desc(d)) 605 pci_msi_unmask_irq(d); 606 #endif 607 608 /* Pick a target */ 609 target = xive_pick_irq_target(d, irq_data_get_affinity_mask(d)); 610 if (target == XIVE_INVALID_TARGET) { 611 /* Try again breaking affinity */ 612 target = xive_pick_irq_target(d, cpu_online_mask); 613 if (target == XIVE_INVALID_TARGET) 614 return -ENXIO; 615 pr_warn("irq %d started with broken affinity\n", d->irq); 616 } 617 618 /* Sanity check */ 619 if (WARN_ON(target == XIVE_INVALID_TARGET || 620 target >= nr_cpu_ids)) 621 target = smp_processor_id(); 622 623 xd->target = target; 624 625 /* 626 * Configure the logical number to be the Linux IRQ number 627 * and set the target queue 628 */ 629 rc = xive_ops->configure_irq(hw_irq, 630 get_hard_smp_processor_id(target), 631 xive_irq_priority, d->irq); 632 if (rc) 633 return rc; 634 635 /* Unmask the ESB */ 636 xive_do_source_set_mask(xd, false); 637 638 return 0; 639 } 640 641 /* called with irq descriptor lock held */ 642 static void xive_irq_shutdown(struct irq_data *d) 643 { 644 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); 645 unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d); 646 647 pr_devel("xive_irq_shutdown: irq %d [0x%x] data @%p\n", 648 d->irq, hw_irq, d); 649 650 if (WARN_ON(xd->target == XIVE_INVALID_TARGET)) 651 return; 652 653 /* Mask the interrupt at the source */ 654 xive_do_source_set_mask(xd, true); 655 656 /* 657 * Mask the interrupt in HW in the IVT/EAS and set the number 658 * to be the "bad" IRQ number 659 */ 660 xive_ops->configure_irq(hw_irq, 661 get_hard_smp_processor_id(xd->target), 662 0xff, XIVE_BAD_IRQ); 663 664 xive_dec_target_count(xd->target); 665 xd->target = XIVE_INVALID_TARGET; 666 } 667 668 static void xive_irq_unmask(struct irq_data *d) 669 { 670 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); 671 672 pr_devel("xive_irq_unmask: irq %d data @%p\n", d->irq, xd); 673 674 xive_do_source_set_mask(xd, false); 675 } 676 677 static void xive_irq_mask(struct irq_data *d) 678 { 679 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); 680 681 pr_devel("xive_irq_mask: irq %d data @%p\n", d->irq, xd); 682 683 xive_do_source_set_mask(xd, true); 684 } 685 686 static int xive_irq_set_affinity(struct irq_data *d, 687 const struct cpumask *cpumask, 688 bool force) 689 { 690 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); 691 unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d); 692 u32 target, old_target; 693 int rc = 0; 694 695 pr_devel("xive_irq_set_affinity: irq %d\n", d->irq); 696 697 /* Is this valid ? */ 698 if (cpumask_any_and(cpumask, cpu_online_mask) >= nr_cpu_ids) 699 return -EINVAL; 700 701 /* Don't do anything if the interrupt isn't started */ 702 if (!irqd_is_started(d)) 703 return IRQ_SET_MASK_OK; 704 705 /* 706 * If existing target is already in the new mask, and is 707 * online then do nothing. 
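	 * Keeping the existing target in that case avoids an unnecessary
	 * configure_irq() call to reprogram the EAS, as well as the
	 * queue-accounting churn of picking a new target and releasing
	 * the old one (xive_try_pick_target()/xive_dec_target_count()).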
	 */
	if (xd->target != XIVE_INVALID_TARGET &&
	    cpu_online(xd->target) &&
	    cpumask_test_cpu(xd->target, cpumask))
		return IRQ_SET_MASK_OK;

	/* Pick a new target */
	target = xive_pick_irq_target(d, cpumask);

	/* No target found */
	if (target == XIVE_INVALID_TARGET)
		return -ENXIO;

	/* Sanity check */
	if (WARN_ON(target >= nr_cpu_ids))
		target = smp_processor_id();

	old_target = xd->target;

	/*
	 * Only configure the irq if it's not currently passed-through to
	 * a KVM guest
	 */
	if (!irqd_is_forwarded_to_vcpu(d))
		rc = xive_ops->configure_irq(hw_irq,
					     get_hard_smp_processor_id(target),
					     xive_irq_priority, d->irq);
	if (rc < 0) {
		pr_err("Error %d reconfiguring irq %d\n", rc, d->irq);
		return rc;
	}

	pr_devel(" target: 0x%x\n", target);
	xd->target = target;

	/* Give up previous target */
	if (old_target != XIVE_INVALID_TARGET)
		xive_dec_target_count(old_target);

	return IRQ_SET_MASK_OK;
}

static int xive_irq_set_type(struct irq_data *d, unsigned int flow_type)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

	/*
	 * We only support these. This has really no effect other than
	 * setting the corresponding descriptor bits, but those will in
	 * turn affect the resend function when re-enabling an edge
	 * interrupt.
	 *
	 * So set the default to edge as explained in map().
	 */
	if (flow_type == IRQ_TYPE_DEFAULT || flow_type == IRQ_TYPE_NONE)
		flow_type = IRQ_TYPE_EDGE_RISING;

	if (flow_type != IRQ_TYPE_EDGE_RISING &&
	    flow_type != IRQ_TYPE_LEVEL_LOW)
		return -EINVAL;

	irqd_set_trigger_type(d, flow_type);

	/*
	 * Double check it matches what the FW thinks
	 *
	 * NOTE: We don't know yet if the PAPR interface will provide
	 * the LSI vs MSI information apart from the device-tree so
	 * this check might have to move into an optional backend call
	 * that is specific to the native backend
	 */
	if ((flow_type == IRQ_TYPE_LEVEL_LOW) !=
	    !!(xd->flags & XIVE_IRQ_FLAG_LSI)) {
		pr_warn("Interrupt %d (HW 0x%x) type mismatch, Linux says %s, FW says %s\n",
			d->irq, (u32)irqd_to_hwirq(d),
			(flow_type == IRQ_TYPE_LEVEL_LOW) ? "Level" : "Edge",
			(xd->flags & XIVE_IRQ_FLAG_LSI) ? "Level" : "Edge");
	}

	return IRQ_SET_MASK_OK_NOCOPY;
}

static int xive_irq_retrigger(struct irq_data *d)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

	/* This should be only for MSIs */
	if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI))
		return 0;

	/*
	 * To perform a retrigger, we first set the PQ bits to
	 * 11, then perform an EOI.
	 */
	xive_esb_read(xd, XIVE_ESB_SET_PQ_11);

	/*
	 * Note: We pass "0" to the hw_irq argument in order to
	 * avoid calling into the backend EOI code which we don't
	 * want to do in the case of a re-trigger. Backends typically
	 * only do EOI for LSIs anyway.
	 */
	xive_do_source_eoi(0, xd);

	return 1;
}

/*
 * Caller holds the irq descriptor lock, so this won't be called
 * concurrently with xive_get_irqchip_state on the same interrupt.
817 */ 818 static int xive_irq_set_vcpu_affinity(struct irq_data *d, void *state) 819 { 820 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); 821 unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d); 822 int rc; 823 u8 pq; 824 825 /* 826 * This is called by KVM with state non-NULL for enabling 827 * pass-through or NULL for disabling it 828 */ 829 if (state) { 830 irqd_set_forwarded_to_vcpu(d); 831 832 /* Set it to PQ=10 state to prevent further sends */ 833 pq = xive_esb_read(xd, XIVE_ESB_SET_PQ_10); 834 if (!xd->stale_p) { 835 xd->saved_p = !!(pq & XIVE_ESB_VAL_P); 836 xd->stale_p = !xd->saved_p; 837 } 838 839 /* No target ? nothing to do */ 840 if (xd->target == XIVE_INVALID_TARGET) { 841 /* 842 * An untargetted interrupt should have been 843 * also masked at the source 844 */ 845 WARN_ON(xd->saved_p); 846 847 return 0; 848 } 849 850 /* 851 * If P was set, adjust state to PQ=11 to indicate 852 * that a resend is needed for the interrupt to reach 853 * the guest. Also remember the value of P. 854 * 855 * This also tells us that it's in flight to a host queue 856 * or has already been fetched but hasn't been EOIed yet 857 * by the host. This it's potentially using up a host 858 * queue slot. This is important to know because as long 859 * as this is the case, we must not hard-unmask it when 860 * "returning" that interrupt to the host. 861 * 862 * This saved_p is cleared by the host EOI, when we know 863 * for sure the queue slot is no longer in use. 864 */ 865 if (xd->saved_p) { 866 xive_esb_read(xd, XIVE_ESB_SET_PQ_11); 867 868 /* 869 * Sync the XIVE source HW to ensure the interrupt 870 * has gone through the EAS before we change its 871 * target to the guest. That should guarantee us 872 * that we *will* eventually get an EOI for it on 873 * the host. Otherwise there would be a small window 874 * for P to be seen here but the interrupt going 875 * to the guest queue. 876 */ 877 if (xive_ops->sync_source) 878 xive_ops->sync_source(hw_irq); 879 } 880 } else { 881 irqd_clr_forwarded_to_vcpu(d); 882 883 /* No host target ? hard mask and return */ 884 if (xd->target == XIVE_INVALID_TARGET) { 885 xive_do_source_set_mask(xd, true); 886 return 0; 887 } 888 889 /* 890 * Sync the XIVE source HW to ensure the interrupt 891 * has gone through the EAS before we change its 892 * target to the host. 893 */ 894 if (xive_ops->sync_source) 895 xive_ops->sync_source(hw_irq); 896 897 /* 898 * By convention we are called with the interrupt in 899 * a PQ=10 or PQ=11 state, ie, it won't fire and will 900 * have latched in Q whether there's a pending HW 901 * interrupt or not. 902 * 903 * First reconfigure the target. 904 */ 905 rc = xive_ops->configure_irq(hw_irq, 906 get_hard_smp_processor_id(xd->target), 907 xive_irq_priority, d->irq); 908 if (rc) 909 return rc; 910 911 /* 912 * Then if saved_p is not set, effectively re-enable the 913 * interrupt with an EOI. If it is set, we know there is 914 * still a message in a host queue somewhere that will be 915 * EOId eventually. 916 * 917 * Note: We don't check irqd_irq_disabled(). Effectively, 918 * we *will* let the irq get through even if masked if the 919 * HW is still firing it in order to deal with the whole 920 * saved_p business properly. If the interrupt triggers 921 * while masked, the generic code will re-mask it anyway. 922 */ 923 if (!xd->saved_p) 924 xive_do_source_eoi(hw_irq, xd); 925 926 } 927 return 0; 928 } 929 930 /* Called with irq descriptor lock held. 
*/ 931 static int xive_get_irqchip_state(struct irq_data *data, 932 enum irqchip_irq_state which, bool *state) 933 { 934 struct xive_irq_data *xd = irq_data_get_irq_handler_data(data); 935 u8 pq; 936 937 switch (which) { 938 case IRQCHIP_STATE_ACTIVE: 939 pq = xive_esb_read(xd, XIVE_ESB_GET); 940 941 /* 942 * The esb value being all 1's means we couldn't get 943 * the PQ state of the interrupt through mmio. It may 944 * happen, for example when querying a PHB interrupt 945 * while the PHB is in an error state. We consider the 946 * interrupt to be inactive in that case. 947 */ 948 *state = (pq != XIVE_ESB_INVALID) && !xd->stale_p && 949 (xd->saved_p || !!(pq & XIVE_ESB_VAL_P)); 950 return 0; 951 default: 952 return -EINVAL; 953 } 954 } 955 956 static struct irq_chip xive_irq_chip = { 957 .name = "XIVE-IRQ", 958 .irq_startup = xive_irq_startup, 959 .irq_shutdown = xive_irq_shutdown, 960 .irq_eoi = xive_irq_eoi, 961 .irq_mask = xive_irq_mask, 962 .irq_unmask = xive_irq_unmask, 963 .irq_set_affinity = xive_irq_set_affinity, 964 .irq_set_type = xive_irq_set_type, 965 .irq_retrigger = xive_irq_retrigger, 966 .irq_set_vcpu_affinity = xive_irq_set_vcpu_affinity, 967 .irq_get_irqchip_state = xive_get_irqchip_state, 968 }; 969 970 bool is_xive_irq(struct irq_chip *chip) 971 { 972 return chip == &xive_irq_chip; 973 } 974 EXPORT_SYMBOL_GPL(is_xive_irq); 975 976 void xive_cleanup_irq_data(struct xive_irq_data *xd) 977 { 978 if (xd->eoi_mmio) { 979 unmap_kernel_range((unsigned long)xd->eoi_mmio, 980 1u << xd->esb_shift); 981 iounmap(xd->eoi_mmio); 982 if (xd->eoi_mmio == xd->trig_mmio) 983 xd->trig_mmio = NULL; 984 xd->eoi_mmio = NULL; 985 } 986 if (xd->trig_mmio) { 987 unmap_kernel_range((unsigned long)xd->trig_mmio, 988 1u << xd->esb_shift); 989 iounmap(xd->trig_mmio); 990 xd->trig_mmio = NULL; 991 } 992 } 993 EXPORT_SYMBOL_GPL(xive_cleanup_irq_data); 994 995 static int xive_irq_alloc_data(unsigned int virq, irq_hw_number_t hw) 996 { 997 struct xive_irq_data *xd; 998 int rc; 999 1000 xd = kzalloc(sizeof(struct xive_irq_data), GFP_KERNEL); 1001 if (!xd) 1002 return -ENOMEM; 1003 rc = xive_ops->populate_irq_data(hw, xd); 1004 if (rc) { 1005 kfree(xd); 1006 return rc; 1007 } 1008 xd->target = XIVE_INVALID_TARGET; 1009 irq_set_handler_data(virq, xd); 1010 1011 /* 1012 * Turn OFF by default the interrupt being mapped. A side 1013 * effect of this check is the mapping the ESB page of the 1014 * interrupt in the Linux address space. This prevents page 1015 * fault issues in the crash handler which masks all 1016 * interrupts. 
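	 *
	 * Note: PQ=01 is the "OFF" state in the ESB state table (see
	 * esb_names[] below), so the source stays masked until
	 * xive_irq_startup() unmasks it via xive_do_source_set_mask().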
1017 */ 1018 xive_esb_read(xd, XIVE_ESB_SET_PQ_01); 1019 1020 return 0; 1021 } 1022 1023 static void xive_irq_free_data(unsigned int virq) 1024 { 1025 struct xive_irq_data *xd = irq_get_handler_data(virq); 1026 1027 if (!xd) 1028 return; 1029 irq_set_handler_data(virq, NULL); 1030 xive_cleanup_irq_data(xd); 1031 kfree(xd); 1032 } 1033 1034 #ifdef CONFIG_SMP 1035 1036 static void xive_cause_ipi(int cpu) 1037 { 1038 struct xive_cpu *xc; 1039 struct xive_irq_data *xd; 1040 1041 xc = per_cpu(xive_cpu, cpu); 1042 1043 DBG_VERBOSE("IPI CPU %d -> %d (HW IRQ 0x%x)\n", 1044 smp_processor_id(), cpu, xc->hw_ipi); 1045 1046 xd = &xc->ipi_data; 1047 if (WARN_ON(!xd->trig_mmio)) 1048 return; 1049 out_be64(xd->trig_mmio, 0); 1050 } 1051 1052 static irqreturn_t xive_muxed_ipi_action(int irq, void *dev_id) 1053 { 1054 return smp_ipi_demux(); 1055 } 1056 1057 static void xive_ipi_eoi(struct irq_data *d) 1058 { 1059 struct xive_cpu *xc = __this_cpu_read(xive_cpu); 1060 1061 /* Handle possible race with unplug and drop stale IPIs */ 1062 if (!xc) 1063 return; 1064 1065 DBG_VERBOSE("IPI eoi: irq=%d [0x%lx] (HW IRQ 0x%x) pending=%02x\n", 1066 d->irq, irqd_to_hwirq(d), xc->hw_ipi, xc->pending_prio); 1067 1068 xive_do_source_eoi(xc->hw_ipi, &xc->ipi_data); 1069 xive_do_queue_eoi(xc); 1070 } 1071 1072 static void xive_ipi_do_nothing(struct irq_data *d) 1073 { 1074 /* 1075 * Nothing to do, we never mask/unmask IPIs, but the callback 1076 * has to exist for the struct irq_chip. 1077 */ 1078 } 1079 1080 static struct irq_chip xive_ipi_chip = { 1081 .name = "XIVE-IPI", 1082 .irq_eoi = xive_ipi_eoi, 1083 .irq_mask = xive_ipi_do_nothing, 1084 .irq_unmask = xive_ipi_do_nothing, 1085 }; 1086 1087 static void __init xive_request_ipi(void) 1088 { 1089 unsigned int virq; 1090 1091 /* 1092 * Initialization failed, move on, we might manage to 1093 * reach the point where we display our errors before 1094 * the system falls appart 1095 */ 1096 if (!xive_irq_domain) 1097 return; 1098 1099 /* Initialize it */ 1100 virq = irq_create_mapping(xive_irq_domain, XIVE_IPI_HW_IRQ); 1101 xive_ipi_irq = virq; 1102 1103 WARN_ON(request_irq(virq, xive_muxed_ipi_action, 1104 IRQF_PERCPU | IRQF_NO_THREAD, "IPI", NULL)); 1105 } 1106 1107 static int xive_setup_cpu_ipi(unsigned int cpu) 1108 { 1109 struct xive_cpu *xc; 1110 int rc; 1111 1112 pr_debug("Setting up IPI for CPU %d\n", cpu); 1113 1114 xc = per_cpu(xive_cpu, cpu); 1115 1116 /* Check if we are already setup */ 1117 if (xc->hw_ipi != XIVE_BAD_IRQ) 1118 return 0; 1119 1120 /* Grab an IPI from the backend, this will populate xc->hw_ipi */ 1121 if (xive_ops->get_ipi(cpu, xc)) 1122 return -EIO; 1123 1124 /* 1125 * Populate the IRQ data in the xive_cpu structure and 1126 * configure the HW / enable the IPIs. 1127 */ 1128 rc = xive_ops->populate_irq_data(xc->hw_ipi, &xc->ipi_data); 1129 if (rc) { 1130 pr_err("Failed to populate IPI data on CPU %d\n", cpu); 1131 return -EIO; 1132 } 1133 rc = xive_ops->configure_irq(xc->hw_ipi, 1134 get_hard_smp_processor_id(cpu), 1135 xive_irq_priority, xive_ipi_irq); 1136 if (rc) { 1137 pr_err("Failed to map IPI CPU %d\n", cpu); 1138 return -EIO; 1139 } 1140 pr_devel("CPU %d HW IPI %x, virq %d, trig_mmio=%p\n", cpu, 1141 xc->hw_ipi, xive_ipi_irq, xc->ipi_data.trig_mmio); 1142 1143 /* Unmask it */ 1144 xive_do_source_set_mask(&xc->ipi_data, false); 1145 1146 return 0; 1147 } 1148 1149 static void xive_cleanup_cpu_ipi(unsigned int cpu, struct xive_cpu *xc) 1150 { 1151 /* Disable the IPI and free the IRQ data */ 1152 1153 /* Already cleaned up ? 
*/ 1154 if (xc->hw_ipi == XIVE_BAD_IRQ) 1155 return; 1156 1157 /* Mask the IPI */ 1158 xive_do_source_set_mask(&xc->ipi_data, true); 1159 1160 /* 1161 * Note: We don't call xive_cleanup_irq_data() to free 1162 * the mappings as this is called from an IPI on kexec 1163 * which is not a safe environment to call iounmap() 1164 */ 1165 1166 /* Deconfigure/mask in the backend */ 1167 xive_ops->configure_irq(xc->hw_ipi, hard_smp_processor_id(), 1168 0xff, xive_ipi_irq); 1169 1170 /* Free the IPIs in the backend */ 1171 xive_ops->put_ipi(cpu, xc); 1172 } 1173 1174 void __init xive_smp_probe(void) 1175 { 1176 smp_ops->cause_ipi = xive_cause_ipi; 1177 1178 /* Register the IPI */ 1179 xive_request_ipi(); 1180 1181 /* Allocate and setup IPI for the boot CPU */ 1182 xive_setup_cpu_ipi(smp_processor_id()); 1183 } 1184 1185 #endif /* CONFIG_SMP */ 1186 1187 static int xive_irq_domain_map(struct irq_domain *h, unsigned int virq, 1188 irq_hw_number_t hw) 1189 { 1190 int rc; 1191 1192 /* 1193 * Mark interrupts as edge sensitive by default so that resend 1194 * actually works. Will fix that up below if needed. 1195 */ 1196 irq_clear_status_flags(virq, IRQ_LEVEL); 1197 1198 #ifdef CONFIG_SMP 1199 /* IPIs are special and come up with HW number 0 */ 1200 if (hw == XIVE_IPI_HW_IRQ) { 1201 /* 1202 * IPIs are marked per-cpu. We use separate HW interrupts under 1203 * the hood but associated with the same "linux" interrupt 1204 */ 1205 irq_set_chip_and_handler(virq, &xive_ipi_chip, 1206 handle_percpu_irq); 1207 return 0; 1208 } 1209 #endif 1210 1211 rc = xive_irq_alloc_data(virq, hw); 1212 if (rc) 1213 return rc; 1214 1215 irq_set_chip_and_handler(virq, &xive_irq_chip, handle_fasteoi_irq); 1216 1217 return 0; 1218 } 1219 1220 static void xive_irq_domain_unmap(struct irq_domain *d, unsigned int virq) 1221 { 1222 struct irq_data *data = irq_get_irq_data(virq); 1223 unsigned int hw_irq; 1224 1225 /* XXX Assign BAD number */ 1226 if (!data) 1227 return; 1228 hw_irq = (unsigned int)irqd_to_hwirq(data); 1229 if (hw_irq != XIVE_IPI_HW_IRQ) 1230 xive_irq_free_data(virq); 1231 } 1232 1233 static int xive_irq_domain_xlate(struct irq_domain *h, struct device_node *ct, 1234 const u32 *intspec, unsigned int intsize, 1235 irq_hw_number_t *out_hwirq, unsigned int *out_flags) 1236 1237 { 1238 *out_hwirq = intspec[0]; 1239 1240 /* 1241 * If intsize is at least 2, we look for the type in the second cell, 1242 * we assume the LSB indicates a level interrupt. 1243 */ 1244 if (intsize > 1) { 1245 if (intspec[1] & 1) 1246 *out_flags = IRQ_TYPE_LEVEL_LOW; 1247 else 1248 *out_flags = IRQ_TYPE_EDGE_RISING; 1249 } else 1250 *out_flags = IRQ_TYPE_LEVEL_LOW; 1251 1252 return 0; 1253 } 1254 1255 static int xive_irq_domain_match(struct irq_domain *h, struct device_node *node, 1256 enum irq_domain_bus_token bus_token) 1257 { 1258 return xive_ops->match(node); 1259 } 1260 1261 #ifdef CONFIG_GENERIC_IRQ_DEBUGFS 1262 static const char * const esb_names[] = { "RESET", "OFF", "PENDING", "QUEUED" }; 1263 1264 static const struct { 1265 u64 mask; 1266 char *name; 1267 } xive_irq_flags[] = { 1268 { XIVE_IRQ_FLAG_STORE_EOI, "STORE_EOI" }, 1269 { XIVE_IRQ_FLAG_LSI, "LSI" }, 1270 { XIVE_IRQ_FLAG_EOI_FW, "EOI_FW" }, 1271 { XIVE_IRQ_FLAG_H_INT_ESB, "H_INT_ESB" }, 1272 { XIVE_IRQ_FLAG_NO_EOI, "NO_EOI" }, 1273 }; 1274 1275 static void xive_irq_domain_debug_show(struct seq_file *m, struct irq_domain *d, 1276 struct irq_data *irqd, int ind) 1277 { 1278 struct xive_irq_data *xd; 1279 u64 val; 1280 int i; 1281 1282 /* No IRQ domain level information. 
To be done */ 1283 if (!irqd) 1284 return; 1285 1286 if (!is_xive_irq(irq_data_get_irq_chip(irqd))) 1287 return; 1288 1289 seq_printf(m, "%*sXIVE:\n", ind, ""); 1290 ind++; 1291 1292 xd = irq_data_get_irq_handler_data(irqd); 1293 if (!xd) { 1294 seq_printf(m, "%*snot assigned\n", ind, ""); 1295 return; 1296 } 1297 1298 val = xive_esb_read(xd, XIVE_ESB_GET); 1299 seq_printf(m, "%*sESB: %s\n", ind, "", esb_names[val & 0x3]); 1300 seq_printf(m, "%*sPstate: %s %s\n", ind, "", xd->stale_p ? "stale" : "", 1301 xd->saved_p ? "saved" : ""); 1302 seq_printf(m, "%*sTarget: %d\n", ind, "", xd->target); 1303 seq_printf(m, "%*sChip: %d\n", ind, "", xd->src_chip); 1304 seq_printf(m, "%*sTrigger: 0x%016llx\n", ind, "", xd->trig_page); 1305 seq_printf(m, "%*sEOI: 0x%016llx\n", ind, "", xd->eoi_page); 1306 seq_printf(m, "%*sFlags: 0x%llx\n", ind, "", xd->flags); 1307 for (i = 0; i < ARRAY_SIZE(xive_irq_flags); i++) { 1308 if (xd->flags & xive_irq_flags[i].mask) 1309 seq_printf(m, "%*s%s\n", ind + 12, "", xive_irq_flags[i].name); 1310 } 1311 } 1312 #endif 1313 1314 static const struct irq_domain_ops xive_irq_domain_ops = { 1315 .match = xive_irq_domain_match, 1316 .map = xive_irq_domain_map, 1317 .unmap = xive_irq_domain_unmap, 1318 .xlate = xive_irq_domain_xlate, 1319 #ifdef CONFIG_GENERIC_IRQ_DEBUGFS 1320 .debug_show = xive_irq_domain_debug_show, 1321 #endif 1322 }; 1323 1324 static void __init xive_init_host(struct device_node *np) 1325 { 1326 xive_irq_domain = irq_domain_add_nomap(np, XIVE_MAX_IRQ, 1327 &xive_irq_domain_ops, NULL); 1328 if (WARN_ON(xive_irq_domain == NULL)) 1329 return; 1330 irq_set_default_host(xive_irq_domain); 1331 } 1332 1333 static void xive_cleanup_cpu_queues(unsigned int cpu, struct xive_cpu *xc) 1334 { 1335 if (xc->queue[xive_irq_priority].qpage) 1336 xive_ops->cleanup_queue(cpu, xc, xive_irq_priority); 1337 } 1338 1339 static int xive_setup_cpu_queues(unsigned int cpu, struct xive_cpu *xc) 1340 { 1341 int rc = 0; 1342 1343 /* We setup 1 queues for now with a 64k page */ 1344 if (!xc->queue[xive_irq_priority].qpage) 1345 rc = xive_ops->setup_queue(cpu, xc, xive_irq_priority); 1346 1347 return rc; 1348 } 1349 1350 static int xive_prepare_cpu(unsigned int cpu) 1351 { 1352 struct xive_cpu *xc; 1353 1354 xc = per_cpu(xive_cpu, cpu); 1355 if (!xc) { 1356 struct device_node *np; 1357 1358 xc = kzalloc_node(sizeof(struct xive_cpu), 1359 GFP_KERNEL, cpu_to_node(cpu)); 1360 if (!xc) 1361 return -ENOMEM; 1362 np = of_get_cpu_node(cpu, NULL); 1363 if (np) 1364 xc->chip_id = of_get_ibm_chip_id(np); 1365 of_node_put(np); 1366 xc->hw_ipi = XIVE_BAD_IRQ; 1367 1368 per_cpu(xive_cpu, cpu) = xc; 1369 } 1370 1371 /* Setup EQs if not already */ 1372 return xive_setup_cpu_queues(cpu, xc); 1373 } 1374 1375 static void xive_setup_cpu(void) 1376 { 1377 struct xive_cpu *xc = __this_cpu_read(xive_cpu); 1378 1379 /* The backend might have additional things to do */ 1380 if (xive_ops->setup_cpu) 1381 xive_ops->setup_cpu(smp_processor_id(), xc); 1382 1383 /* Set CPPR to 0xff to enable flow of interrupts */ 1384 xc->cppr = 0xff; 1385 out_8(xive_tima + xive_tima_offset + TM_CPPR, 0xff); 1386 } 1387 1388 #ifdef CONFIG_SMP 1389 void xive_smp_setup_cpu(void) 1390 { 1391 pr_devel("SMP setup CPU %d\n", smp_processor_id()); 1392 1393 /* This will have already been done on the boot CPU */ 1394 if (smp_processor_id() != boot_cpuid) 1395 xive_setup_cpu(); 1396 1397 } 1398 1399 int xive_smp_prepare_cpu(unsigned int cpu) 1400 { 1401 int rc; 1402 1403 /* Allocate per-CPU data and queues */ 1404 rc = 
xive_prepare_cpu(cpu);
	if (rc)
		return rc;

	/* Allocate and setup IPI for the new CPU */
	return xive_setup_cpu_ipi(cpu);
}

#ifdef CONFIG_HOTPLUG_CPU
static void xive_flush_cpu_queue(unsigned int cpu, struct xive_cpu *xc)
{
	u32 irq;

	/* We assume local irqs are disabled */
	WARN_ON(!irqs_disabled());

	/* Check what's already in the CPU queue */
	while ((irq = xive_scan_interrupts(xc, false)) != 0) {
		/*
		 * We need to re-route that interrupt to its new destination.
		 * First get and lock the descriptor
		 */
		struct irq_desc *desc = irq_to_desc(irq);
		struct irq_data *d = irq_desc_get_irq_data(desc);
		struct xive_irq_data *xd;
		unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);

		/*
		 * Ignore anything that isn't a XIVE irq and ignore
		 * IPIs, so they can just be dropped.
		 */
		if (d->domain != xive_irq_domain || hw_irq == XIVE_IPI_HW_IRQ)
			continue;

		/*
		 * The IRQ should have already been re-routed, it's just a
		 * stale entry in the old queue, so re-trigger it in order to
		 * make it reach its new destination.
		 */
#ifdef DEBUG_FLUSH
		pr_info("CPU %d: Got irq %d while offline, re-sending...\n",
			cpu, irq);
#endif
		raw_spin_lock(&desc->lock);
		xd = irq_desc_get_handler_data(desc);

		/*
		 * Clear saved_p to indicate that it's no longer pending
		 */
		xd->saved_p = false;

		/*
		 * For LSIs, we EOI, this will cause a resend if it's
		 * still asserted. Otherwise do an MSI retrigger.
		 */
		if (xd->flags & XIVE_IRQ_FLAG_LSI)
			xive_do_source_eoi(irqd_to_hwirq(d), xd);
		else
			xive_irq_retrigger(d);

		raw_spin_unlock(&desc->lock);
	}
}

void xive_smp_disable_cpu(void)
{
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
	unsigned int cpu = smp_processor_id();

	/* Migrate interrupts away from the CPU */
	irq_migrate_all_off_this_cpu();

	/* Set CPPR to 0 to disable flow of interrupts */
	xc->cppr = 0;
	out_8(xive_tima + xive_tima_offset + TM_CPPR, 0);

	/* Flush everything still in the queue */
	xive_flush_cpu_queue(cpu, xc);

	/* Re-enable CPPR */
	xc->cppr = 0xff;
	out_8(xive_tima + xive_tima_offset + TM_CPPR, 0xff);
}

void xive_flush_interrupt(void)
{
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
	unsigned int cpu = smp_processor_id();

	/* Called if an interrupt occurs while the CPU is hot unplugged */
	xive_flush_cpu_queue(cpu, xc);
}

#endif /* CONFIG_HOTPLUG_CPU */

#endif /* CONFIG_SMP */

void xive_teardown_cpu(void)
{
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
	unsigned int cpu = smp_processor_id();

	/* Set CPPR to 0 to disable flow of interrupts */
	xc->cppr = 0;
	out_8(xive_tima + xive_tima_offset + TM_CPPR, 0);

	if (xive_ops->teardown_cpu)
		xive_ops->teardown_cpu(cpu, xc);

#ifdef CONFIG_SMP
	/* Get rid of IPI */
	xive_cleanup_cpu_ipi(cpu, xc);
#endif

	/* Disable and free the queues */
	xive_cleanup_cpu_queues(cpu, xc);
}

void xive_shutdown(void)
{
	xive_ops->shutdown();
}

bool __init xive_core_init(struct device_node *np, const struct xive_ops *ops,
			   void __iomem *area, u32 offset, u8 max_prio)
{
	xive_tima = area;
	xive_tima_offset = offset;
	xive_ops =
ops; 1533 xive_irq_priority = max_prio; 1534 1535 ppc_md.get_irq = xive_get_irq; 1536 __xive_enabled = true; 1537 1538 pr_devel("Initializing host..\n"); 1539 xive_init_host(np); 1540 1541 pr_devel("Initializing boot CPU..\n"); 1542 1543 /* Allocate per-CPU data and queues */ 1544 xive_prepare_cpu(smp_processor_id()); 1545 1546 /* Get ready for interrupts */ 1547 xive_setup_cpu(); 1548 1549 pr_info("Interrupt handling initialized with %s backend\n", 1550 xive_ops->name); 1551 pr_info("Using priority %d for all interrupts\n", max_prio); 1552 1553 return true; 1554 } 1555 1556 __be32 *xive_queue_page_alloc(unsigned int cpu, u32 queue_shift) 1557 { 1558 unsigned int alloc_order; 1559 struct page *pages; 1560 __be32 *qpage; 1561 1562 alloc_order = xive_alloc_order(queue_shift); 1563 pages = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, alloc_order); 1564 if (!pages) 1565 return ERR_PTR(-ENOMEM); 1566 qpage = (__be32 *)page_address(pages); 1567 memset(qpage, 0, 1 << queue_shift); 1568 1569 return qpage; 1570 } 1571 1572 static int __init xive_off(char *arg) 1573 { 1574 xive_cmdline_disabled = true; 1575 return 0; 1576 } 1577 __setup("xive=off", xive_off); 1578 1579 static void xive_debug_show_cpu(struct seq_file *m, int cpu) 1580 { 1581 struct xive_cpu *xc = per_cpu(xive_cpu, cpu); 1582 1583 seq_printf(m, "CPU %d:", cpu); 1584 if (xc) { 1585 seq_printf(m, "pp=%02x CPPR=%02x ", xc->pending_prio, xc->cppr); 1586 1587 #ifdef CONFIG_SMP 1588 { 1589 u64 val = xive_esb_read(&xc->ipi_data, XIVE_ESB_GET); 1590 1591 seq_printf(m, "IPI=0x%08x PQ=%c%c ", xc->hw_ipi, 1592 val & XIVE_ESB_VAL_P ? 'P' : '-', 1593 val & XIVE_ESB_VAL_Q ? 'Q' : '-'); 1594 } 1595 #endif 1596 { 1597 struct xive_q *q = &xc->queue[xive_irq_priority]; 1598 u32 i0, i1, idx; 1599 1600 if (q->qpage) { 1601 idx = q->idx; 1602 i0 = be32_to_cpup(q->qpage + idx); 1603 idx = (idx + 1) & q->msk; 1604 i1 = be32_to_cpup(q->qpage + idx); 1605 seq_printf(m, "EQ idx=%d T=%d %08x %08x ...", 1606 q->idx, q->toggle, i0, i1); 1607 } 1608 } 1609 } 1610 seq_puts(m, "\n"); 1611 } 1612 1613 static void xive_debug_show_irq(struct seq_file *m, u32 hw_irq, struct irq_data *d) 1614 { 1615 struct irq_chip *chip = irq_data_get_irq_chip(d); 1616 int rc; 1617 u32 target; 1618 u8 prio; 1619 u32 lirq; 1620 1621 if (!is_xive_irq(chip)) 1622 return; 1623 1624 rc = xive_ops->get_irq_config(hw_irq, &target, &prio, &lirq); 1625 if (rc) { 1626 seq_printf(m, "IRQ 0x%08x : no config rc=%d\n", hw_irq, rc); 1627 return; 1628 } 1629 1630 seq_printf(m, "IRQ 0x%08x : target=0x%x prio=%02x lirq=0x%x ", 1631 hw_irq, target, prio, lirq); 1632 1633 if (d) { 1634 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); 1635 u64 val = xive_esb_read(xd, XIVE_ESB_GET); 1636 1637 seq_printf(m, "flags=%c%c%c PQ=%c%c", 1638 xd->flags & XIVE_IRQ_FLAG_STORE_EOI ? 'S' : ' ', 1639 xd->flags & XIVE_IRQ_FLAG_LSI ? 'L' : ' ', 1640 xd->flags & XIVE_IRQ_FLAG_H_INT_ESB ? 'H' : ' ', 1641 val & XIVE_ESB_VAL_P ? 'P' : '-', 1642 val & XIVE_ESB_VAL_Q ? 
'Q' : '-'); 1643 } 1644 seq_puts(m, "\n"); 1645 } 1646 1647 static int xive_core_debug_show(struct seq_file *m, void *private) 1648 { 1649 unsigned int i; 1650 struct irq_desc *desc; 1651 int cpu; 1652 1653 if (xive_ops->debug_show) 1654 xive_ops->debug_show(m, private); 1655 1656 for_each_possible_cpu(cpu) 1657 xive_debug_show_cpu(m, cpu); 1658 1659 for_each_irq_desc(i, desc) { 1660 struct irq_data *d = irq_desc_get_irq_data(desc); 1661 unsigned int hw_irq; 1662 1663 if (!d) 1664 continue; 1665 1666 hw_irq = (unsigned int)irqd_to_hwirq(d); 1667 1668 /* IPIs are special (HW number 0) */ 1669 if (hw_irq != XIVE_IPI_HW_IRQ) 1670 xive_debug_show_irq(m, hw_irq, d); 1671 } 1672 return 0; 1673 } 1674 DEFINE_SHOW_ATTRIBUTE(xive_core_debug); 1675 1676 int xive_core_debug_init(void) 1677 { 1678 if (xive_enabled()) 1679 debugfs_create_file("xive", 0400, powerpc_debugfs_root, 1680 NULL, &xive_core_debug_fops); 1681 return 0; 1682 } 1683
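
/*
 * Illustrative sketch (not part of the driver, kept under #if 0 so it is
 * never built): a minimal userspace model of the generation-flagged event
 * queue that xive_read_eq() consumes above. The "hardware" side is
 * simulated by eq_push(); the names demo_q, eq_push and eq_pop, and the
 * fixed 8-entry queue with bit 31 used as the generation (valid) bit, are
 * made up for this example and only mirror the driver's use of q->toggle.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

struct demo_q {
	uint32_t qpage[8];	/* queue entries, bit 31 = generation bit */
	uint32_t msk;		/* queue size - 1 */
	uint32_t idx;		/* consumer index */
	uint8_t  toggle;	/* generation the consumer treats as "old" */
	uint32_t widx;		/* producer index (simulated hardware) */
	uint8_t  wgen;		/* generation the producer currently writes */
};

/* Simulated "hardware": append an entry tagged with the current generation */
static void eq_push(struct demo_q *q, uint32_t data)
{
	q->qpage[q->widx] = (data & 0x7fffffff) | ((uint32_t)q->wgen << 31);
	q->widx = (q->widx + 1) & q->msk;
	if (q->widx == 0)
		q->wgen ^= 1;	/* starting a new lap: flip the generation */
}

/* Consumer: same logic as xive_read_eq() with just_peek == false */
static uint32_t eq_pop(struct demo_q *q)
{
	uint32_t cur = q->qpage[q->idx];

	if ((cur >> 31) == q->toggle)	/* generation matches: no new entry */
		return 0;
	q->idx = (q->idx + 1) & q->msk;
	if (q->idx == 0)
		q->toggle ^= 1;	/* wrapped: now expect the other generation */
	return cur & 0x7fffffff;
}

int main(void)
{
	/* Producer starts on generation 1 so new entries differ from toggle == 0 */
	struct demo_q q = { .msk = 7, .wgen = 1 };
	uint32_t v, i;

	for (i = 1; i <= 6; i++)	/* partial first lap */
		eq_push(&q, i);
	while ((v = eq_pop(&q)) != 0)
		printf("popped %u\n", (unsigned int)v);

	for (i = 7; i <= 12; i++)	/* crosses the wrap point */
		eq_push(&q, i);
	while ((v = eq_pop(&q)) != 0)
		printf("popped %u\n", (unsigned int)v);
	return 0;
}
#endif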