1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright 2017 Benjamin Herrenschmidt, IBM Corporation. 4 */ 5 6 #define pr_fmt(fmt) "xive-kvm: " fmt 7 8 #include <linux/kernel.h> 9 #include <linux/kvm_host.h> 10 #include <linux/err.h> 11 #include <linux/gfp.h> 12 #include <linux/spinlock.h> 13 #include <linux/delay.h> 14 #include <linux/percpu.h> 15 #include <linux/cpumask.h> 16 #include <linux/uaccess.h> 17 #include <linux/irqdomain.h> 18 #include <asm/kvm_book3s.h> 19 #include <asm/kvm_ppc.h> 20 #include <asm/hvcall.h> 21 #include <asm/xics.h> 22 #include <asm/xive.h> 23 #include <asm/xive-regs.h> 24 #include <asm/debug.h> 25 #include <asm/debugfs.h> 26 #include <asm/time.h> 27 #include <asm/opal.h> 28 29 #include <linux/debugfs.h> 30 #include <linux/seq_file.h> 31 32 #include "book3s_xive.h" 33 34 35 /* 36 * Virtual mode variants of the hcalls for use on radix/radix 37 * with AIL. They require the VCPU's VP to be "pushed" 38 * 39 * We still instantiate them here because we use some of the 40 * generated utility functions as well in this file. 41 */ 42 #define XIVE_RUNTIME_CHECKS 43 #define X_PFX xive_vm_ 44 #define X_STATIC static 45 #define X_STAT_PFX stat_vm_ 46 #define __x_tima xive_tima 47 #define __x_eoi_page(xd) ((void __iomem *)((xd)->eoi_mmio)) 48 #define __x_trig_page(xd) ((void __iomem *)((xd)->trig_mmio)) 49 #define __x_writeb __raw_writeb 50 #define __x_readw __raw_readw 51 #define __x_readq __raw_readq 52 #define __x_writeq __raw_writeq 53 54 #include "book3s_xive_template.c" 55 56 /* 57 * We leave a gap of a couple of interrupts in the queue to 58 * account for the IPI and additional safety guard. 59 */ 60 #define XIVE_Q_GAP 2 61 62 static bool kvmppc_xive_vcpu_has_save_restore(struct kvm_vcpu *vcpu) 63 { 64 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; 65 66 /* Check enablement at VP level */ 67 return xc->vp_cam & TM_QW1W2_HO; 68 } 69 70 bool kvmppc_xive_check_save_restore(struct kvm_vcpu *vcpu) 71 { 72 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; 73 struct kvmppc_xive *xive = xc->xive; 74 75 if (xive->flags & KVMPPC_XIVE_FLAG_SAVE_RESTORE) 76 return kvmppc_xive_vcpu_has_save_restore(vcpu); 77 78 return true; 79 } 80 81 /* 82 * Push a vcpu's context to the XIVE on guest entry. 83 * This assumes we are in virtual mode (MMU on) 84 */ 85 void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu) 86 { 87 void __iomem *tima = local_paca->kvm_hstate.xive_tima_virt; 88 u64 pq; 89 90 /* 91 * Nothing to do if the platform doesn't have a XIVE 92 * or this vCPU doesn't have its own XIVE context 93 * (e.g. because it's not using an in-kernel interrupt controller). 94 */ 95 if (!tima || !vcpu->arch.xive_cam_word) 96 return; 97 98 eieio(); 99 if (!kvmppc_xive_vcpu_has_save_restore(vcpu)) 100 __raw_writeq(vcpu->arch.xive_saved_state.w01, tima + TM_QW1_OS); 101 __raw_writel(vcpu->arch.xive_cam_word, tima + TM_QW1_OS + TM_WORD2); 102 vcpu->arch.xive_pushed = 1; 103 eieio(); 104 105 /* 106 * We clear the irq_pending flag. There is a small chance of a 107 * race vs. the escalation interrupt happening on another 108 * processor setting it again, but the only consequence is to 109 * cause a spurious wakeup on the next H_CEDE, which is not an 110 * issue. 111 */ 112 vcpu->arch.irq_pending = 0; 113 114 /* 115 * In single escalation mode, if the escalation interrupt is 116 * on, we mask it. 
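	 *
	 * Masking is done with an ESB "special load": a load from the
	 * escalation source's ESB page at the XIVE_ESB_SET_PQ_01 offset
	 * atomically sets PQ to 01 (masked) and returns the previous PQ
	 * bits, so the old P bit can be examined afterwards. A minimal
	 * sketch of the idiom used just below (illustrative only):
	 *
	 *	pq = __raw_readq(esb_page + XIVE_ESB_SET_PQ_01);
	 *	if (pq & XIVE_ESB_VAL_P)
	 *		// an occurrence may already be on its way to a queue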
	 */
	if (vcpu->arch.xive_esc_on) {
		pq = __raw_readq((void __iomem *)(vcpu->arch.xive_esc_vaddr +
						  XIVE_ESB_SET_PQ_01));
		mb();

		/*
		 * We have a possible subtle race here: The escalation
		 * interrupt might have fired and be on its way to the
		 * host queue while we mask it, and if we unmask it
		 * early enough (re-cede right away), there is a
		 * theoretical possibility that it fires again, thus
		 * landing in the target queue more than once which is
		 * a big no-no.
		 *
		 * Fortunately, solving this is rather easy. If the
		 * above load setting PQ to 01 returns a previous
		 * value where P is set, then we know the escalation
		 * interrupt is somewhere on its way to the host. In
		 * that case we simply don't clear the xive_esc_on
		 * flag below. It will eventually be cleared by the
		 * handler for the escalation interrupt.
		 *
		 * Then, when doing a cede, we check that flag again
		 * before re-enabling the escalation interrupt, and if
		 * set, we abort the cede.
		 */
		if (!(pq & XIVE_ESB_VAL_P))
			/* Now P is 0, we can clear the flag */
			vcpu->arch.xive_esc_on = 0;
	}
}
EXPORT_SYMBOL_GPL(kvmppc_xive_push_vcpu);

/*
 * Pull a vcpu's context from the XIVE on guest exit.
 * This assumes we are in virtual mode (MMU on)
 */
void kvmppc_xive_pull_vcpu(struct kvm_vcpu *vcpu)
{
	void __iomem *tima = local_paca->kvm_hstate.xive_tima_virt;

	if (!vcpu->arch.xive_pushed)
		return;

	/*
	 * Should not have been pushed if there is no tima
	 */
	if (WARN_ON(!tima))
		return;

	eieio();
	/* First load to pull the context, we ignore the value */
	__raw_readl(tima + TM_SPC_PULL_OS_CTX);
	/* Second load to recover the context state (Words 0 and 1) */
	if (!kvmppc_xive_vcpu_has_save_restore(vcpu))
		vcpu->arch.xive_saved_state.w01 = __raw_readq(tima + TM_QW1_OS);

	/* Fixup some of the state for the next load */
	vcpu->arch.xive_saved_state.lsmfb = 0;
	vcpu->arch.xive_saved_state.ack = 0xff;
	vcpu->arch.xive_pushed = 0;
	eieio();
}
EXPORT_SYMBOL_GPL(kvmppc_xive_pull_vcpu);

void kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu)
{
	void __iomem *esc_vaddr = (void __iomem *)vcpu->arch.xive_esc_vaddr;

	if (!esc_vaddr)
		return;

	/* we are using XIVE with single escalation */

	if (vcpu->arch.xive_esc_on) {
		/*
		 * If we still have a pending escalation, abort the cede,
		 * and we must set PQ to 10 rather than 00 so that we don't
		 * potentially end up with two entries for the escalation
		 * interrupt in the XIVE interrupt queue. In that case
		 * we also don't want to set xive_esc_on to 1 here in
		 * case we race with xive_esc_irq().
		 */
		vcpu->arch.ceded = 0;
		/*
		 * The escalation interrupts are special as we don't EOI them.
		 * There is no need to use the load-after-store ordering offset
		 * to set PQ to 10 as we won't use StoreEOI.
		 */
		__raw_readq(esc_vaddr + XIVE_ESB_SET_PQ_10);
	} else {
		vcpu->arch.xive_esc_on = true;
		mb();
		__raw_readq(esc_vaddr + XIVE_ESB_SET_PQ_00);
	}
	mb();
}
EXPORT_SYMBOL_GPL(kvmppc_xive_rearm_escalation);

/*
 * This is a simple trigger for a generic XIVE IRQ.
This must 219 * only be called for interrupts that support a trigger page 220 */ 221 static bool xive_irq_trigger(struct xive_irq_data *xd) 222 { 223 /* This should be only for MSIs */ 224 if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI)) 225 return false; 226 227 /* Those interrupts should always have a trigger page */ 228 if (WARN_ON(!xd->trig_mmio)) 229 return false; 230 231 out_be64(xd->trig_mmio, 0); 232 233 return true; 234 } 235 236 static irqreturn_t xive_esc_irq(int irq, void *data) 237 { 238 struct kvm_vcpu *vcpu = data; 239 240 vcpu->arch.irq_pending = 1; 241 smp_mb(); 242 if (vcpu->arch.ceded) 243 kvmppc_fast_vcpu_kick(vcpu); 244 245 /* Since we have the no-EOI flag, the interrupt is effectively 246 * disabled now. Clearing xive_esc_on means we won't bother 247 * doing so on the next entry. 248 * 249 * This also allows the entry code to know that if a PQ combination 250 * of 10 is observed while xive_esc_on is true, it means the queue 251 * contains an unprocessed escalation interrupt. We don't make use of 252 * that knowledge today but might (see comment in book3s_hv_rmhandler.S) 253 */ 254 vcpu->arch.xive_esc_on = false; 255 256 /* This orders xive_esc_on = false vs. subsequent stale_p = true */ 257 smp_wmb(); /* goes with smp_mb() in cleanup_single_escalation */ 258 259 return IRQ_HANDLED; 260 } 261 262 int kvmppc_xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio, 263 bool single_escalation) 264 { 265 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; 266 struct xive_q *q = &xc->queues[prio]; 267 char *name = NULL; 268 int rc; 269 270 /* Already there ? */ 271 if (xc->esc_virq[prio]) 272 return 0; 273 274 /* Hook up the escalation interrupt */ 275 xc->esc_virq[prio] = irq_create_mapping(NULL, q->esc_irq); 276 if (!xc->esc_virq[prio]) { 277 pr_err("Failed to map escalation interrupt for queue %d of VCPU %d\n", 278 prio, xc->server_num); 279 return -EIO; 280 } 281 282 if (single_escalation) 283 name = kasprintf(GFP_KERNEL, "kvm-%d-%d", 284 vcpu->kvm->arch.lpid, xc->server_num); 285 else 286 name = kasprintf(GFP_KERNEL, "kvm-%d-%d-%d", 287 vcpu->kvm->arch.lpid, xc->server_num, prio); 288 if (!name) { 289 pr_err("Failed to allocate escalation irq name for queue %d of VCPU %d\n", 290 prio, xc->server_num); 291 rc = -ENOMEM; 292 goto error; 293 } 294 295 pr_devel("Escalation %s irq %d (prio %d)\n", name, xc->esc_virq[prio], prio); 296 297 rc = request_irq(xc->esc_virq[prio], xive_esc_irq, 298 IRQF_NO_THREAD, name, vcpu); 299 if (rc) { 300 pr_err("Failed to request escalation interrupt for queue %d of VCPU %d\n", 301 prio, xc->server_num); 302 goto error; 303 } 304 xc->esc_virq_names[prio] = name; 305 306 /* In single escalation mode, we grab the ESB MMIO of the 307 * interrupt and mask it. Also populate the VCPU v/raddr 308 * of the ESB page for use by asm entry/exit code. Finally 309 * set the XIVE_IRQ_FLAG_NO_EOI flag which will prevent the 310 * core code from performing an EOI on the escalation 311 * interrupt, thus leaving it effectively masked after 312 * it fires once. 
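	 *
	 * Roughly, the single escalation life cycle set up here is
	 * (illustrative sketch only):
	 *
	 *	xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01); // masked at attach
	 *	// guest entry re-arms it (PQ back to 00) using the
	 *	// xive_esc_vaddr/raddr saved below
	 *	// when it fires, XIVE_IRQ_FLAG_NO_EOI keeps it masked
	 *	// until it is explicitly re-armed again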
313 */ 314 if (single_escalation) { 315 struct irq_data *d = irq_get_irq_data(xc->esc_virq[prio]); 316 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); 317 318 xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01); 319 vcpu->arch.xive_esc_raddr = xd->eoi_page; 320 vcpu->arch.xive_esc_vaddr = (__force u64)xd->eoi_mmio; 321 xd->flags |= XIVE_IRQ_FLAG_NO_EOI; 322 } 323 324 return 0; 325 error: 326 irq_dispose_mapping(xc->esc_virq[prio]); 327 xc->esc_virq[prio] = 0; 328 kfree(name); 329 return rc; 330 } 331 332 static int xive_provision_queue(struct kvm_vcpu *vcpu, u8 prio) 333 { 334 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; 335 struct kvmppc_xive *xive = xc->xive; 336 struct xive_q *q = &xc->queues[prio]; 337 void *qpage; 338 int rc; 339 340 if (WARN_ON(q->qpage)) 341 return 0; 342 343 /* Allocate the queue and retrieve infos on current node for now */ 344 qpage = (__be32 *)__get_free_pages(GFP_KERNEL, xive->q_page_order); 345 if (!qpage) { 346 pr_err("Failed to allocate queue %d for VCPU %d\n", 347 prio, xc->server_num); 348 return -ENOMEM; 349 } 350 memset(qpage, 0, 1 << xive->q_order); 351 352 /* 353 * Reconfigure the queue. This will set q->qpage only once the 354 * queue is fully configured. This is a requirement for prio 0 355 * as we will stop doing EOIs for every IPI as soon as we observe 356 * qpage being non-NULL, and instead will only EOI when we receive 357 * corresponding queue 0 entries 358 */ 359 rc = xive_native_configure_queue(xc->vp_id, q, prio, qpage, 360 xive->q_order, true); 361 if (rc) 362 pr_err("Failed to configure queue %d for VCPU %d\n", 363 prio, xc->server_num); 364 return rc; 365 } 366 367 /* Called with xive->lock held */ 368 static int xive_check_provisioning(struct kvm *kvm, u8 prio) 369 { 370 struct kvmppc_xive *xive = kvm->arch.xive; 371 struct kvm_vcpu *vcpu; 372 int i, rc; 373 374 lockdep_assert_held(&xive->lock); 375 376 /* Already provisioned ? */ 377 if (xive->qmap & (1 << prio)) 378 return 0; 379 380 pr_devel("Provisioning prio... %d\n", prio); 381 382 /* Provision each VCPU and enable escalations if needed */ 383 kvm_for_each_vcpu(i, vcpu, kvm) { 384 if (!vcpu->arch.xive_vcpu) 385 continue; 386 rc = xive_provision_queue(vcpu, prio); 387 if (rc == 0 && !kvmppc_xive_has_single_escalation(xive)) 388 kvmppc_xive_attach_escalation(vcpu, prio, 389 kvmppc_xive_has_single_escalation(xive)); 390 if (rc) 391 return rc; 392 } 393 394 /* Order previous stores and mark it as provisioned */ 395 mb(); 396 xive->qmap |= (1 << prio); 397 return 0; 398 } 399 400 static void xive_inc_q_pending(struct kvm *kvm, u32 server, u8 prio) 401 { 402 struct kvm_vcpu *vcpu; 403 struct kvmppc_xive_vcpu *xc; 404 struct xive_q *q; 405 406 /* Locate target server */ 407 vcpu = kvmppc_xive_find_server(kvm, server); 408 if (!vcpu) { 409 pr_warn("%s: Can't find server %d\n", __func__, server); 410 return; 411 } 412 xc = vcpu->arch.xive_vcpu; 413 if (WARN_ON(!xc)) 414 return; 415 416 q = &xc->queues[prio]; 417 atomic_inc(&q->pending_count); 418 } 419 420 static int xive_try_pick_queue(struct kvm_vcpu *vcpu, u8 prio) 421 { 422 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; 423 struct xive_q *q; 424 u32 max; 425 426 if (WARN_ON(!xc)) 427 return -ENXIO; 428 if (!xc->valid) 429 return -ENXIO; 430 431 q = &xc->queues[prio]; 432 if (WARN_ON(!q->qpage)) 433 return -ENXIO; 434 435 /* Calculate max number of interrupts in that queue. */ 436 max = (q->msk + 1) - XIVE_Q_GAP; 437 return atomic_add_unless(&q->count, 1, max) ? 
0 : -EBUSY; 438 } 439 440 int kvmppc_xive_select_target(struct kvm *kvm, u32 *server, u8 prio) 441 { 442 struct kvm_vcpu *vcpu; 443 int i, rc; 444 445 /* Locate target server */ 446 vcpu = kvmppc_xive_find_server(kvm, *server); 447 if (!vcpu) { 448 pr_devel("Can't find server %d\n", *server); 449 return -EINVAL; 450 } 451 452 pr_devel("Finding irq target on 0x%x/%d...\n", *server, prio); 453 454 /* Try pick it */ 455 rc = xive_try_pick_queue(vcpu, prio); 456 if (rc == 0) 457 return rc; 458 459 pr_devel(" .. failed, looking up candidate...\n"); 460 461 /* Failed, pick another VCPU */ 462 kvm_for_each_vcpu(i, vcpu, kvm) { 463 if (!vcpu->arch.xive_vcpu) 464 continue; 465 rc = xive_try_pick_queue(vcpu, prio); 466 if (rc == 0) { 467 *server = vcpu->arch.xive_vcpu->server_num; 468 pr_devel(" found on 0x%x/%d\n", *server, prio); 469 return rc; 470 } 471 } 472 pr_devel(" no available target !\n"); 473 474 /* No available target ! */ 475 return -EBUSY; 476 } 477 478 static u8 xive_lock_and_mask(struct kvmppc_xive *xive, 479 struct kvmppc_xive_src_block *sb, 480 struct kvmppc_xive_irq_state *state) 481 { 482 struct xive_irq_data *xd; 483 u32 hw_num; 484 u8 old_prio; 485 u64 val; 486 487 /* 488 * Take the lock, set masked, try again if racing 489 * with H_EOI 490 */ 491 for (;;) { 492 arch_spin_lock(&sb->lock); 493 old_prio = state->guest_priority; 494 state->guest_priority = MASKED; 495 mb(); 496 if (!state->in_eoi) 497 break; 498 state->guest_priority = old_prio; 499 arch_spin_unlock(&sb->lock); 500 } 501 502 /* No change ? Bail */ 503 if (old_prio == MASKED) 504 return old_prio; 505 506 /* Get the right irq */ 507 kvmppc_xive_select_irq(state, &hw_num, &xd); 508 509 /* Set PQ to 10, return old P and old Q and remember them */ 510 val = xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_10); 511 state->old_p = !!(val & 2); 512 state->old_q = !!(val & 1); 513 514 /* 515 * Synchronize hardware to sensure the queues are updated when 516 * masking 517 */ 518 xive_native_sync_source(hw_num); 519 520 return old_prio; 521 } 522 523 static void xive_lock_for_unmask(struct kvmppc_xive_src_block *sb, 524 struct kvmppc_xive_irq_state *state) 525 { 526 /* 527 * Take the lock try again if racing with H_EOI 528 */ 529 for (;;) { 530 arch_spin_lock(&sb->lock); 531 if (!state->in_eoi) 532 break; 533 arch_spin_unlock(&sb->lock); 534 } 535 } 536 537 static void xive_finish_unmask(struct kvmppc_xive *xive, 538 struct kvmppc_xive_src_block *sb, 539 struct kvmppc_xive_irq_state *state, 540 u8 prio) 541 { 542 struct xive_irq_data *xd; 543 u32 hw_num; 544 545 /* If we aren't changing a thing, move on */ 546 if (state->guest_priority != MASKED) 547 goto bail; 548 549 /* Get the right irq */ 550 kvmppc_xive_select_irq(state, &hw_num, &xd); 551 552 /* Old Q set, set PQ to 11 */ 553 if (state->old_q) 554 xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_11); 555 556 /* 557 * If not old P, then perform an "effective" EOI, 558 * on the source. This will handle the cases where 559 * FW EOI is needed. 
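	 *
	 * "Effective" EOI means xive_vm_source_eoi() does whatever this
	 * particular source needs (MMIO load/store EOI or a firmware
	 * call) to return it to the enabled state, re-triggering it if
	 * an occurrence was latched while masked. The PQ transitions
	 * involved are roughly (illustrative only):
	 *
	 *	PQ = 10 --EOI--> PQ = 00		(nothing was pending)
	 *	PQ = 11 --EOI--> PQ = 00 + retrigger	(Q was set)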
	 */
	if (!state->old_p)
		xive_vm_source_eoi(hw_num, xd);

	/* Synchronize ordering and mark unmasked */
	mb();
bail:
	state->guest_priority = prio;
}

/*
 * Target an interrupt to a given server/prio, this will fall back
 * to another server if necessary and perform the HW targetting
 * updates as needed
 *
 * NOTE: Must be called with the state lock held
 */
static int xive_target_interrupt(struct kvm *kvm,
				 struct kvmppc_xive_irq_state *state,
				 u32 server, u8 prio)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	u32 hw_num;
	int rc;

	/*
	 * This will return a tentative server and actual
	 * priority. The count for that new target will have
	 * already been incremented.
	 */
	rc = kvmppc_xive_select_target(kvm, &server, prio);

	/*
	 * We failed to find a target ? Not much we can do
	 * at least until we support the GIQ.
	 */
	if (rc)
		return rc;

	/*
	 * Increment the old queue pending count if there
	 * was one so that the old queue count gets adjusted later
	 * when observed to be empty.
	 */
	if (state->act_priority != MASKED)
		xive_inc_q_pending(kvm,
				   state->act_server,
				   state->act_priority);
	/*
	 * Update state and HW
	 */
	state->act_priority = prio;
	state->act_server = server;

	/* Get the right irq */
	kvmppc_xive_select_irq(state, &hw_num, NULL);

	return xive_native_configure_irq(hw_num,
					 kvmppc_xive_vp(xive, server),
					 prio, state->number);
}

/*
 * Targetting rules: In order to avoid losing track of
 * pending interrupts across mask and unmask, which would
 * allow queue overflows, we implement the following rules:
 *
 * - Unless it was never enabled (or we run out of capacity)
 *   an interrupt is always targetted at a valid server/queue
 *   pair even when "masked" by the guest. This pair tends to
 *   be the last one used but it can be changed under some
 *   circumstances. That allows us to separate targetting
 *   from masking, we only handle accounting during (re)targetting,
 *   this also allows us to let an interrupt drain into its target
 *   queue after masking, avoiding complex schemes to remove
 *   interrupts out of remote processor queues.
 *
 * - When masking, we set PQ to 10 and save the previous value
 *   of P and Q.
 *
 * - When unmasking, if saved Q was set, we set PQ to 11
 *   otherwise we leave PQ to the HW state which will be either
 *   10 if nothing happened or 11 if the interrupt fired while
 *   masked. Effectively we are OR'ing the previous Q into the
 *   HW Q.
 *
 *   Then if saved P is clear, we do an effective EOI (Q->P->Trigger)
 *   which will unmask the interrupt and shoot a new one if Q was
 *   set.
 *
 *   Otherwise (saved P is set) we leave PQ unchanged (so 10 or 11,
 *   effectively meaning an H_EOI from the guest is still expected
 *   for that interrupt).
 *
 * - If H_EOI occurs while masked, we clear the saved P.
 *
 * - When changing target, we account on the new target and
 *   increment a separate "pending" counter on the old one.
 *   This pending counter will be used to decrement the old
 *   target's count when its queue has been observed empty.
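 *
 * A short worked example of the rules above (illustrative only),
 * for an interrupt that fires while it is masked:
 *
 *   mask:     load at SET_PQ_10, the previous P/Q (say 00) is saved
 *   (fires):  with P set, the event is not delivered, it only
 *             latches into Q, so PQ becomes 11
 *   unmask:   saved Q was 0 so PQ is left as found (11); saved P
 *             was 0 so we perform an effective EOI, which clears
 *             PQ and re-triggers the interrupt exactly once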
660 */ 661 662 int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server, 663 u32 priority) 664 { 665 struct kvmppc_xive *xive = kvm->arch.xive; 666 struct kvmppc_xive_src_block *sb; 667 struct kvmppc_xive_irq_state *state; 668 u8 new_act_prio; 669 int rc = 0; 670 u16 idx; 671 672 if (!xive) 673 return -ENODEV; 674 675 pr_devel("set_xive ! irq 0x%x server 0x%x prio %d\n", 676 irq, server, priority); 677 678 /* First, check provisioning of queues */ 679 if (priority != MASKED) { 680 mutex_lock(&xive->lock); 681 rc = xive_check_provisioning(xive->kvm, 682 xive_prio_from_guest(priority)); 683 mutex_unlock(&xive->lock); 684 } 685 if (rc) { 686 pr_devel(" provisioning failure %d !\n", rc); 687 return rc; 688 } 689 690 sb = kvmppc_xive_find_source(xive, irq, &idx); 691 if (!sb) 692 return -EINVAL; 693 state = &sb->irq_state[idx]; 694 695 /* 696 * We first handle masking/unmasking since the locking 697 * might need to be retried due to EOIs, we'll handle 698 * targetting changes later. These functions will return 699 * with the SB lock held. 700 * 701 * xive_lock_and_mask() will also set state->guest_priority 702 * but won't otherwise change other fields of the state. 703 * 704 * xive_lock_for_unmask will not actually unmask, this will 705 * be done later by xive_finish_unmask() once the targetting 706 * has been done, so we don't try to unmask an interrupt 707 * that hasn't yet been targetted. 708 */ 709 if (priority == MASKED) 710 xive_lock_and_mask(xive, sb, state); 711 else 712 xive_lock_for_unmask(sb, state); 713 714 715 /* 716 * Then we handle targetting. 717 * 718 * First calculate a new "actual priority" 719 */ 720 new_act_prio = state->act_priority; 721 if (priority != MASKED) 722 new_act_prio = xive_prio_from_guest(priority); 723 724 pr_devel(" new_act_prio=%x act_server=%x act_prio=%x\n", 725 new_act_prio, state->act_server, state->act_priority); 726 727 /* 728 * Then check if we actually need to change anything, 729 * 730 * The condition for re-targetting the interrupt is that 731 * we have a valid new priority (new_act_prio is not 0xff) 732 * and either the server or the priority changed. 733 * 734 * Note: If act_priority was ff and the new priority is 735 * also ff, we don't do anything and leave the interrupt 736 * untargetted. An attempt of doing an int_on on an 737 * untargetted interrupt will fail. If that is a problem 738 * we could initialize interrupts with valid default 739 */ 740 741 if (new_act_prio != MASKED && 742 (state->act_server != server || 743 state->act_priority != new_act_prio)) 744 rc = xive_target_interrupt(kvm, state, server, new_act_prio); 745 746 /* 747 * Perform the final unmasking of the interrupt source 748 * if necessary 749 */ 750 if (priority != MASKED) 751 xive_finish_unmask(xive, sb, state, priority); 752 753 /* 754 * Finally Update saved_priority to match. Only int_on/off 755 * set this field to a different value. 
756 */ 757 state->saved_priority = priority; 758 759 arch_spin_unlock(&sb->lock); 760 return rc; 761 } 762 763 int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server, 764 u32 *priority) 765 { 766 struct kvmppc_xive *xive = kvm->arch.xive; 767 struct kvmppc_xive_src_block *sb; 768 struct kvmppc_xive_irq_state *state; 769 u16 idx; 770 771 if (!xive) 772 return -ENODEV; 773 774 sb = kvmppc_xive_find_source(xive, irq, &idx); 775 if (!sb) 776 return -EINVAL; 777 state = &sb->irq_state[idx]; 778 arch_spin_lock(&sb->lock); 779 *server = state->act_server; 780 *priority = state->guest_priority; 781 arch_spin_unlock(&sb->lock); 782 783 return 0; 784 } 785 786 int kvmppc_xive_int_on(struct kvm *kvm, u32 irq) 787 { 788 struct kvmppc_xive *xive = kvm->arch.xive; 789 struct kvmppc_xive_src_block *sb; 790 struct kvmppc_xive_irq_state *state; 791 u16 idx; 792 793 if (!xive) 794 return -ENODEV; 795 796 sb = kvmppc_xive_find_source(xive, irq, &idx); 797 if (!sb) 798 return -EINVAL; 799 state = &sb->irq_state[idx]; 800 801 pr_devel("int_on(irq=0x%x)\n", irq); 802 803 /* 804 * Check if interrupt was not targetted 805 */ 806 if (state->act_priority == MASKED) { 807 pr_devel("int_on on untargetted interrupt\n"); 808 return -EINVAL; 809 } 810 811 /* If saved_priority is 0xff, do nothing */ 812 if (state->saved_priority == MASKED) 813 return 0; 814 815 /* 816 * Lock and unmask it. 817 */ 818 xive_lock_for_unmask(sb, state); 819 xive_finish_unmask(xive, sb, state, state->saved_priority); 820 arch_spin_unlock(&sb->lock); 821 822 return 0; 823 } 824 825 int kvmppc_xive_int_off(struct kvm *kvm, u32 irq) 826 { 827 struct kvmppc_xive *xive = kvm->arch.xive; 828 struct kvmppc_xive_src_block *sb; 829 struct kvmppc_xive_irq_state *state; 830 u16 idx; 831 832 if (!xive) 833 return -ENODEV; 834 835 sb = kvmppc_xive_find_source(xive, irq, &idx); 836 if (!sb) 837 return -EINVAL; 838 state = &sb->irq_state[idx]; 839 840 pr_devel("int_off(irq=0x%x)\n", irq); 841 842 /* 843 * Lock and mask 844 */ 845 state->saved_priority = xive_lock_and_mask(xive, sb, state); 846 arch_spin_unlock(&sb->lock); 847 848 return 0; 849 } 850 851 static bool xive_restore_pending_irq(struct kvmppc_xive *xive, u32 irq) 852 { 853 struct kvmppc_xive_src_block *sb; 854 struct kvmppc_xive_irq_state *state; 855 u16 idx; 856 857 sb = kvmppc_xive_find_source(xive, irq, &idx); 858 if (!sb) 859 return false; 860 state = &sb->irq_state[idx]; 861 if (!state->valid) 862 return false; 863 864 /* 865 * Trigger the IPI. This assumes we never restore a pass-through 866 * interrupt which should be safe enough 867 */ 868 xive_irq_trigger(&state->ipi_data); 869 870 return true; 871 } 872 873 u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu) 874 { 875 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; 876 877 if (!xc) 878 return 0; 879 880 /* Return the per-cpu state for state saving/migration */ 881 return (u64)xc->cppr << KVM_REG_PPC_ICP_CPPR_SHIFT | 882 (u64)xc->mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT | 883 (u64)0xff << KVM_REG_PPC_ICP_PPRI_SHIFT; 884 } 885 886 int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval) 887 { 888 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; 889 struct kvmppc_xive *xive = vcpu->kvm->arch.xive; 890 u8 cppr, mfrr; 891 u32 xisr; 892 893 if (!xc || !xive) 894 return -ENOENT; 895 896 /* Grab individual state fields. 
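	 * The icpval encoding is the XICS one_reg ICP state layout,
	 * roughly: CPPR in the top byte, with the XISR and MFRR fields
	 * below it, each extracted with the corresponding
	 * KVM_REG_PPC_ICP_*_SHIFT/MASK constant (see the definitions
	 * for the exact layout).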
We don't use pending_pri */ 897 cppr = icpval >> KVM_REG_PPC_ICP_CPPR_SHIFT; 898 xisr = (icpval >> KVM_REG_PPC_ICP_XISR_SHIFT) & 899 KVM_REG_PPC_ICP_XISR_MASK; 900 mfrr = icpval >> KVM_REG_PPC_ICP_MFRR_SHIFT; 901 902 pr_devel("set_icp vcpu %d cppr=0x%x mfrr=0x%x xisr=0x%x\n", 903 xc->server_num, cppr, mfrr, xisr); 904 905 /* 906 * We can't update the state of a "pushed" VCPU, but that 907 * shouldn't happen because the vcpu->mutex makes running a 908 * vcpu mutually exclusive with doing one_reg get/set on it. 909 */ 910 if (WARN_ON(vcpu->arch.xive_pushed)) 911 return -EIO; 912 913 /* Update VCPU HW saved state */ 914 vcpu->arch.xive_saved_state.cppr = cppr; 915 xc->hw_cppr = xc->cppr = cppr; 916 917 /* 918 * Update MFRR state. If it's not 0xff, we mark the VCPU as 919 * having a pending MFRR change, which will re-evaluate the 920 * target. The VCPU will thus potentially get a spurious 921 * interrupt but that's not a big deal. 922 */ 923 xc->mfrr = mfrr; 924 if (mfrr < cppr) 925 xive_irq_trigger(&xc->vp_ipi_data); 926 927 /* 928 * Now saved XIRR is "interesting". It means there's something in 929 * the legacy "1 element" queue... for an IPI we simply ignore it, 930 * as the MFRR restore will handle that. For anything else we need 931 * to force a resend of the source. 932 * However the source may not have been setup yet. If that's the 933 * case, we keep that info and increment a counter in the xive to 934 * tell subsequent xive_set_source() to go look. 935 */ 936 if (xisr > XICS_IPI && !xive_restore_pending_irq(xive, xisr)) { 937 xc->delayed_irq = xisr; 938 xive->delayed_irqs++; 939 pr_devel(" xisr restore delayed\n"); 940 } 941 942 return 0; 943 } 944 945 int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq, 946 unsigned long host_irq) 947 { 948 struct kvmppc_xive *xive = kvm->arch.xive; 949 struct kvmppc_xive_src_block *sb; 950 struct kvmppc_xive_irq_state *state; 951 struct irq_data *host_data = 952 irq_domain_get_irq_data(irq_get_default_host(), host_irq); 953 unsigned int hw_irq = (unsigned int)irqd_to_hwirq(host_data); 954 u16 idx; 955 u8 prio; 956 int rc; 957 958 if (!xive) 959 return -ENODEV; 960 961 pr_debug("%s: GIRQ 0x%lx host IRQ %ld XIVE HW IRQ 0x%x\n", 962 __func__, guest_irq, host_irq, hw_irq); 963 964 sb = kvmppc_xive_find_source(xive, guest_irq, &idx); 965 if (!sb) 966 return -EINVAL; 967 state = &sb->irq_state[idx]; 968 969 /* 970 * Mark the passed-through interrupt as going to a VCPU, 971 * this will prevent further EOIs and similar operations 972 * from the XIVE code. It will also mask the interrupt 973 * to either PQ=10 or 11 state, the latter if the interrupt 974 * is pending. This will allow us to unmask or retrigger it 975 * after routing it to the guest with a simple EOI. 976 * 977 * The "state" argument is a "token", all it needs is to be 978 * non-NULL to switch to passed-through or NULL for the 979 * other way around. We may not yet have an actual VCPU 980 * target here and we don't really care. 981 */ 982 rc = irq_set_vcpu_affinity(host_irq, state); 983 if (rc) { 984 pr_err("Failed to set VCPU affinity for host IRQ %ld\n", host_irq); 985 return rc; 986 } 987 988 /* 989 * Mask and read state of IPI. 
We need to know if its P bit 990 * is set as that means it's potentially already using a 991 * queue entry in the target 992 */ 993 prio = xive_lock_and_mask(xive, sb, state); 994 pr_devel(" old IPI prio %02x P:%d Q:%d\n", prio, 995 state->old_p, state->old_q); 996 997 /* Turn the IPI hard off */ 998 xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01); 999 1000 /* 1001 * Reset ESB guest mapping. Needed when ESB pages are exposed 1002 * to the guest in XIVE native mode 1003 */ 1004 if (xive->ops && xive->ops->reset_mapped) 1005 xive->ops->reset_mapped(kvm, guest_irq); 1006 1007 /* Grab info about irq */ 1008 state->pt_number = hw_irq; 1009 state->pt_data = irq_data_get_irq_handler_data(host_data); 1010 1011 /* 1012 * Configure the IRQ to match the existing configuration of 1013 * the IPI if it was already targetted. Otherwise this will 1014 * mask the interrupt in a lossy way (act_priority is 0xff) 1015 * which is fine for a never started interrupt. 1016 */ 1017 xive_native_configure_irq(hw_irq, 1018 kvmppc_xive_vp(xive, state->act_server), 1019 state->act_priority, state->number); 1020 1021 /* 1022 * We do an EOI to enable the interrupt (and retrigger if needed) 1023 * if the guest has the interrupt unmasked and the P bit was *not* 1024 * set in the IPI. If it was set, we know a slot may still be in 1025 * use in the target queue thus we have to wait for a guest 1026 * originated EOI 1027 */ 1028 if (prio != MASKED && !state->old_p) 1029 xive_vm_source_eoi(hw_irq, state->pt_data); 1030 1031 /* Clear old_p/old_q as they are no longer relevant */ 1032 state->old_p = state->old_q = false; 1033 1034 /* Restore guest prio (unlocks EOI) */ 1035 mb(); 1036 state->guest_priority = prio; 1037 arch_spin_unlock(&sb->lock); 1038 1039 return 0; 1040 } 1041 EXPORT_SYMBOL_GPL(kvmppc_xive_set_mapped); 1042 1043 int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq, 1044 unsigned long host_irq) 1045 { 1046 struct kvmppc_xive *xive = kvm->arch.xive; 1047 struct kvmppc_xive_src_block *sb; 1048 struct kvmppc_xive_irq_state *state; 1049 u16 idx; 1050 u8 prio; 1051 int rc; 1052 1053 if (!xive) 1054 return -ENODEV; 1055 1056 pr_debug("%s: GIRQ 0x%lx host IRQ %ld\n", __func__, guest_irq, host_irq); 1057 1058 sb = kvmppc_xive_find_source(xive, guest_irq, &idx); 1059 if (!sb) 1060 return -EINVAL; 1061 state = &sb->irq_state[idx]; 1062 1063 /* 1064 * Mask and read state of IRQ. We need to know if its P bit 1065 * is set as that means it's potentially already using a 1066 * queue entry in the target 1067 */ 1068 prio = xive_lock_and_mask(xive, sb, state); 1069 pr_devel(" old IRQ prio %02x P:%d Q:%d\n", prio, 1070 state->old_p, state->old_q); 1071 1072 /* 1073 * If old_p is set, the interrupt is pending, we switch it to 1074 * PQ=11. This will force a resend in the host so the interrupt 1075 * isn't lost to whatver host driver may pick it up 1076 */ 1077 if (state->old_p) 1078 xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_11); 1079 1080 /* Release the passed-through interrupt to the host */ 1081 rc = irq_set_vcpu_affinity(host_irq, NULL); 1082 if (rc) { 1083 pr_err("Failed to clr VCPU affinity for host IRQ %ld\n", host_irq); 1084 return rc; 1085 } 1086 1087 /* Forget about the IRQ */ 1088 state->pt_number = 0; 1089 state->pt_data = NULL; 1090 1091 /* 1092 * Reset ESB guest mapping. 
Needed when ESB pages are exposed 1093 * to the guest in XIVE native mode 1094 */ 1095 if (xive->ops && xive->ops->reset_mapped) { 1096 xive->ops->reset_mapped(kvm, guest_irq); 1097 } 1098 1099 /* Reconfigure the IPI */ 1100 xive_native_configure_irq(state->ipi_number, 1101 kvmppc_xive_vp(xive, state->act_server), 1102 state->act_priority, state->number); 1103 1104 /* 1105 * If old_p is set (we have a queue entry potentially 1106 * occupied) or the interrupt is masked, we set the IPI 1107 * to PQ=10 state. Otherwise we just re-enable it (PQ=00). 1108 */ 1109 if (prio == MASKED || state->old_p) 1110 xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_10); 1111 else 1112 xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_00); 1113 1114 /* Restore guest prio (unlocks EOI) */ 1115 mb(); 1116 state->guest_priority = prio; 1117 arch_spin_unlock(&sb->lock); 1118 1119 return 0; 1120 } 1121 EXPORT_SYMBOL_GPL(kvmppc_xive_clr_mapped); 1122 1123 void kvmppc_xive_disable_vcpu_interrupts(struct kvm_vcpu *vcpu) 1124 { 1125 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; 1126 struct kvm *kvm = vcpu->kvm; 1127 struct kvmppc_xive *xive = kvm->arch.xive; 1128 int i, j; 1129 1130 for (i = 0; i <= xive->max_sbid; i++) { 1131 struct kvmppc_xive_src_block *sb = xive->src_blocks[i]; 1132 1133 if (!sb) 1134 continue; 1135 for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) { 1136 struct kvmppc_xive_irq_state *state = &sb->irq_state[j]; 1137 1138 if (!state->valid) 1139 continue; 1140 if (state->act_priority == MASKED) 1141 continue; 1142 if (state->act_server != xc->server_num) 1143 continue; 1144 1145 /* Clean it up */ 1146 arch_spin_lock(&sb->lock); 1147 state->act_priority = MASKED; 1148 xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01); 1149 xive_native_configure_irq(state->ipi_number, 0, MASKED, 0); 1150 if (state->pt_number) { 1151 xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_01); 1152 xive_native_configure_irq(state->pt_number, 0, MASKED, 0); 1153 } 1154 arch_spin_unlock(&sb->lock); 1155 } 1156 } 1157 1158 /* Disable vcpu's escalation interrupt */ 1159 if (vcpu->arch.xive_esc_on) { 1160 __raw_readq((void __iomem *)(vcpu->arch.xive_esc_vaddr + 1161 XIVE_ESB_SET_PQ_01)); 1162 vcpu->arch.xive_esc_on = false; 1163 } 1164 1165 /* 1166 * Clear pointers to escalation interrupt ESB. 1167 * This is safe because the vcpu->mutex is held, preventing 1168 * any other CPU from concurrently executing a KVM_RUN ioctl. 1169 */ 1170 vcpu->arch.xive_esc_vaddr = 0; 1171 vcpu->arch.xive_esc_raddr = 0; 1172 } 1173 1174 /* 1175 * In single escalation mode, the escalation interrupt is marked so 1176 * that EOI doesn't re-enable it, but just sets the stale_p flag to 1177 * indicate that the P bit has already been dealt with. However, the 1178 * assembly code that enters the guest sets PQ to 00 without clearing 1179 * stale_p (because it has no easy way to address it). Hence we have 1180 * to adjust stale_p before shutting down the interrupt. 1181 */ 1182 void xive_cleanup_single_escalation(struct kvm_vcpu *vcpu, 1183 struct kvmppc_xive_vcpu *xc, int irq) 1184 { 1185 struct irq_data *d = irq_get_irq_data(irq); 1186 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); 1187 1188 /* 1189 * This slightly odd sequence gives the right result 1190 * (i.e. stale_p set if xive_esc_on is false) even if 1191 * we race with xive_esc_irq() and xive_irq_eoi(). 
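	 *
	 * Spelled out (a sketch of the reasoning, not extra logic):
	 *
	 *  - if xive_esc_on reads as true, the escalation has not fired
	 *    since it was last re-armed, there is no stale P bit and
	 *    stale_p stays false;
	 *
	 *  - if xive_esc_on reads as false, xive_esc_irq() has run (its
	 *    smp_wmb() pairs with the smp_mb() below), and because of
	 *    XIVE_IRQ_FLAG_NO_EOI the P bit is still set, so stale_p is
	 *    set to tell the shutdown code that P has already been
	 *    dealt with.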
	 */
	xd->stale_p = false;
	smp_mb();	/* paired with smp_wmb in xive_esc_irq */
	if (!vcpu->arch.xive_esc_on)
		xd->stale_p = true;
}

void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
	int i;

	if (!kvmppc_xics_enabled(vcpu))
		return;

	if (!xc)
		return;

	pr_devel("cleanup_vcpu(cpu=%d)\n", xc->server_num);

	/* Ensure no interrupt is still routed to that VP */
	xc->valid = false;
	kvmppc_xive_disable_vcpu_interrupts(vcpu);

	/* Mask the VP IPI */
	xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_01);

	/* Free escalations */
	for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
		if (xc->esc_virq[i]) {
			if (kvmppc_xive_has_single_escalation(xc->xive))
				xive_cleanup_single_escalation(vcpu, xc,
							       xc->esc_virq[i]);
			free_irq(xc->esc_virq[i], vcpu);
			irq_dispose_mapping(xc->esc_virq[i]);
			kfree(xc->esc_virq_names[i]);
		}
	}

	/* Disable the VP */
	xive_native_disable_vp(xc->vp_id);

	/* Clear the cam word so guest entry won't try to push context */
	vcpu->arch.xive_cam_word = 0;

	/* Free the queues */
	for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
		struct xive_q *q = &xc->queues[i];

		xive_native_disable_queue(xc->vp_id, q, i);
		if (q->qpage) {
			free_pages((unsigned long)q->qpage,
				   xive->q_page_order);
			q->qpage = NULL;
		}
	}

	/* Free the IPI */
	if (xc->vp_ipi) {
		xive_cleanup_irq_data(&xc->vp_ipi_data);
		xive_native_free_irq(xc->vp_ipi);
	}
	/* Free the VP */
	kfree(xc);

	/* Cleanup the vcpu */
	vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT;
	vcpu->arch.xive_vcpu = NULL;
}

static bool kvmppc_xive_vcpu_id_valid(struct kvmppc_xive *xive, u32 cpu)
{
	/* We have a block of xive->nr_servers VPs. We just need to check
	 * packed vCPU ids are below that.
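	 * For example (illustrative only): with the default
	 * xive->nr_servers == KVM_MAX_VCPUS, any vCPU id that
	 * kvmppc_pack_vcpu_id() folds below KVM_MAX_VCPUS is accepted,
	 * while one that packs at or above it makes the caller fail
	 * with -EINVAL.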
1267 */ 1268 return kvmppc_pack_vcpu_id(xive->kvm, cpu) < xive->nr_servers; 1269 } 1270 1271 int kvmppc_xive_compute_vp_id(struct kvmppc_xive *xive, u32 cpu, u32 *vp) 1272 { 1273 u32 vp_id; 1274 1275 if (!kvmppc_xive_vcpu_id_valid(xive, cpu)) { 1276 pr_devel("Out of bounds !\n"); 1277 return -EINVAL; 1278 } 1279 1280 if (xive->vp_base == XIVE_INVALID_VP) { 1281 xive->vp_base = xive_native_alloc_vp_block(xive->nr_servers); 1282 pr_devel("VP_Base=%x nr_servers=%d\n", xive->vp_base, xive->nr_servers); 1283 1284 if (xive->vp_base == XIVE_INVALID_VP) 1285 return -ENOSPC; 1286 } 1287 1288 vp_id = kvmppc_xive_vp(xive, cpu); 1289 if (kvmppc_xive_vp_in_use(xive->kvm, vp_id)) { 1290 pr_devel("Duplicate !\n"); 1291 return -EEXIST; 1292 } 1293 1294 *vp = vp_id; 1295 1296 return 0; 1297 } 1298 1299 int kvmppc_xive_connect_vcpu(struct kvm_device *dev, 1300 struct kvm_vcpu *vcpu, u32 cpu) 1301 { 1302 struct kvmppc_xive *xive = dev->private; 1303 struct kvmppc_xive_vcpu *xc; 1304 int i, r = -EBUSY; 1305 u32 vp_id; 1306 1307 pr_devel("connect_vcpu(cpu=%d)\n", cpu); 1308 1309 if (dev->ops != &kvm_xive_ops) { 1310 pr_devel("Wrong ops !\n"); 1311 return -EPERM; 1312 } 1313 if (xive->kvm != vcpu->kvm) 1314 return -EPERM; 1315 if (vcpu->arch.irq_type != KVMPPC_IRQ_DEFAULT) 1316 return -EBUSY; 1317 1318 /* We need to synchronize with queue provisioning */ 1319 mutex_lock(&xive->lock); 1320 1321 r = kvmppc_xive_compute_vp_id(xive, cpu, &vp_id); 1322 if (r) 1323 goto bail; 1324 1325 xc = kzalloc(sizeof(*xc), GFP_KERNEL); 1326 if (!xc) { 1327 r = -ENOMEM; 1328 goto bail; 1329 } 1330 1331 vcpu->arch.xive_vcpu = xc; 1332 xc->xive = xive; 1333 xc->vcpu = vcpu; 1334 xc->server_num = cpu; 1335 xc->vp_id = vp_id; 1336 xc->mfrr = 0xff; 1337 xc->valid = true; 1338 1339 r = xive_native_get_vp_info(xc->vp_id, &xc->vp_cam, &xc->vp_chip_id); 1340 if (r) 1341 goto bail; 1342 1343 if (!kvmppc_xive_check_save_restore(vcpu)) { 1344 pr_err("inconsistent save-restore setup for VCPU %d\n", cpu); 1345 r = -EIO; 1346 goto bail; 1347 } 1348 1349 /* Configure VCPU fields for use by assembly push/pull */ 1350 vcpu->arch.xive_saved_state.w01 = cpu_to_be64(0xff000000); 1351 vcpu->arch.xive_cam_word = cpu_to_be32(xc->vp_cam | TM_QW1W2_VO); 1352 1353 /* Allocate IPI */ 1354 xc->vp_ipi = xive_native_alloc_irq(); 1355 if (!xc->vp_ipi) { 1356 pr_err("Failed to allocate xive irq for VCPU IPI\n"); 1357 r = -EIO; 1358 goto bail; 1359 } 1360 pr_devel(" IPI=0x%x\n", xc->vp_ipi); 1361 1362 r = xive_native_populate_irq_data(xc->vp_ipi, &xc->vp_ipi_data); 1363 if (r) 1364 goto bail; 1365 1366 /* 1367 * Enable the VP first as the single escalation mode will 1368 * affect escalation interrupts numbering 1369 */ 1370 r = xive_native_enable_vp(xc->vp_id, kvmppc_xive_has_single_escalation(xive)); 1371 if (r) { 1372 pr_err("Failed to enable VP in OPAL, err %d\n", r); 1373 goto bail; 1374 } 1375 1376 /* 1377 * Initialize queues. Initially we set them all for no queueing 1378 * and we enable escalation for queue 0 only which we'll use for 1379 * our mfrr change notifications. If the VCPU is hot-plugged, we 1380 * do handle provisioning however based on the existing "map" 1381 * of enabled queues. 1382 */ 1383 for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) { 1384 struct xive_q *q = &xc->queues[i]; 1385 1386 /* Single escalation, no queue 7 */ 1387 if (i == 7 && kvmppc_xive_has_single_escalation(xive)) 1388 break; 1389 1390 /* Is queue already enabled ? 
Provision it */ 1391 if (xive->qmap & (1 << i)) { 1392 r = xive_provision_queue(vcpu, i); 1393 if (r == 0 && !kvmppc_xive_has_single_escalation(xive)) 1394 kvmppc_xive_attach_escalation( 1395 vcpu, i, kvmppc_xive_has_single_escalation(xive)); 1396 if (r) 1397 goto bail; 1398 } else { 1399 r = xive_native_configure_queue(xc->vp_id, 1400 q, i, NULL, 0, true); 1401 if (r) { 1402 pr_err("Failed to configure queue %d for VCPU %d\n", 1403 i, cpu); 1404 goto bail; 1405 } 1406 } 1407 } 1408 1409 /* If not done above, attach priority 0 escalation */ 1410 r = kvmppc_xive_attach_escalation(vcpu, 0, kvmppc_xive_has_single_escalation(xive)); 1411 if (r) 1412 goto bail; 1413 1414 /* Route the IPI */ 1415 r = xive_native_configure_irq(xc->vp_ipi, xc->vp_id, 0, XICS_IPI); 1416 if (!r) 1417 xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_00); 1418 1419 bail: 1420 mutex_unlock(&xive->lock); 1421 if (r) { 1422 kvmppc_xive_cleanup_vcpu(vcpu); 1423 return r; 1424 } 1425 1426 vcpu->arch.irq_type = KVMPPC_IRQ_XICS; 1427 return 0; 1428 } 1429 1430 /* 1431 * Scanning of queues before/after migration save 1432 */ 1433 static void xive_pre_save_set_queued(struct kvmppc_xive *xive, u32 irq) 1434 { 1435 struct kvmppc_xive_src_block *sb; 1436 struct kvmppc_xive_irq_state *state; 1437 u16 idx; 1438 1439 sb = kvmppc_xive_find_source(xive, irq, &idx); 1440 if (!sb) 1441 return; 1442 1443 state = &sb->irq_state[idx]; 1444 1445 /* Some sanity checking */ 1446 if (!state->valid) { 1447 pr_err("invalid irq 0x%x in cpu queue!\n", irq); 1448 return; 1449 } 1450 1451 /* 1452 * If the interrupt is in a queue it should have P set. 1453 * We warn so that gets reported. A backtrace isn't useful 1454 * so no need to use a WARN_ON. 1455 */ 1456 if (!state->saved_p) 1457 pr_err("Interrupt 0x%x is marked in a queue but P not set !\n", irq); 1458 1459 /* Set flag */ 1460 state->in_queue = true; 1461 } 1462 1463 static void xive_pre_save_mask_irq(struct kvmppc_xive *xive, 1464 struct kvmppc_xive_src_block *sb, 1465 u32 irq) 1466 { 1467 struct kvmppc_xive_irq_state *state = &sb->irq_state[irq]; 1468 1469 if (!state->valid) 1470 return; 1471 1472 /* Mask and save state, this will also sync HW queues */ 1473 state->saved_scan_prio = xive_lock_and_mask(xive, sb, state); 1474 1475 /* Transfer P and Q */ 1476 state->saved_p = state->old_p; 1477 state->saved_q = state->old_q; 1478 1479 /* Unlock */ 1480 arch_spin_unlock(&sb->lock); 1481 } 1482 1483 static void xive_pre_save_unmask_irq(struct kvmppc_xive *xive, 1484 struct kvmppc_xive_src_block *sb, 1485 u32 irq) 1486 { 1487 struct kvmppc_xive_irq_state *state = &sb->irq_state[irq]; 1488 1489 if (!state->valid) 1490 return; 1491 1492 /* 1493 * Lock / exclude EOI (not technically necessary if the 1494 * guest isn't running concurrently. If this becomes a 1495 * performance issue we can probably remove the lock. 
1496 */ 1497 xive_lock_for_unmask(sb, state); 1498 1499 /* Restore mask/prio if it wasn't masked */ 1500 if (state->saved_scan_prio != MASKED) 1501 xive_finish_unmask(xive, sb, state, state->saved_scan_prio); 1502 1503 /* Unlock */ 1504 arch_spin_unlock(&sb->lock); 1505 } 1506 1507 static void xive_pre_save_queue(struct kvmppc_xive *xive, struct xive_q *q) 1508 { 1509 u32 idx = q->idx; 1510 u32 toggle = q->toggle; 1511 u32 irq; 1512 1513 do { 1514 irq = __xive_read_eq(q->qpage, q->msk, &idx, &toggle); 1515 if (irq > XICS_IPI) 1516 xive_pre_save_set_queued(xive, irq); 1517 } while(irq); 1518 } 1519 1520 static void xive_pre_save_scan(struct kvmppc_xive *xive) 1521 { 1522 struct kvm_vcpu *vcpu = NULL; 1523 int i, j; 1524 1525 /* 1526 * See comment in xive_get_source() about how this 1527 * work. Collect a stable state for all interrupts 1528 */ 1529 for (i = 0; i <= xive->max_sbid; i++) { 1530 struct kvmppc_xive_src_block *sb = xive->src_blocks[i]; 1531 if (!sb) 1532 continue; 1533 for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) 1534 xive_pre_save_mask_irq(xive, sb, j); 1535 } 1536 1537 /* Then scan the queues and update the "in_queue" flag */ 1538 kvm_for_each_vcpu(i, vcpu, xive->kvm) { 1539 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; 1540 if (!xc) 1541 continue; 1542 for (j = 0; j < KVMPPC_XIVE_Q_COUNT; j++) { 1543 if (xc->queues[j].qpage) 1544 xive_pre_save_queue(xive, &xc->queues[j]); 1545 } 1546 } 1547 1548 /* Finally restore interrupt states */ 1549 for (i = 0; i <= xive->max_sbid; i++) { 1550 struct kvmppc_xive_src_block *sb = xive->src_blocks[i]; 1551 if (!sb) 1552 continue; 1553 for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) 1554 xive_pre_save_unmask_irq(xive, sb, j); 1555 } 1556 } 1557 1558 static void xive_post_save_scan(struct kvmppc_xive *xive) 1559 { 1560 u32 i, j; 1561 1562 /* Clear all the in_queue flags */ 1563 for (i = 0; i <= xive->max_sbid; i++) { 1564 struct kvmppc_xive_src_block *sb = xive->src_blocks[i]; 1565 if (!sb) 1566 continue; 1567 for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) 1568 sb->irq_state[j].in_queue = false; 1569 } 1570 1571 /* Next get_source() will do a new scan */ 1572 xive->saved_src_count = 0; 1573 } 1574 1575 /* 1576 * This returns the source configuration and state to user space. 1577 */ 1578 static int xive_get_source(struct kvmppc_xive *xive, long irq, u64 addr) 1579 { 1580 struct kvmppc_xive_src_block *sb; 1581 struct kvmppc_xive_irq_state *state; 1582 u64 __user *ubufp = (u64 __user *) addr; 1583 u64 val, prio; 1584 u16 idx; 1585 1586 sb = kvmppc_xive_find_source(xive, irq, &idx); 1587 if (!sb) 1588 return -ENOENT; 1589 1590 state = &sb->irq_state[idx]; 1591 1592 if (!state->valid) 1593 return -ENOENT; 1594 1595 pr_devel("get_source(%ld)...\n", irq); 1596 1597 /* 1598 * So to properly save the state into something that looks like a 1599 * XICS migration stream we cannot treat interrupts individually. 1600 * 1601 * We need, instead, mask them all (& save their previous PQ state) 1602 * to get a stable state in the HW, then sync them to ensure that 1603 * any interrupt that had already fired hits its queue, and finally 1604 * scan all the queues to collect which interrupts are still present 1605 * in the queues, so we can set the "pending" flag on them and 1606 * they can be resent on restore. 1607 * 1608 * So we do it all when the "first" interrupt gets saved, all the 1609 * state is collected at that point, the rest of xive_get_source() 1610 * will merely collect and convert that state to the expected 1611 * userspace bit mask. 
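	 *
	 * For each source the value handed back below packs (see the
	 * KVM_XICS_* definitions, summary only):
	 *
	 *	val  = act_server;			// destination
	 *	val |= prio << KVM_XICS_PRIORITY_SHIFT;	// priority
	 *	val |= KVM_XICS_MASKED, _LEVEL_SENSITIVE, _PENDING,
	 *	       _PRESENTED, _QUEUED as applicable;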
1612 */ 1613 if (xive->saved_src_count == 0) 1614 xive_pre_save_scan(xive); 1615 xive->saved_src_count++; 1616 1617 /* Convert saved state into something compatible with xics */ 1618 val = state->act_server; 1619 prio = state->saved_scan_prio; 1620 1621 if (prio == MASKED) { 1622 val |= KVM_XICS_MASKED; 1623 prio = state->saved_priority; 1624 } 1625 val |= prio << KVM_XICS_PRIORITY_SHIFT; 1626 if (state->lsi) { 1627 val |= KVM_XICS_LEVEL_SENSITIVE; 1628 if (state->saved_p) 1629 val |= KVM_XICS_PENDING; 1630 } else { 1631 if (state->saved_p) 1632 val |= KVM_XICS_PRESENTED; 1633 1634 if (state->saved_q) 1635 val |= KVM_XICS_QUEUED; 1636 1637 /* 1638 * We mark it pending (which will attempt a re-delivery) 1639 * if we are in a queue *or* we were masked and had 1640 * Q set which is equivalent to the XICS "masked pending" 1641 * state 1642 */ 1643 if (state->in_queue || (prio == MASKED && state->saved_q)) 1644 val |= KVM_XICS_PENDING; 1645 } 1646 1647 /* 1648 * If that was the last interrupt saved, reset the 1649 * in_queue flags 1650 */ 1651 if (xive->saved_src_count == xive->src_count) 1652 xive_post_save_scan(xive); 1653 1654 /* Copy the result to userspace */ 1655 if (put_user(val, ubufp)) 1656 return -EFAULT; 1657 1658 return 0; 1659 } 1660 1661 struct kvmppc_xive_src_block *kvmppc_xive_create_src_block( 1662 struct kvmppc_xive *xive, int irq) 1663 { 1664 struct kvmppc_xive_src_block *sb; 1665 int i, bid; 1666 1667 bid = irq >> KVMPPC_XICS_ICS_SHIFT; 1668 1669 mutex_lock(&xive->lock); 1670 1671 /* block already exists - somebody else got here first */ 1672 if (xive->src_blocks[bid]) 1673 goto out; 1674 1675 /* Create the ICS */ 1676 sb = kzalloc(sizeof(*sb), GFP_KERNEL); 1677 if (!sb) 1678 goto out; 1679 1680 sb->id = bid; 1681 1682 for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) { 1683 sb->irq_state[i].number = (bid << KVMPPC_XICS_ICS_SHIFT) | i; 1684 sb->irq_state[i].eisn = 0; 1685 sb->irq_state[i].guest_priority = MASKED; 1686 sb->irq_state[i].saved_priority = MASKED; 1687 sb->irq_state[i].act_priority = MASKED; 1688 } 1689 smp_wmb(); 1690 xive->src_blocks[bid] = sb; 1691 1692 if (bid > xive->max_sbid) 1693 xive->max_sbid = bid; 1694 1695 out: 1696 mutex_unlock(&xive->lock); 1697 return xive->src_blocks[bid]; 1698 } 1699 1700 static bool xive_check_delayed_irq(struct kvmppc_xive *xive, u32 irq) 1701 { 1702 struct kvm *kvm = xive->kvm; 1703 struct kvm_vcpu *vcpu = NULL; 1704 int i; 1705 1706 kvm_for_each_vcpu(i, vcpu, kvm) { 1707 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; 1708 1709 if (!xc) 1710 continue; 1711 1712 if (xc->delayed_irq == irq) { 1713 xc->delayed_irq = 0; 1714 xive->delayed_irqs--; 1715 return true; 1716 } 1717 } 1718 return false; 1719 } 1720 1721 static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr) 1722 { 1723 struct kvmppc_xive_src_block *sb; 1724 struct kvmppc_xive_irq_state *state; 1725 u64 __user *ubufp = (u64 __user *) addr; 1726 u16 idx; 1727 u64 val; 1728 u8 act_prio, guest_prio; 1729 u32 server; 1730 int rc = 0; 1731 1732 if (irq < KVMPPC_XICS_FIRST_IRQ || irq >= KVMPPC_XICS_NR_IRQS) 1733 return -ENOENT; 1734 1735 pr_devel("set_source(irq=0x%lx)\n", irq); 1736 1737 /* Find the source */ 1738 sb = kvmppc_xive_find_source(xive, irq, &idx); 1739 if (!sb) { 1740 pr_devel("No source, creating source block...\n"); 1741 sb = kvmppc_xive_create_src_block(xive, irq); 1742 if (!sb) { 1743 pr_devel("Failed to create block...\n"); 1744 return -ENOMEM; 1745 } 1746 } 1747 state = &sb->irq_state[idx]; 1748 1749 /* Read user passed data */ 1750 if 
(get_user(val, ubufp)) {
		pr_devel("fault getting user info !\n");
		return -EFAULT;
	}

	server = val & KVM_XICS_DESTINATION_MASK;
	guest_prio = val >> KVM_XICS_PRIORITY_SHIFT;

	pr_devel("  val=0x%016llx (server=0x%x, guest_prio=%d)\n",
		 val, server, guest_prio);

	/*
	 * If the source doesn't already have an IPI, allocate
	 * one and get the corresponding data
	 */
	if (!state->ipi_number) {
		state->ipi_number = xive_native_alloc_irq();
		if (state->ipi_number == 0) {
			pr_devel("Failed to allocate IPI !\n");
			return -ENOMEM;
		}
		xive_native_populate_irq_data(state->ipi_number, &state->ipi_data);
		pr_devel(" src_ipi=0x%x\n", state->ipi_number);
	}

	/*
	 * We use lock_and_mask() to set us in the right masked
	 * state. We will override that state from the saved state
	 * further down, but this will handle the cases of interrupts
	 * that need FW masking. We set the initial guest_priority to
	 * 0 before calling it to ensure it actually performs the masking.
	 */
	state->guest_priority = 0;
	xive_lock_and_mask(xive, sb, state);

	/*
	 * Now, we select a target if we have one. If we don't we
	 * leave the interrupt untargetted. It means that an interrupt
	 * can become "untargetted" across migration if it was masked
	 * by set_xive() but there is little we can do about it.
	 */

	/* First convert prio and mark interrupt as untargetted */
	act_prio = xive_prio_from_guest(guest_prio);
	state->act_priority = MASKED;

	/*
	 * We need to drop the lock due to the mutex below. Hopefully
	 * nothing is touching that interrupt yet since it hasn't been
	 * advertised to a running guest yet
	 */
	arch_spin_unlock(&sb->lock);

	/* If we have a priority, target the interrupt */
	if (act_prio != MASKED) {
		/* First, check provisioning of queues */
		mutex_lock(&xive->lock);
		rc = xive_check_provisioning(xive->kvm, act_prio);
		mutex_unlock(&xive->lock);

		/* Target interrupt */
		if (rc == 0)
			rc = xive_target_interrupt(xive->kvm, state,
						   server, act_prio);
		/*
		 * If provisioning or targetting failed, leave it
		 * alone and masked. It will remain disabled until
		 * the guest re-targets it.
		 */
	}

	/*
	 * Find out if this was a delayed irq stashed in an ICP,
	 * in which case, treat it as pending
	 */
	if (xive->delayed_irqs && xive_check_delayed_irq(xive, irq)) {
		val |= KVM_XICS_PENDING;
		pr_devel("  Found delayed ! forcing PENDING !\n");
	}

	/* Cleanup the SW state */
	state->old_p = false;
	state->old_q = false;
	state->lsi = false;
	state->asserted = false;

	/* Restore LSI state */
	if (val & KVM_XICS_LEVEL_SENSITIVE) {
		state->lsi = true;
		if (val & KVM_XICS_PENDING)
			state->asserted = true;
		pr_devel("  LSI ! Asserted=%d\n", state->asserted);
	}

	/*
	 * Restore P and Q. If the interrupt was pending, we
	 * force Q and !P, which will trigger a resend.
	 *
	 * That means that a guest that had both an interrupt
	 * pending (queued) and Q set will restore with only
	 * one instance of that interrupt instead of 2, but that
	 * is perfectly fine as coalescing interrupts that haven't
	 * been presented yet is always allowed.
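	 *
	 * The mapping applied just below is thus (summary only):
	 *
	 *	PRESENTED and not PENDING	-> old_p = true
	 *	QUEUED or PENDING		-> old_q = true
	 *
	 * so a PENDING interrupt restores as Q-only and is re-sent by
	 * the effective EOI done when it is unmasked further down.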
1853 */ 1854 if (val & KVM_XICS_PRESENTED && !(val & KVM_XICS_PENDING)) 1855 state->old_p = true; 1856 if (val & KVM_XICS_QUEUED || val & KVM_XICS_PENDING) 1857 state->old_q = true; 1858 1859 pr_devel(" P=%d, Q=%d\n", state->old_p, state->old_q); 1860 1861 /* 1862 * If the interrupt was unmasked, update guest priority and 1863 * perform the appropriate state transition and do a 1864 * re-trigger if necessary. 1865 */ 1866 if (val & KVM_XICS_MASKED) { 1867 pr_devel(" masked, saving prio\n"); 1868 state->guest_priority = MASKED; 1869 state->saved_priority = guest_prio; 1870 } else { 1871 pr_devel(" unmasked, restoring to prio %d\n", guest_prio); 1872 xive_finish_unmask(xive, sb, state, guest_prio); 1873 state->saved_priority = guest_prio; 1874 } 1875 1876 /* Increment the number of valid sources and mark this one valid */ 1877 if (!state->valid) 1878 xive->src_count++; 1879 state->valid = true; 1880 1881 return 0; 1882 } 1883 1884 int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level, 1885 bool line_status) 1886 { 1887 struct kvmppc_xive *xive = kvm->arch.xive; 1888 struct kvmppc_xive_src_block *sb; 1889 struct kvmppc_xive_irq_state *state; 1890 u16 idx; 1891 1892 if (!xive) 1893 return -ENODEV; 1894 1895 sb = kvmppc_xive_find_source(xive, irq, &idx); 1896 if (!sb) 1897 return -EINVAL; 1898 1899 /* Perform locklessly .... (we need to do some RCUisms here...) */ 1900 state = &sb->irq_state[idx]; 1901 if (!state->valid) 1902 return -EINVAL; 1903 1904 /* We don't allow a trigger on a passed-through interrupt */ 1905 if (state->pt_number) 1906 return -EINVAL; 1907 1908 if ((level == 1 && state->lsi) || level == KVM_INTERRUPT_SET_LEVEL) 1909 state->asserted = true; 1910 else if (level == 0 || level == KVM_INTERRUPT_UNSET) { 1911 state->asserted = false; 1912 return 0; 1913 } 1914 1915 /* Trigger the IPI */ 1916 xive_irq_trigger(&state->ipi_data); 1917 1918 return 0; 1919 } 1920 1921 int kvmppc_xive_set_nr_servers(struct kvmppc_xive *xive, u64 addr) 1922 { 1923 u32 __user *ubufp = (u32 __user *) addr; 1924 u32 nr_servers; 1925 int rc = 0; 1926 1927 if (get_user(nr_servers, ubufp)) 1928 return -EFAULT; 1929 1930 pr_devel("%s nr_servers=%u\n", __func__, nr_servers); 1931 1932 if (!nr_servers || nr_servers > KVM_MAX_VCPU_ID) 1933 return -EINVAL; 1934 1935 mutex_lock(&xive->lock); 1936 if (xive->vp_base != XIVE_INVALID_VP) 1937 /* The VP block is allocated once and freed when the device 1938 * is released. Better not allow to change its size since its 1939 * used by connect_vcpu to validate vCPU ids are valid (eg, 1940 * setting it back to a higher value could allow connect_vcpu 1941 * to come up with a VP id that goes beyond the VP block, which 1942 * is likely to cause a crash in OPAL). 1943 */ 1944 rc = -EBUSY; 1945 else if (nr_servers > KVM_MAX_VCPUS) 1946 /* We don't need more servers. Higher vCPU ids get packed 1947 * down below KVM_MAX_VCPUS by kvmppc_pack_vcpu_id(). 
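		 * For reference, userspace drives this via the control
		 * group of the XICS-on-XIVE device, along the lines of
		 * (illustrative sketch only, names of the local
		 * variables are made up):
		 *
		 *	__u32 nr = nr_guest_servers;
		 *	struct kvm_device_attr attr = {
		 *		.group = KVM_DEV_XICS_GRP_CTRL,
		 *		.attr  = KVM_DEV_XICS_NR_SERVERS,
		 *		.addr  = (__u64)(unsigned long)&nr,
		 *	};
		 *	ioctl(device_fd, KVM_SET_DEVICE_ATTR, &attr);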
static int xive_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	struct kvmppc_xive *xive = dev->private;

	/* We honor the existing XICS ioctl */
	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		return xive_set_source(xive, attr->attr, attr->addr);
	case KVM_DEV_XICS_GRP_CTRL:
		switch (attr->attr) {
		case KVM_DEV_XICS_NR_SERVERS:
			return kvmppc_xive_set_nr_servers(xive, attr->addr);
		}
	}
	return -ENXIO;
}

static int xive_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	struct kvmppc_xive *xive = dev->private;

	/* We honor the existing XICS ioctl */
	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		return xive_get_source(xive, attr->attr, attr->addr);
	}
	return -ENXIO;
}

static int xive_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	/* We honor the same limits as XICS, at least for now */
	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		if (attr->attr >= KVMPPC_XICS_FIRST_IRQ &&
		    attr->attr < KVMPPC_XICS_NR_IRQS)
			return 0;
		break;
	case KVM_DEV_XICS_GRP_CTRL:
		switch (attr->attr) {
		case KVM_DEV_XICS_NR_SERVERS:
			return 0;
		}
	}
	return -ENXIO;
}

static void kvmppc_xive_cleanup_irq(u32 hw_num, struct xive_irq_data *xd)
{
	xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01);
	xive_native_configure_irq(hw_num, 0, MASKED, 0);
}

void kvmppc_xive_free_sources(struct kvmppc_xive_src_block *sb)
{
	int i;

	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		struct kvmppc_xive_irq_state *state = &sb->irq_state[i];

		if (!state->valid)
			continue;

		kvmppc_xive_cleanup_irq(state->ipi_number, &state->ipi_data);
		xive_cleanup_irq_data(&state->ipi_data);
		xive_native_free_irq(state->ipi_number);

		/* Pass-through, cleanup too but keep IRQ hw data */
		if (state->pt_number)
			kvmppc_xive_cleanup_irq(state->pt_number, state->pt_data);

		state->valid = false;
	}
}
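/*
 * Illustrative sketch, not compiled here: xive_has_attr() above backs the
 * KVM_HAS_DEVICE_ATTR ioctl, so userspace can probe for the NR_SERVERS
 * control before trying to use it. "dev_fd" is a hypothetical device fd.
 *
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_XICS_GRP_CTRL,
 *		.attr  = KVM_DEV_XICS_NR_SERVERS,
 *	};
 *	bool has_nr_servers = (ioctl(dev_fd, KVM_HAS_DEVICE_ATTR, &attr) == 0);
 */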
2067 */ 2068 mutex_lock(&vcpu->mutex); 2069 kvmppc_xive_cleanup_vcpu(vcpu); 2070 mutex_unlock(&vcpu->mutex); 2071 } 2072 2073 /* 2074 * Now that we have cleared vcpu->arch.xive_vcpu, vcpu->arch.irq_type 2075 * and vcpu->arch.xive_esc_[vr]addr on each vcpu, we are safe 2076 * against xive code getting called during vcpu execution or 2077 * set/get one_reg operations. 2078 */ 2079 kvm->arch.xive = NULL; 2080 2081 /* Mask and free interrupts */ 2082 for (i = 0; i <= xive->max_sbid; i++) { 2083 if (xive->src_blocks[i]) 2084 kvmppc_xive_free_sources(xive->src_blocks[i]); 2085 kfree(xive->src_blocks[i]); 2086 xive->src_blocks[i] = NULL; 2087 } 2088 2089 if (xive->vp_base != XIVE_INVALID_VP) 2090 xive_native_free_vp_block(xive->vp_base); 2091 2092 /* 2093 * A reference of the kvmppc_xive pointer is now kept under 2094 * the xive_devices struct of the machine for reuse. It is 2095 * freed when the VM is destroyed for now until we fix all the 2096 * execution paths. 2097 */ 2098 2099 kfree(dev); 2100 } 2101 2102 /* 2103 * When the guest chooses the interrupt mode (XICS legacy or XIVE 2104 * native), the VM will switch of KVM device. The previous device will 2105 * be "released" before the new one is created. 2106 * 2107 * Until we are sure all execution paths are well protected, provide a 2108 * fail safe (transitional) method for device destruction, in which 2109 * the XIVE device pointer is recycled and not directly freed. 2110 */ 2111 struct kvmppc_xive *kvmppc_xive_get_device(struct kvm *kvm, u32 type) 2112 { 2113 struct kvmppc_xive **kvm_xive_device = type == KVM_DEV_TYPE_XIVE ? 2114 &kvm->arch.xive_devices.native : 2115 &kvm->arch.xive_devices.xics_on_xive; 2116 struct kvmppc_xive *xive = *kvm_xive_device; 2117 2118 if (!xive) { 2119 xive = kzalloc(sizeof(*xive), GFP_KERNEL); 2120 *kvm_xive_device = xive; 2121 } else { 2122 memset(xive, 0, sizeof(*xive)); 2123 } 2124 2125 return xive; 2126 } 2127 2128 /* 2129 * Create a XICS device with XIVE backend. kvm->lock is held. 2130 */ 2131 static int kvmppc_xive_create(struct kvm_device *dev, u32 type) 2132 { 2133 struct kvmppc_xive *xive; 2134 struct kvm *kvm = dev->kvm; 2135 2136 pr_devel("Creating xive for partition\n"); 2137 2138 /* Already there ? */ 2139 if (kvm->arch.xive) 2140 return -EEXIST; 2141 2142 xive = kvmppc_xive_get_device(kvm, type); 2143 if (!xive) 2144 return -ENOMEM; 2145 2146 dev->private = xive; 2147 xive->dev = dev; 2148 xive->kvm = kvm; 2149 mutex_init(&xive->lock); 2150 2151 /* We use the default queue size set by the host */ 2152 xive->q_order = xive_native_default_eq_shift(); 2153 if (xive->q_order < PAGE_SHIFT) 2154 xive->q_page_order = 0; 2155 else 2156 xive->q_page_order = xive->q_order - PAGE_SHIFT; 2157 2158 /* VP allocation is delayed to the first call to connect_vcpu */ 2159 xive->vp_base = XIVE_INVALID_VP; 2160 /* KVM_MAX_VCPUS limits the number of VMs to roughly 64 per sockets 2161 * on a POWER9 system. 2162 */ 2163 xive->nr_servers = KVM_MAX_VCPUS; 2164 2165 if (xive_native_has_single_escalation()) 2166 xive->flags |= KVMPPC_XIVE_FLAG_SINGLE_ESCALATION; 2167 2168 if (xive_native_has_save_restore()) 2169 xive->flags |= KVMPPC_XIVE_FLAG_SAVE_RESTORE; 2170 2171 kvm->arch.xive = xive; 2172 return 0; 2173 } 2174 2175 int kvmppc_xive_xics_hcall(struct kvm_vcpu *vcpu, u32 req) 2176 { 2177 struct kvmppc_vcore *vc = vcpu->arch.vcore; 2178 2179 /* The VM should have configured XICS mode before doing XICS hcalls. 
int kvmppc_xive_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	/* The VM should have configured XICS mode before doing XICS hcalls. */
	if (!kvmppc_xics_enabled(vcpu))
		return H_TOO_HARD;

	switch (req) {
	case H_XIRR:
		return xive_vm_h_xirr(vcpu);
	case H_CPPR:
		return xive_vm_h_cppr(vcpu, kvmppc_get_gpr(vcpu, 4));
	case H_EOI:
		return xive_vm_h_eoi(vcpu, kvmppc_get_gpr(vcpu, 4));
	case H_IPI:
		return xive_vm_h_ipi(vcpu, kvmppc_get_gpr(vcpu, 4),
				     kvmppc_get_gpr(vcpu, 5));
	case H_IPOLL:
		return xive_vm_h_ipoll(vcpu, kvmppc_get_gpr(vcpu, 4));
	case H_XIRR_X:
		xive_vm_h_xirr(vcpu);
		kvmppc_set_gpr(vcpu, 5, get_tb() + vc->tb_offset);
		return H_SUCCESS;
	}

	return H_UNSUPPORTED;
}
EXPORT_SYMBOL_GPL(kvmppc_xive_xics_hcall);

int kvmppc_xive_debug_show_queues(struct seq_file *m, struct kvm_vcpu *vcpu)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	unsigned int i;

	for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
		struct xive_q *q = &xc->queues[i];
		u32 i0, i1, idx;

		if (!q->qpage && !xc->esc_virq[i])
			continue;

		if (q->qpage) {
			seq_printf(m, " q[%d]: ", i);
			idx = q->idx;
			i0 = be32_to_cpup(q->qpage + idx);
			idx = (idx + 1) & q->msk;
			i1 = be32_to_cpup(q->qpage + idx);
			seq_printf(m, "T=%d %08x %08x...\n", q->toggle,
				   i0, i1);
		}
		if (xc->esc_virq[i]) {
			struct irq_data *d = irq_get_irq_data(xc->esc_virq[i]);
			struct xive_irq_data *xd =
				irq_data_get_irq_handler_data(d);
			u64 pq = xive_vm_esb_load(xd, XIVE_ESB_GET);

			seq_printf(m, " ESC %d %c%c EOI @%llx",
				   xc->esc_virq[i],
				   (pq & XIVE_ESB_VAL_P) ? 'P' : '-',
				   (pq & XIVE_ESB_VAL_Q) ? 'Q' : '-',
				   xd->eoi_page);
			seq_puts(m, "\n");
		}
	}
	return 0;
}

void kvmppc_xive_debug_show_sources(struct seq_file *m,
				    struct kvmppc_xive_src_block *sb)
{
	int i;

	seq_puts(m, " LISN HW/CHIP TYPE PQ EISN CPU/PRIO\n");
	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		struct kvmppc_xive_irq_state *state = &sb->irq_state[i];
		struct xive_irq_data *xd;
		u64 pq;
		u32 hw_num;

		if (!state->valid)
			continue;

		kvmppc_xive_select_irq(state, &hw_num, &xd);

		pq = xive_vm_esb_load(xd, XIVE_ESB_GET);

		seq_printf(m, "%08x %08x/%02x", state->number, hw_num,
			   xd->src_chip);
		if (state->lsi)
			seq_printf(m, " %cLSI", state->asserted ? '^' : ' ');
		else
			seq_puts(m, " MSI");

		seq_printf(m, " %s %c%c %08x % 4d/%d",
			   state->ipi_number == hw_num ? "IPI" : " PT",
			   pq & XIVE_ESB_VAL_P ? 'P' : '-',
			   pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
			   state->eisn, state->act_server,
			   state->act_priority);

		seq_puts(m, "\n");
	}
}

static int xive_debug_show(struct seq_file *m, void *private)
{
	struct kvmppc_xive *xive = m->private;
	struct kvm *kvm = xive->kvm;
	struct kvm_vcpu *vcpu;
	u64 t_rm_h_xirr = 0;
	u64 t_rm_h_ipoll = 0;
	u64 t_rm_h_cppr = 0;
	u64 t_rm_h_eoi = 0;
	u64 t_rm_h_ipi = 0;
	u64 t_vm_h_xirr = 0;
	u64 t_vm_h_ipoll = 0;
	u64 t_vm_h_cppr = 0;
	u64 t_vm_h_eoi = 0;
	u64 t_vm_h_ipi = 0;
	unsigned int i;

	if (!kvm)
		return 0;

	seq_puts(m, "=========\nVCPU state\n=========\n");

	kvm_for_each_vcpu(i, vcpu, kvm) {
		struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;

		if (!xc)
			continue;

		seq_printf(m, "VCPU %d: VP:%#x/%02x\n"
			   " CPPR:%#x HWCPPR:%#x MFRR:%#x PEND:%#x h_xirr: R=%lld V=%lld\n",
			   xc->server_num, xc->vp_id, xc->vp_chip_id,
			   xc->cppr, xc->hw_cppr,
			   xc->mfrr, xc->pending,
			   xc->stat_rm_h_xirr, xc->stat_vm_h_xirr);

		kvmppc_xive_debug_show_queues(m, vcpu);

		t_rm_h_xirr += xc->stat_rm_h_xirr;
		t_rm_h_ipoll += xc->stat_rm_h_ipoll;
		t_rm_h_cppr += xc->stat_rm_h_cppr;
		t_rm_h_eoi += xc->stat_rm_h_eoi;
		t_rm_h_ipi += xc->stat_rm_h_ipi;
		t_vm_h_xirr += xc->stat_vm_h_xirr;
		t_vm_h_ipoll += xc->stat_vm_h_ipoll;
		t_vm_h_cppr += xc->stat_vm_h_cppr;
		t_vm_h_eoi += xc->stat_vm_h_eoi;
		t_vm_h_ipi += xc->stat_vm_h_ipi;
	}

	seq_puts(m, "Hcalls totals\n");
	seq_printf(m, " H_XIRR R=%10lld V=%10lld\n", t_rm_h_xirr, t_vm_h_xirr);
	seq_printf(m, " H_IPOLL R=%10lld V=%10lld\n", t_rm_h_ipoll, t_vm_h_ipoll);
	seq_printf(m, " H_CPPR R=%10lld V=%10lld\n", t_rm_h_cppr, t_vm_h_cppr);
	seq_printf(m, " H_EOI R=%10lld V=%10lld\n", t_rm_h_eoi, t_vm_h_eoi);
	seq_printf(m, " H_IPI R=%10lld V=%10lld\n", t_rm_h_ipi, t_vm_h_ipi);

	seq_puts(m, "=========\nSources\n=========\n");

	for (i = 0; i <= xive->max_sbid; i++) {
		struct kvmppc_xive_src_block *sb = xive->src_blocks[i];

		if (sb) {
			arch_spin_lock(&sb->lock);
			kvmppc_xive_debug_show_sources(m, sb);
			arch_spin_unlock(&sb->lock);
		}
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(xive_debug);

static void xive_debugfs_init(struct kvmppc_xive *xive)
{
	char *name;

	name = kasprintf(GFP_KERNEL, "kvm-xive-%p", xive);
	if (!name) {
		pr_err("%s: no memory for name\n", __func__);
		return;
	}

	xive->dentry = debugfs_create_file(name, S_IRUGO, powerpc_debugfs_root,
					   xive, &xive_debug_fops);

	pr_debug("%s: created %s\n", __func__, name);
	kfree(name);
}

static void kvmppc_xive_init(struct kvm_device *dev)
{
	struct kvmppc_xive *xive = (struct kvmppc_xive *)dev->private;

	/* Register some debug interfaces */
	xive_debugfs_init(xive);
}

struct kvm_device_ops kvm_xive_ops = {
	.name = "kvm-xive",
	.create = kvmppc_xive_create,
	.init = kvmppc_xive_init,
	.release = kvmppc_xive_release,
	.set_attr = xive_set_attr,
	.get_attr = xive_get_attr,
	.has_attr = xive_has_attr,
};
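/*
 * Illustrative sketch, not compiled here: after the device is created, each
 * vCPU is connected to it with KVM_ENABLE_CAP(KVM_CAP_IRQ_XICS), which ends
 * up in kvmppc_xive_connect_vcpu() and triggers the lazy VP block allocation
 * mentioned in kvmppc_xive_create(). "vcpu_fd", "xics_fd" and "server_num"
 * are hypothetical caller-side values.
 *
 *	struct kvm_enable_cap cap = {
 *		.cap = KVM_CAP_IRQ_XICS,
 *		.args = { xics_fd, server_num },
 *	};
 *	ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
 */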