// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/cpu.h>
#include <linux/kvm_host.h>
#include <linux/preempt.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/sizes.h>
#include <linux/cma.h>
#include <linux/bitops.h>

#include <asm/asm-prototypes.h>
#include <asm/cputable.h>
#include <asm/interrupt.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/archrandom.h>
#include <asm/xics.h>
#include <asm/xive.h>
#include <asm/dbell.h>
#include <asm/cputhreads.h>
#include <asm/io.h>
#include <asm/opal.h>
#include <asm/smp.h>

#define KVM_CMA_CHUNK_ORDER	18

#include "book3s_xics.h"
#include "book3s_xive.h"

/*
 * The XIVE module will populate these when it loads
 */
unsigned long (*__xive_vm_h_xirr)(struct kvm_vcpu *vcpu);
unsigned long (*__xive_vm_h_ipoll)(struct kvm_vcpu *vcpu, unsigned long server);
int (*__xive_vm_h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
		       unsigned long mfrr);
int (*__xive_vm_h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr);
int (*__xive_vm_h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr);
EXPORT_SYMBOL_GPL(__xive_vm_h_xirr);
EXPORT_SYMBOL_GPL(__xive_vm_h_ipoll);
EXPORT_SYMBOL_GPL(__xive_vm_h_ipi);
EXPORT_SYMBOL_GPL(__xive_vm_h_cppr);
EXPORT_SYMBOL_GPL(__xive_vm_h_eoi);

/*
 * Hash page table alignment on newer cpus (CPU_FTR_ARCH_206)
 * should be a power of 2.
 */
#define HPT_ALIGN_PAGES		((1 << 18) >> PAGE_SHIFT) /* 256k */
/*
 * By default we reserve 5% of memory for hash pagetable allocation.
 */
static unsigned long kvm_cma_resv_ratio = 5;

static struct cma *kvm_cma;

static int __init early_parse_kvm_cma_resv(char *p)
{
	pr_debug("%s(%s)\n", __func__, p);
	if (!p)
		return -EINVAL;
	return kstrtoul(p, 0, &kvm_cma_resv_ratio);
}
early_param("kvm_cma_resv_ratio", early_parse_kvm_cma_resv);

struct page *kvm_alloc_hpt_cma(unsigned long nr_pages)
{
	VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);

	return cma_alloc(kvm_cma, nr_pages, order_base_2(HPT_ALIGN_PAGES),
			 false);
}
EXPORT_SYMBOL_GPL(kvm_alloc_hpt_cma);

void kvm_free_hpt_cma(struct page *page, unsigned long nr_pages)
{
	cma_release(kvm_cma, page, nr_pages);
}
EXPORT_SYMBOL_GPL(kvm_free_hpt_cma);
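
/*
 * Worked sizing example: with the default kvm_cma_resv_ratio of 5, a host
 * with 1 TiB of RAM sets aside roughly 51 GiB of CMA for guest hash page
 * tables; booting with "kvm_cma_resv_ratio=10" on the kernel command line
 * roughly doubles that.  The reservation itself is made below in
 * kvm_cma_reserve() as memblock_phys_mem_size() * kvm_cma_resv_ratio / 100,
 * and only when the kernel is running in HV mode.
 */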

/**
 * kvm_cma_reserve() - reserve area for kvm hash pagetable
 *
 * This function reserves memory from early allocator. It should be
 * called by arch specific code once the memblock allocator
 * has been activated and all other subsystems have already allocated/reserved
 * memory.
 */
void __init kvm_cma_reserve(void)
{
	unsigned long align_size;
	phys_addr_t selected_size;

	/*
	 * We need CMA reservation only when we are in HV mode
	 */
	if (!cpu_has_feature(CPU_FTR_HVMODE))
		return;

	selected_size = PAGE_ALIGN(memblock_phys_mem_size() * kvm_cma_resv_ratio / 100);
	if (selected_size) {
		pr_info("%s: reserving %ld MiB for global area\n", __func__,
			(unsigned long)selected_size / SZ_1M);
		align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;
		cma_declare_contiguous(0, selected_size, 0, align_size,
			KVM_CMA_CHUNK_ORDER - PAGE_SHIFT, false, "kvm_cma",
			&kvm_cma);
	}
}

/*
 * Real-mode H_CONFER implementation.
 * We check if we are the only vcpu out of this virtual core
 * still running in the guest and not ceded. If so, we pop up
 * to the virtual-mode implementation; if not, just return to
 * the guest.
 */
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
			    unsigned int yield_count)
{
	struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
	int ptid = local_paca->kvm_hstate.ptid;
	int threads_running;
	int threads_ceded;
	int threads_conferring;
	u64 stop = get_tb() + 10 * tb_ticks_per_usec;
	int rv = H_SUCCESS; /* => don't yield */

	set_bit(ptid, &vc->conferring_threads);
	while ((get_tb() < stop) && !VCORE_IS_EXITING(vc)) {
		threads_running = VCORE_ENTRY_MAP(vc);
		threads_ceded = vc->napping_threads;
		threads_conferring = vc->conferring_threads;
		if ((threads_ceded | threads_conferring) == threads_running) {
			rv = H_TOO_HARD; /* => do yield */
			break;
		}
	}
	clear_bit(ptid, &vc->conferring_threads);
	return rv;
}

/*
 * When running HV mode KVM we need to block certain operations while KVM VMs
 * exist in the system. We use a counter of VMs to track this.
 *
 * One of the operations we need to block is onlining of secondaries, so we
 * protect hv_vm_count with get/put_online_cpus().
 */
static atomic_t hv_vm_count;

void kvm_hv_vm_activated(void)
{
	get_online_cpus();
	atomic_inc(&hv_vm_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_activated);

void kvm_hv_vm_deactivated(void)
{
	get_online_cpus();
	atomic_dec(&hv_vm_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_deactivated);

bool kvm_hv_mode_active(void)
{
	return atomic_read(&hv_vm_count) != 0;
}

extern int hcall_real_table[], hcall_real_table_end[];

int kvmppc_hcall_impl_hv_realmode(unsigned long cmd)
{
	cmd /= 4;
	if (cmd < hcall_real_table_end - hcall_real_table &&
	    hcall_real_table[cmd])
		return 1;

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_hcall_impl_hv_realmode);

int kvmppc_hwrng_present(void)
{
	return powernv_hwrng_present();
}
EXPORT_SYMBOL_GPL(kvmppc_hwrng_present);

long kvmppc_h_random(struct kvm_vcpu *vcpu)
{
	int r;

	/* Only need to do the expensive mfmsr() on radix */
	if (kvm_is_radix(vcpu->kvm) && (mfmsr() & MSR_IR))
		r = powernv_get_random_long(&vcpu->arch.regs.gpr[4]);
	else
		r = powernv_get_random_real_mode(&vcpu->arch.regs.gpr[4]);
	if (r)
		return H_SUCCESS;

	return H_HARDWARE;
}
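
/*
 * Note on the calling convention above: per PAPR, a guest issues H_RANDOM
 * with no arguments and, on H_SUCCESS, picks the random value up from r4,
 * which is why kvmppc_h_random() stores its result into
 * vcpu->arch.regs.gpr[4].
 */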

/*
 * Send an interrupt or message to another CPU.
 * The caller needs to include any barrier needed to order writes
 * to memory vs. the IPI/message.
 */
void kvmhv_rm_send_ipi(int cpu)
{
	void __iomem *xics_phys;
	unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);

	/* For a nested hypervisor, use the XICS via hcall */
	if (kvmhv_on_pseries()) {
		unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

		plpar_hcall_raw(H_IPI, retbuf, get_hard_smp_processor_id(cpu),
				IPI_PRIORITY);
		return;
	}

	/* On POWER9 we can use msgsnd for any destination cpu. */
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		msg |= get_hard_smp_processor_id(cpu);
		__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
		return;
	}

	/* On POWER8 for IPIs to threads in the same core, use msgsnd. */
	if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
	    cpu_first_thread_sibling(cpu) ==
	    cpu_first_thread_sibling(raw_smp_processor_id())) {
		msg |= cpu_thread_in_core(cpu);
		__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
		return;
	}

	/* We should never reach this */
	if (WARN_ON_ONCE(xics_on_xive()))
		return;

	/* Else poke the target with an IPI */
	xics_phys = paca_ptrs[cpu]->kvm_hstate.xics_phys;
	if (xics_phys)
		__raw_rm_writeb(IPI_PRIORITY, xics_phys + XICS_MFRR);
	else
		opal_int_set_mfrr(get_hard_smp_processor_id(cpu), IPI_PRIORITY);
}

/*
 * The following functions are called from the assembly code
 * in book3s_hv_rmhandlers.S.
 */
static void kvmhv_interrupt_vcore(struct kvmppc_vcore *vc, int active)
{
	int cpu = vc->pcpu;

	/* Order setting of exit map vs. msgsnd/IPI */
	smp_mb();
	for (; active; active >>= 1, ++cpu)
		if (active & 1)
			kvmhv_rm_send_ipi(cpu);
}

void kvmhv_commence_exit(int trap)
{
	struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
	int ptid = local_paca->kvm_hstate.ptid;
	struct kvm_split_mode *sip = local_paca->kvm_hstate.kvm_split_mode;
	int me, ee, i, t;
	int cpu0;

	/* Set our bit in the threads-exiting-guest map in the 0xff00
	   bits of vcore->entry_exit_map */
	me = 0x100 << ptid;
	do {
		ee = vc->entry_exit_map;
	} while (cmpxchg(&vc->entry_exit_map, ee, ee | me) != ee);

	/* Are we the first here? */
	if ((ee >> 8) != 0)
		return;

	/*
	 * Trigger the other threads in this vcore to exit the guest.
	 * If this is a hypervisor decrementer interrupt then they
	 * will be already on their way out of the guest.
	 */
	if (trap != BOOK3S_INTERRUPT_HV_DECREMENTER)
		kvmhv_interrupt_vcore(vc, ee & ~(1 << ptid));

	/*
	 * If we are doing dynamic micro-threading, interrupt the other
	 * subcores to pull them out of their guests too.
	 */
	if (!sip)
		return;

	for (i = 0; i < MAX_SUBCORES; ++i) {
		vc = sip->vc[i];
		if (!vc)
			break;
		do {
			ee = vc->entry_exit_map;
			/* Already asked to exit? */
			if ((ee >> 8) != 0)
				break;
		} while (cmpxchg(&vc->entry_exit_map, ee,
				 ee | VCORE_EXIT_REQ) != ee);
		if ((ee >> 8) == 0)
			kvmhv_interrupt_vcore(vc, ee);
	}

	/*
	 * On POWER9 when running a HPT guest on a radix host (sip != NULL),
	 * we have to interrupt inactive CPU threads to get them to
	 * restore the host LPCR value.
	 */
	if (sip->lpcr_req) {
		if (cmpxchg(&sip->do_restore, 0, 1) == 0) {
			vc = local_paca->kvm_hstate.kvm_vcore;
			cpu0 = vc->pcpu + ptid - local_paca->kvm_hstate.tid;
			for (t = 1; t < threads_per_core; ++t) {
				if (sip->napped[t])
					kvmhv_rm_send_ipi(cpu0 + t);
			}
		}
	}
}

struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;
EXPORT_SYMBOL_GPL(kvmppc_host_rm_ops_hv);

#ifdef CONFIG_KVM_XICS
static struct kvmppc_irq_map *get_irqmap(struct kvmppc_passthru_irqmap *pimap,
					 u32 xisr)
{
	int i;

	/*
	 * We access the mapped array here without a lock. That
	 * is safe because we never reduce the number of entries
	 * in the array and we never change the v_hwirq field of
	 * an entry once it is set.
	 *
	 * We have also carefully ordered the stores in the writer
	 * and the loads here in the reader, so that if we find a matching
	 * hwirq here, the associated GSI and irq_desc fields are valid.
	 */
	for (i = 0; i < pimap->n_mapped; i++) {
		if (xisr == pimap->mapped[i].r_hwirq) {
			/*
			 * Order subsequent reads in the caller to serialize
			 * with the writer.
			 */
			smp_rmb();
			return &pimap->mapped[i];
		}
	}
	return NULL;
}

/*
 * If we have an interrupt that's not an IPI, check if we have a
 * passthrough adapter and if so, check if this external interrupt
 * is for the adapter.
 * We will attempt to deliver the IRQ directly to the target VCPU's
 * ICP, the virtual ICP (based on affinity - the xive value in ICS).
 *
 * If the delivery fails or if this is not for a passthrough adapter,
 * return to the host to handle this interrupt; we earlier
 * saved a copy of the XIRR in the PACA and it will be picked up by
 * the host ICP driver.
 */
static int kvmppc_check_passthru(u32 xisr, __be32 xirr, bool *again)
{
	struct kvmppc_passthru_irqmap *pimap;
	struct kvmppc_irq_map *irq_map;
	struct kvm_vcpu *vcpu;

	vcpu = local_paca->kvm_hstate.kvm_vcpu;
	if (!vcpu)
		return 1;
	pimap = kvmppc_get_passthru_irqmap(vcpu->kvm);
	if (!pimap)
		return 1;
	irq_map = get_irqmap(pimap, xisr);
	if (!irq_map)
		return 1;

	/* We're handling this interrupt, generic code doesn't need to */
	local_paca->kvm_hstate.saved_xirr = 0;

	return kvmppc_deliver_irq_passthru(vcpu, xirr, irq_map, pimap, again);
}

#else
static inline int kvmppc_check_passthru(u32 xisr, __be32 xirr, bool *again)
{
	return 1;
}
#endif
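
/*
 * For example, with a PCI adapter passed through to the guest (e.g. via
 * VFIO), its interrupt arrives here with xisr equal to the host hardware
 * irq (r_hwirq); get_irqmap() translates that to the guest's virtual hwirq
 * (v_hwirq) and kvmppc_deliver_irq_passthru() pushes it straight into the
 * target vcpu's ICP, avoiding a full exit to the host for every interrupt.
 */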

/*
 * Determine what sort of external interrupt is pending (if any).
 * Returns:
 *	0 if no interrupt is pending
 *	1 if an interrupt is pending that needs to be handled by the host
 *	2 Passthrough that needs completion in the host
 *	-1 if there was a guest wakeup IPI (which has now been cleared)
 *	-2 if there is a PCI passthrough external interrupt that was handled
 */
static long kvmppc_read_one_intr(bool *again);

long kvmppc_read_intr(void)
{
	long ret = 0;
	long rc;
	bool again;

	if (xive_enabled())
		return 1;

	do {
		again = false;
		rc = kvmppc_read_one_intr(&again);
		if (rc && (ret == 0 || rc > ret))
			ret = rc;
	} while (again);
	return ret;
}

static long kvmppc_read_one_intr(bool *again)
{
	void __iomem *xics_phys;
	u32 h_xirr;
	__be32 xirr;
	u32 xisr;
	u8 host_ipi;
	int64_t rc;

	if (xive_enabled())
		return 1;

	/* see if a host IPI is pending */
	host_ipi = local_paca->kvm_hstate.host_ipi;
	if (host_ipi)
		return 1;

	/* Now read the interrupt from the ICP */
	if (kvmhv_on_pseries()) {
		unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

		rc = plpar_hcall_raw(H_XIRR, retbuf, 0xFF);
		xirr = cpu_to_be32(retbuf[0]);
	} else {
		xics_phys = local_paca->kvm_hstate.xics_phys;
		rc = 0;
		if (!xics_phys)
			rc = opal_int_get_xirr(&xirr, false);
		else
			xirr = __raw_rm_readl(xics_phys + XICS_XIRR);
	}
	if (rc < 0)
		return 1;

	/*
	 * Save XIRR for later. Since we get control in reverse endian
	 * on LE systems, save it byte reversed and fetch it back in
	 * host endian. Note that xirr is the value read from the
	 * XIRR register, while h_xirr is the host endian version.
	 */
	h_xirr = be32_to_cpu(xirr);
	local_paca->kvm_hstate.saved_xirr = h_xirr;
	xisr = h_xirr & 0xffffff;
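
	/*
	 * XIRR layout reminder: the low 24 bits are the interrupt source
	 * number (the XISR) and the top 8 bits are the CPPR, so the mask
	 * above keeps just the source.  XICS_IPI, tested below, is the
	 * source number used for inter-processor interrupts.
	 */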

	/*
	 * Ensure that the store/load complete to guarantee all side
	 * effects of loading from XIRR have completed
	 */
	smp_mb();

	/* if nothing pending in the ICP */
	if (!xisr)
		return 0;

	/* We found something in the ICP...
	 *
	 * If it is an IPI, clear the MFRR and EOI it.
	 */
	if (xisr == XICS_IPI) {
		rc = 0;
		if (kvmhv_on_pseries()) {
			unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

			plpar_hcall_raw(H_IPI, retbuf,
					hard_smp_processor_id(), 0xff);
			plpar_hcall_raw(H_EOI, retbuf, h_xirr);
		} else if (xics_phys) {
			__raw_rm_writeb(0xff, xics_phys + XICS_MFRR);
			__raw_rm_writel(xirr, xics_phys + XICS_XIRR);
		} else {
			opal_int_set_mfrr(hard_smp_processor_id(), 0xff);
			rc = opal_int_eoi(h_xirr);
		}
		/* If rc > 0, there is another interrupt pending */
		*again = rc > 0;

		/*
		 * Need to ensure side effects of above stores
		 * complete before proceeding.
		 */
		smp_mb();

		/*
		 * We need to re-check host IPI now in case it got set in the
		 * meantime. If it's clear, we bounce the interrupt to the
		 * guest.
		 */
		host_ipi = local_paca->kvm_hstate.host_ipi;
		if (unlikely(host_ipi != 0)) {
			/* We raced with the host,
			 * we need to resend that IPI, bummer
			 */
			if (kvmhv_on_pseries()) {
				unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

				plpar_hcall_raw(H_IPI, retbuf,
						hard_smp_processor_id(),
						IPI_PRIORITY);
			} else if (xics_phys)
				__raw_rm_writeb(IPI_PRIORITY,
						xics_phys + XICS_MFRR);
			else
				opal_int_set_mfrr(hard_smp_processor_id(),
						  IPI_PRIORITY);
			/* Let side effects complete */
			smp_mb();
			return 1;
		}

		/* OK, it's an IPI for us */
		local_paca->kvm_hstate.saved_xirr = 0;
		return -1;
	}

	return kvmppc_check_passthru(xisr, xirr, again);
}

#ifdef CONFIG_KVM_XICS
static inline bool is_rm(void)
{
	return !(mfmsr() & MSR_DR);
}

unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)
{
	if (!kvmppc_xics_enabled(vcpu))
		return H_TOO_HARD;
	if (xics_on_xive()) {
		if (is_rm())
			return xive_rm_h_xirr(vcpu);
		if (unlikely(!__xive_vm_h_xirr))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_xirr(vcpu);
	} else
		return xics_rm_h_xirr(vcpu);
}

unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu)
{
	if (!kvmppc_xics_enabled(vcpu))
		return H_TOO_HARD;
	vcpu->arch.regs.gpr[5] = get_tb();
	if (xics_on_xive()) {
		if (is_rm())
			return xive_rm_h_xirr(vcpu);
		if (unlikely(!__xive_vm_h_xirr))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_xirr(vcpu);
	} else
		return xics_rm_h_xirr(vcpu);
}

unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
{
	if (!kvmppc_xics_enabled(vcpu))
		return H_TOO_HARD;
	if (xics_on_xive()) {
		if (is_rm())
			return xive_rm_h_ipoll(vcpu, server);
		if (unlikely(!__xive_vm_h_ipoll))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_ipoll(vcpu, server);
	} else
		return H_TOO_HARD;
}

int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
		    unsigned long mfrr)
{
	if (!kvmppc_xics_enabled(vcpu))
		return H_TOO_HARD;
	if (xics_on_xive()) {
		if (is_rm())
			return xive_rm_h_ipi(vcpu, server, mfrr);
		if (unlikely(!__xive_vm_h_ipi))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_ipi(vcpu, server, mfrr);
	} else
		return xics_rm_h_ipi(vcpu, server, mfrr);
}

int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
{
	if (!kvmppc_xics_enabled(vcpu))
		return H_TOO_HARD;
	if (xics_on_xive()) {
		if (is_rm())
			return xive_rm_h_cppr(vcpu, cppr);
		if (unlikely(!__xive_vm_h_cppr))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_cppr(vcpu, cppr);
	} else
		return xics_rm_h_cppr(vcpu, cppr);
}

int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
{
	if (!kvmppc_xics_enabled(vcpu))
		return H_TOO_HARD;
	if (xics_on_xive()) {
		if (is_rm())
			return xive_rm_h_eoi(vcpu, xirr);
		if (unlikely(!__xive_vm_h_eoi))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_eoi(vcpu, xirr);
	} else
		return xics_rm_h_eoi(vcpu, xirr);
}
#endif /* CONFIG_KVM_XICS */
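
/*
 * The pattern above is the same for every XICS hcall: on a host with
 * native XIVE (xics_on_xive()), real-mode calls (MSR[DR] clear) go to the
 * xive_rm_* handlers, while virtual-mode calls use the __xive_vm_* hooks
 * that the XIVE module fills in at load time (H_NOT_AVAILABLE if it has
 * not); otherwise the emulated-XICS xics_rm_* handlers are used.
 */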

void kvmppc_bad_interrupt(struct pt_regs *regs)
{
	/*
	 * 100 could happen at any time, 200 can happen due to invalid real
	 * address access for example (or any time due to a hardware problem).
	 */
	if (TRAP(regs) == 0x100) {
		get_paca()->in_nmi++;
		system_reset_exception(regs);
		get_paca()->in_nmi--;
	} else if (TRAP(regs) == 0x200) {
		machine_check_exception(regs);
	} else {
		die("Bad interrupt in KVM entry/exit code", regs, SIGABRT);
	}
	panic("Bad KVM trap");
}

/*
 * Functions used to switch LPCR HR and UPRT bits on all threads
 * when entering and exiting HPT guests on a radix host.
 */

#define PHASE_REALMODE		1	/* in real mode */
#define PHASE_SET_LPCR		2	/* have set LPCR */
#define PHASE_OUT_OF_GUEST	4	/* have finished executing in guest */
#define PHASE_RESET_LPCR	8	/* have reset LPCR to host value */

/* ALL(p) replicates a phase value into one byte per thread */
#define ALL(p)		(((p) << 24) | ((p) << 16) | ((p) << 8) | (p))

static void wait_for_sync(struct kvm_split_mode *sip, int phase)
{
	int thr = local_paca->kvm_hstate.tid;

	sip->lpcr_sync.phase[thr] |= phase;
	phase = ALL(phase);
	while ((sip->lpcr_sync.allphases & phase) != phase) {
		HMT_low();
		barrier();
	}
	HMT_medium();
}

void kvmhv_p9_set_lpcr(struct kvm_split_mode *sip)
{
	int num_sets;
	unsigned long rb, set;

	/* wait for every other thread to get to real mode */
	wait_for_sync(sip, PHASE_REALMODE);

	/* Set LPCR and LPIDR */
	mtspr(SPRN_LPCR, sip->lpcr_req);
	mtspr(SPRN_LPID, sip->lpidr_req);
	isync();

	/*
	 * P10 will flush all the congruence classes with a single tlbiel
	 */
	if (cpu_has_feature(CPU_FTR_ARCH_31))
		num_sets = 1;
	else
		num_sets = POWER9_TLB_SETS_RADIX;

	/* Invalidate the TLB on thread 0 */
	if (local_paca->kvm_hstate.tid == 0) {
		sip->do_set = 0;
		asm volatile("ptesync" : : : "memory");
		for (set = 0; set < num_sets; ++set) {
			rb = TLBIEL_INVAL_SET_LPID +
				(set << TLBIEL_INVAL_SET_SHIFT);
			asm volatile(PPC_TLBIEL(%0, %1, 0, 0, 0) : :
				     "r" (rb), "r" (0));
		}
		asm volatile("ptesync" : : : "memory");
	}

	/* indicate that we have done so and wait for others */
	wait_for_sync(sip, PHASE_SET_LPCR);
	/* order read of sip->lpcr_sync.allphases vs. sip->do_set */
	smp_rmb();
}

/*
 * Called when a thread that has been in the guest needs
 * to reload the host LPCR value - but only on POWER9 when
 * running a HPT guest on a radix host.
 */
void kvmhv_p9_restore_lpcr(struct kvm_split_mode *sip)
{
	/* we're out of the guest... */
	wait_for_sync(sip, PHASE_OUT_OF_GUEST);

	mtspr(SPRN_LPID, 0);
	mtspr(SPRN_LPCR, sip->host_lpcr);
	isync();

	if (local_paca->kvm_hstate.tid == 0) {
		sip->do_restore = 0;
		smp_wmb();	/* order store of do_restore vs. phase */
	}

	wait_for_sync(sip, PHASE_RESET_LPCR);
	smp_mb();
	local_paca->kvm_hstate.kvm_split_mode = NULL;
}

static void kvmppc_end_cede(struct kvm_vcpu *vcpu)
{
	vcpu->arch.ceded = 0;
	if (vcpu->arch.timer_running) {
		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
		vcpu->arch.timer_running = 0;
	}
}
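
/*
 * Background for the check in kvmppc_set_msr_hv() below: MSR[TS] encodes
 * the transactional-memory state in two bits (00 = non-transactional,
 * 01 = suspended, 10 = transactional); both bits set is a reserved
 * combination, so the whole field is cleared when it is seen.
 */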

void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
{
	/*
	 * Check for illegal transactional state bit combination
	 * and if we find it, force the TS field to a safe state.
	 */
	if ((msr & MSR_TS_MASK) == MSR_TS_MASK)
		msr &= ~MSR_TS_MASK;
	vcpu->arch.shregs.msr = msr;
	kvmppc_end_cede(vcpu);
}
EXPORT_SYMBOL_GPL(kvmppc_set_msr_hv);

static void inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags)
{
	unsigned long msr, pc, new_msr, new_pc;

	msr = kvmppc_get_msr(vcpu);
	pc = kvmppc_get_pc(vcpu);
	new_msr = vcpu->arch.intr_msr;
	new_pc = vec;

	/* If transactional, change to suspend mode on IRQ delivery */
	if (MSR_TM_TRANSACTIONAL(msr))
		new_msr |= MSR_TS_S;
	else
		new_msr |= msr & MSR_TS_MASK;

	/*
	 * Perform MSR and PC adjustment for LPCR[AIL]=3 if it is set and
	 * applicable. AIL=2 is not supported.
	 *
	 * AIL does not apply to SRESET, MCE, or HMI (which is never
	 * delivered to the guest), and does not apply if IR=0 or DR=0.
	 */
	if (vec != BOOK3S_INTERRUPT_SYSTEM_RESET &&
	    vec != BOOK3S_INTERRUPT_MACHINE_CHECK &&
	    (vcpu->arch.vcore->lpcr & LPCR_AIL) == LPCR_AIL_3 &&
	    (msr & (MSR_IR|MSR_DR)) == (MSR_IR|MSR_DR)) {
		new_msr |= MSR_IR | MSR_DR;
		new_pc += 0xC000000000004000ULL;
	}

	kvmppc_set_srr0(vcpu, pc);
	kvmppc_set_srr1(vcpu, (msr & SRR1_MSR_BITS) | srr1_flags);
	kvmppc_set_pc(vcpu, new_pc);
	vcpu->arch.shregs.msr = new_msr;
}

void kvmppc_inject_interrupt_hv(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags)
{
	inject_interrupt(vcpu, vec, srr1_flags);
	kvmppc_end_cede(vcpu);
}
EXPORT_SYMBOL_GPL(kvmppc_inject_interrupt_hv);

/*
 * Is there a PRIV_DOORBELL pending for the guest (on POWER9)?
 * Can we inject a Decrementer or an External interrupt?
 */
void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu)
{
	int ext;
	unsigned long lpcr;

	/* Insert EXTERNAL bit into LPCR at the MER bit position */
	ext = (vcpu->arch.pending_exceptions >> BOOK3S_IRQPRIO_EXTERNAL) & 1;
	lpcr = mfspr(SPRN_LPCR);
	lpcr |= ext << LPCR_MER_SH;
	mtspr(SPRN_LPCR, lpcr);
	isync();

	if (vcpu->arch.shregs.msr & MSR_EE) {
		if (ext) {
			inject_interrupt(vcpu, BOOK3S_INTERRUPT_EXTERNAL, 0);
		} else {
			long int dec = mfspr(SPRN_DEC);
			if (!(lpcr & LPCR_LD))
				dec = (int) dec;
			if (dec < 0)
				inject_interrupt(vcpu,
					BOOK3S_INTERRUPT_DECREMENTER, 0);
		}
	}

	if (vcpu->arch.doorbell_request) {
		mtspr(SPRN_DPDES, 1);
		vcpu->arch.vcore->dpdes = 1;
		smp_wmb();
		vcpu->arch.doorbell_request = 0;
	}
}
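
/*
 * flush_guest_tlb() below walks the TLB set by set: rb starts with IS = 2
 * and rb += PPC_BIT(51) advances the set number, so one tlbiel is issued
 * per congruence class for all kvm->arch.tlb_sets sets (for example, 128
 * sets for a radix guest on POWER9), with the R/PRS/RIC arguments chosen
 * for radix vs. hash, followed by a ptesync and an ERAT invalidation.
 */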

static void flush_guest_tlb(struct kvm *kvm)
{
	unsigned long rb, set;

	rb = PPC_BIT(52);	/* IS = 2 */
	if (kvm_is_radix(kvm)) {
		/* R=1 PRS=1 RIC=2 */
		asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
			     : : "r" (rb), "i" (1), "i" (1), "i" (2),
			       "r" (0) : "memory");
		for (set = 1; set < kvm->arch.tlb_sets; ++set) {
			rb += PPC_BIT(51);	/* increment set number */
			/* R=1 PRS=1 RIC=0 */
			asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
				     : : "r" (rb), "i" (1), "i" (1), "i" (0),
				       "r" (0) : "memory");
		}
		asm volatile("ptesync": : :"memory");
		asm volatile(PPC_RADIX_INVALIDATE_ERAT_GUEST : : :"memory");
	} else {
		for (set = 0; set < kvm->arch.tlb_sets; ++set) {
			/* R=0 PRS=0 RIC=0 */
			asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
				     : : "r" (rb), "i" (0), "i" (0), "i" (0),
				       "r" (0) : "memory");
			rb += PPC_BIT(51);	/* increment set number */
		}
		asm volatile("ptesync": : :"memory");
		asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT : : :"memory");
	}
}

void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu,
				 struct kvm_nested_guest *nested)
{
	cpumask_t *need_tlb_flush;

	/*
	 * On POWER9, individual threads can come in here, but the
	 * TLB is shared between the 4 threads in a core, hence
	 * invalidating on one thread invalidates for all.
	 * Thus we make all 4 threads use the same bit.
	 */
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		pcpu = cpu_first_thread_sibling(pcpu);

	if (nested)
		need_tlb_flush = &nested->need_tlb_flush;
	else
		need_tlb_flush = &kvm->arch.need_tlb_flush;

	if (cpumask_test_cpu(pcpu, need_tlb_flush)) {
		flush_guest_tlb(kvm);

		/* Clear the bit after the TLB flush */
		cpumask_clear_cpu(pcpu, need_tlb_flush);
	}
}
EXPORT_SYMBOL_GPL(kvmppc_check_need_tlb_flush);