// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/cpu.h>
#include <linux/kvm_host.h>
#include <linux/preempt.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/sizes.h>
#include <linux/cma.h>
#include <linux/bitops.h>

#include <asm/asm-prototypes.h>
#include <asm/cputable.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/archrandom.h>
#include <asm/xics.h>
#include <asm/xive.h>
#include <asm/dbell.h>
#include <asm/cputhreads.h>
#include <asm/io.h>
#include <asm/opal.h>
#include <asm/smp.h>

#define KVM_CMA_CHUNK_ORDER	18

#include "book3s_xics.h"
#include "book3s_xive.h"

/*
 * The XIVE module will populate these when it loads
 */
unsigned long (*__xive_vm_h_xirr)(struct kvm_vcpu *vcpu);
unsigned long (*__xive_vm_h_ipoll)(struct kvm_vcpu *vcpu, unsigned long server);
int (*__xive_vm_h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
                       unsigned long mfrr);
int (*__xive_vm_h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr);
int (*__xive_vm_h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr);
EXPORT_SYMBOL_GPL(__xive_vm_h_xirr);
EXPORT_SYMBOL_GPL(__xive_vm_h_ipoll);
EXPORT_SYMBOL_GPL(__xive_vm_h_ipi);
EXPORT_SYMBOL_GPL(__xive_vm_h_cppr);
EXPORT_SYMBOL_GPL(__xive_vm_h_eoi);

/*
 * Hash page table alignment on newer CPUs (CPU_FTR_ARCH_206)
 * should be a power of 2.
 */
#define HPT_ALIGN_PAGES		((1 << 18) >> PAGE_SHIFT) /* 256k */
/*
 * By default we reserve 5% of memory for hash pagetable allocation.
 */
static unsigned long kvm_cma_resv_ratio = 5;

static struct cma *kvm_cma;

static int __init early_parse_kvm_cma_resv(char *p)
{
        pr_debug("%s(%s)\n", __func__, p);
        if (!p)
                return -EINVAL;
        return kstrtoul(p, 0, &kvm_cma_resv_ratio);
}
early_param("kvm_cma_resv_ratio", early_parse_kvm_cma_resv);
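
/*
 * Allocate/free pages for a guest hashed page table (HPT) from the CMA
 * region reserved by kvm_cma_reserve() below.  Allocations are aligned
 * to HPT_ALIGN_PAGES and must cover at least one KVM_CMA_CHUNK_ORDER
 * sized chunk.
 */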
struct page *kvm_alloc_hpt_cma(unsigned long nr_pages)
{
        VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);

        return cma_alloc(kvm_cma, nr_pages, order_base_2(HPT_ALIGN_PAGES),
                         false);
}
EXPORT_SYMBOL_GPL(kvm_alloc_hpt_cma);

void kvm_free_hpt_cma(struct page *page, unsigned long nr_pages)
{
        cma_release(kvm_cma, page, nr_pages);
}
EXPORT_SYMBOL_GPL(kvm_free_hpt_cma);

/**
 * kvm_cma_reserve() - reserve area for kvm hash pagetable
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the memblock allocator
 * has been activated and all other subsystems have already allocated/reserved
 * memory.
 */
void __init kvm_cma_reserve(void)
{
        unsigned long align_size;
        phys_addr_t selected_size;

        /*
         * We need CMA reservation only when we are in HV mode
         */
        if (!cpu_has_feature(CPU_FTR_HVMODE))
                return;

        selected_size = PAGE_ALIGN(memblock_phys_mem_size() * kvm_cma_resv_ratio / 100);
        if (selected_size) {
                pr_info("%s: reserving %ld MiB for global area\n", __func__,
                        (unsigned long)selected_size / SZ_1M);
                align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;
                cma_declare_contiguous(0, selected_size, 0, align_size,
                        KVM_CMA_CHUNK_ORDER - PAGE_SHIFT, false, "kvm_cma",
                        &kvm_cma);
        }
}

/*
 * Real-mode H_CONFER implementation.
 * We check if we are the only vcpu out of this virtual core
 * still running in the guest and not ceded. If so, we pop up
 * to the virtual-mode implementation; if not, just return to
 * the guest.
 */
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
                            unsigned int yield_count)
{
        struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
        int ptid = local_paca->kvm_hstate.ptid;
        int threads_running;
        int threads_ceded;
        int threads_conferring;
        u64 stop = get_tb() + 10 * tb_ticks_per_usec;
        int rv = H_SUCCESS; /* => don't yield */

        set_bit(ptid, &vc->conferring_threads);
        while ((get_tb() < stop) && !VCORE_IS_EXITING(vc)) {
                threads_running = VCORE_ENTRY_MAP(vc);
                threads_ceded = vc->napping_threads;
                threads_conferring = vc->conferring_threads;
                if ((threads_ceded | threads_conferring) == threads_running) {
                        rv = H_TOO_HARD; /* => do yield */
                        break;
                }
        }
        clear_bit(ptid, &vc->conferring_threads);
        return rv;
}

/*
 * When running HV mode KVM we need to block certain operations while KVM VMs
 * exist in the system. We use a counter of VMs to track this.
 *
 * One of the operations we need to block is onlining of secondaries, so we
 * protect hv_vm_count with get/put_online_cpus().
 */
static atomic_t hv_vm_count;

void kvm_hv_vm_activated(void)
{
        get_online_cpus();
        atomic_inc(&hv_vm_count);
        put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_activated);

void kvm_hv_vm_deactivated(void)
{
        get_online_cpus();
        atomic_dec(&hv_vm_count);
        put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_deactivated);

bool kvm_hv_mode_active(void)
{
        return atomic_read(&hv_vm_count) != 0;
}

extern int hcall_real_table[], hcall_real_table_end[];
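
/*
 * Return 1 if hypercall 'cmd' has a real-mode implementation, i.e. if
 * there is a non-zero entry for it in hcall_real_table (which is
 * indexed by hcall number / 4).
 */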
int kvmppc_hcall_impl_hv_realmode(unsigned long cmd)
{
        cmd /= 4;
        if (cmd < hcall_real_table_end - hcall_real_table &&
            hcall_real_table[cmd])
                return 1;

        return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_hcall_impl_hv_realmode);

int kvmppc_hwrng_present(void)
{
        return powernv_hwrng_present();
}
EXPORT_SYMBOL_GPL(kvmppc_hwrng_present);

long kvmppc_h_random(struct kvm_vcpu *vcpu)
{
        int r;

        /* Only need to do the expensive mfmsr() on radix */
        if (kvm_is_radix(vcpu->kvm) && (mfmsr() & MSR_IR))
                r = powernv_get_random_long(&vcpu->arch.regs.gpr[4]);
        else
                r = powernv_get_random_real_mode(&vcpu->arch.regs.gpr[4]);
        if (r)
                return H_SUCCESS;

        return H_HARDWARE;
}

/*
 * Send an interrupt or message to another CPU.
 * The caller needs to include any barrier needed to order writes
 * to memory vs. the IPI/message.
 */
void kvmhv_rm_send_ipi(int cpu)
{
        void __iomem *xics_phys;
        unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);

        /* For a nested hypervisor, use the XICS via hcall */
        if (kvmhv_on_pseries()) {
                unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

                plpar_hcall_raw(H_IPI, retbuf, get_hard_smp_processor_id(cpu),
                                IPI_PRIORITY);
                return;
        }

        /* On POWER9 we can use msgsnd for any destination cpu. */
        if (cpu_has_feature(CPU_FTR_ARCH_300)) {
                msg |= get_hard_smp_processor_id(cpu);
                __asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
                return;
        }

        /* On POWER8 for IPIs to threads in the same core, use msgsnd. */
        if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
            cpu_first_thread_sibling(cpu) ==
            cpu_first_thread_sibling(raw_smp_processor_id())) {
                msg |= cpu_thread_in_core(cpu);
                __asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
                return;
        }

        /* We should never reach this */
        if (WARN_ON_ONCE(xics_on_xive()))
                return;

        /* Else poke the target with an IPI */
        xics_phys = paca_ptrs[cpu]->kvm_hstate.xics_phys;
        if (xics_phys)
                __raw_rm_writeb(IPI_PRIORITY, xics_phys + XICS_MFRR);
        else
                opal_int_set_mfrr(get_hard_smp_processor_id(cpu), IPI_PRIORITY);
}

/*
 * The following functions are called from the assembly code
 * in book3s_hv_rmhandlers.S.
 */
static void kvmhv_interrupt_vcore(struct kvmppc_vcore *vc, int active)
{
        int cpu = vc->pcpu;

        /* Order setting of exit map vs. msgsnd/IPI */
        smp_mb();
        for (; active; active >>= 1, ++cpu)
                if (active & 1)
                        kvmhv_rm_send_ipi(cpu);
}
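
/*
 * Mark this thread as exiting the guest and, if it is the first thread
 * of the vcore to do so, interrupt the other threads (and, when dynamic
 * micro-threading is in use, the other subcores) so that they exit the
 * guest as well.
 */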
void kvmhv_commence_exit(int trap)
{
        struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
        int ptid = local_paca->kvm_hstate.ptid;
        struct kvm_split_mode *sip = local_paca->kvm_hstate.kvm_split_mode;
        int me, ee, i;

        /*
         * Set our bit in the threads-exiting-guest map in the
         * 0xff00 bits of vcore->entry_exit_map.
         */
        me = 0x100 << ptid;
        do {
                ee = vc->entry_exit_map;
        } while (cmpxchg(&vc->entry_exit_map, ee, ee | me) != ee);

        /* Are we the first here? */
        if ((ee >> 8) != 0)
                return;

        /*
         * Trigger the other threads in this vcore to exit the guest.
         * If this is a hypervisor decrementer interrupt then they
         * will be already on their way out of the guest.
         */
        if (trap != BOOK3S_INTERRUPT_HV_DECREMENTER)
                kvmhv_interrupt_vcore(vc, ee & ~(1 << ptid));

        /*
         * If we are doing dynamic micro-threading, interrupt the other
         * subcores to pull them out of their guests too.
         */
        if (!sip)
                return;

        for (i = 0; i < MAX_SUBCORES; ++i) {
                vc = sip->vc[i];
                if (!vc)
                        break;
                do {
                        ee = vc->entry_exit_map;
                        /* Already asked to exit? */
                        if ((ee >> 8) != 0)
                                break;
                } while (cmpxchg(&vc->entry_exit_map, ee,
                                 ee | VCORE_EXIT_REQ) != ee);
                if ((ee >> 8) == 0)
                        kvmhv_interrupt_vcore(vc, ee);
        }
}

struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;
EXPORT_SYMBOL_GPL(kvmppc_host_rm_ops_hv);

#ifdef CONFIG_KVM_XICS
static struct kvmppc_irq_map *get_irqmap(struct kvmppc_passthru_irqmap *pimap,
                                         u32 xisr)
{
        int i;

        /*
         * We access the mapped array here without a lock.  That
         * is safe because we never reduce the number of entries
         * in the array and we never change the v_hwirq field of
         * an entry once it is set.
         *
         * We have also carefully ordered the stores in the writer
         * and the loads here in the reader, so that if we find a matching
         * hwirq here, the associated GSI and irq_desc fields are valid.
         */
        for (i = 0; i < pimap->n_mapped; i++) {
                if (xisr == pimap->mapped[i].r_hwirq) {
                        /*
                         * Order subsequent reads in the caller to serialize
                         * with the writer.
                         */
                        smp_rmb();
                        return &pimap->mapped[i];
                }
        }
        return NULL;
}

/*
 * If we have an interrupt that's not an IPI, check if we have a
 * passthrough adapter and if so, check if this external interrupt
 * is for the adapter.
 * We will attempt to deliver the IRQ directly to the target VCPU's
 * ICP, the virtual ICP (based on affinity - the xive value in ICS).
 *
 * If the delivery fails or if this is not for a passthrough adapter,
 * return to the host to handle this interrupt. We earlier
 * saved a copy of the XIRR in the PACA; it will be picked up by
 * the host ICP driver.
 */
static int kvmppc_check_passthru(u32 xisr, __be32 xirr, bool *again)
{
        struct kvmppc_passthru_irqmap *pimap;
        struct kvmppc_irq_map *irq_map;
        struct kvm_vcpu *vcpu;

        vcpu = local_paca->kvm_hstate.kvm_vcpu;
        if (!vcpu)
                return 1;
        pimap = kvmppc_get_passthru_irqmap(vcpu->kvm);
        if (!pimap)
                return 1;
        irq_map = get_irqmap(pimap, xisr);
        if (!irq_map)
                return 1;

        /* We're handling this interrupt, generic code doesn't need to */
        local_paca->kvm_hstate.saved_xirr = 0;

        return kvmppc_deliver_irq_passthru(vcpu, xirr, irq_map, pimap, again);
}

#else
static inline int kvmppc_check_passthru(u32 xisr, __be32 xirr, bool *again)
{
        return 1;
}
#endif

/*
 * Determine what sort of external interrupt is pending (if any).
 * Returns:
 *	0 if no interrupt is pending
 *	1 if an interrupt is pending that needs to be handled by the host
 *	2 if a PCI passthrough interrupt needs completion in the host
 *	-1 if there was a guest wakeup IPI (which has now been cleared)
 *	-2 if there is a PCI passthrough external interrupt that was handled
 */
static long kvmppc_read_one_intr(bool *again);

long kvmppc_read_intr(void)
{
        long ret = 0;
        long rc;
        bool again;

        if (xive_enabled())
                return 1;

        do {
                again = false;
                rc = kvmppc_read_one_intr(&again);
                if (rc && (ret == 0 || rc > ret))
                        ret = rc;
        } while (again);
        return ret;
}
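
/*
 * Read one interrupt from the ICP (via hcall when running as a nested
 * hypervisor on pSeries, otherwise directly from the XICS registers or
 * through OPAL), save the XIRR in the PACA for the host, and decide
 * whether it is a host IPI, a guest wakeup IPI, or a candidate for
 * passthrough delivery.  Return values are as documented above for
 * kvmppc_read_intr().
 */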
static long kvmppc_read_one_intr(bool *again)
{
        void __iomem *xics_phys;
        u32 h_xirr;
        __be32 xirr;
        u32 xisr;
        u8 host_ipi;
        int64_t rc;

        if (xive_enabled())
                return 1;

        /* see if a host IPI is pending */
        host_ipi = local_paca->kvm_hstate.host_ipi;
        if (host_ipi)
                return 1;

        /* Now read the interrupt from the ICP */
        if (kvmhv_on_pseries()) {
                unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

                rc = plpar_hcall_raw(H_XIRR, retbuf, 0xFF);
                xirr = cpu_to_be32(retbuf[0]);
        } else {
                xics_phys = local_paca->kvm_hstate.xics_phys;
                rc = 0;
                if (!xics_phys)
                        rc = opal_int_get_xirr(&xirr, false);
                else
                        xirr = __raw_rm_readl(xics_phys + XICS_XIRR);
        }
        if (rc < 0)
                return 1;

        /*
         * Save XIRR for later. Since we get control in reverse endian
         * on LE systems, save it byte reversed and fetch it back in
         * host endian. Note that xirr is the value read from the
         * XIRR register, while h_xirr is the host endian version.
         */
        h_xirr = be32_to_cpu(xirr);
        local_paca->kvm_hstate.saved_xirr = h_xirr;
        xisr = h_xirr & 0xffffff;
        /*
         * Ensure that the store/load complete to guarantee all side
         * effects of loading from XIRR have completed.
         */
        smp_mb();

        /* if nothing pending in the ICP */
        if (!xisr)
                return 0;

        /* We found something in the ICP...
         *
         * If it is an IPI, clear the MFRR and EOI it.
         */
        if (xisr == XICS_IPI) {
                rc = 0;
                if (kvmhv_on_pseries()) {
                        unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

                        plpar_hcall_raw(H_IPI, retbuf,
                                        hard_smp_processor_id(), 0xff);
                        plpar_hcall_raw(H_EOI, retbuf, h_xirr);
                } else if (xics_phys) {
                        __raw_rm_writeb(0xff, xics_phys + XICS_MFRR);
                        __raw_rm_writel(xirr, xics_phys + XICS_XIRR);
                } else {
                        opal_int_set_mfrr(hard_smp_processor_id(), 0xff);
                        rc = opal_int_eoi(h_xirr);
                }
                /* If rc > 0, there is another interrupt pending */
                *again = rc > 0;

                /*
                 * Need to ensure side effects of above stores
                 * complete before proceeding.
                 */
                smp_mb();

                /*
                 * We need to re-check host IPI now in case it got set in the
                 * meantime. If it's clear, we bounce the interrupt to the
                 * guest.
                 */
                host_ipi = local_paca->kvm_hstate.host_ipi;
                if (unlikely(host_ipi != 0)) {
                        /* We raced with the host,
                         * we need to resend that IPI, bummer
                         */
                        if (kvmhv_on_pseries()) {
                                unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

                                plpar_hcall_raw(H_IPI, retbuf,
                                                hard_smp_processor_id(),
                                                IPI_PRIORITY);
                        } else if (xics_phys)
                                __raw_rm_writeb(IPI_PRIORITY,
                                                xics_phys + XICS_MFRR);
                        else
                                opal_int_set_mfrr(hard_smp_processor_id(),
                                                  IPI_PRIORITY);
                        /* Let side effects complete */
                        smp_mb();
                        return 1;
                }

                /* OK, it's an IPI for us */
                local_paca->kvm_hstate.saved_xirr = 0;
                return -1;
        }

        return kvmppc_check_passthru(xisr, xirr, again);
}

#ifdef CONFIG_KVM_XICS
static inline bool is_rm(void)
{
        return !(mfmsr() & MSR_DR);
}
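
/*
 * Real-mode handlers for the XICS hypercalls (H_XIRR, H_IPOLL, H_IPI,
 * H_CPPR, H_EOI).  When the guest interrupt controller is XIVE, these
 * dispatch to the real-mode XIVE handlers, or to the virtual-mode
 * handlers through the __xive_vm_* pointers that the XIVE module
 * populates; otherwise they fall back to the XICS emulation.
 */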
unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)
{
        if (!kvmppc_xics_enabled(vcpu))
                return H_TOO_HARD;
        if (xics_on_xive()) {
                if (is_rm())
                        return xive_rm_h_xirr(vcpu);
                if (unlikely(!__xive_vm_h_xirr))
                        return H_NOT_AVAILABLE;
                return __xive_vm_h_xirr(vcpu);
        } else
                return xics_rm_h_xirr(vcpu);
}

unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu)
{
        if (!kvmppc_xics_enabled(vcpu))
                return H_TOO_HARD;
        vcpu->arch.regs.gpr[5] = get_tb();
        if (xics_on_xive()) {
                if (is_rm())
                        return xive_rm_h_xirr(vcpu);
                if (unlikely(!__xive_vm_h_xirr))
                        return H_NOT_AVAILABLE;
                return __xive_vm_h_xirr(vcpu);
        } else
                return xics_rm_h_xirr(vcpu);
}

unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
{
        if (!kvmppc_xics_enabled(vcpu))
                return H_TOO_HARD;
        if (xics_on_xive()) {
                if (is_rm())
                        return xive_rm_h_ipoll(vcpu, server);
                if (unlikely(!__xive_vm_h_ipoll))
                        return H_NOT_AVAILABLE;
                return __xive_vm_h_ipoll(vcpu, server);
        } else
                return H_TOO_HARD;
}

int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
                    unsigned long mfrr)
{
        if (!kvmppc_xics_enabled(vcpu))
                return H_TOO_HARD;
        if (xics_on_xive()) {
                if (is_rm())
                        return xive_rm_h_ipi(vcpu, server, mfrr);
                if (unlikely(!__xive_vm_h_ipi))
                        return H_NOT_AVAILABLE;
                return __xive_vm_h_ipi(vcpu, server, mfrr);
        } else
                return xics_rm_h_ipi(vcpu, server, mfrr);
}

int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
{
        if (!kvmppc_xics_enabled(vcpu))
                return H_TOO_HARD;
        if (xics_on_xive()) {
                if (is_rm())
                        return xive_rm_h_cppr(vcpu, cppr);
                if (unlikely(!__xive_vm_h_cppr))
                        return H_NOT_AVAILABLE;
                return __xive_vm_h_cppr(vcpu, cppr);
        } else
                return xics_rm_h_cppr(vcpu, cppr);
}

int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
{
        if (!kvmppc_xics_enabled(vcpu))
                return H_TOO_HARD;
        if (xics_on_xive()) {
                if (is_rm())
                        return xive_rm_h_eoi(vcpu, xirr);
                if (unlikely(!__xive_vm_h_eoi))
                        return H_NOT_AVAILABLE;
                return __xive_vm_h_eoi(vcpu, xirr);
        } else
                return xics_rm_h_eoi(vcpu, xirr);
}
#endif /* CONFIG_KVM_XICS */

void kvmppc_bad_interrupt(struct pt_regs *regs)
{
        /*
         * A 0x100 (system reset) can happen at any time; a 0x200 (machine
         * check) can happen due to an invalid real address access, for
         * example, or at any time due to a hardware problem.
         */
        if (TRAP(regs) == 0x100) {
                get_paca()->in_nmi++;
                system_reset_exception(regs);
                get_paca()->in_nmi--;
        } else if (TRAP(regs) == 0x200) {
                machine_check_exception(regs);
        } else {
                die("Bad interrupt in KVM entry/exit code", regs, SIGABRT);
        }
        panic("Bad KVM trap");
}

static void kvmppc_end_cede(struct kvm_vcpu *vcpu)
{
        vcpu->arch.ceded = 0;
        if (vcpu->arch.timer_running) {
                hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
                vcpu->arch.timer_running = 0;
        }
}

void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
{
        /*
         * Check for illegal transactional state bit combination
         * and if we find it, force the TS field to a safe state.
         */
        if ((msr & MSR_TS_MASK) == MSR_TS_MASK)
                msr &= ~MSR_TS_MASK;
        vcpu->arch.shregs.msr = msr;
        kvmppc_end_cede(vcpu);
}
EXPORT_SYMBOL_GPL(kvmppc_set_msr_hv);
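
/*
 * Deliver an interrupt to the guest vcpu: save the current PC and MSR
 * in SRR0/SRR1 (with srr1_flags OR-ed into SRR1), then redirect the
 * vcpu to the given vector with the guest's interrupt MSR, keeping the
 * transactional-state bits consistent and applying the LPCR[AIL]=3
 * offset when it is in effect.
 */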
static void inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags)
{
        unsigned long msr, pc, new_msr, new_pc;

        msr = kvmppc_get_msr(vcpu);
        pc = kvmppc_get_pc(vcpu);
        new_msr = vcpu->arch.intr_msr;
        new_pc = vec;

        /* If transactional, change to suspend mode on IRQ delivery */
        if (MSR_TM_TRANSACTIONAL(msr))
                new_msr |= MSR_TS_S;
        else
                new_msr |= msr & MSR_TS_MASK;

        /*
         * Perform MSR and PC adjustment for LPCR[AIL]=3 if it is set and
         * applicable. AIL=2 is not supported.
         *
         * AIL does not apply to SRESET, MCE, or HMI (which is never
         * delivered to the guest), and does not apply if IR=0 or DR=0.
         */
        if (vec != BOOK3S_INTERRUPT_SYSTEM_RESET &&
            vec != BOOK3S_INTERRUPT_MACHINE_CHECK &&
            (vcpu->arch.vcore->lpcr & LPCR_AIL) == LPCR_AIL_3 &&
            (msr & (MSR_IR|MSR_DR)) == (MSR_IR|MSR_DR)) {
                new_msr |= MSR_IR | MSR_DR;
                new_pc += 0xC000000000004000ULL;
        }

        kvmppc_set_srr0(vcpu, pc);
        kvmppc_set_srr1(vcpu, (msr & SRR1_MSR_BITS) | srr1_flags);
        kvmppc_set_pc(vcpu, new_pc);
        vcpu->arch.shregs.msr = new_msr;
}

void kvmppc_inject_interrupt_hv(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags)
{
        inject_interrupt(vcpu, vec, srr1_flags);
        kvmppc_end_cede(vcpu);
}
EXPORT_SYMBOL_GPL(kvmppc_inject_interrupt_hv);

/*
 * Is there a PRIV_DOORBELL pending for the guest (on POWER9)?
 * Can we inject a Decrementer or an External interrupt?
 */
void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu)
{
        int ext;
        unsigned long lpcr;

        /* Insert EXTERNAL bit into LPCR at the MER bit position */
        ext = (vcpu->arch.pending_exceptions >> BOOK3S_IRQPRIO_EXTERNAL) & 1;
        lpcr = mfspr(SPRN_LPCR);
        lpcr |= ext << LPCR_MER_SH;
        mtspr(SPRN_LPCR, lpcr);
        isync();

        if (vcpu->arch.shregs.msr & MSR_EE) {
                if (ext) {
                        inject_interrupt(vcpu, BOOK3S_INTERRUPT_EXTERNAL, 0);
                } else {
                        long int dec = mfspr(SPRN_DEC);
                        if (!(lpcr & LPCR_LD))
                                dec = (int) dec;
                        if (dec < 0)
                                inject_interrupt(vcpu,
                                        BOOK3S_INTERRUPT_DECREMENTER, 0);
                }
        }

        if (vcpu->arch.doorbell_request) {
                mtspr(SPRN_DPDES, 1);
                vcpu->arch.vcore->dpdes = 1;
                smp_wmb();
                vcpu->arch.doorbell_request = 0;
        }
}

static void flush_guest_tlb(struct kvm *kvm)
{
        unsigned long rb, set;

        rb = PPC_BIT(52);	/* IS = 2 */
        if (kvm_is_radix(kvm)) {
                /* R=1 PRS=1 RIC=2 */
                asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
                             : : "r" (rb), "i" (1), "i" (1), "i" (2),
                               "r" (0) : "memory");
                for (set = 1; set < kvm->arch.tlb_sets; ++set) {
                        rb += PPC_BIT(51);	/* increment set number */
                        /* R=1 PRS=1 RIC=0 */
                        asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
                                     : : "r" (rb), "i" (1), "i" (1), "i" (0),
                                       "r" (0) : "memory");
                }
                asm volatile("ptesync": : :"memory");
                asm volatile(PPC_RADIX_INVALIDATE_ERAT_GUEST : : :"memory");
        } else {
                for (set = 0; set < kvm->arch.tlb_sets; ++set) {
                        /* R=0 PRS=0 RIC=0 */
                        asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
                                     : : "r" (rb), "i" (0), "i" (0), "i" (0),
                                       "r" (0) : "memory");
                        rb += PPC_BIT(51);	/* increment set number */
                }
                asm volatile("ptesync": : :"memory");
                asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT : : :"memory");
        }
}

void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu,
                                 struct kvm_nested_guest *nested)
{
        cpumask_t *need_tlb_flush;

        /*
         * On POWER9, individual threads can come in here, but the
         * TLB is shared between the 4 threads in a core, hence
         * invalidating on one thread invalidates for all.
         * Thus we make all 4 threads use the same bit.
         */
        if (cpu_has_feature(CPU_FTR_ARCH_300))
                pcpu = cpu_first_thread_sibling(pcpu);

        if (nested)
                need_tlb_flush = &nested->need_tlb_flush;
        else
                need_tlb_flush = &kvm->arch.need_tlb_flush;

        if (cpumask_test_cpu(pcpu, need_tlb_flush)) {
                flush_guest_tlb(kvm);

                /* Clear the bit after the TLB flush */
                cpumask_clear_cpu(pcpu, need_tlb_flush);
        }
}
EXPORT_SYMBOL_GPL(kvmppc_check_need_tlb_flush);