// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/cpu.h>
#include <linux/kvm_host.h>
#include <linux/preempt.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/sizes.h>
#include <linux/cma.h>
#include <linux/bitops.h>

#include <asm/asm-prototypes.h>
#include <asm/cputable.h>
#include <asm/interrupt.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/archrandom.h>
#include <asm/xics.h>
#include <asm/xive.h>
#include <asm/dbell.h>
#include <asm/cputhreads.h>
#include <asm/io.h>
#include <asm/opal.h>
#include <asm/smp.h>

#define KVM_CMA_CHUNK_ORDER	18

#include "book3s_xics.h"
#include "book3s_xive.h"

/*
 * Hash page table alignment on newer CPUs (CPU_FTR_ARCH_206)
 * should be a power of 2.
 */
#define HPT_ALIGN_PAGES		((1 << 18) >> PAGE_SHIFT) /* 256k */
/*
 * By default we reserve 5% of memory for hash pagetable allocation.
 */
static unsigned long kvm_cma_resv_ratio = 5;

static struct cma *kvm_cma;

static int __init early_parse_kvm_cma_resv(char *p)
{
	pr_debug("%s(%s)\n", __func__, p);
	if (!p)
		return -EINVAL;
	return kstrtoul(p, 0, &kvm_cma_resv_ratio);
}
early_param("kvm_cma_resv_ratio", early_parse_kvm_cma_resv);

struct page *kvm_alloc_hpt_cma(unsigned long nr_pages)
{
	VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);

	return cma_alloc(kvm_cma, nr_pages, order_base_2(HPT_ALIGN_PAGES),
			 false);
}
EXPORT_SYMBOL_GPL(kvm_alloc_hpt_cma);

void kvm_free_hpt_cma(struct page *page, unsigned long nr_pages)
{
	cma_release(kvm_cma, page, nr_pages);
}
EXPORT_SYMBOL_GPL(kvm_free_hpt_cma);

/**
 * kvm_cma_reserve() - reserve area for kvm hash pagetable
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the memblock allocator has been
 * activated and all other subsystems have already allocated/reserved
 * memory.
 */
void __init kvm_cma_reserve(void)
{
	unsigned long align_size;
	phys_addr_t selected_size;

	/*
	 * We need CMA reservation only when we are in HV mode
	 */
	if (!cpu_has_feature(CPU_FTR_HVMODE))
		return;

	selected_size = PAGE_ALIGN(memblock_phys_mem_size() * kvm_cma_resv_ratio / 100);
	if (selected_size) {
		pr_info("%s: reserving %ld MiB for global area\n", __func__,
			(unsigned long)selected_size / SZ_1M);
		align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;
		cma_declare_contiguous(0, selected_size, 0, align_size,
			KVM_CMA_CHUNK_ORDER - PAGE_SHIFT, false, "kvm_cma",
			&kvm_cma);
	}
}
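
/*
 * For example, with the default kvm_cma_resv_ratio of 5 and 256 GiB of
 * physical memory, the reservation above works out to roughly
 * 256 GiB * 5 / 100 = 12.8 GiB, page-aligned, declared with 256 KiB
 * alignment (HPT_ALIGN_PAGES << PAGE_SHIFT) and a 256 KiB CMA bitmap
 * granularity (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT).
 */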

/*
 * Real-mode H_CONFER implementation.
 * We check if we are the only vcpu out of this virtual core
 * still running in the guest and not ceded. If so, we pop up
 * to the virtual-mode implementation; if not, just return to
 * the guest.
 */
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
			    unsigned int yield_count)
{
	struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
	int ptid = local_paca->kvm_hstate.ptid;
	int threads_running;
	int threads_ceded;
	int threads_conferring;
	u64 stop = get_tb() + 10 * tb_ticks_per_usec;
	int rv = H_SUCCESS; /* => don't yield */

	set_bit(ptid, &vc->conferring_threads);
	while ((get_tb() < stop) && !VCORE_IS_EXITING(vc)) {
		threads_running = VCORE_ENTRY_MAP(vc);
		threads_ceded = vc->napping_threads;
		threads_conferring = vc->conferring_threads;
		if ((threads_ceded | threads_conferring) == threads_running) {
			rv = H_TOO_HARD; /* => do yield */
			break;
		}
	}
	clear_bit(ptid, &vc->conferring_threads);
	return rv;
}

/*
 * When running HV mode KVM we need to block certain operations while KVM VMs
 * exist in the system. We use a counter of VMs to track this.
 *
 * One of the operations we need to block is onlining of secondaries, so we
 * protect hv_vm_count with get/put_online_cpus().
 */
static atomic_t hv_vm_count;

void kvm_hv_vm_activated(void)
{
	get_online_cpus();
	atomic_inc(&hv_vm_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_activated);

void kvm_hv_vm_deactivated(void)
{
	get_online_cpus();
	atomic_dec(&hv_vm_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_deactivated);

bool kvm_hv_mode_active(void)
{
	return atomic_read(&hv_vm_count) != 0;
}

extern int hcall_real_table[], hcall_real_table_end[];

int kvmppc_hcall_impl_hv_realmode(unsigned long cmd)
{
	cmd /= 4;
	if (cmd < hcall_real_table_end - hcall_real_table &&
	    hcall_real_table[cmd])
		return 1;

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_hcall_impl_hv_realmode);

int kvmppc_hwrng_present(void)
{
	return powernv_hwrng_present();
}
EXPORT_SYMBOL_GPL(kvmppc_hwrng_present);

long kvmppc_rm_h_random(struct kvm_vcpu *vcpu)
{
	if (powernv_get_random_real_mode(&vcpu->arch.regs.gpr[4]))
		return H_SUCCESS;

	return H_HARDWARE;
}

/*
 * Send an interrupt or message to another CPU.
 * The caller needs to include any barrier needed to order writes
 * to memory vs. the IPI/message.
 */
void kvmhv_rm_send_ipi(int cpu)
{
	void __iomem *xics_phys;
	unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);

	/* For a nested hypervisor, use the XICS via hcall */
	if (kvmhv_on_pseries()) {
		unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

		plpar_hcall_raw(H_IPI, retbuf, get_hard_smp_processor_id(cpu),
				IPI_PRIORITY);
		return;
	}

	/* On POWER9 we can use msgsnd for any destination cpu. */
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		msg |= get_hard_smp_processor_id(cpu);
		__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
		return;
	}

	/* On POWER8 for IPIs to threads in the same core, use msgsnd. */
	if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
	    cpu_first_thread_sibling(cpu) ==
	    cpu_first_thread_sibling(raw_smp_processor_id())) {
		msg |= cpu_thread_in_core(cpu);
		__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
		return;
	}

	/* We should never reach this */
	if (WARN_ON_ONCE(xics_on_xive()))
		return;

	/* Else poke the target with an IPI */
	xics_phys = paca_ptrs[cpu]->kvm_hstate.xics_phys;
	if (xics_phys)
		__raw_rm_writeb(IPI_PRIORITY, xics_phys + XICS_MFRR);
	else
		opal_int_set_mfrr(get_hard_smp_processor_id(cpu), IPI_PRIORITY);
}

/*
 * The following functions are called from the assembly code
 * in book3s_hv_rmhandlers.S.
 */
static void kvmhv_interrupt_vcore(struct kvmppc_vcore *vc, int active)
{
	int cpu = vc->pcpu;

	/* Order setting of exit map vs. msgsnd/IPI */
	smp_mb();
	for (; active; active >>= 1, ++cpu)
		if (active & 1)
			kvmhv_rm_send_ipi(cpu);
}

void kvmhv_commence_exit(int trap)
{
	struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
	int ptid = local_paca->kvm_hstate.ptid;
	struct kvm_split_mode *sip = local_paca->kvm_hstate.kvm_split_mode;
	int me, ee, i;

	/* Set our bit in the threads-exiting-guest map in the 0xff00
	   bits of vcore->entry_exit_map */
	me = 0x100 << ptid;
	do {
		ee = vc->entry_exit_map;
	} while (cmpxchg(&vc->entry_exit_map, ee, ee | me) != ee);

	/* Are we the first here? */
	if ((ee >> 8) != 0)
		return;

	/*
	 * Trigger the other threads in this vcore to exit the guest.
	 * If this is a hypervisor decrementer interrupt then they
	 * will be already on their way out of the guest.
	 */
	if (trap != BOOK3S_INTERRUPT_HV_DECREMENTER)
		kvmhv_interrupt_vcore(vc, ee & ~(1 << ptid));

	/*
	 * If we are doing dynamic micro-threading, interrupt the other
	 * subcores to pull them out of their guests too.
	 */
	if (!sip)
		return;

	for (i = 0; i < MAX_SUBCORES; ++i) {
		vc = sip->vc[i];
		if (!vc)
			break;
		do {
			ee = vc->entry_exit_map;
			/* Already asked to exit? */
			if ((ee >> 8) != 0)
				break;
		} while (cmpxchg(&vc->entry_exit_map, ee,
				 ee | VCORE_EXIT_REQ) != ee);
		if ((ee >> 8) == 0)
			kvmhv_interrupt_vcore(vc, ee);
	}
}

struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;
EXPORT_SYMBOL_GPL(kvmppc_host_rm_ops_hv);

#ifdef CONFIG_KVM_XICS
static struct kvmppc_irq_map *get_irqmap(struct kvmppc_passthru_irqmap *pimap,
					 u32 xisr)
{
	int i;

	/*
	 * We access the mapped array here without a lock. That
	 * is safe because we never reduce the number of entries
	 * in the array and we never change the v_hwirq field of
	 * an entry once it is set.
	 *
	 * We have also carefully ordered the stores in the writer
	 * and the loads here in the reader, so that if we find a matching
	 * hwirq here, the associated GSI and irq_desc fields are valid.
	 */
	for (i = 0; i < pimap->n_mapped; i++) {
		if (xisr == pimap->mapped[i].r_hwirq) {
			/*
			 * Order subsequent reads in the caller to serialize
			 * with the writer.
			 */
			smp_rmb();
			return &pimap->mapped[i];
		}
	}
	return NULL;
}
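
/*
 * The pairing writer is kvmppc_set_passthru_irq() in book3s_hv.c: the
 * expectation is that it fills in the GSI and irq_desc fields of a new
 * entry first, issues smp_wmb(), and only then publishes the entry by
 * storing r_hwirq and bumping n_mapped, which is what makes the lockless
 * scan in get_irqmap() above safe.
 */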

/*
 * If we have an interrupt that's not an IPI, check if we have a
 * passthrough adapter and if so, check if this external interrupt
 * is for the adapter.
 * We will attempt to deliver the IRQ directly to the target VCPU's
 * ICP, the virtual ICP (based on affinity - the xive value in ICS).
 *
 * If the delivery fails or if this is not for a passthrough adapter,
 * return to the host to handle this interrupt. We earlier
 * saved a copy of the XIRR in the PACA; it will be picked up by
 * the host ICP driver.
 */
static int kvmppc_check_passthru(u32 xisr, __be32 xirr, bool *again)
{
	struct kvmppc_passthru_irqmap *pimap;
	struct kvmppc_irq_map *irq_map;
	struct kvm_vcpu *vcpu;

	vcpu = local_paca->kvm_hstate.kvm_vcpu;
	if (!vcpu)
		return 1;
	pimap = kvmppc_get_passthru_irqmap(vcpu->kvm);
	if (!pimap)
		return 1;
	irq_map = get_irqmap(pimap, xisr);
	if (!irq_map)
		return 1;

	/* We're handling this interrupt, generic code doesn't need to */
	local_paca->kvm_hstate.saved_xirr = 0;

	return kvmppc_deliver_irq_passthru(vcpu, xirr, irq_map, pimap, again);
}

#else
static inline int kvmppc_check_passthru(u32 xisr, __be32 xirr, bool *again)
{
	return 1;
}
#endif

/*
 * Determine what sort of external interrupt is pending (if any).
 * Returns:
 *	0 if no interrupt is pending
 *	1 if an interrupt is pending that needs to be handled by the host
 *	2 Passthrough that needs completion in the host
 *	-1 if there was a guest wakeup IPI (which has now been cleared)
 *	-2 if there is a PCI passthrough external interrupt that was handled
 */
static long kvmppc_read_one_intr(bool *again);

long kvmppc_read_intr(void)
{
	long ret = 0;
	long rc;
	bool again;

	if (xive_enabled())
		return 1;

	do {
		again = false;
		rc = kvmppc_read_one_intr(&again);
		if (rc && (ret == 0 || rc > ret))
			ret = rc;
	} while (again);
	return ret;
}

static long kvmppc_read_one_intr(bool *again)
{
	void __iomem *xics_phys;
	u32 h_xirr;
	__be32 xirr;
	u32 xisr;
	u8 host_ipi;
	int64_t rc;

	if (xive_enabled())
		return 1;

	/* see if a host IPI is pending */
	host_ipi = local_paca->kvm_hstate.host_ipi;
	if (host_ipi)
		return 1;

	/* Now read the interrupt from the ICP */
	if (kvmhv_on_pseries()) {
		unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

		rc = plpar_hcall_raw(H_XIRR, retbuf, 0xFF);
		xirr = cpu_to_be32(retbuf[0]);
	} else {
		xics_phys = local_paca->kvm_hstate.xics_phys;
		rc = 0;
		if (!xics_phys)
			rc = opal_int_get_xirr(&xirr, false);
		else
			xirr = __raw_rm_readl(xics_phys + XICS_XIRR);
	}
	if (rc < 0)
		return 1;

	/*
	 * Save XIRR for later. Since we get control in reverse endian
	 * on LE systems, save it byte reversed and fetch it back in
	 * host endian. Note that xirr is the value read from the
	 * XIRR register, while h_xirr is the host endian version.
	 */
	h_xirr = be32_to_cpu(xirr);
	local_paca->kvm_hstate.saved_xirr = h_xirr;
	xisr = h_xirr & 0xffffff;
	/*
	 * Ensure that the store/load complete to guarantee all side
	 * effects of loading from XIRR have completed
	 */
	smp_mb();

	/* if nothing pending in the ICP */
	if (!xisr)
		return 0;

	/* We found something in the ICP...
	 *
	 * If it is an IPI, clear the MFRR and EOI it.
	 */
	if (xisr == XICS_IPI) {
		rc = 0;
		if (kvmhv_on_pseries()) {
			unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

			plpar_hcall_raw(H_IPI, retbuf,
					hard_smp_processor_id(), 0xff);
			plpar_hcall_raw(H_EOI, retbuf, h_xirr);
		} else if (xics_phys) {
			__raw_rm_writeb(0xff, xics_phys + XICS_MFRR);
			__raw_rm_writel(xirr, xics_phys + XICS_XIRR);
		} else {
			opal_int_set_mfrr(hard_smp_processor_id(), 0xff);
			rc = opal_int_eoi(h_xirr);
		}
		/* If rc > 0, there is another interrupt pending */
		*again = rc > 0;

		/*
		 * Need to ensure side effects of above stores
		 * complete before proceeding.
		 */
		smp_mb();

		/*
		 * We need to re-check host IPI now in case it got set in the
		 * meantime. If it's clear, we bounce the interrupt to the
		 * guest
		 */
		host_ipi = local_paca->kvm_hstate.host_ipi;
		if (unlikely(host_ipi != 0)) {
			/* We raced with the host,
			 * we need to resend that IPI, bummer
			 */
			if (kvmhv_on_pseries()) {
				unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

				plpar_hcall_raw(H_IPI, retbuf,
						hard_smp_processor_id(),
						IPI_PRIORITY);
			} else if (xics_phys)
				__raw_rm_writeb(IPI_PRIORITY,
						xics_phys + XICS_MFRR);
			else
				opal_int_set_mfrr(hard_smp_processor_id(),
						  IPI_PRIORITY);
			/* Let side effects complete */
			smp_mb();
			return 1;
		}

		/* OK, it's an IPI for us */
		local_paca->kvm_hstate.saved_xirr = 0;
		return -1;
	}

	return kvmppc_check_passthru(xisr, xirr, again);
}

#ifdef CONFIG_KVM_XICS
unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)
{
	if (!kvmppc_xics_enabled(vcpu))
		return H_TOO_HARD;
	if (xics_on_xive())
		return xive_rm_h_xirr(vcpu);
	else
		return xics_rm_h_xirr(vcpu);
}

unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu)
{
	if (!kvmppc_xics_enabled(vcpu))
		return H_TOO_HARD;
	vcpu->arch.regs.gpr[5] = get_tb();
	if (xics_on_xive())
		return xive_rm_h_xirr(vcpu);
	else
		return xics_rm_h_xirr(vcpu);
}

unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
{
	if (!kvmppc_xics_enabled(vcpu))
		return H_TOO_HARD;
	if (xics_on_xive())
		return xive_rm_h_ipoll(vcpu, server);
	else
		return H_TOO_HARD;
}

int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
		    unsigned long mfrr)
{
	if (!kvmppc_xics_enabled(vcpu))
		return H_TOO_HARD;
	if (xics_on_xive())
		return xive_rm_h_ipi(vcpu, server, mfrr);
	else
		return xics_rm_h_ipi(vcpu, server, mfrr);
}

int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
{
	if (!kvmppc_xics_enabled(vcpu))
		return H_TOO_HARD;
	if (xics_on_xive())
		return xive_rm_h_cppr(vcpu, cppr);
	else
		return xics_rm_h_cppr(vcpu, cppr);
}

int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
{
	if (!kvmppc_xics_enabled(vcpu))
		return H_TOO_HARD;
	if (xics_on_xive())
		return xive_rm_h_eoi(vcpu, xirr);
	else
		return xics_rm_h_eoi(vcpu, xirr);
}
#endif /* CONFIG_KVM_XICS */

void kvmppc_bad_interrupt(struct pt_regs *regs)
{
	/*
	 * 100 could happen at any time, 200 can happen due to invalid real
	 * address access for example (or any time due to a hardware problem).
	 */
	if (TRAP(regs) == 0x100) {
		get_paca()->in_nmi++;
		system_reset_exception(regs);
		get_paca()->in_nmi--;
	} else if (TRAP(regs) == 0x200) {
		machine_check_exception(regs);
	} else {
		die("Bad interrupt in KVM entry/exit code", regs, SIGABRT);
	}
	panic("Bad KVM trap");
}

static void kvmppc_end_cede(struct kvm_vcpu *vcpu)
{
	vcpu->arch.ceded = 0;
	if (vcpu->arch.timer_running) {
		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
		vcpu->arch.timer_running = 0;
	}
}

void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
{
	/* Guest must always run with ME enabled, HV disabled. */
	msr = (msr | MSR_ME) & ~MSR_HV;

	/*
	 * Check for illegal transactional state bit combination
	 * and if we find it, force the TS field to a safe state.
	 */
	if ((msr & MSR_TS_MASK) == MSR_TS_MASK)
		msr &= ~MSR_TS_MASK;
	vcpu->arch.shregs.msr = msr;
	kvmppc_end_cede(vcpu);
}
EXPORT_SYMBOL_GPL(kvmppc_set_msr_hv);

static void inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags)
{
	unsigned long msr, pc, new_msr, new_pc;

	msr = kvmppc_get_msr(vcpu);
	pc = kvmppc_get_pc(vcpu);
	new_msr = vcpu->arch.intr_msr;
	new_pc = vec;

	/* If transactional, change to suspend mode on IRQ delivery */
	if (MSR_TM_TRANSACTIONAL(msr))
		new_msr |= MSR_TS_S;
	else
		new_msr |= msr & MSR_TS_MASK;

	/*
	 * Perform MSR and PC adjustment for LPCR[AIL]=3 if it is set and
	 * applicable. AIL=2 is not supported.
	 *
	 * AIL does not apply to SRESET, MCE, or HMI (which is never
	 * delivered to the guest), and does not apply if IR=0 or DR=0.
	 */
	if (vec != BOOK3S_INTERRUPT_SYSTEM_RESET &&
	    vec != BOOK3S_INTERRUPT_MACHINE_CHECK &&
	    (vcpu->arch.vcore->lpcr & LPCR_AIL) == LPCR_AIL_3 &&
	    (msr & (MSR_IR|MSR_DR)) == (MSR_IR|MSR_DR)) {
		new_msr |= MSR_IR | MSR_DR;
		new_pc += 0xC000000000004000ULL;
	}

	kvmppc_set_srr0(vcpu, pc);
	kvmppc_set_srr1(vcpu, (msr & SRR1_MSR_BITS) | srr1_flags);
	kvmppc_set_pc(vcpu, new_pc);
	vcpu->arch.shregs.msr = new_msr;
}

void kvmppc_inject_interrupt_hv(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags)
{
	inject_interrupt(vcpu, vec, srr1_flags);
	kvmppc_end_cede(vcpu);
}
EXPORT_SYMBOL_GPL(kvmppc_inject_interrupt_hv);

/*
 * Is there a PRIV_DOORBELL pending for the guest (on POWER9)?
 * Can we inject a Decrementer or an External interrupt?
 */
void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu)
{
	int ext;
	unsigned long lpcr;

	/* Insert EXTERNAL bit into LPCR at the MER bit position */
	ext = (vcpu->arch.pending_exceptions >> BOOK3S_IRQPRIO_EXTERNAL) & 1;
	lpcr = mfspr(SPRN_LPCR);
	lpcr |= ext << LPCR_MER_SH;
	mtspr(SPRN_LPCR, lpcr);
	isync();

	if (vcpu->arch.shregs.msr & MSR_EE) {
		if (ext) {
			inject_interrupt(vcpu, BOOK3S_INTERRUPT_EXTERNAL, 0);
		} else {
			long int dec = mfspr(SPRN_DEC);
			if (!(lpcr & LPCR_LD))
				dec = (int) dec;
			if (dec < 0)
				inject_interrupt(vcpu,
					BOOK3S_INTERRUPT_DECREMENTER, 0);
		}
	}

	if (vcpu->arch.doorbell_request) {
		mtspr(SPRN_DPDES, 1);
		vcpu->arch.vcore->dpdes = 1;
		smp_wmb();
		vcpu->arch.doorbell_request = 0;
	}
}

static void flush_guest_tlb(struct kvm *kvm)
{
	unsigned long rb, set;

	rb = PPC_BIT(52);	/* IS = 2 */
	if (kvm_is_radix(kvm)) {
		/* R=1 PRS=1 RIC=2 */
		asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
			     : : "r" (rb), "i" (1), "i" (1), "i" (2),
			       "r" (0) : "memory");
		for (set = 1; set < kvm->arch.tlb_sets; ++set) {
			rb += PPC_BIT(51);	/* increment set number */
			/* R=1 PRS=1 RIC=0 */
			asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
				     : : "r" (rb), "i" (1), "i" (1), "i" (0),
				       "r" (0) : "memory");
		}
		asm volatile("ptesync": : :"memory");
		asm volatile(PPC_RADIX_INVALIDATE_ERAT_GUEST : : :"memory");
	} else {
		for (set = 0; set < kvm->arch.tlb_sets; ++set) {
			/* R=0 PRS=0 RIC=0 */
			asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
				     : : "r" (rb), "i" (0), "i" (0), "i" (0),
				       "r" (0) : "memory");
			rb += PPC_BIT(51);	/* increment set number */
		}
		asm volatile("ptesync": : :"memory");
		asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT : : :"memory");
	}
}

void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu,
				 struct kvm_nested_guest *nested)
{
	cpumask_t *need_tlb_flush;

	/*
	 * On POWER9, individual threads can come in here, but the
	 * TLB is shared between the 4 threads in a core, hence
	 * invalidating on one thread invalidates for all.
	 * Thus we make all 4 threads use the same bit.
	 */
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		pcpu = cpu_first_thread_sibling(pcpu);

	if (nested)
		need_tlb_flush = &nested->need_tlb_flush;
	else
		need_tlb_flush = &kvm->arch.need_tlb_flush;

	if (cpumask_test_cpu(pcpu, need_tlb_flush)) {
		flush_guest_tlb(kvm);

		/* Clear the bit after the TLB flush */
		cpumask_clear_cpu(pcpu, need_tlb_flush);
	}
}
EXPORT_SYMBOL_GPL(kvmppc_check_need_tlb_flush);