/*
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/cpu.h>
#include <linux/kvm_host.h>
#include <linux/preempt.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/sizes.h>
#include <linux/cma.h>
#include <linux/bitops.h>

#include <asm/cputable.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/archrandom.h>
#include <asm/xics.h>
#include <asm/xive.h>
#include <asm/dbell.h>
#include <asm/cputhreads.h>
#include <asm/io.h>
#include <asm/opal.h>
#include <asm/smp.h>

#define KVM_CMA_CHUNK_ORDER	18

#include "book3s_xics.h"
#include "book3s_xive.h"

/*
 * The XIVE module will populate these when it loads.
 */
unsigned long (*__xive_vm_h_xirr)(struct kvm_vcpu *vcpu);
unsigned long (*__xive_vm_h_ipoll)(struct kvm_vcpu *vcpu, unsigned long server);
int (*__xive_vm_h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
		       unsigned long mfrr);
int (*__xive_vm_h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr);
int (*__xive_vm_h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr);
EXPORT_SYMBOL_GPL(__xive_vm_h_xirr);
EXPORT_SYMBOL_GPL(__xive_vm_h_ipoll);
EXPORT_SYMBOL_GPL(__xive_vm_h_ipi);
EXPORT_SYMBOL_GPL(__xive_vm_h_cppr);
EXPORT_SYMBOL_GPL(__xive_vm_h_eoi);

/*
 * Hash page table alignment on newer CPUs (CPU_FTR_ARCH_206)
 * must be a power of 2.
 */
#define HPT_ALIGN_PAGES		((1 << 18) >> PAGE_SHIFT) /* 256k */
/*
 * By default we reserve 5% of memory for hash pagetable allocation.
 */
static unsigned long kvm_cma_resv_ratio = 5;

static struct cma *kvm_cma;

static int __init early_parse_kvm_cma_resv(char *p)
{
	pr_debug("%s(%s)\n", __func__, p);
	if (!p)
		return -EINVAL;
	return kstrtoul(p, 0, &kvm_cma_resv_ratio);
}
early_param("kvm_cma_resv_ratio", early_parse_kvm_cma_resv);

struct page *kvm_alloc_hpt_cma(unsigned long nr_pages)
{
	VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);

	return cma_alloc(kvm_cma, nr_pages, order_base_2(HPT_ALIGN_PAGES),
			 GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(kvm_alloc_hpt_cma);

void kvm_free_hpt_cma(struct page *page, unsigned long nr_pages)
{
	cma_release(kvm_cma, page, nr_pages);
}
EXPORT_SYMBOL_GPL(kvm_free_hpt_cma);
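/*
 * Illustrative sketch only (no caller in this file does exactly this):
 * a 16 MiB hash page table spans (1ul << 24) >> PAGE_SHIFT pages, so
 * order_base_2() of that count is 24 - PAGE_SHIFT, which always meets
 * the KVM_CMA_CHUNK_ORDER - PAGE_SHIFT minimum enforced by the
 * VM_BUG_ON above:
 *
 *	unsigned long nr_pages = (1ul << 24) >> PAGE_SHIFT;
 *	struct page *pg = kvm_alloc_hpt_cma(nr_pages);
 *
 *	if (pg)
 *		kvm_free_hpt_cma(pg, nr_pages);
 */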
/**
 * kvm_cma_reserve() - reserve area for kvm hash pagetable
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the memblock allocator
 * has been activated and all other subsystems have already allocated/reserved
 * memory.
 */
void __init kvm_cma_reserve(void)
{
	unsigned long align_size;
	struct memblock_region *reg;
	phys_addr_t selected_size = 0;

	/*
	 * We need CMA reservation only when we are in HV mode.
	 */
	if (!cpu_has_feature(CPU_FTR_HVMODE))
		return;
	/*
	 * We cannot use memblock_phys_mem_size() here, because
	 * memblock_analyze() has not been called yet.
	 */
	for_each_memblock(memory, reg)
		selected_size += memblock_region_memory_end_pfn(reg) -
				 memblock_region_memory_base_pfn(reg);

	selected_size = (selected_size * kvm_cma_resv_ratio / 100) << PAGE_SHIFT;
	if (selected_size) {
		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
			 (unsigned long)selected_size / SZ_1M);
		align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;
		cma_declare_contiguous(0, selected_size, 0, align_size,
			KVM_CMA_CHUNK_ORDER - PAGE_SHIFT, false, "kvm_cma",
			&kvm_cma);
	}
}

/*
 * Real-mode H_CONFER implementation.
 * We check if we are the only vcpu out of this virtual core
 * still running in the guest and not ceded. If so, we pop up
 * to the virtual-mode implementation; if not, just return to
 * the guest.
 */
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
			    unsigned int yield_count)
{
	struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
	int ptid = local_paca->kvm_hstate.ptid;
	int threads_running;
	int threads_ceded;
	int threads_conferring;
	u64 stop = get_tb() + 10 * tb_ticks_per_usec;
	int rv = H_SUCCESS; /* => don't yield */

	set_bit(ptid, &vc->conferring_threads);
	while ((get_tb() < stop) && !VCORE_IS_EXITING(vc)) {
		threads_running = VCORE_ENTRY_MAP(vc);
		threads_ceded = vc->napping_threads;
		threads_conferring = vc->conferring_threads;
		if ((threads_ceded | threads_conferring) == threads_running) {
			rv = H_TOO_HARD; /* => do yield */
			break;
		}
	}
	clear_bit(ptid, &vc->conferring_threads);
	return rv;
}

/*
 * When running HV mode KVM we need to block certain operations while KVM VMs
 * exist in the system. We use a counter of VMs to track this.
 *
 * One of the operations we need to block is onlining of secondaries, so we
 * protect hv_vm_count with get/put_online_cpus().
 */
static atomic_t hv_vm_count;

void kvm_hv_vm_activated(void)
{
	get_online_cpus();
	atomic_inc(&hv_vm_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_activated);

void kvm_hv_vm_deactivated(void)
{
	get_online_cpus();
	atomic_dec(&hv_vm_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_deactivated);

bool kvm_hv_mode_active(void)
{
	return atomic_read(&hv_vm_count) != 0;
}

extern int hcall_real_table[], hcall_real_table_end[];
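/*
 * hcall numbers are multiples of 4, so dividing by 4 below turns the
 * hcall number into an index into hcall_real_table. As an illustrative
 * example (the table itself lives in book3s_hv_rmhandlers.S): H_CONFER,
 * 0x78, would land at index 0x78 / 4 = 30.
 */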
int kvmppc_hcall_impl_hv_realmode(unsigned long cmd)
{
	cmd /= 4;
	if (cmd < hcall_real_table_end - hcall_real_table &&
	    hcall_real_table[cmd])
		return 1;

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_hcall_impl_hv_realmode);

int kvmppc_hwrng_present(void)
{
	return powernv_hwrng_present();
}
EXPORT_SYMBOL_GPL(kvmppc_hwrng_present);

long kvmppc_h_random(struct kvm_vcpu *vcpu)
{
	int r;

	/* Only need to do the expensive mfmsr() on radix */
	if (kvm_is_radix(vcpu->kvm) && (mfmsr() & MSR_IR))
		r = powernv_get_random_long(&vcpu->arch.gpr[4]);
	else
		r = powernv_get_random_real_mode(&vcpu->arch.gpr[4]);
	if (r)
		return H_SUCCESS;

	return H_HARDWARE;
}

/*
 * Send an interrupt or message to another CPU.
 * The caller needs to include any barrier needed to order writes
 * to memory vs. the IPI/message.
 */
void kvmhv_rm_send_ipi(int cpu)
{
	void __iomem *xics_phys;
	unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);

	/* On POWER9 we can use msgsnd for any destination cpu. */
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		msg |= get_hard_smp_processor_id(cpu);
		__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
		return;
	}

	/* On POWER8 for IPIs to threads in the same core, use msgsnd. */
	if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
	    cpu_first_thread_sibling(cpu) ==
	    cpu_first_thread_sibling(raw_smp_processor_id())) {
		msg |= cpu_thread_in_core(cpu);
		__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
		return;
	}

	/* We should never reach this */
	if (WARN_ON_ONCE(xive_enabled()))
		return;

	/* Else poke the target with an IPI */
	xics_phys = paca[cpu].kvm_hstate.xics_phys;
	if (xics_phys)
		__raw_rm_writeb(IPI_PRIORITY, xics_phys + XICS_MFRR);
	else
		opal_int_set_mfrr(get_hard_smp_processor_id(cpu), IPI_PRIORITY);
}

/*
 * The following functions are called from the assembly code
 * in book3s_hv_rmhandlers.S.
 */
static void kvmhv_interrupt_vcore(struct kvmppc_vcore *vc, int active)
{
	int cpu = vc->pcpu;

	/* Order setting of exit map vs. msgsnd/IPI */
	smp_mb();
	for (; active; active >>= 1, ++cpu)
		if (active & 1)
			kvmhv_rm_send_ipi(cpu);
}
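/*
 * For reference (derived from the code below, not a separate spec):
 * vcore->entry_exit_map packs two 8-bit thread maps into one word. The
 * low byte tracks threads that have entered the guest, and the 0xff00
 * byte tracks threads that have started exiting, which is why the
 * exiting thread sets 0x100 << ptid and tests (ee >> 8) to see whether
 * another thread got there first.
 */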
void kvmhv_commence_exit(int trap)
{
	struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
	int ptid = local_paca->kvm_hstate.ptid;
	struct kvm_split_mode *sip = local_paca->kvm_hstate.kvm_split_mode;
	int me, ee, i;

	/*
	 * Set our bit in the threads-exiting-guest map in the 0xff00
	 * bits of vcore->entry_exit_map.
	 */
	me = 0x100 << ptid;
	do {
		ee = vc->entry_exit_map;
	} while (cmpxchg(&vc->entry_exit_map, ee, ee | me) != ee);

	/* Are we the first here? */
	if ((ee >> 8) != 0)
		return;

	/*
	 * Trigger the other threads in this vcore to exit the guest.
	 * If this is a hypervisor decrementer interrupt then they
	 * will be already on their way out of the guest.
	 */
	if (trap != BOOK3S_INTERRUPT_HV_DECREMENTER)
		kvmhv_interrupt_vcore(vc, ee & ~(1 << ptid));

	/*
	 * If we are doing dynamic micro-threading, interrupt the other
	 * subcores to pull them out of their guests too.
	 */
	if (!sip)
		return;

	for (i = 0; i < MAX_SUBCORES; ++i) {
		vc = sip->vc[i];
		if (!vc)
			break;
		do {
			ee = vc->entry_exit_map;
			/* Already asked to exit? */
			if ((ee >> 8) != 0)
				break;
		} while (cmpxchg(&vc->entry_exit_map, ee,
				 ee | VCORE_EXIT_REQ) != ee);
		if ((ee >> 8) == 0)
			kvmhv_interrupt_vcore(vc, ee);
	}
}

struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;
EXPORT_SYMBOL_GPL(kvmppc_host_rm_ops_hv);

#ifdef CONFIG_KVM_XICS
static struct kvmppc_irq_map *get_irqmap(struct kvmppc_passthru_irqmap *pimap,
					 u32 xisr)
{
	int i;

	/*
	 * We access the mapped array here without a lock. That
	 * is safe because we never reduce the number of entries
	 * in the array and we never change the v_hwirq field of
	 * an entry once it is set.
	 *
	 * We have also carefully ordered the stores in the writer
	 * and the loads here in the reader, so that if we find a matching
	 * hwirq here, the associated GSI and irq_desc fields are valid.
	 */
	for (i = 0; i < pimap->n_mapped; i++) {
		if (xisr == pimap->mapped[i].r_hwirq) {
			/*
			 * Order subsequent reads in the caller to serialize
			 * with the writer.
			 */
			smp_rmb();
			return &pimap->mapped[i];
		}
	}
	return NULL;
}

/*
 * If we have an interrupt that's not an IPI, check if we have a
 * passthrough adapter and if so, check if this external interrupt
 * is for the adapter.
 * We will attempt to deliver the IRQ directly to the target VCPU's
 * ICP, the virtual ICP (based on affinity - the xive value in ICS).
 *
 * If the delivery fails or if this is not for a passthrough adapter,
 * return to the host to handle this interrupt. We earlier
 * saved a copy of the XIRR in the PACA, it will be picked up by
 * the host ICP driver.
 */
static int kvmppc_check_passthru(u32 xisr, __be32 xirr, bool *again)
{
	struct kvmppc_passthru_irqmap *pimap;
	struct kvmppc_irq_map *irq_map;
	struct kvm_vcpu *vcpu;

	vcpu = local_paca->kvm_hstate.kvm_vcpu;
	if (!vcpu)
		return 1;
	pimap = kvmppc_get_passthru_irqmap(vcpu->kvm);
	if (!pimap)
		return 1;
	irq_map = get_irqmap(pimap, xisr);
	if (!irq_map)
		return 1;

	/* We're handling this interrupt, generic code doesn't need to */
	local_paca->kvm_hstate.saved_xirr = 0;

	return kvmppc_deliver_irq_passthru(vcpu, xirr, irq_map, pimap, again);
}

#else
static inline int kvmppc_check_passthru(u32 xisr, __be32 xirr, bool *again)
{
	return 1;
}
#endif

/*
 * Determine what sort of external interrupt is pending (if any).
 * Returns:
 *	0 if no interrupt is pending
 *	1 if an interrupt is pending that needs to be handled by the host
 *	2 if a passthrough interrupt needs completion in the host
 *	-1 if there was a guest wakeup IPI (which has now been cleared)
 *	-2 if there is a PCI passthrough external interrupt that was handled
 */
static long kvmppc_read_one_intr(bool *again);

long kvmppc_read_intr(void)
{
	long ret = 0;
	long rc;
	bool again;

	if (xive_enabled())
		return 1;

	do {
		again = false;
		rc = kvmppc_read_one_intr(&again);
		if (rc && (ret == 0 || rc > ret))
			ret = rc;
	} while (again);
	return ret;
}
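/*
 * Background note, stated here for clarity (standard XICS register
 * layout rather than anything specific to this file): the 32-bit XIRR
 * read from the ICP packs the CPPR into the top 8 bits and the
 * interrupt source number (XISR) into the low 24 bits, hence the
 * 0xffffff mask below.
 */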
static long kvmppc_read_one_intr(bool *again)
{
	void __iomem *xics_phys;
	u32 h_xirr;
	__be32 xirr;
	u32 xisr;
	u8 host_ipi;
	int64_t rc;

	if (xive_enabled())
		return 1;

	/* see if a host IPI is pending */
	host_ipi = local_paca->kvm_hstate.host_ipi;
	if (host_ipi)
		return 1;

	/* Now read the interrupt from the ICP */
	xics_phys = local_paca->kvm_hstate.xics_phys;
	rc = 0;
	if (!xics_phys)
		rc = opal_int_get_xirr(&xirr, false);
	else
		xirr = __raw_rm_readl(xics_phys + XICS_XIRR);
	if (rc < 0)
		return 1;

	/*
	 * Save XIRR for later. Since we get control in reverse endian
	 * on LE systems, save it byte reversed and fetch it back in
	 * host endian. Note that xirr is the value read from the
	 * XIRR register, while h_xirr is the host endian version.
	 */
	h_xirr = be32_to_cpu(xirr);
	local_paca->kvm_hstate.saved_xirr = h_xirr;
	xisr = h_xirr & 0xffffff;
	/*
	 * Ensure that the store/load complete to guarantee all side
	 * effects of loading from XIRR have completed.
	 */
	smp_mb();

	/* if nothing pending in the ICP */
	if (!xisr)
		return 0;

	/* We found something in the ICP...
	 *
	 * If it is an IPI, clear the MFRR and EOI it.
	 */
	if (xisr == XICS_IPI) {
		rc = 0;
		if (xics_phys) {
			__raw_rm_writeb(0xff, xics_phys + XICS_MFRR);
			__raw_rm_writel(xirr, xics_phys + XICS_XIRR);
		} else {
			opal_int_set_mfrr(hard_smp_processor_id(), 0xff);
			rc = opal_int_eoi(h_xirr);
		}
		/* If rc > 0, there is another interrupt pending */
		*again = rc > 0;

		/*
		 * Need to ensure side effects of above stores
		 * complete before proceeding.
		 */
		smp_mb();

		/*
		 * We need to re-check host IPI now in case it got set in the
		 * meantime. If it's clear, we bounce the interrupt to the
		 * guest.
		 */
		host_ipi = local_paca->kvm_hstate.host_ipi;
		if (unlikely(host_ipi != 0)) {
			/* We raced with the host,
			 * we need to resend that IPI, bummer
			 */
			if (xics_phys)
				__raw_rm_writeb(IPI_PRIORITY,
						xics_phys + XICS_MFRR);
			else
				opal_int_set_mfrr(hard_smp_processor_id(),
						  IPI_PRIORITY);
			/* Let side effects complete */
			smp_mb();
			return 1;
		}

		/* OK, it's an IPI for us */
		local_paca->kvm_hstate.saved_xirr = 0;
		return -1;
	}

	return kvmppc_check_passthru(xisr, xirr, again);
}

#ifdef CONFIG_KVM_XICS
static inline bool is_rm(void)
{
	return !(mfmsr() & MSR_DR);
}
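/*
 * Each hcall handler below follows the same dispatch pattern: with XIVE
 * enabled, calls arriving in real mode (MSR_DR clear) go to the
 * built-in xive_rm_* variants, while virtual-mode calls go through the
 * __xive_vm_* pointers populated by the XIVE module at the top of this
 * file (returning H_NOT_AVAILABLE if that module isn't loaded); without
 * XIVE, everything falls back to the XICS implementations.
 */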
unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)
{
	if (xive_enabled()) {
		if (is_rm())
			return xive_rm_h_xirr(vcpu);
		if (unlikely(!__xive_vm_h_xirr))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_xirr(vcpu);
	} else
		return xics_rm_h_xirr(vcpu);
}

unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu)
{
	vcpu->arch.gpr[5] = get_tb();
	if (xive_enabled()) {
		if (is_rm())
			return xive_rm_h_xirr(vcpu);
		if (unlikely(!__xive_vm_h_xirr))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_xirr(vcpu);
	} else
		return xics_rm_h_xirr(vcpu);
}

unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
{
	if (xive_enabled()) {
		if (is_rm())
			return xive_rm_h_ipoll(vcpu, server);
		if (unlikely(!__xive_vm_h_ipoll))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_ipoll(vcpu, server);
	} else
		return H_TOO_HARD;
}

int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
		    unsigned long mfrr)
{
	if (xive_enabled()) {
		if (is_rm())
			return xive_rm_h_ipi(vcpu, server, mfrr);
		if (unlikely(!__xive_vm_h_ipi))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_ipi(vcpu, server, mfrr);
	} else
		return xics_rm_h_ipi(vcpu, server, mfrr);
}

int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
{
	if (xive_enabled()) {
		if (is_rm())
			return xive_rm_h_cppr(vcpu, cppr);
		if (unlikely(!__xive_vm_h_cppr))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_cppr(vcpu, cppr);
	} else
		return xics_rm_h_cppr(vcpu, cppr);
}

int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
{
	if (xive_enabled()) {
		if (is_rm())
			return xive_rm_h_eoi(vcpu, xirr);
		if (unlikely(!__xive_vm_h_eoi))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_eoi(vcpu, xirr);
	} else
		return xics_rm_h_eoi(vcpu, xirr);
}
#endif /* CONFIG_KVM_XICS */