/*
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/cpu.h>
#include <linux/kvm_host.h>
#include <linux/preempt.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/sizes.h>
#include <linux/cma.h>
#include <linux/bitops.h>

#include <asm/cputable.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/archrandom.h>
#include <asm/xics.h>
#include <asm/xive.h>
#include <asm/dbell.h>
#include <asm/cputhreads.h>
#include <asm/io.h>
#include <asm/opal.h>
#include <asm/smp.h>

#define KVM_CMA_CHUNK_ORDER	18

/*
 * Hash page table alignment on newer cpus (CPU_FTR_ARCH_206)
 * should be a power of 2.
 */
#define HPT_ALIGN_PAGES		((1 << 18) >> PAGE_SHIFT) /* 256k */
/*
 * By default we reserve 5% of memory for hash pagetable allocation.
 */
static unsigned long kvm_cma_resv_ratio = 5;

static struct cma *kvm_cma;

static int __init early_parse_kvm_cma_resv(char *p)
{
	pr_debug("%s(%s)\n", __func__, p);
	if (!p)
		return -EINVAL;
	return kstrtoul(p, 0, &kvm_cma_resv_ratio);
}
early_param("kvm_cma_resv_ratio", early_parse_kvm_cma_resv);

struct page *kvm_alloc_hpt_cma(unsigned long nr_pages)
{
	VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);

	return cma_alloc(kvm_cma, nr_pages, order_base_2(HPT_ALIGN_PAGES),
			 GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(kvm_alloc_hpt_cma);

void kvm_free_hpt_cma(struct page *page, unsigned long nr_pages)
{
	cma_release(kvm_cma, page, nr_pages);
}
EXPORT_SYMBOL_GPL(kvm_free_hpt_cma);

/**
 * kvm_cma_reserve() - reserve area for kvm hash pagetable
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch specific code once the memblock allocator
 * has been activated and all other subsystems have already allocated/reserved
 * memory.
 */
void __init kvm_cma_reserve(void)
{
	unsigned long align_size;
	struct memblock_region *reg;
	phys_addr_t selected_size = 0;

	/*
	 * We need CMA reservation only when we are in HV mode
	 */
	if (!cpu_has_feature(CPU_FTR_HVMODE))
		return;
	/*
	 * We cannot use memblock_phys_mem_size() here, because
	 * memblock_analyze() has not been called yet.
	 */
	for_each_memblock(memory, reg)
		selected_size += memblock_region_memory_end_pfn(reg) -
				 memblock_region_memory_base_pfn(reg);

	selected_size = (selected_size * kvm_cma_resv_ratio / 100) << PAGE_SHIFT;
	if (selected_size) {
		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
			 (unsigned long)selected_size / SZ_1M);
		align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;
		cma_declare_contiguous(0, selected_size, 0, align_size,
				       KVM_CMA_CHUNK_ORDER - PAGE_SHIFT, false,
				       &kvm_cma);
	}
}

/*
 * Real-mode H_CONFER implementation.
 * We check if we are the only vcpu out of this virtual core
 * still running in the guest and not ceded. If so, we pop up
 * to the virtual-mode implementation; if not, just return to
 * the guest.
 */
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
			    unsigned int yield_count)
{
	struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
	int ptid = local_paca->kvm_hstate.ptid;
	int threads_running;
	int threads_ceded;
	int threads_conferring;
	u64 stop = get_tb() + 10 * tb_ticks_per_usec;
	int rv = H_SUCCESS; /* => don't yield */

	set_bit(ptid, &vc->conferring_threads);
	while ((get_tb() < stop) && !VCORE_IS_EXITING(vc)) {
		threads_running = VCORE_ENTRY_MAP(vc);
		threads_ceded = vc->napping_threads;
		threads_conferring = vc->conferring_threads;
		if ((threads_ceded | threads_conferring) == threads_running) {
			rv = H_TOO_HARD; /* => do yield */
			break;
		}
	}
	clear_bit(ptid, &vc->conferring_threads);
	return rv;
}

/*
 * When running HV mode KVM we need to block certain operations while KVM VMs
 * exist in the system. We use a counter of VMs to track this.
 *
 * One of the operations we need to block is onlining of secondaries, so we
 * protect hv_vm_count with get/put_online_cpus().
 */
static atomic_t hv_vm_count;

void kvm_hv_vm_activated(void)
{
	get_online_cpus();
	atomic_inc(&hv_vm_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_activated);

void kvm_hv_vm_deactivated(void)
{
	get_online_cpus();
	atomic_dec(&hv_vm_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_deactivated);

bool kvm_hv_mode_active(void)
{
	return atomic_read(&hv_vm_count) != 0;
}

extern int hcall_real_table[], hcall_real_table_end[];

int kvmppc_hcall_impl_hv_realmode(unsigned long cmd)
{
	cmd /= 4;
	if (cmd < hcall_real_table_end - hcall_real_table &&
	    hcall_real_table[cmd])
		return 1;

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_hcall_impl_hv_realmode);

int kvmppc_hwrng_present(void)
{
	return powernv_hwrng_present();
}
EXPORT_SYMBOL_GPL(kvmppc_hwrng_present);

long kvmppc_h_random(struct kvm_vcpu *vcpu)
{
	if (powernv_get_random_real_mode(&vcpu->arch.gpr[4]))
		return H_SUCCESS;

	return H_HARDWARE;
}

/*
 * Send an interrupt or message to another CPU.
 * The caller needs to include any barrier needed to order writes
 * to memory vs. the IPI/message.
 */
void kvmhv_rm_send_ipi(int cpu)
{
	void __iomem *xics_phys;
	unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);

	/* On POWER9 we can use msgsnd for any destination cpu. */
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		msg |= get_hard_smp_processor_id(cpu);
		__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
		return;
	}
	/* On POWER8 for IPIs to threads in the same core, use msgsnd. */
	if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
	    cpu_first_thread_sibling(cpu) ==
	    cpu_first_thread_sibling(raw_smp_processor_id())) {
		msg |= cpu_thread_in_core(cpu);
		__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
		return;
	}

	/* We should never reach this */
	if (WARN_ON_ONCE(xive_enabled()))
		return;

	/* Else poke the target with an IPI */
	xics_phys = paca[cpu].kvm_hstate.xics_phys;
	if (xics_phys)
		__raw_rm_writeb(IPI_PRIORITY, xics_phys + XICS_MFRR);
	else
		opal_int_set_mfrr(get_hard_smp_processor_id(cpu), IPI_PRIORITY);
}

/*
 * The following functions are called from the assembly code
 * in book3s_hv_rmhandlers.S.
 */
static void kvmhv_interrupt_vcore(struct kvmppc_vcore *vc, int active)
{
	int cpu = vc->pcpu;

	/* Order setting of exit map vs. msgsnd/IPI */
	smp_mb();
	for (; active; active >>= 1, ++cpu)
		if (active & 1)
			kvmhv_rm_send_ipi(cpu);
}

void kvmhv_commence_exit(int trap)
{
	struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
	int ptid = local_paca->kvm_hstate.ptid;
	struct kvm_split_mode *sip = local_paca->kvm_hstate.kvm_split_mode;
	int me, ee, i;

	/* Set our bit in the threads-exiting-guest map in the 0xff00
	   bits of vcore->entry_exit_map */
	me = 0x100 << ptid;
	do {
		ee = vc->entry_exit_map;
	} while (cmpxchg(&vc->entry_exit_map, ee, ee | me) != ee);

	/* Are we the first here? */
	if ((ee >> 8) != 0)
		return;

	/*
	 * Trigger the other threads in this vcore to exit the guest.
	 * If this is a hypervisor decrementer interrupt then they
	 * will be already on their way out of the guest.
	 */
	if (trap != BOOK3S_INTERRUPT_HV_DECREMENTER)
		kvmhv_interrupt_vcore(vc, ee & ~(1 << ptid));

	/*
	 * If we are doing dynamic micro-threading, interrupt the other
	 * subcores to pull them out of their guests too.
	 */
	if (!sip)
		return;

	for (i = 0; i < MAX_SUBCORES; ++i) {
		vc = sip->master_vcs[i];
		if (!vc)
			break;
		do {
			ee = vc->entry_exit_map;
			/* Already asked to exit? */
			if ((ee >> 8) != 0)
				break;
		} while (cmpxchg(&vc->entry_exit_map, ee,
				 ee | VCORE_EXIT_REQ) != ee);
		if ((ee >> 8) == 0)
			kvmhv_interrupt_vcore(vc, ee);
	}
}

struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;
EXPORT_SYMBOL_GPL(kvmppc_host_rm_ops_hv);

#ifdef CONFIG_KVM_XICS
static struct kvmppc_irq_map *get_irqmap(struct kvmppc_passthru_irqmap *pimap,
					 u32 xisr)
{
	int i;

	/*
	 * We access the mapped array here without a lock. That
	 * is safe because we never reduce the number of entries
	 * in the array and we never change the v_hwirq field of
	 * an entry once it is set.
	 *
	 * We have also carefully ordered the stores in the writer
	 * and the loads here in the reader, so that if we find a matching
	 * hwirq here, the associated GSI and irq_desc fields are valid.
	 */
	for (i = 0; i < pimap->n_mapped; i++) {
		if (xisr == pimap->mapped[i].r_hwirq) {
			/*
			 * Order subsequent reads in the caller to serialize
			 * with the writer.
			 */
			smp_rmb();
			return &pimap->mapped[i];
		}
	}
	return NULL;
}

/*
 * If we have an interrupt that's not an IPI, check if we have a
 * passthrough adapter and if so, check if this external interrupt
 * is for the adapter.
 * We will attempt to deliver the IRQ directly to the target VCPU's
 * ICP, the virtual ICP (based on affinity - the xive value in ICS).
 *
 * If the delivery fails or if this is not for a passthrough adapter,
 * return to the host to handle this interrupt. We earlier
 * saved a copy of the XIRR in the PACA; it will be picked up by
 * the host ICP driver.
 */
static int kvmppc_check_passthru(u32 xisr, __be32 xirr, bool *again)
{
	struct kvmppc_passthru_irqmap *pimap;
	struct kvmppc_irq_map *irq_map;
	struct kvm_vcpu *vcpu;

	vcpu = local_paca->kvm_hstate.kvm_vcpu;
	if (!vcpu)
		return 1;
	pimap = kvmppc_get_passthru_irqmap(vcpu->kvm);
	if (!pimap)
		return 1;
	irq_map = get_irqmap(pimap, xisr);
	if (!irq_map)
		return 1;

	/* We're handling this interrupt, generic code doesn't need to */
	local_paca->kvm_hstate.saved_xirr = 0;

	return kvmppc_deliver_irq_passthru(vcpu, xirr, irq_map, pimap, again);
}

#else
static inline int kvmppc_check_passthru(u32 xisr, __be32 xirr, bool *again)
{
	return 1;
}
#endif

/*
 * Determine what sort of external interrupt is pending (if any).
 * Returns:
 *	0 if no interrupt is pending
 *	1 if an interrupt is pending that needs to be handled by the host
 *	2 if a passthrough interrupt needs completion in the host
 *	-1 if there was a guest wakeup IPI (which has now been cleared)
 *	-2 if there is a PCI passthrough external interrupt that was handled
 */
static long kvmppc_read_one_intr(bool *again);

long kvmppc_read_intr(void)
{
	long ret = 0;
	long rc;
	bool again;

	if (xive_enabled())
		return 1;

	do {
		again = false;
		rc = kvmppc_read_one_intr(&again);
		if (rc && (ret == 0 || rc > ret))
			ret = rc;
	} while (again);
	return ret;
}

static long kvmppc_read_one_intr(bool *again)
{
	void __iomem *xics_phys;
	u32 h_xirr;
	__be32 xirr;
	u32 xisr;
	u8 host_ipi;
	int64_t rc;

	/* see if a host IPI is pending */
	host_ipi = local_paca->kvm_hstate.host_ipi;
	if (host_ipi)
		return 1;

	/* Now read the interrupt from the ICP */
	xics_phys = local_paca->kvm_hstate.xics_phys;
	rc = 0;
	if (!xics_phys)
		rc = opal_int_get_xirr(&xirr, false);
	else
		xirr = __raw_rm_readl(xics_phys + XICS_XIRR);
	if (rc < 0)
		return 1;

	/*
	 * Save XIRR for later. Since we get control in reverse endian
	 * on LE systems, save it byte reversed and fetch it back in
	 * host endian. Note that xirr is the value read from the
	 * XIRR register, while h_xirr is the host endian version.
	 */
	h_xirr = be32_to_cpu(xirr);
	local_paca->kvm_hstate.saved_xirr = h_xirr;
	xisr = h_xirr & 0xffffff;
	/*
	 * Ensure that the store/load complete to guarantee all side
	 * effects of loading from XIRR have completed
	 */
	smp_mb();

	/* if nothing pending in the ICP */
	if (!xisr)
		return 0;

	/* We found something in the ICP...
	 *
	 * If it is an IPI, clear the MFRR and EOI it.
	 */
	if (xisr == XICS_IPI) {
		rc = 0;
		if (xics_phys) {
			__raw_rm_writeb(0xff, xics_phys + XICS_MFRR);
			__raw_rm_writel(xirr, xics_phys + XICS_XIRR);
		} else {
			opal_int_set_mfrr(hard_smp_processor_id(), 0xff);
			rc = opal_int_eoi(h_xirr);
		}
		/* If rc > 0, there is another interrupt pending */
		*again = rc > 0;

		/*
		 * Need to ensure side effects of above stores
		 * complete before proceeding.
		 */
		smp_mb();

		/*
		 * We need to re-check host IPI now in case it got set in the
		 * meantime. If it's clear, we bounce the interrupt to the
		 * guest
		 */
		host_ipi = local_paca->kvm_hstate.host_ipi;
		if (unlikely(host_ipi != 0)) {
			/* We raced with the host,
			 * we need to resend that IPI, bummer
			 */
			if (xics_phys)
				__raw_rm_writeb(IPI_PRIORITY,
						xics_phys + XICS_MFRR);
			else
				opal_int_set_mfrr(hard_smp_processor_id(),
						  IPI_PRIORITY);
			/* Let side effects complete */
			smp_mb();
			return 1;
		}

		/* OK, it's an IPI for us */
		local_paca->kvm_hstate.saved_xirr = 0;
		return -1;
	}

	return kvmppc_check_passthru(xisr, xirr, again);
}
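
/*
 * Illustrative sketch, not part of the original file: one plausible way a
 * caller (for example the HPT allocation path) could use kvm_alloc_hpt_cma()
 * and kvm_free_hpt_cma() together with the CMA region reserved by
 * kvm_cma_reserve() at boot. The example_* names are assumptions made purely
 * for illustration; the block is compiled out.
 */
#if 0
/* Allocate a 2^order-byte HPT from the CMA region, or return NULL on failure */
static struct page *example_alloc_hpt(unsigned long order)
{
	/* order is log2 of the HPT size in bytes; convert it to a page count */
	unsigned long nr_pages = 1UL << (order - PAGE_SHIFT);

	return kvm_alloc_hpt_cma(nr_pages);
}

/* Release an HPT previously obtained from example_alloc_hpt() */
static void example_free_hpt(struct page *page, unsigned long order)
{
	kvm_free_hpt_cma(page, 1UL << (order - PAGE_SHIFT));
}
#endif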