/*
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/cpu.h>
#include <linux/kvm_host.h>
#include <linux/preempt.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/sizes.h>
#include <linux/cma.h>
#include <linux/bitops.h>

#include <asm/cputable.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/archrandom.h>
#include <asm/xics.h>
#include <asm/dbell.h>
#include <asm/cputhreads.h>
#include <asm/io.h>
#include <asm/opal.h>
#include <asm/smp.h>

/* With MSR_IR clear, instruction relocation is off: we are in real mode. */
static bool in_realmode(void)
{
	return !(mfmsr() & MSR_IR);
}

#define KVM_CMA_CHUNK_ORDER	18

/*
 * Hash page table alignment on newer CPUs (CPU_FTR_ARCH_206)
 * should be a power of 2.
 */
#define HPT_ALIGN_PAGES		((1 << 18) >> PAGE_SHIFT) /* 256k */
/*
 * By default we reserve 5% of memory for hash pagetable allocation.
 */
static unsigned long kvm_cma_resv_ratio = 5;

static struct cma *kvm_cma;

static int __init early_parse_kvm_cma_resv(char *p)
{
	pr_debug("%s(%s)\n", __func__, p);
	if (!p)
		return -EINVAL;
	return kstrtoul(p, 0, &kvm_cma_resv_ratio);
}
early_param("kvm_cma_resv_ratio", early_parse_kvm_cma_resv);

struct page *kvm_alloc_hpt(unsigned long nr_pages)
{
	VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);

	return cma_alloc(kvm_cma, nr_pages, order_base_2(HPT_ALIGN_PAGES));
}
EXPORT_SYMBOL_GPL(kvm_alloc_hpt);

void kvm_release_hpt(struct page *page, unsigned long nr_pages)
{
	cma_release(kvm_cma, page, nr_pages);
}
EXPORT_SYMBOL_GPL(kvm_release_hpt);

/**
 * kvm_cma_reserve() - reserve area for kvm hash pagetable
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the memblock allocator
 * has been activated and all other subsystems have already allocated/reserved
 * memory.
 */
void __init kvm_cma_reserve(void)
{
	unsigned long align_size;
	struct memblock_region *reg;
	phys_addr_t selected_size = 0;

	/*
	 * We need the CMA reservation only when we are in HV mode.
	 */
	if (!cpu_has_feature(CPU_FTR_HVMODE))
		return;
	/*
	 * We cannot use memblock_phys_mem_size() here, because
	 * memblock_analyze() has not been called yet.
	 */
	for_each_memblock(memory, reg)
		selected_size += memblock_region_memory_end_pfn(reg) -
				 memblock_region_memory_base_pfn(reg);

	selected_size = (selected_size * kvm_cma_resv_ratio / 100) << PAGE_SHIFT;
	if (selected_size) {
		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
			 (unsigned long)selected_size / SZ_1M);
		align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;
		cma_declare_contiguous(0, selected_size, 0, align_size,
			KVM_CMA_CHUNK_ORDER - PAGE_SHIFT, false, &kvm_cma);
	}
}
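
/*
 * Worked example (illustrative numbers, not from the source): with
 * 64 GiB of host memory and the default kvm_cma_resv_ratio of 5,
 * selected_size comes out at 5% of 64 GiB, i.e. ~3.2 GiB, reserved
 * with 256 KiB (HPT_ALIGN_PAGES) alignment and a 256 KiB CMA chunk
 * granularity (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT order per bit).
 * Booting with kvm_cma_resv_ratio=10 would double that to ~6.4 GiB.
 */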

/*
 * Real-mode H_CONFER implementation.
 * We check if we are the only vcpu out of this virtual core
 * still running in the guest and not ceded. If so, we pop up
 * to the virtual-mode implementation; if not, just return to
 * the guest.
 */
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
			    unsigned int yield_count)
{
	struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
	int ptid = local_paca->kvm_hstate.ptid;
	int threads_running;
	int threads_ceded;
	int threads_conferring;
	/* Spin for at most 10 microseconds of timebase */
	u64 stop = get_tb() + 10 * tb_ticks_per_usec;
	int rv = H_SUCCESS; /* => don't yield */

	set_bit(ptid, &vc->conferring_threads);
	while ((get_tb() < stop) && !VCORE_IS_EXITING(vc)) {
		threads_running = VCORE_ENTRY_MAP(vc);
		threads_ceded = vc->napping_threads;
		threads_conferring = vc->conferring_threads;
		if ((threads_ceded | threads_conferring) == threads_running) {
			rv = H_TOO_HARD; /* => do yield */
			break;
		}
	}
	clear_bit(ptid, &vc->conferring_threads);
	return rv;
}

/*
 * When running HV mode KVM we need to block certain operations while KVM VMs
 * exist in the system. We use a counter of VMs to track this.
 *
 * One of the operations we need to block is onlining of secondaries, so we
 * protect hv_vm_count with get/put_online_cpus().
 */
static atomic_t hv_vm_count;

void kvm_hv_vm_activated(void)
{
	get_online_cpus();
	atomic_inc(&hv_vm_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_activated);

void kvm_hv_vm_deactivated(void)
{
	get_online_cpus();
	atomic_dec(&hv_vm_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_deactivated);

bool kvm_hv_mode_active(void)
{
	return atomic_read(&hv_vm_count) != 0;
}

extern int hcall_real_table[], hcall_real_table_end[];

int kvmppc_hcall_impl_hv_realmode(unsigned long cmd)
{
	/* hcall numbers are multiples of 4, so divide to get a table index */
	cmd /= 4;
	if (cmd < hcall_real_table_end - hcall_real_table &&
	    hcall_real_table[cmd])
		return 1;

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_hcall_impl_hv_realmode);

int kvmppc_hwrng_present(void)
{
	return powernv_hwrng_present();
}
EXPORT_SYMBOL_GPL(kvmppc_hwrng_present);

long kvmppc_h_random(struct kvm_vcpu *vcpu)
{
	/* The H_RANDOM result is returned to the guest in GPR4 */
	if (powernv_get_random_real_mode(&vcpu->arch.gpr[4]))
		return H_SUCCESS;

	return H_HARDWARE;
}
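
/*
 * Real-mode MMIO byte store. stbcix performs a caching-inhibited
 * store, so it is safe to use on the XICS presentation registers
 * while data translation (MSR_DR) is off; a normal cacheable store
 * must not be used for MMIO in real mode.
 */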
static inline void rm_writeb(unsigned long paddr, u8 val)
{
	__asm__ __volatile__("stbcix %0,0,%1"
		: : "r" (val), "r" (paddr) : "memory");
}

/*
 * Send an interrupt or message to another CPU.
 * The caller needs to include any barrier needed to order writes
 * to memory vs. the IPI/message.
 */
void kvmhv_rm_send_ipi(int cpu)
{
	unsigned long xics_phys;
	unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);

	/* On POWER9 we can use msgsnd for any destination cpu. */
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		msg |= get_hard_smp_processor_id(cpu);
		__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
		return;
	}
	/* On POWER8 for IPIs to threads in the same core, use msgsnd. */
	if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
	    cpu_first_thread_sibling(cpu) ==
	    cpu_first_thread_sibling(raw_smp_processor_id())) {
		msg |= cpu_thread_in_core(cpu);
		__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
		return;
	}

	/* Else poke the target with an IPI */
	xics_phys = paca[cpu].kvm_hstate.xics_phys;
	if (!in_realmode())
		opal_int_set_mfrr(get_hard_smp_processor_id(cpu), IPI_PRIORITY);
	else if (xics_phys)
		rm_writeb(xics_phys + XICS_MFRR, IPI_PRIORITY);
	else
		opal_rm_int_set_mfrr(get_hard_smp_processor_id(cpu),
				     IPI_PRIORITY);
}

/*
 * The following functions are called from the assembly code
 * in book3s_hv_rmhandlers.S.
 */
static void kvmhv_interrupt_vcore(struct kvmppc_vcore *vc, int active)
{
	int cpu = vc->pcpu;

	/* Order setting of exit map vs. msgsnd/IPI */
	smp_mb();
	for (; active; active >>= 1, ++cpu)
		if (active & 1)
			kvmhv_rm_send_ipi(cpu);
}

void kvmhv_commence_exit(int trap)
{
	struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
	int ptid = local_paca->kvm_hstate.ptid;
	struct kvm_split_mode *sip = local_paca->kvm_hstate.kvm_split_mode;
	int me, ee, i;

	/*
	 * Set our bit in the threads-exiting-guest map in the 0xff00
	 * bits of vcore->entry_exit_map.
	 */
	me = 0x100 << ptid;
	do {
		ee = vc->entry_exit_map;
	} while (cmpxchg(&vc->entry_exit_map, ee, ee | me) != ee);

	/* Are we the first here? */
	if ((ee >> 8) != 0)
		return;

	/*
	 * Trigger the other threads in this vcore to exit the guest.
	 * If this is a hypervisor decrementer interrupt then they
	 * will be already on their way out of the guest.
	 */
	if (trap != BOOK3S_INTERRUPT_HV_DECREMENTER)
		kvmhv_interrupt_vcore(vc, ee & ~(1 << ptid));

	/*
	 * If we are doing dynamic micro-threading, interrupt the other
	 * subcores to pull them out of their guests too.
	 */
	if (!sip)
		return;

	for (i = 0; i < MAX_SUBCORES; ++i) {
		vc = sip->master_vcs[i];
		if (!vc)
			break;
		do {
			ee = vc->entry_exit_map;
			/* Already asked to exit? */
			if ((ee >> 8) != 0)
				break;
		} while (cmpxchg(&vc->entry_exit_map, ee,
				 ee | VCORE_EXIT_REQ) != ee);
		if ((ee >> 8) == 0)
			kvmhv_interrupt_vcore(vc, ee);
	}
}

struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;
EXPORT_SYMBOL_GPL(kvmppc_host_rm_ops_hv);

#ifdef CONFIG_KVM_XICS
static struct kvmppc_irq_map *get_irqmap(struct kvmppc_passthru_irqmap *pimap,
					 u32 xisr)
{
	int i;

	/*
	 * We access the mapped array here without a lock. That
	 * is safe because we never reduce the number of entries
	 * in the array and we never change the v_hwirq field of
	 * an entry once it is set.
	 *
	 * We have also carefully ordered the stores in the writer
	 * and the loads here in the reader, so that if we find a matching
	 * hwirq here, the associated GSI and irq_desc fields are valid.
	 */
	for (i = 0; i < pimap->n_mapped; i++) {
		if (xisr == pimap->mapped[i].r_hwirq) {
			/*
			 * Order subsequent reads in the caller to serialize
			 * with the writer.
			 */
			smp_rmb();
			return &pimap->mapped[i];
		}
	}
	return NULL;
}
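
/*
 * Note on the ordering above (our reading of the scheme, not spelled
 * out in this file): the smp_rmb() in get_irqmap() is assumed to pair
 * with a write barrier in the virtual-mode code that populates
 * pimap->mapped[], which publishes an entry's other fields before its
 * r_hwirq, so a reader that sees a matching r_hwirq sees a complete
 * entry.
 */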

/*
 * If we have an interrupt that's not an IPI, check if we have a
 * passthrough adapter and if so, check if this external interrupt
 * is for the adapter.
 * We will attempt to deliver the IRQ directly to the target VCPU's
 * ICP, the virtual ICP (based on affinity - the xive value in ICS).
 *
 * If the delivery fails or if this is not for a passthrough adapter,
 * return to the host to handle this interrupt. We earlier
 * saved a copy of the XIRR in the PACA; it will be picked up by
 * the host ICP driver.
 */
static int kvmppc_check_passthru(u32 xisr, __be32 xirr, bool *again)
{
	struct kvmppc_passthru_irqmap *pimap;
	struct kvmppc_irq_map *irq_map;
	struct kvm_vcpu *vcpu;

	vcpu = local_paca->kvm_hstate.kvm_vcpu;
	if (!vcpu)
		return 1;
	pimap = kvmppc_get_passthru_irqmap(vcpu->kvm);
	if (!pimap)
		return 1;
	irq_map = get_irqmap(pimap, xisr);
	if (!irq_map)
		return 1;

	/* We're handling this interrupt; generic code doesn't need to */
	local_paca->kvm_hstate.saved_xirr = 0;

	return kvmppc_deliver_irq_passthru(vcpu, xirr, irq_map, pimap, again);
}

#else
static inline int kvmppc_check_passthru(u32 xisr, __be32 xirr, bool *again)
{
	return 1;
}
#endif

/*
 * Determine what sort of external interrupt is pending (if any).
 * Returns:
 *	0 if no interrupt is pending
 *	1 if an interrupt is pending that needs to be handled by the host
 *	2 if there is a passthrough interrupt that needs completion in the host
 *	-1 if there was a guest wakeup IPI (which has now been cleared)
 *	-2 if there is PCI passthrough external interrupt that was handled
 */
static long kvmppc_read_one_intr(bool *again);

long kvmppc_read_intr(void)
{
	long ret = 0;
	long rc;
	bool again;

	do {
		again = false;
		rc = kvmppc_read_one_intr(&again);
		if (rc && (ret == 0 || rc > ret))
			ret = rc;
	} while (again);
	return ret;
}
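
/*
 * A note on the loop above: kvmppc_read_one_intr() sets *again when
 * the OPAL EOI path reports that another interrupt is already pending
 * (and the passthrough delivery path may do likewise), so we keep
 * draining. The "rc > ret" test keeps the highest return code across
 * iterations, which makes the positive "host must handle this" codes
 * take precedence over the negative "already handled" ones.
 */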
453 */ 454 if (xisr == XICS_IPI) { 455 rc = 0; 456 if (!in_realmode()) { 457 opal_int_set_mfrr(hard_smp_processor_id(), 0xff); 458 rc = opal_int_eoi(h_xirr); 459 } else if (xics_phys) { 460 _stbcix(xics_phys + XICS_MFRR, 0xff); 461 _stwcix(xics_phys + XICS_XIRR, xirr); 462 } else { 463 opal_rm_int_set_mfrr(hard_smp_processor_id(), 0xff); 464 rc = opal_rm_int_eoi(h_xirr); 465 } 466 /* If rc > 0, there is another interrupt pending */ 467 *again = rc > 0; 468 469 /* 470 * Need to ensure side effects of above stores 471 * complete before proceeding. 472 */ 473 smp_mb(); 474 475 /* 476 * We need to re-check host IPI now in case it got set in the 477 * meantime. If it's clear, we bounce the interrupt to the 478 * guest 479 */ 480 host_ipi = local_paca->kvm_hstate.host_ipi; 481 if (unlikely(host_ipi != 0)) { 482 /* We raced with the host, 483 * we need to resend that IPI, bummer 484 */ 485 if (!in_realmode()) 486 opal_int_set_mfrr(hard_smp_processor_id(), 487 IPI_PRIORITY); 488 else if (xics_phys) 489 _stbcix(xics_phys + XICS_MFRR, IPI_PRIORITY); 490 else 491 opal_rm_int_set_mfrr(hard_smp_processor_id(), 492 IPI_PRIORITY); 493 /* Let side effects complete */ 494 smp_mb(); 495 return 1; 496 } 497 498 /* OK, it's an IPI for us */ 499 local_paca->kvm_hstate.saved_xirr = 0; 500 return -1; 501 } 502 503 return kvmppc_check_passthru(xisr, xirr, again); 504 } 505