/*
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/cpu.h>
#include <linux/kvm_host.h>
#include <linux/preempt.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/sizes.h>
#include <linux/cma.h>
#include <linux/bitops.h>

#include <asm/cputable.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/archrandom.h>
#include <asm/xics.h>
#include <asm/dbell.h>
#include <asm/cputhreads.h>
#include <asm/io.h>
#include <asm/asm-prototypes.h>
#include <asm/opal.h>
#include <asm/smp.h>

#define KVM_CMA_CHUNK_ORDER	18

/*
 * Hash page table alignment on newer cpus (CPU_FTR_ARCH_206)
 * should be a power of 2.
 */
#define HPT_ALIGN_PAGES		((1 << 18) >> PAGE_SHIFT) /* 256k */
/*
 * By default we reserve 5% of memory for hash pagetable allocation.
 */
static unsigned long kvm_cma_resv_ratio = 5;

static struct cma *kvm_cma;

static int __init early_parse_kvm_cma_resv(char *p)
{
	pr_debug("%s(%s)\n", __func__, p);
	if (!p)
		return -EINVAL;
	return kstrtoul(p, 0, &kvm_cma_resv_ratio);
}
early_param("kvm_cma_resv_ratio", early_parse_kvm_cma_resv);

struct page *kvm_alloc_hpt(unsigned long nr_pages)
{
	VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);

	return cma_alloc(kvm_cma, nr_pages, order_base_2(HPT_ALIGN_PAGES));
}
EXPORT_SYMBOL_GPL(kvm_alloc_hpt);

void kvm_release_hpt(struct page *page, unsigned long nr_pages)
{
	cma_release(kvm_cma, page, nr_pages);
}
EXPORT_SYMBOL_GPL(kvm_release_hpt);

/**
 * kvm_cma_reserve() - reserve area for kvm hash pagetable
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the memblock allocator
 * has been activated and all other subsystems have already allocated/reserved
 * memory.
 */
void __init kvm_cma_reserve(void)
{
	unsigned long align_size;
	struct memblock_region *reg;
	phys_addr_t selected_size = 0;

	/*
	 * We need CMA reservation only when we are in HV mode
	 */
	if (!cpu_has_feature(CPU_FTR_HVMODE))
		return;
	/*
	 * We cannot use memblock_phys_mem_size() here, because
	 * memblock_analyze() has not been called yet.
	 */
	for_each_memblock(memory, reg)
		selected_size += memblock_region_memory_end_pfn(reg) -
				 memblock_region_memory_base_pfn(reg);

	selected_size = (selected_size * kvm_cma_resv_ratio / 100) << PAGE_SHIFT;
	if (selected_size) {
		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
			 (unsigned long)selected_size / SZ_1M);
		align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;
		cma_declare_contiguous(0, selected_size, 0, align_size,
			KVM_CMA_CHUNK_ORDER - PAGE_SHIFT, false, &kvm_cma);
	}
}

/*
 * Real-mode H_CONFER implementation.
 * We check if we are the only vcpu out of this virtual core
 * still running in the guest and not ceded. If so, we pop up
 * to the virtual-mode implementation; if not, just return to
 * the guest.
 */
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
			    unsigned int yield_count)
{
	struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
	int ptid = local_paca->kvm_hstate.ptid;
	int threads_running;
	int threads_ceded;
	int threads_conferring;
	u64 stop = get_tb() + 10 * tb_ticks_per_usec;
	int rv = H_SUCCESS; /* => don't yield */

	set_bit(ptid, &vc->conferring_threads);
	while ((get_tb() < stop) && !VCORE_IS_EXITING(vc)) {
		threads_running = VCORE_ENTRY_MAP(vc);
		threads_ceded = vc->napping_threads;
		threads_conferring = vc->conferring_threads;
		if ((threads_ceded | threads_conferring) == threads_running) {
			rv = H_TOO_HARD; /* => do yield */
			break;
		}
	}
	clear_bit(ptid, &vc->conferring_threads);
	return rv;
}

/*
 * When running HV mode KVM we need to block certain operations while KVM VMs
 * exist in the system. We use a counter of VMs to track this.
 *
 * One of the operations we need to block is onlining of secondaries, so we
 * protect hv_vm_count with get/put_online_cpus().
 */
static atomic_t hv_vm_count;

void kvm_hv_vm_activated(void)
{
	get_online_cpus();
	atomic_inc(&hv_vm_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_activated);

void kvm_hv_vm_deactivated(void)
{
	get_online_cpus();
	atomic_dec(&hv_vm_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_deactivated);

bool kvm_hv_mode_active(void)
{
	return atomic_read(&hv_vm_count) != 0;
}

extern int hcall_real_table[], hcall_real_table_end[];

int kvmppc_hcall_impl_hv_realmode(unsigned long cmd)
{
	cmd /= 4;		/* hypercall numbers are multiples of 4 */
	if (cmd < hcall_real_table_end - hcall_real_table &&
	    hcall_real_table[cmd])
		return 1;

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_hcall_impl_hv_realmode);

int kvmppc_hwrng_present(void)
{
	return powernv_hwrng_present();
}
EXPORT_SYMBOL_GPL(kvmppc_hwrng_present);

long kvmppc_h_random(struct kvm_vcpu *vcpu)
{
	if (powernv_get_random_real_mode(&vcpu->arch.gpr[4]))
		return H_SUCCESS;

	return H_HARDWARE;
}

static inline void rm_writeb(unsigned long paddr, u8 val)
{
	__asm__ __volatile__("stbcix %0,0,%1"
		: : "r" (val), "r" (paddr) : "memory");
}

/*
 * Send an interrupt or message to another CPU.
 * This can only be called in real mode.
 * The caller needs to include any barrier needed to order writes
 * to memory vs. the IPI/message.
 */
void kvmhv_rm_send_ipi(int cpu)
{
	unsigned long xics_phys;
	unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);

	/* On POWER9 we can use msgsnd for any destination cpu. */
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		msg |= get_hard_smp_processor_id(cpu);
		__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
		return;
	}
	/* On POWER8 for IPIs to threads in the same core, use msgsnd. */
	if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
	    cpu_first_thread_sibling(cpu) ==
	    cpu_first_thread_sibling(raw_smp_processor_id())) {
		msg |= cpu_thread_in_core(cpu);
		__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
		return;
	}

	/* Else poke the target with an IPI */
	xics_phys = paca[cpu].kvm_hstate.xics_phys;
	if (xics_phys)
		rm_writeb(xics_phys + XICS_MFRR, IPI_PRIORITY);
	else
		opal_rm_int_set_mfrr(get_hard_smp_processor_id(cpu),
				     IPI_PRIORITY);
}

/*
 * The following functions are called from the assembly code
 * in book3s_hv_rmhandlers.S.
 */
static void kvmhv_interrupt_vcore(struct kvmppc_vcore *vc, int active)
{
	int cpu = vc->pcpu;

	/* Order setting of exit map vs. msgsnd/IPI */
	smp_mb();
	for (; active; active >>= 1, ++cpu)
		if (active & 1)
			kvmhv_rm_send_ipi(cpu);
}

void kvmhv_commence_exit(int trap)
{
	struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
	int ptid = local_paca->kvm_hstate.ptid;
	struct kvm_split_mode *sip = local_paca->kvm_hstate.kvm_split_mode;
	int me, ee, i;

	/* Set our bit in the threads-exiting-guest map in the 0xff00
	   bits of vcore->entry_exit_map */
	me = 0x100 << ptid;
	do {
		ee = vc->entry_exit_map;
	} while (cmpxchg(&vc->entry_exit_map, ee, ee | me) != ee);

	/* Are we the first here? */
	if ((ee >> 8) != 0)
		return;

	/*
	 * Trigger the other threads in this vcore to exit the guest.
	 * If this is a hypervisor decrementer interrupt then they
	 * will be already on their way out of the guest.
	 */
	if (trap != BOOK3S_INTERRUPT_HV_DECREMENTER)
		kvmhv_interrupt_vcore(vc, ee & ~(1 << ptid));

	/*
	 * If we are doing dynamic micro-threading, interrupt the other
	 * subcores to pull them out of their guests too.
	 */
	if (!sip)
		return;

	for (i = 0; i < MAX_SUBCORES; ++i) {
		vc = sip->master_vcs[i];
		if (!vc)
			break;
		do {
			ee = vc->entry_exit_map;
			/* Already asked to exit? */
			if ((ee >> 8) != 0)
				break;
		} while (cmpxchg(&vc->entry_exit_map, ee,
				 ee | VCORE_EXIT_REQ) != ee);
		if ((ee >> 8) == 0)
			kvmhv_interrupt_vcore(vc, ee);
	}
}

struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;
EXPORT_SYMBOL_GPL(kvmppc_host_rm_ops_hv);

#ifdef CONFIG_KVM_XICS
static struct kvmppc_irq_map *get_irqmap(struct kvmppc_passthru_irqmap *pimap,
					 u32 xisr)
{
	int i;

	/*
	 * We access the mapped array here without a lock. That
	 * is safe because we never reduce the number of entries
	 * in the array and we never change the v_hwirq field of
	 * an entry once it is set.
	 *
	 * We have also carefully ordered the stores in the writer
	 * and the loads here in the reader, so that if we find a matching
	 * hwirq here, the associated GSI and irq_desc fields are valid.
	 */
	for (i = 0; i < pimap->n_mapped; i++) {
		if (xisr == pimap->mapped[i].r_hwirq) {
			/*
			 * Order subsequent reads in the caller to serialize
			 * with the writer.
			 */
			smp_rmb();
			return &pimap->mapped[i];
		}
	}
	return NULL;
}

/*
 * If we have an interrupt that's not an IPI, check if we have a
 * passthrough adapter and if so, check if this external interrupt
 * is for the adapter.
 * We will attempt to deliver the IRQ directly to the target VCPU's
 * ICP, the virtual ICP (based on affinity - the xive value in ICS).
 *
 * If the delivery fails or if this is not for a passthrough adapter,
 * return to the host to handle this interrupt. We earlier
 * saved a copy of the XIRR in the PACA; it will be picked up by
 * the host ICP driver.
 */
static int kvmppc_check_passthru(u32 xisr, __be32 xirr, bool *again)
{
	struct kvmppc_passthru_irqmap *pimap;
	struct kvmppc_irq_map *irq_map;
	struct kvm_vcpu *vcpu;

	vcpu = local_paca->kvm_hstate.kvm_vcpu;
	if (!vcpu)
		return 1;
	pimap = kvmppc_get_passthru_irqmap(vcpu->kvm);
	if (!pimap)
		return 1;
	irq_map = get_irqmap(pimap, xisr);
	if (!irq_map)
		return 1;

	/* We're handling this interrupt, generic code doesn't need to */
	local_paca->kvm_hstate.saved_xirr = 0;

	return kvmppc_deliver_irq_passthru(vcpu, xirr, irq_map, pimap, again);
}

#else
static inline int kvmppc_check_passthru(u32 xisr, __be32 xirr, bool *again)
{
	return 1;
}
#endif

/*
 * Determine what sort of external interrupt is pending (if any).
 * Returns:
 *	0 if no interrupt is pending
 *	1 if an interrupt is pending that needs to be handled by the host
 *	2 Passthrough that needs completion in the host
 *	-1 if there was a guest wakeup IPI (which has now been cleared)
 *	-2 if there is PCI passthrough external interrupt that was handled
 */
static long kvmppc_read_one_intr(bool *again);

long kvmppc_read_intr(void)
{
	long ret = 0;
	long rc;
	bool again;

	do {
		again = false;
		rc = kvmppc_read_one_intr(&again);
		if (rc && (ret == 0 || rc > ret))
			ret = rc;
	} while (again);
	return ret;
}

static long kvmppc_read_one_intr(bool *again)
{
	unsigned long xics_phys;
	u32 h_xirr;
	__be32 xirr;
	u32 xisr;
	u8 host_ipi;
	int64_t rc;

	/* see if a host IPI is pending */
	host_ipi = local_paca->kvm_hstate.host_ipi;
	if (host_ipi)
		return 1;

	/* Now read the interrupt from the ICP */
	xics_phys = local_paca->kvm_hstate.xics_phys;
	if (!xics_phys) {
		/* Use OPAL to read the XIRR */
		rc = opal_rm_int_get_xirr(&xirr, false);
		if (rc < 0)
			return 1;
	} else {
		xirr = _lwzcix(xics_phys + XICS_XIRR);
	}

	/*
	 * Save XIRR for later. Since we get control in reverse endian
	 * on LE systems, save it byte reversed and fetch it back in
	 * host endian. Note that xirr is the value read from the
	 * XIRR register, while h_xirr is the host endian version.
	 */
	h_xirr = be32_to_cpu(xirr);
	local_paca->kvm_hstate.saved_xirr = h_xirr;
	xisr = h_xirr & 0xffffff;
	/*
	 * Ensure that the store/load complete to guarantee all side
	 * effects of loading from XIRR have completed
	 */
	smp_mb();

	/* if nothing pending in the ICP */
	if (!xisr)
		return 0;

	/* We found something in the ICP...
	 *
	 * If it is an IPI, clear the MFRR and EOI it.
	 */
	if (xisr == XICS_IPI) {
		if (xics_phys) {
			_stbcix(xics_phys + XICS_MFRR, 0xff);
			_stwcix(xics_phys + XICS_XIRR, xirr);
		} else {
			opal_rm_int_set_mfrr(hard_smp_processor_id(), 0xff);
			rc = opal_rm_int_eoi(h_xirr);
			/* If rc > 0, there is another interrupt pending */
			*again = rc > 0;
		}

		/*
		 * Need to ensure side effects of above stores
		 * complete before proceeding.
		 */
		smp_mb();

		/*
		 * We need to re-check host IPI now in case it got set in the
		 * meantime.
		 * If it's clear, we bounce the interrupt to the guest.
		 */
		host_ipi = local_paca->kvm_hstate.host_ipi;
		if (unlikely(host_ipi != 0)) {
			/* We raced with the host,
			 * we need to resend that IPI, bummer
			 */
			if (xics_phys)
				_stbcix(xics_phys + XICS_MFRR, IPI_PRIORITY);
			else
				opal_rm_int_set_mfrr(hard_smp_processor_id(),
						     IPI_PRIORITY);
			/* Let side effects complete */
			smp_mb();
			return 1;
		}

		/* OK, it's an IPI for us */
		local_paca->kvm_hstate.saved_xirr = 0;
		return -1;
	}

	return kvmppc_check_passthru(xisr, xirr, again);
}
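
/*
 * Illustrative sketch (hypothetical helpers, kept under #if 0 so they do
 * not affect the build): one way a caller could pair kvm_alloc_hpt() and
 * kvm_release_hpt() to back a hash page table with memory from the CMA
 * region reserved by kvm_cma_reserve().  Here "order" is assumed to be
 * log2 of the HPT size in bytes.
 */
#if 0
static void *example_alloc_hpt(u32 order, struct page **pagep)
{
	unsigned long nr_pages = 1ul << (order - PAGE_SHIFT);
	struct page *page;

	/* kvm_alloc_hpt() aligns the area to HPT_ALIGN_PAGES via cma_alloc() */
	page = kvm_alloc_hpt(nr_pages);
	if (!page)
		return NULL;

	/* CMA pages are not guaranteed to be zeroed; clear the HPT */
	memset(page_address(page), 0, 1ul << order);
	*pagep = page;
	return page_address(page);
}

static void example_free_hpt(struct page *page, u32 order)
{
	kvm_release_hpt(page, 1ul << (order - PAGE_SHIFT));
}
#endif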