/*
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/cpu.h>
#include <linux/kvm_host.h>
#include <linux/preempt.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/sizes.h>
#include <linux/cma.h>
#include <linux/bitops.h>

#include <asm/cputable.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/archrandom.h>
#include <asm/xics.h>
#include <asm/dbell.h>
#include <asm/cputhreads.h>
#include <asm/io.h>
#include <asm/asm-prototypes.h>

#define KVM_CMA_CHUNK_ORDER	18

/*
 * Hash page table alignment on newer CPUs (CPU_FTR_ARCH_206)
 * should be a power of 2.
 */
#define HPT_ALIGN_PAGES		((1 << 18) >> PAGE_SHIFT) /* 256k */
/*
 * By default we reserve 5% of memory for hash pagetable allocation.
 */
static unsigned long kvm_cma_resv_ratio = 5;

static struct cma *kvm_cma;

static int __init early_parse_kvm_cma_resv(char *p)
{
        pr_debug("%s(%s)\n", __func__, p);
        if (!p)
                return -EINVAL;
        return kstrtoul(p, 0, &kvm_cma_resv_ratio);
}
early_param("kvm_cma_resv_ratio", early_parse_kvm_cma_resv);

struct page *kvm_alloc_hpt(unsigned long nr_pages)
{
        VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);

        return cma_alloc(kvm_cma, nr_pages, order_base_2(HPT_ALIGN_PAGES));
}
EXPORT_SYMBOL_GPL(kvm_alloc_hpt);

void kvm_release_hpt(struct page *page, unsigned long nr_pages)
{
        cma_release(kvm_cma, page, nr_pages);
}
EXPORT_SYMBOL_GPL(kvm_release_hpt);

/**
 * kvm_cma_reserve() - reserve area for kvm hash pagetable
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the memblock allocator
 * has been activated and all other subsystems have already allocated/reserved
 * memory.
 */
void __init kvm_cma_reserve(void)
{
        unsigned long align_size;
        struct memblock_region *reg;
        phys_addr_t selected_size = 0;

        /*
         * We need the CMA reservation only when we are in HV mode.
         */
        if (!cpu_has_feature(CPU_FTR_HVMODE))
                return;
        /*
         * We cannot use memblock_phys_mem_size() here, because
         * memblock_analyze() has not been called yet.
         */
        for_each_memblock(memory, reg)
                selected_size += memblock_region_memory_end_pfn(reg) -
                                 memblock_region_memory_base_pfn(reg);

        selected_size = (selected_size * kvm_cma_resv_ratio / 100) << PAGE_SHIFT;
        if (selected_size) {
                pr_debug("%s: reserving %ld MiB for global area\n", __func__,
                         (unsigned long)selected_size / SZ_1M);
                align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;
                cma_declare_contiguous(0, selected_size, 0, align_size,
                        KVM_CMA_CHUNK_ORDER - PAGE_SHIFT, false, &kvm_cma);
        }
}
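/*
 * Example: with the default kvm_cma_resv_ratio of 5 and, say, 64 GiB of
 * memblock memory, the calculation above reserves roughly
 * 64 GiB * 5 / 100 = ~3.2 GiB of CMA, aligned to HPT_ALIGN_PAGES (256 KiB).
 * The ratio can be overridden on the kernel command line, e.g.
 * "kvm_cma_resv_ratio=10" for 10%, which is parsed by
 * early_parse_kvm_cma_resv() above.
 */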
/*
 * Real-mode H_CONFER implementation.
 * We check if we are the only vcpu out of this virtual core
 * still running in the guest and not ceded. If so, we pop up
 * to the virtual-mode implementation; if not, just return to
 * the guest.
 */
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
                            unsigned int yield_count)
{
        struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
        int ptid = local_paca->kvm_hstate.ptid;
        int threads_running;
        int threads_ceded;
        int threads_conferring;
        u64 stop = get_tb() + 10 * tb_ticks_per_usec;
        int rv = H_SUCCESS; /* => don't yield */

        set_bit(ptid, &vc->conferring_threads);
        while ((get_tb() < stop) && !VCORE_IS_EXITING(vc)) {
                threads_running = VCORE_ENTRY_MAP(vc);
                threads_ceded = vc->napping_threads;
                threads_conferring = vc->conferring_threads;
                if ((threads_ceded | threads_conferring) == threads_running) {
                        rv = H_TOO_HARD; /* => do yield */
                        break;
                }
        }
        clear_bit(ptid, &vc->conferring_threads);
        return rv;
}

/*
 * When running HV mode KVM we need to block certain operations while KVM VMs
 * exist in the system. We use a counter of VMs to track this.
 *
 * One of the operations we need to block is onlining of secondaries, so we
 * protect hv_vm_count with get/put_online_cpus().
 */
static atomic_t hv_vm_count;

void kvm_hv_vm_activated(void)
{
        get_online_cpus();
        atomic_inc(&hv_vm_count);
        put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_activated);

void kvm_hv_vm_deactivated(void)
{
        get_online_cpus();
        atomic_dec(&hv_vm_count);
        put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_deactivated);

bool kvm_hv_mode_active(void)
{
        return atomic_read(&hv_vm_count) != 0;
}

extern int hcall_real_table[], hcall_real_table_end[];

int kvmppc_hcall_impl_hv_realmode(unsigned long cmd)
{
        cmd /= 4;
        if (cmd < hcall_real_table_end - hcall_real_table &&
            hcall_real_table[cmd])
                return 1;

        return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_hcall_impl_hv_realmode);
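/*
 * PAPR hcall numbers are multiples of 4 (H_REMOVE is 0x04, H_ENTER is 0x08,
 * and so on), so dividing by 4 above turns an hcall number into an index
 * into hcall_real_table, the table of real-mode handler entries generated
 * in book3s_hv_rmhandlers.S.  A non-zero entry means the hcall has a
 * real-mode implementation.
 */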
int kvmppc_hwrng_present(void)
{
        return powernv_hwrng_present();
}
EXPORT_SYMBOL_GPL(kvmppc_hwrng_present);

long kvmppc_h_random(struct kvm_vcpu *vcpu)
{
        if (powernv_get_random_real_mode(&vcpu->arch.gpr[4]))
                return H_SUCCESS;

        return H_HARDWARE;
}

static inline void rm_writeb(unsigned long paddr, u8 val)
{
        __asm__ __volatile__("stbcix %0,0,%1"
                : : "r" (val), "r" (paddr) : "memory");
}

/*
 * Send an interrupt or message to another CPU.
 * This can only be called in real mode.
 * The caller needs to include any barrier needed to order writes
 * to memory vs. the IPI/message.
 */
void kvmhv_rm_send_ipi(int cpu)
{
        unsigned long xics_phys;

        /* On POWER8 for IPIs to threads in the same core, use msgsnd */
        if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
            cpu_first_thread_sibling(cpu) ==
            cpu_first_thread_sibling(raw_smp_processor_id())) {
                unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
                msg |= cpu_thread_in_core(cpu);
                __asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
                return;
        }

        /* Else poke the target with an IPI */
        xics_phys = paca[cpu].kvm_hstate.xics_phys;
        rm_writeb(xics_phys + XICS_MFRR, IPI_PRIORITY);
}

/*
 * The following functions are called from the assembly code
 * in book3s_hv_rmhandlers.S.
 */
static void kvmhv_interrupt_vcore(struct kvmppc_vcore *vc, int active)
{
        int cpu = vc->pcpu;

        /* Order setting of exit map vs. msgsnd/IPI */
        smp_mb();
        for (; active; active >>= 1, ++cpu)
                if (active & 1)
                        kvmhv_rm_send_ipi(cpu);
}

void kvmhv_commence_exit(int trap)
{
        struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
        int ptid = local_paca->kvm_hstate.ptid;
        struct kvm_split_mode *sip = local_paca->kvm_hstate.kvm_split_mode;
        int me, ee, i;

        /* Set our bit in the threads-exiting-guest map in the 0xff00
           bits of vcore->entry_exit_map */
        me = 0x100 << ptid;
        do {
                ee = vc->entry_exit_map;
        } while (cmpxchg(&vc->entry_exit_map, ee, ee | me) != ee);

        /* Are we the first here? */
        if ((ee >> 8) != 0)
                return;

        /*
         * Trigger the other threads in this vcore to exit the guest.
         * If this is a hypervisor decrementer interrupt then they
         * will already be on their way out of the guest.
         */
        if (trap != BOOK3S_INTERRUPT_HV_DECREMENTER)
                kvmhv_interrupt_vcore(vc, ee & ~(1 << ptid));

        /*
         * If we are doing dynamic micro-threading, interrupt the other
         * subcores to pull them out of their guests too.
         */
        if (!sip)
                return;

        for (i = 0; i < MAX_SUBCORES; ++i) {
                vc = sip->master_vcs[i];
                if (!vc)
                        break;
                do {
                        ee = vc->entry_exit_map;
                        /* Already asked to exit? */
                        if ((ee >> 8) != 0)
                                break;
                } while (cmpxchg(&vc->entry_exit_map, ee,
                                 ee | VCORE_EXIT_REQ) != ee);
                if ((ee >> 8) == 0)
                        kvmhv_interrupt_vcore(vc, ee);
        }
}
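/*
 * Note on the encoding used in kvmhv_commence_exit(): the low byte of
 * entry_exit_map is a bitmap of threads that have entered the guest and the
 * 0xff00 byte is a bitmap of threads that have (been asked to) exit, which
 * is why the exiting thread sets "0x100 << ptid" and why "(ee >> 8) != 0"
 * means another thread has already started the exit.
 */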
struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;
EXPORT_SYMBOL_GPL(kvmppc_host_rm_ops_hv);

#ifdef CONFIG_KVM_XICS
static struct kvmppc_irq_map *get_irqmap(struct kvmppc_passthru_irqmap *pimap,
                                         u32 xisr)
{
        int i;

        /*
         * We access the mapped array here without a lock. That
         * is safe because we never reduce the number of entries
         * in the array and we never change the v_hwirq field of
         * an entry once it is set.
         *
         * We have also carefully ordered the stores in the writer
         * and the loads here in the reader, so that if we find a matching
         * hwirq here, the associated GSI and irq_desc fields are valid.
         */
        for (i = 0; i < pimap->n_mapped; i++) {
                if (xisr == pimap->mapped[i].r_hwirq) {
                        /*
                         * Order subsequent reads in the caller to serialize
                         * with the writer.
                         */
                        smp_rmb();
                        return &pimap->mapped[i];
                }
        }
        return NULL;
}

/*
 * If we have an interrupt that's not an IPI, check if we have a
 * passthrough adapter and if so, check if this external interrupt
 * is for the adapter.
 * We will attempt to deliver the IRQ directly to the target VCPU's
 * ICP, the virtual ICP (based on affinity - the xive value in ICS).
 *
 * If the delivery fails or if this is not for a passthrough adapter,
 * return to the host to handle this interrupt. We earlier
 * saved a copy of the XIRR in the PACA; it will be picked up by
 * the host ICP driver.
 */
static int kvmppc_check_passthru(u32 xisr, __be32 xirr)
{
        struct kvmppc_passthru_irqmap *pimap;
        struct kvmppc_irq_map *irq_map;
        struct kvm_vcpu *vcpu;

        vcpu = local_paca->kvm_hstate.kvm_vcpu;
        if (!vcpu)
                return 1;
        pimap = kvmppc_get_passthru_irqmap(vcpu->kvm);
        if (!pimap)
                return 1;
        irq_map = get_irqmap(pimap, xisr);
        if (!irq_map)
                return 1;

        /* We're handling this interrupt, generic code doesn't need to */
        local_paca->kvm_hstate.saved_xirr = 0;

        return kvmppc_deliver_irq_passthru(vcpu, xirr, irq_map, pimap);
}

#else
static inline int kvmppc_check_passthru(u32 xisr, __be32 xirr)
{
        return 1;
}
#endif

/*
 * Determine what sort of external interrupt is pending (if any).
 * Returns:
 *	0 if no interrupt is pending
 *	1 if an interrupt is pending that needs to be handled by the host
 *	2 if there is a passthrough interrupt that needs completion in the host
 *	-1 if there was a guest wakeup IPI (which has now been cleared)
 *	-2 if there is a PCI passthrough external interrupt that was handled
 */

long kvmppc_read_intr(void)
{
        unsigned long xics_phys;
        u32 h_xirr;
        __be32 xirr;
        u32 xisr;
        u8 host_ipi;

        /* see if a host IPI is pending */
        host_ipi = local_paca->kvm_hstate.host_ipi;
        if (host_ipi)
                return 1;

        /* Now read the interrupt from the ICP */
        xics_phys = local_paca->kvm_hstate.xics_phys;
        if (unlikely(!xics_phys))
                return 1;

        /*
         * Save XIRR for later. Since we get control in reverse endian
         * on LE systems, save it byte reversed and fetch it back in
         * host endian. Note that xirr is the value read from the
         * XIRR register, while h_xirr is the host endian version.
         */
        xirr = _lwzcix(xics_phys + XICS_XIRR);
        h_xirr = be32_to_cpu(xirr);
        local_paca->kvm_hstate.saved_xirr = h_xirr;
        xisr = h_xirr & 0xffffff;
        /*
         * Ensure that the store/load complete to guarantee all side
         * effects of loading from XIRR have completed.
         */
        smp_mb();

        /* if nothing is pending in the ICP */
        if (!xisr)
                return 0;

        /* We found something in the ICP...
         *
         * If it is an IPI, clear the MFRR and EOI it.
         */
        if (xisr == XICS_IPI) {
                _stbcix(xics_phys + XICS_MFRR, 0xff);
                _stwcix(xics_phys + XICS_XIRR, xirr);
                /*
                 * Need to ensure side effects of above stores
                 * complete before proceeding.
                 */
                smp_mb();

                /*
                 * We need to re-check the host IPI now in case it got set in
                 * the meantime. If it's clear, we bounce the interrupt to the
                 * guest.
                 */
                host_ipi = local_paca->kvm_hstate.host_ipi;
                if (unlikely(host_ipi != 0)) {
                        /* We raced with the host,
                         * we need to resend that IPI, bummer
                         */
                        _stbcix(xics_phys + XICS_MFRR, IPI_PRIORITY);
                        /* Let side effects complete */
                        smp_mb();
                        return 1;
                }

                /* OK, it's an IPI for us */
                local_paca->kvm_hstate.saved_xirr = 0;
                return -1;
        }

        return kvmppc_check_passthru(xisr, xirr);
}
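/*
 * XICS layout note for the code above: the 32-bit value read from XICS_XIRR
 * combines the CPPR in the top 8 bits with the interrupt source number
 * (XISR) in the low 24 bits, hence the "& 0xffffff" mask.  Storing the saved
 * value back to XICS_XIRR EOIs the interrupt and restores the previous CPPR,
 * and storing 0xff (the least favored priority) to XICS_MFRR clears the IPI
 * request.  All of these accesses use cache-inhibited loads/stores
 * (_lwzcix/_stbcix/_stwcix) because we are running in real mode with the
 * MMU off.
 */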