/*
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/cpu.h>
#include <linux/kvm_host.h>
#include <linux/preempt.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/sizes.h>
#include <linux/cma.h>
#include <linux/bitops.h>

#include <asm/cputable.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/archrandom.h>
#include <asm/xics.h>
#include <asm/dbell.h>
#include <asm/cputhreads.h>
#include <asm/io.h>

#define KVM_CMA_CHUNK_ORDER	18

/*
 * Hash page table alignment on newer cpus (CPU_FTR_ARCH_206)
 * should be a power of 2.
 */
#define HPT_ALIGN_PAGES		((1 << 18) >> PAGE_SHIFT) /* 256k */
/*
 * By default we reserve 5% of memory for hash pagetable allocation.
 */
static unsigned long kvm_cma_resv_ratio = 5;

static struct cma *kvm_cma;

static int __init early_parse_kvm_cma_resv(char *p)
{
	pr_debug("%s(%s)\n", __func__, p);
	if (!p)
		return -EINVAL;
	return kstrtoul(p, 0, &kvm_cma_resv_ratio);
}
early_param("kvm_cma_resv_ratio", early_parse_kvm_cma_resv);

struct page *kvm_alloc_hpt(unsigned long nr_pages)
{
	VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);

	return cma_alloc(kvm_cma, nr_pages, order_base_2(HPT_ALIGN_PAGES));
}
EXPORT_SYMBOL_GPL(kvm_alloc_hpt);

void kvm_release_hpt(struct page *page, unsigned long nr_pages)
{
	cma_release(kvm_cma, page, nr_pages);
}
EXPORT_SYMBOL_GPL(kvm_release_hpt);

/**
 * kvm_cma_reserve() - reserve area for kvm hash pagetable
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the memblock allocator
 * has been activated and all other subsystems have already allocated/reserved
 * memory.
 */
void __init kvm_cma_reserve(void)
{
	unsigned long align_size;
	struct memblock_region *reg;
	phys_addr_t selected_size = 0;

	/*
	 * We need CMA reservation only when we are in HV mode
	 */
	if (!cpu_has_feature(CPU_FTR_HVMODE))
		return;
	/*
	 * We cannot use memblock_phys_mem_size() here, because
	 * memblock_analyze() has not been called yet.
	 */
	for_each_memblock(memory, reg)
		selected_size += memblock_region_memory_end_pfn(reg) -
				 memblock_region_memory_base_pfn(reg);

	selected_size = (selected_size * kvm_cma_resv_ratio / 100) << PAGE_SHIFT;
	if (selected_size) {
		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
			 (unsigned long)selected_size / SZ_1M);
		align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;
		cma_declare_contiguous(0, selected_size, 0, align_size,
			KVM_CMA_CHUNK_ORDER - PAGE_SHIFT, false, &kvm_cma);
	}
}
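
/*
 * Worked example of the reservation arithmetic above (illustrative
 * figures, not code): on a machine with 64 GiB of memory and the
 * default kvm_cma_resv_ratio of 5, the loop sums the memblock regions
 * to 64 GiB worth of pages, so selected_size becomes
 * 64 GiB * 5 / 100 = 3.2 GiB. The area is aligned to
 * HPT_ALIGN_PAGES << PAGE_SHIFT = 256 KiB and handed to CMA in
 * 1 << KVM_CMA_CHUNK_ORDER = 256 KiB chunks. The ratio can be changed
 * with kvm_cma_resv_ratio=N on the kernel command line, which
 * early_parse_kvm_cma_resv() above parses.
 */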

/*
 * Real-mode H_CONFER implementation.
 * We check if we are the only vcpu out of this virtual core
 * still running in the guest and not ceded. If so, we pop up
 * to the virtual-mode implementation; if not, just return to
 * the guest.
 */
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
			    unsigned int yield_count)
{
	struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
	int ptid = local_paca->kvm_hstate.ptid;
	int threads_running;
	int threads_ceded;
	int threads_conferring;
	u64 stop = get_tb() + 10 * tb_ticks_per_usec;
	int rv = H_SUCCESS; /* => don't yield */

	set_bit(ptid, &vc->conferring_threads);
	while ((get_tb() < stop) && !VCORE_IS_EXITING(vc)) {
		threads_running = VCORE_ENTRY_MAP(vc);
		threads_ceded = vc->napping_threads;
		threads_conferring = vc->conferring_threads;
		if ((threads_ceded | threads_conferring) == threads_running) {
			rv = H_TOO_HARD; /* => do yield */
			break;
		}
	}
	clear_bit(ptid, &vc->conferring_threads);
	return rv;
}

/*
 * When running HV mode KVM we need to block certain operations while KVM VMs
 * exist in the system. We use a counter of VMs to track this.
 *
 * One of the operations we need to block is onlining of secondaries, so we
 * protect hv_vm_count with get/put_online_cpus().
 */
static atomic_t hv_vm_count;

void kvm_hv_vm_activated(void)
{
	get_online_cpus();
	atomic_inc(&hv_vm_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_activated);

void kvm_hv_vm_deactivated(void)
{
	get_online_cpus();
	atomic_dec(&hv_vm_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_deactivated);

bool kvm_hv_mode_active(void)
{
	return atomic_read(&hv_vm_count) != 0;
}

extern int hcall_real_table[], hcall_real_table_end[];

int kvmppc_hcall_impl_hv_realmode(unsigned long cmd)
{
	cmd /= 4;
	if (cmd < hcall_real_table_end - hcall_real_table &&
	    hcall_real_table[cmd])
		return 1;

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_hcall_impl_hv_realmode);

int kvmppc_hwrng_present(void)
{
	return powernv_hwrng_present();
}
EXPORT_SYMBOL_GPL(kvmppc_hwrng_present);

long kvmppc_h_random(struct kvm_vcpu *vcpu)
{
	if (powernv_get_random_real_mode(&vcpu->arch.gpr[4]))
		return H_SUCCESS;

	return H_HARDWARE;
}

static inline void rm_writeb(unsigned long paddr, u8 val)
{
	__asm__ __volatile__("stbcix %0,0,%1"
		: : "r" (val), "r" (paddr) : "memory");
}

/*
 * Send an interrupt or message to another CPU.
 * This can only be called in real mode.
 * The caller needs to include any barrier needed to order writes
 * to memory vs. the IPI/message.
 */
void kvmhv_rm_send_ipi(int cpu)
{
	unsigned long xics_phys;

	/* On POWER8 for IPIs to threads in the same core, use msgsnd */
	if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
	    cpu_first_thread_sibling(cpu) ==
	    cpu_first_thread_sibling(raw_smp_processor_id())) {
		unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
		msg |= cpu_thread_in_core(cpu);
		__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
		return;
	}

	/* Else poke the target with an IPI */
	xics_phys = paca[cpu].kvm_hstate.xics_phys;
	rm_writeb(xics_phys + XICS_MFRR, IPI_PRIORITY);
}
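
/*
 * Minimal usage sketch (hypothetical caller, not part of this file):
 * a real-mode path that updates shared state and then kicks another
 * thread must supply its own barrier, as noted above:
 *
 *	vc->some_field = new_value;	// hypothetical shared store
 *	smp_mb();			// order the store vs. msgsnd/IPI
 *	kvmhv_rm_send_ipi(cpu);
 *
 * kvmhv_interrupt_vcore() below follows exactly this pattern. The msgsnd
 * path packs the server doorbell type and the target's thread number
 * within the core into one message, so siblings can be addressed without
 * going through XICS; cross-core IPIs instead store IPI_PRIORITY to the
 * target's XICS MFRR via the cache-inhibited rm_writeb(), since normal
 * MMIO accessors are unusable in real mode.
 */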

/*
 * The following functions are called from the assembly code
 * in book3s_hv_rmhandlers.S.
 */
static void kvmhv_interrupt_vcore(struct kvmppc_vcore *vc, int active)
{
	int cpu = vc->pcpu;

	/* Order setting of exit map vs. msgsnd/IPI */
	smp_mb();
	for (; active; active >>= 1, ++cpu)
		if (active & 1)
			kvmhv_rm_send_ipi(cpu);
}

void kvmhv_commence_exit(int trap)
{
	struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
	int ptid = local_paca->kvm_hstate.ptid;
	struct kvm_split_mode *sip = local_paca->kvm_hstate.kvm_split_mode;
	int me, ee, i;

	/*
	 * Set our bit in the threads-exiting-guest map in the 0xff00
	 * bits of vcore->entry_exit_map.
	 */
	me = 0x100 << ptid;
	do {
		ee = vc->entry_exit_map;
	} while (cmpxchg(&vc->entry_exit_map, ee, ee | me) != ee);

	/* Are we the first here? */
	if ((ee >> 8) != 0)
		return;

	/*
	 * Trigger the other threads in this vcore to exit the guest.
	 * If this is a hypervisor decrementer interrupt then they
	 * will be already on their way out of the guest.
	 */
	if (trap != BOOK3S_INTERRUPT_HV_DECREMENTER)
		kvmhv_interrupt_vcore(vc, ee & ~(1 << ptid));

	/*
	 * If we are doing dynamic micro-threading, interrupt the other
	 * subcores to pull them out of their guests too.
	 */
	if (!sip)
		return;

	for (i = 0; i < MAX_SUBCORES; ++i) {
		vc = sip->master_vcs[i];
		if (!vc)
			break;
		do {
			ee = vc->entry_exit_map;
			/* Already asked to exit? */
			if ((ee >> 8) != 0)
				break;
		} while (cmpxchg(&vc->entry_exit_map, ee,
				 ee | VCORE_EXIT_REQ) != ee);
		if ((ee >> 8) == 0)
			kvmhv_interrupt_vcore(vc, ee);
	}
}

struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;
EXPORT_SYMBOL_GPL(kvmppc_host_rm_ops_hv);

#ifdef CONFIG_KVM_XICS
static struct kvmppc_irq_map *get_irqmap(struct kvmppc_passthru_irqmap *pimap,
					 u32 xisr)
{
	int i;

	/*
	 * We access the mapped array here without a lock. That
	 * is safe because we never reduce the number of entries
	 * in the array and we never change the v_hwirq field of
	 * an entry once it is set.
	 *
	 * We have also carefully ordered the stores in the writer
	 * and the loads here in the reader, so that if we find a matching
	 * hwirq here, the associated GSI and irq_desc fields are valid.
	 */
	for (i = 0; i < pimap->n_mapped; i++) {
		if (xisr == pimap->mapped[i].r_hwirq) {
			/*
			 * Order subsequent reads in the caller to serialize
			 * with the writer.
			 */
			smp_rmb();
			return &pimap->mapped[i];
		}
	}
	return NULL;
}
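
/*
 * For illustration only (the real writer lives elsewhere, in the
 * passthrough map setup code): the lock-free scheme above relies on the
 * writer publishing each entry in two ordered steps, conceptually:
 *
 *	map->v_hwirq = guest_gsi;	// fill in payload fields first
 *	map->desc = desc;
 *	smp_wmb();			// order payload vs. r_hwirq
 *	map->r_hwirq = hwirq;		// publish: readers match on this
 *	pimap->n_mapped++;
 *
 * The smp_rmb() in get_irqmap() pairs with that smp_wmb(), so a reader
 * that matches r_hwirq is guaranteed to see valid payload fields.
 */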

/*
 * If we have an interrupt that's not an IPI, check if we have a
 * passthrough adapter and if so, check if this external interrupt
 * is for the adapter.
 * We will attempt to deliver the IRQ directly to the target VCPU's
 * ICP, the virtual ICP (based on affinity - the xive value in ICS).
 *
 * If the delivery fails or if this is not for a passthrough adapter,
 * return to the host to handle this interrupt. We earlier
 * saved a copy of the XIRR in the PACA; it will be picked up by
 * the host ICP driver.
 */
static int kvmppc_check_passthru(u32 xisr, __be32 xirr)
{
	struct kvmppc_passthru_irqmap *pimap;
	struct kvmppc_irq_map *irq_map;
	struct kvm_vcpu *vcpu;

	vcpu = local_paca->kvm_hstate.kvm_vcpu;
	if (!vcpu)
		return 1;
	pimap = kvmppc_get_passthru_irqmap(vcpu->kvm);
	if (!pimap)
		return 1;
	irq_map = get_irqmap(pimap, xisr);
	if (!irq_map)
		return 1;

	/* We're handling this interrupt; generic code doesn't need to */
	local_paca->kvm_hstate.saved_xirr = 0;

	return kvmppc_deliver_irq_passthru(vcpu, xirr, irq_map, pimap);
}

#else
static inline int kvmppc_check_passthru(u32 xisr, __be32 xirr)
{
	return 1;
}
#endif

/*
 * Determine what sort of external interrupt is pending (if any).
 * Returns:
 *	0 if no interrupt is pending
 *	1 if an interrupt is pending that needs to be handled by the host
 *	-1 if there was a guest wakeup IPI (which has now been cleared)
 *	-2 if there is a PCI passthrough external interrupt that was handled
 */

long kvmppc_read_intr(void)
{
	unsigned long xics_phys;
	u32 h_xirr;
	__be32 xirr;
	u32 xisr;
	u8 host_ipi;

	/* see if a host IPI is pending */
	host_ipi = local_paca->kvm_hstate.host_ipi;
	if (host_ipi)
		return 1;

	/* Now read the interrupt from the ICP */
	xics_phys = local_paca->kvm_hstate.xics_phys;
	if (unlikely(!xics_phys))
		return 1;

	/*
	 * Save XIRR for later. Since we get control in reverse endian
	 * on LE systems, save it byte reversed and fetch it back in
	 * host endian. Note that xirr is the value read from the
	 * XIRR register, while h_xirr is the host endian version.
	 */
	xirr = _lwzcix(xics_phys + XICS_XIRR);
	h_xirr = be32_to_cpu(xirr);
	local_paca->kvm_hstate.saved_xirr = h_xirr;
	xisr = h_xirr & 0xffffff;
	/*
	 * Ensure that the store/load complete to guarantee all side
	 * effects of loading from XIRR have completed.
	 */
	smp_mb();

	/* if nothing pending in the ICP */
	if (!xisr)
		return 0;

	/* We found something in the ICP...
	 *
	 * If it is an IPI, clear the MFRR and EOI it.
	 */
	if (xisr == XICS_IPI) {
		_stbcix(xics_phys + XICS_MFRR, 0xff);
		_stwcix(xics_phys + XICS_XIRR, xirr);
		/*
		 * Need to ensure side effects of above stores
		 * complete before proceeding.
		 */
		smp_mb();

		/*
		 * We need to re-check host IPI now in case it got set in the
		 * meantime. If it's clear, we bounce the interrupt to the
		 * guest.
		 */
		host_ipi = local_paca->kvm_hstate.host_ipi;
		if (unlikely(host_ipi != 0)) {
			/* We raced with the host,
			 * we need to resend that IPI, bummer
			 */
			_stbcix(xics_phys + XICS_MFRR, IPI_PRIORITY);
			/* Let side effects complete */
			smp_mb();
			return 1;
		}

		/* OK, it's an IPI for us */
		local_paca->kvm_hstate.saved_xirr = 0;
		return -1;
	}

	return kvmppc_check_passthru(xisr, xirr);
}
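
/*
 * Illustrative note (hypothetical C rendering of the caller, which is
 * actually assembly in book3s_hv_rmhandlers.S): the guest-exit path
 * branches on the value returned above, roughly:
 *
 *	ret = kvmppc_read_intr();
 *	if (ret == 0)		// nothing pending, resume the guest
 *		...
 *	else if (ret == 1)	// exit to host; saved_xirr holds the XIRR
 *		...
 *	else if (ret == -1)	// wakeup IPI, already EOIed; wake the vcpu
 *		...
 *	else if (ret == -2)	// passthrough IRQ delivered to the guest ICP
 *		...
 */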