/*
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/cpu.h>
#include <linux/kvm_host.h>
#include <linux/preempt.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/sizes.h>
#include <linux/cma.h>
#include <linux/bitops.h>

#include <asm/cputable.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/archrandom.h>
#include <asm/xics.h>

#define KVM_CMA_CHUNK_ORDER	18

/*
 * Hash page table alignment on newer CPUs (CPU_FTR_ARCH_206)
 * must be a power of 2.
 */
#define HPT_ALIGN_PAGES		((1 << 18) >> PAGE_SHIFT) /* 256k */
/*
 * By default we reserve 5% of memory for hash pagetable allocation.
 */
static unsigned long kvm_cma_resv_ratio = 5;

static struct cma *kvm_cma;

static int __init early_parse_kvm_cma_resv(char *p)
{
	pr_debug("%s(%s)\n", __func__, p);
	if (!p)
		return -EINVAL;
	return kstrtoul(p, 0, &kvm_cma_resv_ratio);
}
early_param("kvm_cma_resv_ratio", early_parse_kvm_cma_resv);

struct page *kvm_alloc_hpt(unsigned long nr_pages)
{
	VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);

	return cma_alloc(kvm_cma, nr_pages, order_base_2(HPT_ALIGN_PAGES));
}
EXPORT_SYMBOL_GPL(kvm_alloc_hpt);

void kvm_release_hpt(struct page *page, unsigned long nr_pages)
{
	cma_release(kvm_cma, page, nr_pages);
}
EXPORT_SYMBOL_GPL(kvm_release_hpt);
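/*
 * Worked example (illustrative, assuming 4K pages, i.e. PAGE_SHIFT == 12):
 * KVM_CMA_CHUNK_ORDER - PAGE_SHIFT = 18 - 12 = 6, so the VM_BUG_ON in
 * kvm_alloc_hpt() insists that order_base_2(nr_pages) >= 6, i.e. that
 * every allocation covers at least one 256 KiB CMA chunk.  A 16 MiB HPT
 * needs nr_pages = (16 << 20) >> 12 = 4096 pages, and cma_alloc() aligns
 * it to HPT_ALIGN_PAGES = (1 << 18) >> 12 = 64 pages (256 KiB).
 */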
/**
 * kvm_cma_reserve() - reserve area for kvm hash pagetable
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the memblock allocator
 * has been activated and all other subsystems have already allocated/reserved
 * memory.
 */
void __init kvm_cma_reserve(void)
{
	unsigned long align_size;
	struct memblock_region *reg;
	phys_addr_t selected_size = 0;

	/*
	 * We need a CMA reservation only when we are in HV mode.
	 */
	if (!cpu_has_feature(CPU_FTR_HVMODE))
		return;
	/*
	 * We cannot use memblock_phys_mem_size() here, because
	 * memblock_analyze() has not been called yet.
	 */
	for_each_memblock(memory, reg)
		selected_size += memblock_region_memory_end_pfn(reg) -
				 memblock_region_memory_base_pfn(reg);

	selected_size = (selected_size * kvm_cma_resv_ratio / 100) << PAGE_SHIFT;
	if (selected_size) {
		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
			 (unsigned long)selected_size / SZ_1M);
		align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;
		cma_declare_contiguous(0, selected_size, 0, align_size,
			KVM_CMA_CHUNK_ORDER - PAGE_SHIFT, false, &kvm_cma);
	}
}
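/*
 * Worked example (illustrative): on a machine with 128 GiB of memory and
 * the default kvm_cma_resv_ratio of 5, selected_size comes out to
 * 128 GiB * 5 / 100 = 6.4 GiB, declared to CMA with a bitmap granularity
 * of one 256 KiB chunk (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT).
 */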
/*
 * Real-mode H_CONFER implementation.
 * We check if we are the only vcpu out of this virtual core
 * still running in the guest and not ceded. If so, we pop up
 * to the virtual-mode implementation; if not, just return to
 * the guest.
 */
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
			    unsigned int yield_count)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;
	int threads_running;
	int threads_ceded;
	int threads_conferring;
	u64 stop = get_tb() + 10 * tb_ticks_per_usec;
	int rv = H_SUCCESS; /* => don't yield */

	set_bit(vcpu->arch.ptid, &vc->conferring_threads);
	/* Spin for up to 10us waiting for the other threads to cede or confer */
	while ((get_tb() < stop) && !VCORE_IS_EXITING(vc)) {
		threads_running = VCORE_ENTRY_MAP(vc);
		threads_ceded = vc->napping_threads;
		threads_conferring = vc->conferring_threads;
		if ((threads_ceded | threads_conferring) == threads_running) {
			rv = H_TOO_HARD; /* => do yield */
			break;
		}
	}
	clear_bit(vcpu->arch.ptid, &vc->conferring_threads);
	return rv;
}

/*
 * When running HV mode KVM we need to block certain operations while KVM VMs
 * exist in the system. We use a counter of VMs to track this.
 *
 * One of the operations we need to block is the onlining of secondaries, so we
 * protect hv_vm_count with get/put_online_cpus().
 */
static atomic_t hv_vm_count;

void kvm_hv_vm_activated(void)
{
	get_online_cpus();
	atomic_inc(&hv_vm_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_activated);

void kvm_hv_vm_deactivated(void)
{
	get_online_cpus();
	atomic_dec(&hv_vm_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_deactivated);

bool kvm_hv_mode_active(void)
{
	return atomic_read(&hv_vm_count) != 0;
}
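/*
 * hcall_real_table (in book3s_hv_rmhandlers.S) is indexed by hcall number
 * divided by 4, since PAPR hcall numbers are multiples of 4.  For example
 * (illustrative), H_CEDE (0xE0) maps to index 0x38.
 */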
extern int hcall_real_table[], hcall_real_table_end[];

int kvmppc_hcall_impl_hv_realmode(unsigned long cmd)
{
	cmd /= 4;
	if (cmd < hcall_real_table_end - hcall_real_table &&
	    hcall_real_table[cmd])
		return 1;

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_hcall_impl_hv_realmode);

int kvmppc_hwrng_present(void)
{
	return powernv_hwrng_present();
}
EXPORT_SYMBOL_GPL(kvmppc_hwrng_present);

/* Real-mode handler for H_RANDOM: return an entropy word in GPR4 */
long kvmppc_h_random(struct kvm_vcpu *vcpu)
{
	if (powernv_get_random_real_mode(&vcpu->arch.gpr[4]))
		return H_SUCCESS;

	return H_HARDWARE;
}

/* Cache-inhibited store byte (stbcix): an MMIO write that is safe in real mode */
static inline void rm_writeb(unsigned long paddr, u8 val)
{
	__asm__ __volatile__("stbcix %0,0,%1"
		: : "r" (val), "r" (paddr) : "memory");
}

/*
 * Send an interrupt to another CPU.
 * This can only be called in real mode.
 * The caller needs to include any barrier needed to order writes
 * to memory vs. the IPI/message.
 */
void kvmhv_rm_send_ipi(int cpu)
{
	unsigned long xics_phys;

	/* Poke the target by writing IPI_PRIORITY to its XICS MFRR register */
	xics_phys = paca[cpu].kvm_hstate.xics_phys;
	rm_writeb(xics_phys + XICS_MFRR, IPI_PRIORITY);
}

/*
 * The following functions are called from the assembly code
 * in book3s_hv_rmhandlers.S.
 */
static void kvmhv_interrupt_vcore(struct kvmppc_vcore *vc, int active)
{
	int cpu = vc->pcpu;

	/* Order setting of exit map vs. msgsnd/IPI */
	smp_mb();
	for (; active; active >>= 1, ++cpu)
		if (active & 1)
			kvmhv_rm_send_ipi(cpu);
}

void kvmhv_commence_exit(int trap)
{
	struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
	int ptid = local_paca->kvm_hstate.ptid;
	int me, ee;

	/*
	 * Set our bit in the threads-exiting-guest map in the 0xff00
	 * bits of vcore->entry_exit_map.
	 */
	me = 0x100 << ptid;
	do {
		ee = vc->entry_exit_map;
	} while (cmpxchg(&vc->entry_exit_map, ee, ee | me) != ee);
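	/*
	 * Worked example (illustrative): if threads 0 and 1 entered the
	 * guest (entry map 0x03 in the low byte) and thread 0 exits first,
	 * it sets me = 0x100 and samples ee = 0x003.  No exit bits were set
	 * yet ((ee >> 8) == 0), so thread 0 carries on and interrupts
	 * thread 1 below.
	 */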
	/* Are we the first here? */
	if ((ee >> 8) != 0)
		return;

	/*
	 * Trigger the other threads in this vcore to exit the guest.
	 * If this is a hypervisor decrementer interrupt then they
	 * will already be on their way out of the guest.
	 */
	if (trap != BOOK3S_INTERRUPT_HV_DECREMENTER)
		kvmhv_interrupt_vcore(vc, ee & ~(1 << ptid));
}