/*
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/cpu.h>
#include <linux/kvm_host.h>
#include <linux/preempt.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/sizes.h>
#include <linux/cma.h>
#include <linux/bitops.h>

#include <asm/cputable.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/archrandom.h>

#define KVM_CMA_CHUNK_ORDER	18

/*
 * Hash page table alignment on newer CPUs (CPU_FTR_ARCH_206)
 * should be a power of 2.
 */
#define HPT_ALIGN_PAGES		((1 << 18) >> PAGE_SHIFT) /* 256k */
/*
 * By default we reserve 5% of memory for hash pagetable allocation.
 */
static unsigned long kvm_cma_resv_ratio = 5;

static struct cma *kvm_cma;

static int __init early_parse_kvm_cma_resv(char *p)
{
	pr_debug("%s(%s)\n", __func__, p);
	if (!p)
		return -EINVAL;
	return kstrtoul(p, 0, &kvm_cma_resv_ratio);
}
early_param("kvm_cma_resv_ratio", early_parse_kvm_cma_resv);

struct page *kvm_alloc_hpt(unsigned long nr_pages)
{
	VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);

	return cma_alloc(kvm_cma, nr_pages, order_base_2(HPT_ALIGN_PAGES));
}
EXPORT_SYMBOL_GPL(kvm_alloc_hpt);

void kvm_release_hpt(struct page *page, unsigned long nr_pages)
{
	cma_release(kvm_cma, page, nr_pages);
}
EXPORT_SYMBOL_GPL(kvm_release_hpt);

/**
 * kvm_cma_reserve() - reserve area for kvm hash pagetable
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the memblock allocator
 * has been activated and all other subsystems have already allocated/reserved
 * memory.
 */
void __init kvm_cma_reserve(void)
{
	unsigned long align_size;
	struct memblock_region *reg;
	phys_addr_t selected_size = 0;

	/*
	 * We need CMA reservation only when we are in HV mode
	 */
	if (!cpu_has_feature(CPU_FTR_HVMODE))
		return;
	/*
	 * We cannot use memblock_phys_mem_size() here, because
	 * memblock_analyze() has not been called yet.
	 */
	for_each_memblock(memory, reg)
		selected_size += memblock_region_memory_end_pfn(reg) -
				 memblock_region_memory_base_pfn(reg);

	selected_size = (selected_size * kvm_cma_resv_ratio / 100) << PAGE_SHIFT;
	if (selected_size) {
		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
			 (unsigned long)selected_size / SZ_1M);
		align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;
		cma_declare_contiguous(0, selected_size, 0, align_size,
				       KVM_CMA_CHUNK_ORDER - PAGE_SHIFT, false, &kvm_cma);
	}
}
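/*
 * Illustrative sketch, not part of the original file: how a caller
 * sizing a hash page table of (1 << order) bytes might use the CMA
 * helpers above. The function name and the zeroing/fallback policy
 * are assumptions for illustration; the real caller lives in the
 * book3s 64-bit MMU code.
 */
static inline struct page *example_alloc_hpt_cma(unsigned long order)
{
	/* kvm_alloc_hpt() works in pages, so convert bytes to pages. */
	unsigned long nr_pages = (1ul << order) >> PAGE_SHIFT;
	struct page *page = kvm_alloc_hpt(nr_pages);

	if (page)
		/* The guest expects an initially empty hash table. */
		memset(page_address(page), 0, 1ul << order);

	return page;	/* NULL => caller can fall back to the buddy allocator */
}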
/*
 * Real-mode H_CONFER implementation.
 * We check if we are the only vcpu out of this virtual core
 * still running in the guest and not ceded. If so, we pop up
 * to the virtual-mode implementation; if not, just return to
 * the guest.
 */
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
			    unsigned int yield_count)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;
	int threads_running;
	int threads_ceded;
	int threads_conferring;
	u64 stop = get_tb() + 10 * tb_ticks_per_usec;
	int rv = H_SUCCESS; /* => don't yield */

	set_bit(vcpu->arch.ptid, &vc->conferring_threads);
	while ((get_tb() < stop) && !VCORE_IS_EXITING(vc)) {
		threads_running = VCORE_ENTRY_MAP(vc);
		threads_ceded = vc->napping_threads;
		threads_conferring = vc->conferring_threads;
		if ((threads_ceded | threads_conferring) == threads_running) {
			rv = H_TOO_HARD; /* => do yield */
			break;
		}
	}
	clear_bit(vcpu->arch.ptid, &vc->conferring_threads);
	return rv;
}

/*
 * When running HV mode KVM we need to block certain operations while KVM VMs
 * exist in the system. We use a counter of VMs to track this.
 *
 * One of the operations we need to block is onlining of secondaries, so we
 * protect hv_vm_count with get/put_online_cpus().
 */
static atomic_t hv_vm_count;

void kvm_hv_vm_activated(void)
{
	get_online_cpus();
	atomic_inc(&hv_vm_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_activated);

void kvm_hv_vm_deactivated(void)
{
	get_online_cpus();
	atomic_dec(&hv_vm_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_deactivated);

bool kvm_hv_mode_active(void)
{
	return atomic_read(&hv_vm_count) != 0;
}

extern int hcall_real_table[], hcall_real_table_end[];

int kvmppc_hcall_impl_hv_realmode(unsigned long cmd)
{
	cmd /= 4;
	if (cmd < hcall_real_table_end - hcall_real_table &&
	    hcall_real_table[cmd])
		return 1;

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_hcall_impl_hv_realmode);

int kvmppc_hwrng_present(void)
{
	return powernv_hwrng_present();
}
EXPORT_SYMBOL_GPL(kvmppc_hwrng_present);

long kvmppc_h_random(struct kvm_vcpu *vcpu)
{
	if (powernv_get_random_real_mode(&vcpu->arch.gpr[4]))
		return H_SUCCESS;

	return H_HARDWARE;
}
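/*
 * Illustrative sketch, not part of the original file: the kind of
 * consumer the hv_vm_count tracking above serves. The function name
 * and the -EBUSY policy are assumptions for illustration; per the
 * comment above, checks like this gate operations such as onlining
 * secondary threads while HV guests exist.
 */
static inline int example_change_smt_mode(void)
{
	/* Refuse to reconfigure while any HV guest exists. */
	if (kvm_hv_mode_active())
		return -EBUSY;

	/* ... safe to reconfigure threads here ... */
	return 0;
}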