/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com>
 * Copyright 2016 Alexey Kardashevskiy, IBM Corporation <aik@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/list.h>
#include <linux/anon_inodes.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/kvm_host.h>
#include <asm/udbg.h>
#include <asm/iommu.h>
#include <asm/tce.h>

static unsigned long kvmppc_tce_pages(unsigned long iommu_pages)
{
	return ALIGN(iommu_pages * sizeof(u64), PAGE_SIZE) / PAGE_SIZE;
}

static unsigned long kvmppc_stt_pages(unsigned long tce_pages)
{
	unsigned long stt_bytes = sizeof(struct kvmppc_spapr_tce_table) +
			(tce_pages * sizeof(struct page *));

	return tce_pages + ALIGN(stt_bytes, PAGE_SIZE) / PAGE_SIZE;
}
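/*
 * Worked example for the two helpers above -- a sketch only, assuming
 * 64K host pages and a hypothetical 4GB DMA window backed by 64K IOMMU
 * pages: the window needs 4GB / 64K = 65536 TCEs, so the table itself
 * is 65536 * sizeof(u64) = 512K, i.e. kvmppc_tce_pages() returns 8.
 * The descriptor (struct kvmppc_spapr_tce_table plus 8 page pointers)
 * fits in one more page, so kvmppc_stt_pages() accounts 8 + 1 = 9
 * pages against RLIMIT_MEMLOCK.
 */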
static long kvmppc_account_memlimit(unsigned long stt_pages, bool inc)
{
	long ret = 0;

	if (!current || !current->mm)
		return ret; /* process exited */

	down_write(&current->mm->mmap_sem);

	if (inc) {
		unsigned long locked, lock_limit;

		locked = current->mm->locked_vm + stt_pages;
		lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
			ret = -ENOMEM;
		else
			current->mm->locked_vm += stt_pages;
	} else {
		if (WARN_ON_ONCE(stt_pages > current->mm->locked_vm))
			stt_pages = current->mm->locked_vm;

		current->mm->locked_vm -= stt_pages;
	}

	pr_debug("[%d] RLIMIT_MEMLOCK KVM %c%ld %ld/%ld%s\n", current->pid,
			inc ? '+' : '-',
			stt_pages << PAGE_SHIFT,
			current->mm->locked_vm << PAGE_SHIFT,
			rlimit(RLIMIT_MEMLOCK),
			ret ? " - exceeded" : "");

	up_write(&current->mm->mmap_sem);

	return ret;
}

static void release_spapr_tce_table(struct rcu_head *head)
{
	struct kvmppc_spapr_tce_table *stt = container_of(head,
			struct kvmppc_spapr_tce_table, rcu);
	unsigned long i, npages = kvmppc_tce_pages(stt->size);

	for (i = 0; i < npages; i++)
		__free_page(stt->pages[i]);

	kfree(stt);
}

static int kvm_spapr_tce_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct kvmppc_spapr_tce_table *stt = vma->vm_file->private_data;
	struct page *page;

	if (vmf->pgoff >= kvmppc_tce_pages(stt->size))
		return VM_FAULT_SIGBUS;

	page = stt->pages[vmf->pgoff];
	get_page(page);
	vmf->page = page;
	return 0;
}

static const struct vm_operations_struct kvm_spapr_tce_vm_ops = {
	.fault = kvm_spapr_tce_fault,
};

static int kvm_spapr_tce_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_spapr_tce_vm_ops;
	return 0;
}

static int kvm_spapr_tce_release(struct inode *inode, struct file *filp)
{
	struct kvmppc_spapr_tce_table *stt = filp->private_data;

	list_del_rcu(&stt->list);

	kvm_put_kvm(stt->kvm);

	kvmppc_account_memlimit(
		kvmppc_stt_pages(kvmppc_tce_pages(stt->size)), false);
	call_rcu(&stt->rcu, release_spapr_tce_table);

	return 0;
}

static const struct file_operations kvm_spapr_tce_fops = {
	.mmap		= kvm_spapr_tce_mmap,
	.release	= kvm_spapr_tce_release,
};
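/*
 * A minimal userspace sketch of how this ioctl is driven -- the field
 * values below are hypothetical, not requirements:
 *
 *	struct kvm_create_spapr_tce_64 args = {
 *		.liobn = 0x80000001,	// hypothetical LIOBN
 *		.page_shift = 16,	// 64K IOMMU pages
 *		.offset = 0,		// window starts at bus address 0
 *		.size = 65536,		// 4GB window, in IOMMU pages
 *	};
 *	int tablefd = ioctl(vmfd, KVM_CREATE_SPAPR_TCE_64, &args);
 *
 * On success the returned fd can be mmap()ed to inspect the guest's
 * TCE table, and releasing the fd tears the table down.
 */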
long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
				   struct kvm_create_spapr_tce_64 *args)
{
	struct kvmppc_spapr_tce_table *stt = NULL;
	unsigned long npages, size;
	int ret = -ENOMEM;
	int i;

	if (!args->size)
		return -EINVAL;

	/* Check this LIOBN hasn't been previously allocated */
	list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) {
		if (stt->liobn == args->liobn)
			return -EBUSY;
	}

	size = args->size;
	npages = kvmppc_tce_pages(size);
	ret = kvmppc_account_memlimit(kvmppc_stt_pages(npages), true);
	if (ret) {
		stt = NULL;
		goto fail;
	}

	stt = kzalloc(sizeof(*stt) + npages * sizeof(struct page *),
		      GFP_KERNEL);
	if (!stt)
		goto fail;

	stt->liobn = args->liobn;
	stt->page_shift = args->page_shift;
	stt->offset = args->offset;
	stt->size = size;
	stt->kvm = kvm;

	for (i = 0; i < npages; i++) {
		stt->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!stt->pages[i])
			goto fail;
	}

	kvm_get_kvm(kvm);

	mutex_lock(&kvm->lock);
	list_add_rcu(&stt->list, &kvm->arch.spapr_tce_tables);

	mutex_unlock(&kvm->lock);

	return anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
				stt, O_RDWR | O_CLOEXEC);

fail:
	if (stt) {
		for (i = 0; i < npages; i++)
			if (stt->pages[i])
				__free_page(stt->pages[i]);

		kfree(stt);
	}
	return ret;
}

long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
		      unsigned long ioba, unsigned long tce)
{
	struct kvmppc_spapr_tce_table *stt = kvmppc_find_table(vcpu, liobn);
	long ret;

	/* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
	/* 	    liobn, ioba, tce); */

	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, 1);
	if (ret != H_SUCCESS)
		return ret;

	ret = kvmppc_tce_validate(stt, tce);
	if (ret != H_SUCCESS)
		return ret;

	kvmppc_tce_put(stt, ioba >> stt->page_shift, tce);

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_put_tce);

long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
			       unsigned long liobn, unsigned long ioba,
			       unsigned long tce_list, unsigned long npages)
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret = H_SUCCESS, idx;
	unsigned long entry, ua = 0;
	u64 __user *tces;
	u64 tce;

	stt = kvmppc_find_table(vcpu, liobn);
	if (!stt)
		return H_TOO_HARD;

	entry = ioba >> stt->page_shift;
	/*
	 * The SPAPR spec says the maximum size of the list is 512 TCEs,
	 * so the whole list fits in a 4K page.
	 */
	if (npages > 512)
		return H_PARAMETER;

	if (tce_list & (SZ_4K - 1))
		return H_PARAMETER;

	ret = kvmppc_ioba_validate(stt, ioba, npages);
	if (ret != H_SUCCESS)
		return ret;

	idx = srcu_read_lock(&vcpu->kvm->srcu);
	if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, NULL)) {
		ret = H_TOO_HARD;
		goto unlock_exit;
	}
	tces = (u64 __user *) ua;

	for (i = 0; i < npages; ++i) {
		if (get_user(tce, tces + i)) {
			ret = H_TOO_HARD;
			goto unlock_exit;
		}
		tce = be64_to_cpu(tce);

		ret = kvmppc_tce_validate(stt, tce);
		if (ret != H_SUCCESS)
			goto unlock_exit;

		kvmppc_tce_put(stt, entry + i, tce);
	}

unlock_exit:
	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_put_tce_indirect);

long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
			unsigned long liobn, unsigned long ioba,
			unsigned long tce_value, unsigned long npages)
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret;

	stt = kvmppc_find_table(vcpu, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, npages);
	if (ret != H_SUCCESS)
		return ret;

	/* Check permission bits only, to let userspace poison TCEs for debug */
	if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ))
		return H_PARAMETER;

	for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
		kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_stuff_tce);
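/*
 * Guest-side sketch (not part of this file's API): a pseries guest maps
 * one page by building a TCE -- the real page address ORed with the
 * TCE_PCI_READ/TCE_PCI_WRITE permission bits -- and issuing H_PUT_TCE;
 * for H_PUT_TCE_INDIRECT it instead fills a 4K-aligned page with up to
 * 512 big-endian TCEs and passes its address as tce_list.  Roughly:
 *
 *	unsigned long tce = __pa(addr) | TCE_PCI_READ | TCE_PCI_WRITE;
 *	plpar_hcall_norets(H_PUT_TCE, liobn, ioba, tce);
 */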