// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com>
 * Copyright 2016 Alexey Kardashevskiy, IBM Corporation <aik@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/hugetlb.h>
#include <linux/list.h>
#include <linux/anon_inodes.h>
#include <linux/iommu.h>
#include <linux/file.h>
#include <linux/mm.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/udbg.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/mmu_context.h>

static unsigned long kvmppc_tce_pages(unsigned long iommu_pages)
{
	return ALIGN(iommu_pages * sizeof(u64), PAGE_SIZE) / PAGE_SIZE;
}

static unsigned long kvmppc_stt_pages(unsigned long tce_pages)
{
	unsigned long stt_bytes = sizeof(struct kvmppc_spapr_tce_table) +
			(tce_pages * sizeof(struct page *));

	return tce_pages + ALIGN(stt_bytes, PAGE_SIZE) / PAGE_SIZE;
}

static void kvm_spapr_tce_iommu_table_free(struct rcu_head *head)
{
	struct kvmppc_spapr_tce_iommu_table *stit = container_of(head,
			struct kvmppc_spapr_tce_iommu_table, rcu);

	iommu_tce_table_put(stit->tbl);

	kfree(stit);
}

static void kvm_spapr_tce_liobn_put(struct kref *kref)
{
	struct kvmppc_spapr_tce_iommu_table *stit = container_of(kref,
			struct kvmppc_spapr_tce_iommu_table, kref);

	list_del_rcu(&stit->next);

	call_rcu(&stit->rcu, kvm_spapr_tce_iommu_table_free);
}

extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
		struct iommu_group *grp)
{
	int i;
	struct kvmppc_spapr_tce_table *stt;
	struct kvmppc_spapr_tce_iommu_table *stit, *tmp;
	struct iommu_table_group *table_group = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {

		table_group = iommu_group_get_iommudata(grp);
		if (WARN_ON(!table_group))
			continue;

		list_for_each_entry_safe(stit, tmp, &stt->iommu_tables, next) {
			for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
				if (table_group->tables[i] != stit->tbl)
					continue;

				kref_put(&stit->kref, kvm_spapr_tce_liobn_put);
			}
		}
		cond_resched_rcu();
	}
	rcu_read_unlock();
}

extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
		struct iommu_group *grp)
{
	struct kvmppc_spapr_tce_table *stt = NULL;
	bool found = false;
	struct iommu_table *tbl = NULL;
	struct iommu_table_group *table_group;
	long i;
	struct kvmppc_spapr_tce_iommu_table *stit;
	struct fd f;

	f = fdget(tablefd);
	if (!f.file)
		return -EBADF;

	rcu_read_lock();
	list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {
		if (stt == f.file->private_data) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();

	fdput(f);

	if (!found)
		return -EINVAL;

	table_group = iommu_group_get_iommudata(grp);
	if (WARN_ON(!table_group))
		return -EFAULT;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbltmp = table_group->tables[i];

		if (!tbltmp)
			continue;
		/* Make sure hardware table parameters are compatible */
		if ((tbltmp->it_page_shift <= stt->page_shift) &&
				(tbltmp->it_offset << tbltmp->it_page_shift ==
				 stt->offset << stt->page_shift) &&
				(tbltmp->it_size << tbltmp->it_page_shift >=
				 stt->size << stt->page_shift)) {
			/*
			 * Reference the table to avoid races with
			 * add/remove DMA windows.
			 */
			tbl = iommu_tce_table_get(tbltmp);
			break;
		}
	}
	if (!tbl)
		return -EINVAL;

	rcu_read_lock();
	list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
		if (tbl != stit->tbl)
			continue;

		if (!kref_get_unless_zero(&stit->kref)) {
			/* stit is being destroyed */
			iommu_tce_table_put(tbl);
			rcu_read_unlock();
			return -ENOTTY;
		}
		/*
		 * The table is already known to this KVM, we just increased
		 * its KVM reference counter and can return.
		 */
		rcu_read_unlock();
		return 0;
	}
	rcu_read_unlock();

	stit = kzalloc(sizeof(*stit), GFP_KERNEL);
	if (!stit) {
		iommu_tce_table_put(tbl);
		return -ENOMEM;
	}

	stit->tbl = tbl;
	kref_init(&stit->kref);

	list_add_rcu(&stit->next, &stt->iommu_tables);

	return 0;
}

static void release_spapr_tce_table(struct rcu_head *head)
{
	struct kvmppc_spapr_tce_table *stt = container_of(head,
			struct kvmppc_spapr_tce_table, rcu);
	unsigned long i, npages = kvmppc_tce_pages(stt->size);

	for (i = 0; i < npages; i++)
		if (stt->pages[i])
			__free_page(stt->pages[i]);

	kfree(stt);
}

static struct page *kvm_spapr_get_tce_page(struct kvmppc_spapr_tce_table *stt,
		unsigned long sttpage)
{
	struct page *page = stt->pages[sttpage];

	if (page)
		return page;

	mutex_lock(&stt->alloc_lock);
	page = stt->pages[sttpage];
	if (!page) {
		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		WARN_ON_ONCE(!page);
		if (page)
			stt->pages[sttpage] = page;
	}
	mutex_unlock(&stt->alloc_lock);

	return page;
}

static vm_fault_t kvm_spapr_tce_fault(struct vm_fault *vmf)
{
	struct kvmppc_spapr_tce_table *stt = vmf->vma->vm_file->private_data;
	struct page *page;

	if (vmf->pgoff >= kvmppc_tce_pages(stt->size))
		return VM_FAULT_SIGBUS;

	page = kvm_spapr_get_tce_page(stt, vmf->pgoff);
	if (!page)
		return VM_FAULT_OOM;

	get_page(page);
	vmf->page = page;
	return 0;
}

static const struct vm_operations_struct kvm_spapr_tce_vm_ops = {
	.fault = kvm_spapr_tce_fault,
};

static int kvm_spapr_tce_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_spapr_tce_vm_ops;
	return 0;
}

static int kvm_spapr_tce_release(struct inode *inode, struct file *filp)
{
	struct kvmppc_spapr_tce_table *stt = filp->private_data;
	struct kvmppc_spapr_tce_iommu_table *stit, *tmp;
	struct kvm *kvm = stt->kvm;

	mutex_lock(&kvm->lock);
	list_del_rcu(&stt->list);
	mutex_unlock(&kvm->lock);

	list_for_each_entry_safe(stit, tmp, &stt->iommu_tables, next) {
		WARN_ON(!kref_read(&stit->kref));
		while (1) {
			if (kref_put(&stit->kref, kvm_spapr_tce_liobn_put))
				break;
		}
	}

	account_locked_vm(kvm->mm,
			kvmppc_stt_pages(kvmppc_tce_pages(stt->size)), false);

	kvm_put_kvm(stt->kvm);

	call_rcu(&stt->rcu, release_spapr_tce_table);

	return 0;
}

static const struct file_operations kvm_spapr_tce_fops = {
	.mmap		= kvm_spapr_tce_mmap,
	.release	= kvm_spapr_tce_release,
};

long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
				   struct kvm_create_spapr_tce_64 *args)
{
	struct kvmppc_spapr_tce_table *stt = NULL;
	struct kvmppc_spapr_tce_table *siter;
	struct mm_struct *mm = kvm->mm;
	unsigned long npages, size = args->size;
	int ret;

	if (!args->size || args->page_shift < 12 || args->page_shift > 34 ||
		(args->offset + args->size > (ULLONG_MAX >> args->page_shift)))
		return -EINVAL;

	npages = kvmppc_tce_pages(size);
	ret = account_locked_vm(mm, kvmppc_stt_pages(npages), true);
	if (ret)
		return ret;

	ret = -ENOMEM;
	stt = kzalloc(struct_size(stt, pages, npages), GFP_KERNEL);
	if (!stt)
		goto fail_acct;

	stt->liobn = args->liobn;
	stt->page_shift = args->page_shift;
	stt->offset = args->offset;
	stt->size = size;
	stt->kvm = kvm;
	mutex_init(&stt->alloc_lock);
	INIT_LIST_HEAD_RCU(&stt->iommu_tables);

	mutex_lock(&kvm->lock);

	/* Check this LIOBN hasn't been previously allocated */
	ret = 0;
	list_for_each_entry(siter, &kvm->arch.spapr_tce_tables, list) {
		if (siter->liobn == args->liobn) {
			ret = -EBUSY;
			break;
		}
	}

	kvm_get_kvm(kvm);
	if (!ret)
		ret = anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
				       stt, O_RDWR | O_CLOEXEC);

	if (ret >= 0)
		list_add_rcu(&stt->list, &kvm->arch.spapr_tce_tables);
	else
		kvm_put_kvm_no_destroy(kvm);

	mutex_unlock(&kvm->lock);

	if (ret >= 0)
		return ret;

	kfree(stt);
fail_acct:
	account_locked_vm(mm, kvmppc_stt_pages(npages), false);
	return ret;
}

static long kvmppc_tce_to_ua(struct kvm *kvm, unsigned long tce,
			     unsigned long *ua)
{
	unsigned long gfn = tce >> PAGE_SHIFT;
	struct kvm_memory_slot *memslot;

	memslot = __gfn_to_memslot(kvm_memslots(kvm), gfn);
	if (!memslot)
		return -EINVAL;

	*ua = __gfn_to_hva_memslot(memslot, gfn) |
		(tce & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));

	return 0;
}

static long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt,
				unsigned long tce)
{
	unsigned long gpa = tce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
	enum dma_data_direction dir = iommu_tce_direction(tce);
	struct kvmppc_spapr_tce_iommu_table *stit;
	unsigned long ua = 0;

	/* Allow userspace to poison TCE table */
	if (dir == DMA_NONE)
		return H_SUCCESS;

	if (iommu_tce_check_gpa(stt->page_shift, gpa))
		return H_TOO_HARD;

	if (kvmppc_tce_to_ua(stt->kvm, tce, &ua))
		return H_TOO_HARD;

	rcu_read_lock();
	list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
		unsigned long hpa = 0;
		struct mm_iommu_table_group_mem_t *mem;
		long shift = stit->tbl->it_page_shift;

		mem = mm_iommu_lookup(stt->kvm->mm, ua, 1ULL << shift);
		if (!mem || mm_iommu_ua_to_hpa(mem, ua, shift, &hpa)) {
			rcu_read_unlock();
			return H_TOO_HARD;
		}
	}
	rcu_read_unlock();

	return H_SUCCESS;
}

/*
 * Handles TCE requests for emulated devices.
 * Puts guest TCE values into the table and expects user space to convert them.
 * Cannot fail, so kvmppc_tce_validate must be called before it.
 */
static void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt,
		unsigned long idx, unsigned long tce)
{
	struct page *page;
	u64 *tbl;
	unsigned long sttpage;

	idx -= stt->offset;
	sttpage = idx / TCES_PER_PAGE;
	page = stt->pages[sttpage];

	if (!page) {
		/* We allow any TCE, not just with read|write permissions */
		if (!tce)
			return;

		page = kvm_spapr_get_tce_page(stt, sttpage);
		if (!page)
			return;
	}
	tbl = page_to_virt(page);

	tbl[idx % TCES_PER_PAGE] = tce;
}

static void kvmppc_clear_tce(struct mm_struct *mm, struct kvmppc_spapr_tce_table *stt,
		struct iommu_table *tbl, unsigned long entry)
{
	unsigned long i;
	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
	unsigned long io_entry = entry << (stt->page_shift - tbl->it_page_shift);

	for (i = 0; i < subpages; ++i) {
		unsigned long hpa = 0;
		enum dma_data_direction dir = DMA_NONE;

		iommu_tce_xchg_no_kill(mm, tbl, io_entry + i, &hpa, &dir);
	}
}

static long kvmppc_tce_iommu_mapped_dec(struct kvm *kvm,
		struct iommu_table *tbl, unsigned long entry)
{
	struct mm_iommu_table_group_mem_t *mem = NULL;
	const unsigned long pgsize = 1ULL << tbl->it_page_shift;
	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);

	if (!pua)
		return H_SUCCESS;

	mem = mm_iommu_lookup(kvm->mm, be64_to_cpu(*pua), pgsize);
	if (!mem)
		return H_TOO_HARD;

	mm_iommu_mapped_dec(mem);

	*pua = cpu_to_be64(0);

	return H_SUCCESS;
}

static long kvmppc_tce_iommu_do_unmap(struct kvm *kvm,
		struct iommu_table *tbl, unsigned long entry)
{
	enum dma_data_direction dir = DMA_NONE;
	unsigned long hpa = 0;
	long ret;

	if (WARN_ON_ONCE(iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa,
					&dir)))
		return H_TOO_HARD;

	if (dir == DMA_NONE)
		return H_SUCCESS;

	ret = kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry);
	if (ret != H_SUCCESS)
		iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa, &dir);

	return ret;
}

static long kvmppc_tce_iommu_unmap(struct kvm *kvm,
		struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
		unsigned long entry)
{
	unsigned long i, ret = H_SUCCESS;
	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
	unsigned long io_entry = entry * subpages;

	for (i = 0; i < subpages; ++i) {
		ret = kvmppc_tce_iommu_do_unmap(kvm, tbl, io_entry + i);
		if (ret != H_SUCCESS)
			break;
	}

	iommu_tce_kill(tbl, io_entry, subpages);

	return ret;
}

static long kvmppc_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
		unsigned long entry, unsigned long ua,
		enum dma_data_direction dir)
{
	long ret;
	unsigned long hpa;
	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
	struct mm_iommu_table_group_mem_t *mem;

	if (!pua)
		/* it_userspace allocation might be delayed */
		return H_TOO_HARD;

	mem = mm_iommu_lookup(kvm->mm, ua, 1ULL << tbl->it_page_shift);
	if (!mem)
		/* This only handles v2 IOMMU type, v1 is handled via ioctl() */
		return H_TOO_HARD;

	if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, tbl->it_page_shift, &hpa)))
		return H_TOO_HARD;

	if (mm_iommu_mapped_inc(mem))
		return H_TOO_HARD;

	ret = iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa, &dir);
	if (WARN_ON_ONCE(ret)) {
		mm_iommu_mapped_dec(mem);
		return H_TOO_HARD;
	}

	if (dir != DMA_NONE)
		kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry);

	*pua = cpu_to_be64(ua);

	return 0;
}

static long kvmppc_tce_iommu_map(struct kvm *kvm,
		struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
		unsigned long entry, unsigned long ua,
		enum dma_data_direction dir)
{
	unsigned long i, pgoff, ret = H_SUCCESS;
	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
	unsigned long io_entry = entry * subpages;

	for (i = 0, pgoff = 0; i < subpages;
			++i, pgoff += IOMMU_PAGE_SIZE(tbl)) {

		ret = kvmppc_tce_iommu_do_map(kvm, tbl,
				io_entry + i, ua + pgoff, dir);
		if (ret != H_SUCCESS)
			break;
	}

	iommu_tce_kill(tbl, io_entry, subpages);

	return ret;
}

long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
		      unsigned long ioba, unsigned long tce)
{
	struct kvmppc_spapr_tce_table *stt;
	long ret, idx;
	struct kvmppc_spapr_tce_iommu_table *stit;
	unsigned long entry, ua = 0;
	enum dma_data_direction dir;

	/* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
	/* 	liobn, ioba, tce); */

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, 1);
	if (ret != H_SUCCESS)
		return ret;

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvmppc_tce_validate(stt, tce);
	if (ret != H_SUCCESS)
		goto unlock_exit;

	dir = iommu_tce_direction(tce);

	if ((dir != DMA_NONE) && kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) {
		ret = H_PARAMETER;
		goto unlock_exit;
	}

	entry = ioba >> stt->page_shift;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		if (dir == DMA_NONE)
			ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt,
					stit->tbl, entry);
		else
			ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, stit->tbl,
					entry, ua, dir);

		if (ret != H_SUCCESS) {
			kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl, entry);
			goto unlock_exit;
		}
	}

	kvmppc_tce_put(stt, entry, tce);

unlock_exit:
	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_put_tce);

long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_list, unsigned long npages)
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret = H_SUCCESS, idx;
	unsigned long entry, ua = 0;
	u64 __user *tces;
	u64 tce;
	struct kvmppc_spapr_tce_iommu_table *stit;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	entry = ioba >> stt->page_shift;
	/*
	 * The SPAPR spec says that the maximum size of the list is 512 TCEs,
	 * so the whole list fits in a 4K page.
	 */
	if (npages > 512)
		return H_PARAMETER;

	if (tce_list & (SZ_4K - 1))
		return H_PARAMETER;

	ret = kvmppc_ioba_validate(stt, ioba, npages);
	if (ret != H_SUCCESS)
		return ret;

	idx = srcu_read_lock(&vcpu->kvm->srcu);
	if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua)) {
		ret = H_TOO_HARD;
		goto unlock_exit;
	}
	tces = (u64 __user *) ua;

	for (i = 0; i < npages; ++i) {
		if (get_user(tce, tces + i)) {
			ret = H_TOO_HARD;
			goto unlock_exit;
		}
		tce = be64_to_cpu(tce);

		ret = kvmppc_tce_validate(stt, tce);
		if (ret != H_SUCCESS)
			goto unlock_exit;
	}

	for (i = 0; i < npages; ++i) {
		/*
		 * This looks unsafe, because we validate, then regrab
		 * the TCE from userspace which could have been changed by
		 * another thread.
		 *
		 * But it actually is safe, because the relevant checks will be
		 * re-executed in the following code. If userspace tries to
		 * change this dodgily it will result in a messier failure mode
		 * but won't threaten the host.
		 */
		if (get_user(tce, tces + i)) {
			ret = H_TOO_HARD;
			goto unlock_exit;
		}
		tce = be64_to_cpu(tce);

		if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) {
			ret = H_PARAMETER;
			goto unlock_exit;
		}

		list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
			ret = kvmppc_tce_iommu_map(vcpu->kvm, stt,
					stit->tbl, entry + i, ua,
					iommu_tce_direction(tce));

			if (ret != H_SUCCESS) {
				kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl,
						entry + i);
				goto unlock_exit;
			}
		}

		kvmppc_tce_put(stt, entry + i, tce);
	}

unlock_exit:
	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_put_tce_indirect);

long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
			unsigned long liobn, unsigned long ioba,
			unsigned long tce_value, unsigned long npages)
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret;
	struct kvmppc_spapr_tce_iommu_table *stit;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, npages);
	if (ret != H_SUCCESS)
		return ret;

	/* Check permission bits only, to let userspace poison the TCE table for debug */
	if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ))
		return H_PARAMETER;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		unsigned long entry = ioba >> stt->page_shift;

		for (i = 0; i < npages; ++i) {
			ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt,
					stit->tbl, entry + i);

			if (ret == H_SUCCESS)
				continue;

			if (ret == H_TOO_HARD)
				return ret;

			WARN_ON_ONCE(1);
			kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl, entry + i);
		}
	}

	for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
		kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);

	return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_stuff_tce);