xref: /openbmc/linux/arch/powerpc/kvm/book3s_64_vio.c (revision d1f56f31)
// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com>
 * Copyright 2016 Alexey Kardashevskiy, IBM Corporation <aik@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/hugetlb.h>
#include <linux/list.h>
#include <linux/anon_inodes.h>
#include <linux/iommu.h>
#include <linux/file.h>
#include <linux/mm.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/udbg.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/mmu_context.h>

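/*
 * Number of system pages needed to hold @iommu_pages guest TCEs
 * (one u64 per TCE).
 */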
static unsigned long kvmppc_tce_pages(unsigned long iommu_pages)
{
	return ALIGN(iommu_pages * sizeof(u64), PAGE_SIZE) / PAGE_SIZE;
}

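/*
 * Total pages to charge against locked_vm for a table backed by @tce_pages
 * pages: the TCE pages themselves plus the descriptor and its page array.
 */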
static unsigned long kvmppc_stt_pages(unsigned long tce_pages)
{
	unsigned long stt_bytes = sizeof(struct kvmppc_spapr_tce_table) +
			(tce_pages * sizeof(struct page *));

	return tce_pages + ALIGN(stt_bytes, PAGE_SIZE) / PAGE_SIZE;
}

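/* RCU callback: drop the hardware table reference and free the link */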
static void kvm_spapr_tce_iommu_table_free(struct rcu_head *head)
{
	struct kvmppc_spapr_tce_iommu_table *stit = container_of(head,
			struct kvmppc_spapr_tce_iommu_table, rcu);

	iommu_tce_table_put(stit->tbl);

	kfree(stit);
}

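/*
 * kref release: unlink the iommu table from the LIOBN and free it after
 * an RCU grace period so lockless list walkers remain safe.
 */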
static void kvm_spapr_tce_liobn_put(struct kref *kref)
{
	struct kvmppc_spapr_tce_iommu_table *stit = container_of(kref,
			struct kvmppc_spapr_tce_iommu_table, kref);

	list_del_rcu(&stit->next);

	call_rcu(&stit->rcu, kvm_spapr_tce_iommu_table_free);
}

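/*
 * Drops the references which this KVM holds on the iommu tables of @grp,
 * e.g. when the IOMMU group is being detached from the VM.
 */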
extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
		struct iommu_group *grp)
{
	int i;
	struct kvmppc_spapr_tce_table *stt;
	struct kvmppc_spapr_tce_iommu_table *stit, *tmp;
	struct iommu_table_group *table_group = NULL;

	list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {

		table_group = iommu_group_get_iommudata(grp);
		if (WARN_ON(!table_group))
			continue;

		list_for_each_entry_safe(stit, tmp, &stt->iommu_tables, next) {
			for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
				if (table_group->tables[i] != stit->tbl)
					continue;

				kref_put(&stit->kref, kvm_spapr_tce_liobn_put);
			}
		}
	}
}

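/*
 * Associates the KVM TCE table referenced by @tablefd with a hardware iommu
 * table of @grp that has compatible page size, offset and size, or takes an
 * extra reference if that table is already attached.
 */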
extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
		struct iommu_group *grp)
{
	struct kvmppc_spapr_tce_table *stt = NULL;
	bool found = false;
	struct iommu_table *tbl = NULL;
	struct iommu_table_group *table_group;
	long i;
	struct kvmppc_spapr_tce_iommu_table *stit;
	struct fd f;

	f = fdget(tablefd);
	if (!f.file)
		return -EBADF;

	list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {
		if (stt == f.file->private_data) {
			found = true;
			break;
		}
	}

	fdput(f);

	if (!found)
		return -EINVAL;

	table_group = iommu_group_get_iommudata(grp);
	if (WARN_ON(!table_group))
		return -EFAULT;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbltmp = table_group->tables[i];

		if (!tbltmp)
			continue;
		/* Make sure hardware table parameters are compatible */
		if ((tbltmp->it_page_shift <= stt->page_shift) &&
				(tbltmp->it_offset << tbltmp->it_page_shift ==
				 stt->offset << stt->page_shift) &&
				(tbltmp->it_size << tbltmp->it_page_shift >=
				 stt->size << stt->page_shift)) {
			/*
			 * Reference the table to avoid races with
			 * add/remove DMA windows.
			 */
			tbl = iommu_tce_table_get(tbltmp);
			break;
		}
	}
	if (!tbl)
		return -EINVAL;

	list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
		if (tbl != stit->tbl)
			continue;

		if (!kref_get_unless_zero(&stit->kref)) {
			/* stit is being destroyed */
			iommu_tce_table_put(tbl);
			return -ENOTTY;
		}
		/*
		 * The table is already known to this KVM, we just increased
		 * its KVM reference counter and can return.
		 */
		return 0;
	}

	stit = kzalloc(sizeof(*stit), GFP_KERNEL);
	if (!stit) {
		iommu_tce_table_put(tbl);
		return -ENOMEM;
	}

	stit->tbl = tbl;
	kref_init(&stit->kref);

	list_add_rcu(&stit->next, &stt->iommu_tables);

	return 0;
}

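/* RCU callback: free the backing pages and the table descriptor */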
static void release_spapr_tce_table(struct rcu_head *head)
{
	struct kvmppc_spapr_tce_table *stt = container_of(head,
			struct kvmppc_spapr_tce_table, rcu);
	unsigned long i, npages = kvmppc_tce_pages(stt->size);

	for (i = 0; i < npages; i++)
		if (stt->pages[i])
			__free_page(stt->pages[i]);

	kfree(stt);
}

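/* Allocate a backing page of the TCE table lazily, on first use */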
static struct page *kvm_spapr_get_tce_page(struct kvmppc_spapr_tce_table *stt,
		unsigned long sttpage)
{
	struct page *page = stt->pages[sttpage];

	if (page)
		return page;

	mutex_lock(&stt->alloc_lock);
	page = stt->pages[sttpage];
	if (!page) {
		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		WARN_ON_ONCE(!page);
		if (page)
			stt->pages[sttpage] = page;
	}
	mutex_unlock(&stt->alloc_lock);

	return page;
}

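/* Page fault handler for userspace mmap() of the TCE table fd */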
static vm_fault_t kvm_spapr_tce_fault(struct vm_fault *vmf)
{
	struct kvmppc_spapr_tce_table *stt = vmf->vma->vm_file->private_data;
	struct page *page;

	if (vmf->pgoff >= kvmppc_tce_pages(stt->size))
		return VM_FAULT_SIGBUS;

	page = kvm_spapr_get_tce_page(stt, vmf->pgoff);
	if (!page)
		return VM_FAULT_OOM;

	get_page(page);
	vmf->page = page;
	return 0;
}

static const struct vm_operations_struct kvm_spapr_tce_vm_ops = {
	.fault = kvm_spapr_tce_fault,
};

static int kvm_spapr_tce_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_spapr_tce_vm_ops;
	return 0;
}

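/*
 * fd release: unlink the table from the VM, drop all iommu table references,
 * unaccount the locked memory and free the table after an RCU grace period.
 */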
static int kvm_spapr_tce_release(struct inode *inode, struct file *filp)
{
	struct kvmppc_spapr_tce_table *stt = filp->private_data;
	struct kvmppc_spapr_tce_iommu_table *stit, *tmp;
	struct kvm *kvm = stt->kvm;

	mutex_lock(&kvm->lock);
	list_del_rcu(&stt->list);
	mutex_unlock(&kvm->lock);

	list_for_each_entry_safe(stit, tmp, &stt->iommu_tables, next) {
		WARN_ON(!kref_read(&stit->kref));
		while (1) {
			if (kref_put(&stit->kref, kvm_spapr_tce_liobn_put))
				break;
		}
	}

	account_locked_vm(kvm->mm,
		kvmppc_stt_pages(kvmppc_tce_pages(stt->size)), false);

	kvm_put_kvm(stt->kvm);

	call_rcu(&stt->rcu, release_spapr_tce_table);

	return 0;
}

static const struct file_operations kvm_spapr_tce_fops = {
	.mmap		= kvm_spapr_tce_mmap,
	.release	= kvm_spapr_tce_release,
};

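/*
 * Handler for the KVM_CREATE_SPAPR_TCE_64 ioctl: allocates a TCE table
 * descriptor, charges it against locked_vm and returns an anonymous fd
 * which userspace can mmap() to read the table.
 */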
long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
				   struct kvm_create_spapr_tce_64 *args)
{
	struct kvmppc_spapr_tce_table *stt = NULL;
	struct kvmppc_spapr_tce_table *siter;
	struct mm_struct *mm = kvm->mm;
	unsigned long npages, size = args->size;
	int ret = -ENOMEM;

	if (!args->size || args->page_shift < 12 || args->page_shift > 34 ||
		(args->offset + args->size > (ULLONG_MAX >> args->page_shift)))
		return -EINVAL;

	npages = kvmppc_tce_pages(size);
	ret = account_locked_vm(mm, kvmppc_stt_pages(npages), true);
	if (ret)
		return ret;

	ret = -ENOMEM;
	stt = kzalloc(sizeof(*stt) + npages * sizeof(struct page *),
		      GFP_KERNEL);
	if (!stt)
		goto fail_acct;

	stt->liobn = args->liobn;
	stt->page_shift = args->page_shift;
	stt->offset = args->offset;
	stt->size = size;
	stt->kvm = kvm;
	mutex_init(&stt->alloc_lock);
	INIT_LIST_HEAD_RCU(&stt->iommu_tables);

	mutex_lock(&kvm->lock);

	/* Check this LIOBN hasn't been previously allocated */
	ret = 0;
	list_for_each_entry(siter, &kvm->arch.spapr_tce_tables, list) {
		if (siter->liobn == args->liobn) {
			ret = -EBUSY;
			break;
		}
	}

	kvm_get_kvm(kvm);
	if (!ret)
		ret = anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
				       stt, O_RDWR | O_CLOEXEC);

	if (ret >= 0)
		list_add_rcu(&stt->list, &kvm->arch.spapr_tce_tables);
	else
		kvm_put_kvm_no_destroy(kvm);

	mutex_unlock(&kvm->lock);

	if (ret >= 0)
		return ret;

	kfree(stt);
 fail_acct:
	account_locked_vm(mm, kvmppc_stt_pages(npages), false);
	return ret;
}

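/* Converts a guest physical address in a TCE to a userspace address */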
static long kvmppc_tce_to_ua(struct kvm *kvm, unsigned long tce,
		unsigned long *ua)
{
	unsigned long gfn = tce >> PAGE_SHIFT;
	struct kvm_memory_slot *memslot;

	memslot = search_memslots(kvm_memslots(kvm), gfn);
	if (!memslot)
		return -EINVAL;

	*ua = __gfn_to_hva_memslot(memslot, gfn) |
		(tce & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));

	return 0;
}

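/*
 * Validates a guest TCE before it is written to the table: the GPA must be
 * aligned and, for every attached hardware table, the backing memory must
 * have been preregistered. Returns an hcall status code.
 */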
static long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt,
		unsigned long tce)
{
	unsigned long gpa = tce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
	enum dma_data_direction dir = iommu_tce_direction(tce);
	struct kvmppc_spapr_tce_iommu_table *stit;
	unsigned long ua = 0;

	/* Allow userspace to poison TCE table */
	if (dir == DMA_NONE)
		return H_SUCCESS;

	if (iommu_tce_check_gpa(stt->page_shift, gpa))
		return H_TOO_HARD;

	if (kvmppc_tce_to_ua(stt->kvm, tce, &ua))
		return H_TOO_HARD;

	list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
		unsigned long hpa = 0;
		struct mm_iommu_table_group_mem_t *mem;
		long shift = stit->tbl->it_page_shift;

		mem = mm_iommu_lookup(stt->kvm->mm, ua, 1ULL << shift);
		if (!mem)
			return H_TOO_HARD;

		if (mm_iommu_ua_to_hpa(mem, ua, shift, &hpa))
			return H_TOO_HARD;
	}

	return H_SUCCESS;
}

/*
 * Handles TCE requests for emulated devices.
 * Puts guest TCE values into the table and expects user space to convert them.
 * Cannot fail, so kvmppc_tce_validate must be called before it.
 */
static void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt,
		unsigned long idx, unsigned long tce)
{
	struct page *page;
	u64 *tbl;
	unsigned long sttpage;

	idx -= stt->offset;
	sttpage = idx / TCES_PER_PAGE;
	page = stt->pages[sttpage];

	if (!page) {
		/* We allow any TCE, not just with read|write permissions */
		if (!tce)
			return;

		page = kvm_spapr_get_tce_page(stt, sttpage);
		if (!page)
			return;
	}
	tbl = page_to_virt(page);

	tbl[idx % TCES_PER_PAGE] = tce;
}

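/* Clears a hardware TCE by replacing it with an empty (DMA_NONE) entry */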
static void kvmppc_clear_tce(struct mm_struct *mm, struct iommu_table *tbl,
		unsigned long entry)
{
	unsigned long hpa = 0;
	enum dma_data_direction dir = DMA_NONE;

	iommu_tce_xchg_no_kill(mm, tbl, entry, &hpa, &dir);
}

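/*
 * Drops the "mapped" reference on the preregistered memory backing the
 * userspace address recorded for @entry, and clears that record.
 */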
static long kvmppc_tce_iommu_mapped_dec(struct kvm *kvm,
		struct iommu_table *tbl, unsigned long entry)
{
	struct mm_iommu_table_group_mem_t *mem = NULL;
	const unsigned long pgsize = 1ULL << tbl->it_page_shift;
	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);

	if (!pua)
		return H_SUCCESS;

	mem = mm_iommu_lookup(kvm->mm, be64_to_cpu(*pua), pgsize);
	if (!mem)
		return H_TOO_HARD;

	mm_iommu_mapped_dec(mem);

	*pua = cpu_to_be64(0);

	return H_SUCCESS;
}

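/* Unmaps a single hardware TCE and releases the backing memory reference */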
static long kvmppc_tce_iommu_do_unmap(struct kvm *kvm,
		struct iommu_table *tbl, unsigned long entry)
{
	enum dma_data_direction dir = DMA_NONE;
	unsigned long hpa = 0;
	long ret;

	if (WARN_ON_ONCE(iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa,
					&dir)))
		return H_TOO_HARD;

	if (dir == DMA_NONE)
		return H_SUCCESS;

	ret = kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry);
	if (ret != H_SUCCESS)
		iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa, &dir);

	return ret;
}

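/*
 * Unmaps every hardware TCE covered by one guest TCE entry when the guest
 * page size is bigger than the hardware IOMMU page size.
 */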
static long kvmppc_tce_iommu_unmap(struct kvm *kvm,
		struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
		unsigned long entry)
{
	unsigned long i, ret = H_SUCCESS;
	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
	unsigned long io_entry = entry * subpages;

	for (i = 0; i < subpages; ++i) {
		ret = kvmppc_tce_iommu_do_unmap(kvm, tbl, io_entry + i);
		if (ret != H_SUCCESS)
			break;
	}

	return ret;
}

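/*
 * Maps a single hardware TCE to the host page backing userspace address @ua,
 * which must have been preregistered via the mm_iommu_* API.
 */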
long kvmppc_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
		unsigned long entry, unsigned long ua,
		enum dma_data_direction dir)
{
	long ret;
	unsigned long hpa;
	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
	struct mm_iommu_table_group_mem_t *mem;

	if (!pua)
		/* it_userspace allocation might be delayed */
		return H_TOO_HARD;

	mem = mm_iommu_lookup(kvm->mm, ua, 1ULL << tbl->it_page_shift);
	if (!mem)
		/* This only handles v2 IOMMU type, v1 is handled via ioctl() */
		return H_TOO_HARD;

	if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, tbl->it_page_shift, &hpa)))
		return H_TOO_HARD;

	if (mm_iommu_mapped_inc(mem))
		return H_TOO_HARD;

	ret = iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa, &dir);
	if (WARN_ON_ONCE(ret)) {
		mm_iommu_mapped_dec(mem);
		return H_TOO_HARD;
	}

	if (dir != DMA_NONE)
		kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry);

	*pua = cpu_to_be64(ua);

	return 0;
}

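/* Maps every hardware TCE covered by one guest TCE entry */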
static long kvmppc_tce_iommu_map(struct kvm *kvm,
		struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
		unsigned long entry, unsigned long ua,
		enum dma_data_direction dir)
{
	unsigned long i, pgoff, ret = H_SUCCESS;
	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
	unsigned long io_entry = entry * subpages;

	for (i = 0, pgoff = 0; i < subpages;
			++i, pgoff += IOMMU_PAGE_SIZE(tbl)) {

		ret = kvmppc_tce_iommu_do_map(kvm, tbl,
				io_entry + i, ua + pgoff, dir);
		if (ret != H_SUCCESS)
			break;
	}

	return ret;
}

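/*
 * Handler for the H_PUT_TCE hypercall: validates and writes one guest TCE
 * and updates every hardware iommu table attached to the LIOBN.
 */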
long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
		      unsigned long ioba, unsigned long tce)
{
	struct kvmppc_spapr_tce_table *stt;
	long ret, idx;
	struct kvmppc_spapr_tce_iommu_table *stit;
	unsigned long entry, ua = 0;
	enum dma_data_direction dir;

	/* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
	/* 	    liobn, ioba, tce); */

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, 1);
	if (ret != H_SUCCESS)
		return ret;

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvmppc_tce_validate(stt, tce);
	if (ret != H_SUCCESS)
		goto unlock_exit;

	dir = iommu_tce_direction(tce);

	if ((dir != DMA_NONE) && kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) {
		ret = H_PARAMETER;
		goto unlock_exit;
	}

	entry = ioba >> stt->page_shift;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		if (dir == DMA_NONE)
			ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt,
					stit->tbl, entry);
		else
			ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, stit->tbl,
					entry, ua, dir);

		iommu_tce_kill(stit->tbl, entry, 1);

		if (ret != H_SUCCESS) {
			kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl, entry);
			goto unlock_exit;
		}
	}

	kvmppc_tce_put(stt, entry, tce);

unlock_exit:
	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_put_tce);

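/*
 * Handler for the H_PUT_TCE_INDIRECT hypercall: reads a list of up to 512
 * TCEs from guest memory and applies each of them as H_PUT_TCE would.
 */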
long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_list, unsigned long npages)
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret = H_SUCCESS, idx;
	unsigned long entry, ua = 0;
	u64 __user *tces;
	u64 tce;
	struct kvmppc_spapr_tce_iommu_table *stit;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	entry = ioba >> stt->page_shift;
	/*
	 * The SPAPR spec says that the maximum size of the list is 512 TCEs,
	 * so the whole table fits in a 4K page.
	 */
	if (npages > 512)
		return H_PARAMETER;

	if (tce_list & (SZ_4K - 1))
		return H_PARAMETER;

	ret = kvmppc_ioba_validate(stt, ioba, npages);
	if (ret != H_SUCCESS)
		return ret;

	idx = srcu_read_lock(&vcpu->kvm->srcu);
	if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua)) {
		ret = H_TOO_HARD;
		goto unlock_exit;
	}
	tces = (u64 __user *) ua;

	for (i = 0; i < npages; ++i) {
		if (get_user(tce, tces + i)) {
			ret = H_TOO_HARD;
			goto unlock_exit;
		}
		tce = be64_to_cpu(tce);

		ret = kvmppc_tce_validate(stt, tce);
		if (ret != H_SUCCESS)
			goto unlock_exit;
	}

	for (i = 0; i < npages; ++i) {
		/*
		 * This looks unsafe, because we validate, then regrab
		 * the TCE from userspace which could have been changed by
		 * another thread.
		 *
		 * But it actually is safe, because the relevant checks will be
		 * re-executed in the following code.  If userspace tries to
		 * change this dodgily it will result in a messier failure mode
		 * but won't threaten the host.
		 */
		if (get_user(tce, tces + i)) {
			ret = H_TOO_HARD;
			goto invalidate_exit;
		}
		tce = be64_to_cpu(tce);

		if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) {
			ret = H_PARAMETER;
			goto invalidate_exit;
		}

		list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
			ret = kvmppc_tce_iommu_map(vcpu->kvm, stt,
					stit->tbl, entry + i, ua,
					iommu_tce_direction(tce));

			if (ret != H_SUCCESS) {
				/* Clear the hardware entry we just failed to set */
				kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl,
						entry + i);
				goto invalidate_exit;
			}
		}

		kvmppc_tce_put(stt, entry + i, tce);
	}

invalidate_exit:
	list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
		iommu_tce_kill(stit->tbl, entry, npages);

unlock_exit:
	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_put_tce_indirect);

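/*
 * Handler for the H_STUFF_TCE hypercall: writes the same TCE value to
 * @npages consecutive entries, clearing the corresponding hardware TCEs.
 */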
long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_value, unsigned long npages)
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret;
	struct kvmppc_spapr_tce_iommu_table *stit;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, npages);
	if (ret != H_SUCCESS)
		return ret;

	/* Check the permission bits only, so userspace can still poison the TCE table for debug */
	if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ))
		return H_PARAMETER;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		unsigned long entry = ioba >> stt->page_shift;

		for (i = 0; i < npages; ++i) {
			ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt,
					stit->tbl, entry + i);

			if (ret == H_SUCCESS)
				continue;

			if (ret == H_TOO_HARD)
				goto invalidate_exit;

			WARN_ON_ONCE(1);
			kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl, entry + i);
		}
	}

	for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
		kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);

invalidate_exit:
	list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
		iommu_tce_kill(stit->tbl, ioba >> stt->page_shift, npages);

	return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_stuff_tce);
746