xref: /openbmc/linux/arch/powerpc/kvm/book3s_64_vio.c (revision 22fc4c4c9fd60427bcda00878cee94e7622cfa7a)
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com>
 * Copyright 2016 Alexey Kardashevskiy, IBM Corporation <aik@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/hugetlb.h>
#include <linux/list.h>
#include <linux/anon_inodes.h>
#include <linux/iommu.h>
#include <linux/file.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/kvm_host.h>
#include <asm/udbg.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/mmu_context.h>

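/*
 * Number of system pages needed to hold a guest-visible TCE table with
 * the given number of IOMMU entries (one u64 per entry).
 */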
static unsigned long kvmppc_tce_pages(unsigned long iommu_pages)
{
	return ALIGN(iommu_pages * sizeof(u64), PAGE_SIZE) / PAGE_SIZE;
}

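/*
 * Total pages to charge against the memory lock limit for a table backed by
 * tce_pages pages: the TCE pages themselves plus the pages holding the
 * kvmppc_spapr_tce_table descriptor and its page pointer array.
 */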
static unsigned long kvmppc_stt_pages(unsigned long tce_pages)
{
	unsigned long stt_bytes = sizeof(struct kvmppc_spapr_tce_table) +
			(tce_pages * sizeof(struct page *));

	return tce_pages + ALIGN(stt_bytes, PAGE_SIZE) / PAGE_SIZE;
}

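/*
 * Charge (inc == true) or uncharge (inc == false) stt_pages pages against
 * the current process's RLIMIT_MEMLOCK allowance.
 */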
static long kvmppc_account_memlimit(unsigned long stt_pages, bool inc)
{
	long ret = 0;

	if (!current || !current->mm)
		return ret; /* process exited */

	down_write(&current->mm->mmap_sem);

	if (inc) {
		unsigned long locked, lock_limit;

		locked = current->mm->locked_vm + stt_pages;
		lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
			ret = -ENOMEM;
		else
			current->mm->locked_vm += stt_pages;
	} else {
		if (WARN_ON_ONCE(stt_pages > current->mm->locked_vm))
			stt_pages = current->mm->locked_vm;

		current->mm->locked_vm -= stt_pages;
	}

	pr_debug("[%d] RLIMIT_MEMLOCK KVM %c%ld %ld/%ld%s\n", current->pid,
			inc ? '+' : '-',
			stt_pages << PAGE_SHIFT,
			current->mm->locked_vm << PAGE_SHIFT,
			rlimit(RLIMIT_MEMLOCK),
			ret ? " - exceeded" : "");

	up_write(&current->mm->mmap_sem);

	return ret;
}

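/* RCU callback: drop the hardware table reference and free the wrapper. */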
static void kvm_spapr_tce_iommu_table_free(struct rcu_head *head)
{
	struct kvmppc_spapr_tce_iommu_table *stit = container_of(head,
			struct kvmppc_spapr_tce_iommu_table, rcu);

	iommu_tce_table_put(stit->tbl);

	kfree(stit);
}

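/*
 * kref release: unlink the table from its LIOBN list and free it after an
 * RCU grace period.
 */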
static void kvm_spapr_tce_liobn_put(struct kref *kref)
{
	struct kvmppc_spapr_tce_iommu_table *stit = container_of(kref,
			struct kvmppc_spapr_tce_iommu_table, kref);

	list_del_rcu(&stit->next);

	call_rcu(&stit->rcu, kvm_spapr_tce_iommu_table_free);
}

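/*
 * Called when an IOMMU group is detached from the VM: drop the reference
 * this VM holds on a hardware table belonging to the group.
 */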
extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
		struct iommu_group *grp)
{
	int i;
	struct kvmppc_spapr_tce_table *stt;
	struct kvmppc_spapr_tce_iommu_table *stit, *tmp;
	struct iommu_table_group *table_group = NULL;

	list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {

		table_group = iommu_group_get_iommudata(grp);
		if (WARN_ON(!table_group))
			continue;

		list_for_each_entry_safe(stit, tmp, &stt->iommu_tables, next) {
			for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
				if (table_group->tables[i] != stit->tbl)
					continue;

				kref_put(&stit->kref, kvm_spapr_tce_liobn_put);
				return;
			}
		}
	}
}

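/*
 * Attach an IOMMU group's hardware table to the guest TCE table identified
 * by tablefd so that TCE hypercalls update the hardware table as well.
 */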
extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
		struct iommu_group *grp)
{
	struct kvmppc_spapr_tce_table *stt = NULL;
	bool found = false;
	struct iommu_table *tbl = NULL;
	struct iommu_table_group *table_group;
	long i;
	struct kvmppc_spapr_tce_iommu_table *stit;
	struct fd f;

	f = fdget(tablefd);
	if (!f.file)
		return -EBADF;

	list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {
		if (stt == f.file->private_data) {
			found = true;
			break;
		}
	}

	fdput(f);

	if (!found)
		return -EINVAL;

	table_group = iommu_group_get_iommudata(grp);
	if (WARN_ON(!table_group))
		return -EFAULT;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbltmp = table_group->tables[i];

		if (!tbltmp)
			continue;
		/* Make sure hardware table parameters are compatible */
		if ((tbltmp->it_page_shift <= stt->page_shift) &&
				(tbltmp->it_offset << tbltmp->it_page_shift ==
				 stt->offset << stt->page_shift) &&
				(tbltmp->it_size << tbltmp->it_page_shift >=
				 stt->size << stt->page_shift)) {
			/*
			 * Reference the table to avoid races with
			 * add/remove DMA windows.
			 */
			tbl = iommu_tce_table_get(tbltmp);
			break;
		}
	}
	if (!tbl)
		return -EINVAL;

	list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
		if (tbl != stit->tbl)
			continue;

		if (!kref_get_unless_zero(&stit->kref)) {
			/* stit is being destroyed */
			iommu_tce_table_put(tbl);
			return -ENOTTY;
		}
		/*
		 * The table is already known to this KVM, we just increased
		 * its KVM reference counter and can return.
		 */
		return 0;
	}

	stit = kzalloc(sizeof(*stit), GFP_KERNEL);
	if (!stit) {
		iommu_tce_table_put(tbl);
		return -ENOMEM;
	}

	stit->tbl = tbl;
	kref_init(&stit->kref);

	list_add_rcu(&stit->next, &stt->iommu_tables);

	return 0;
}

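/* RCU callback: free the pages backing the guest view of the table. */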
static void release_spapr_tce_table(struct rcu_head *head)
{
	struct kvmppc_spapr_tce_table *stt = container_of(head,
			struct kvmppc_spapr_tce_table, rcu);
	unsigned long i, npages = kvmppc_tce_pages(stt->size);

	for (i = 0; i < npages; i++)
		__free_page(stt->pages[i]);

	kfree(stt);
}

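/* Page fault handler for mmap()ed TCE table fds: return the backing page. */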
static vm_fault_t kvm_spapr_tce_fault(struct vm_fault *vmf)
{
	struct kvmppc_spapr_tce_table *stt = vmf->vma->vm_file->private_data;
	struct page *page;

	if (vmf->pgoff >= kvmppc_tce_pages(stt->size))
		return VM_FAULT_SIGBUS;

	page = stt->pages[vmf->pgoff];
	get_page(page);
	vmf->page = page;
	return 0;
}

static const struct vm_operations_struct kvm_spapr_tce_vm_ops = {
	.fault = kvm_spapr_tce_fault,
};

static int kvm_spapr_tce_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_spapr_tce_vm_ops;
	return 0;
}

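/*
 * fd release: unlist the table, drop all hardware table references,
 * unaccount the locked memory and free the table after a grace period.
 */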
static int kvm_spapr_tce_release(struct inode *inode, struct file *filp)
{
	struct kvmppc_spapr_tce_table *stt = filp->private_data;
	struct kvmppc_spapr_tce_iommu_table *stit, *tmp;
	struct kvm *kvm = stt->kvm;

	mutex_lock(&kvm->lock);
	list_del_rcu(&stt->list);
	mutex_unlock(&kvm->lock);

	list_for_each_entry_safe(stit, tmp, &stt->iommu_tables, next) {
		WARN_ON(!kref_read(&stit->kref));
		while (1) {
			if (kref_put(&stit->kref, kvm_spapr_tce_liobn_put))
				break;
		}
	}

	kvm_put_kvm(stt->kvm);

	kvmppc_account_memlimit(
		kvmppc_stt_pages(kvmppc_tce_pages(stt->size)), false);
	call_rcu(&stt->rcu, release_spapr_tce_table);

	return 0;
}

static const struct file_operations kvm_spapr_tce_fops = {
	.mmap           = kvm_spapr_tce_mmap,
	.release	= kvm_spapr_tce_release,
};

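/*
 * Handler for KVM_CREATE_SPAPR_TCE_64: allocate a guest-visible TCE table
 * for the requested LIOBN and return a file descriptor for it.
 */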
long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
				   struct kvm_create_spapr_tce_64 *args)
{
	struct kvmppc_spapr_tce_table *stt = NULL;
	struct kvmppc_spapr_tce_table *siter;
	unsigned long npages, size = args->size;
	int ret = -ENOMEM;
	int i;

	if (!args->size || args->page_shift < 12 || args->page_shift > 34 ||
		(args->offset + args->size > (ULLONG_MAX >> args->page_shift)))
		return -EINVAL;

	npages = kvmppc_tce_pages(size);
	ret = kvmppc_account_memlimit(kvmppc_stt_pages(npages), true);
	if (ret)
		return ret;

	ret = -ENOMEM;
	stt = kzalloc(sizeof(*stt) + npages * sizeof(struct page *),
		      GFP_KERNEL);
	if (!stt)
		goto fail_acct;

	stt->liobn = args->liobn;
	stt->page_shift = args->page_shift;
	stt->offset = args->offset;
	stt->size = size;
	stt->kvm = kvm;
	INIT_LIST_HEAD_RCU(&stt->iommu_tables);

	for (i = 0; i < npages; i++) {
		stt->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!stt->pages[i])
			goto fail;
	}

	mutex_lock(&kvm->lock);

	/* Check this LIOBN hasn't been previously allocated */
	ret = 0;
	list_for_each_entry(siter, &kvm->arch.spapr_tce_tables, list) {
		if (siter->liobn == args->liobn) {
			ret = -EBUSY;
			break;
		}
	}

	if (!ret)
		ret = anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
				       stt, O_RDWR | O_CLOEXEC);

	if (ret >= 0) {
		list_add_rcu(&stt->list, &kvm->arch.spapr_tce_tables);
		kvm_get_kvm(kvm);
	}

	mutex_unlock(&kvm->lock);

	if (ret >= 0)
		return ret;

 fail:
	for (i = 0; i < npages; i++)
		if (stt->pages[i])
			__free_page(stt->pages[i]);

	kfree(stt);
 fail_acct:
	kvmppc_account_memlimit(kvmppc_stt_pages(npages), false);
	return ret;
}

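/*
 * Validate a guest TCE before it is stored: check the guest physical address
 * against the table page size, translate it to a userspace address and make
 * sure every attached hardware table can map it from preregistered memory.
 */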
static long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt,
		unsigned long tce)
{
	unsigned long gpa = tce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
	enum dma_data_direction dir = iommu_tce_direction(tce);
	struct kvmppc_spapr_tce_iommu_table *stit;
	unsigned long ua = 0;

	/* Allow userspace to poison TCE table */
	if (dir == DMA_NONE)
		return H_SUCCESS;

	if (iommu_tce_check_gpa(stt->page_shift, gpa))
		return H_TOO_HARD;

	if (kvmppc_tce_to_ua(stt->kvm, tce, &ua, NULL))
		return H_TOO_HARD;

	list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
		unsigned long hpa = 0;
		struct mm_iommu_table_group_mem_t *mem;
		long shift = stit->tbl->it_page_shift;

		mem = mm_iommu_lookup(stt->kvm->mm, ua, 1ULL << shift);
		if (!mem)
			return H_TOO_HARD;

		if (mm_iommu_ua_to_hpa(mem, ua, shift, &hpa))
			return H_TOO_HARD;
	}

	return H_SUCCESS;
}

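/* Force a hardware table entry back to an empty (DMA_NONE) state. */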
static void kvmppc_clear_tce(struct mm_struct *mm, struct iommu_table *tbl,
		unsigned long entry)
{
	unsigned long hpa = 0;
	enum dma_data_direction dir = DMA_NONE;

	iommu_tce_xchg(mm, tbl, entry, &hpa, &dir);
}

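/*
 * Drop the "mapped" count of the preregistered memory region recorded for
 * this hardware table entry and clear the recorded userspace address.
 */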
static long kvmppc_tce_iommu_mapped_dec(struct kvm *kvm,
		struct iommu_table *tbl, unsigned long entry)
{
	struct mm_iommu_table_group_mem_t *mem = NULL;
	const unsigned long pgsize = 1ULL << tbl->it_page_shift;
	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);

	if (!pua)
		return H_SUCCESS;

	mem = mm_iommu_lookup(kvm->mm, be64_to_cpu(*pua), pgsize);
	if (!mem)
		return H_TOO_HARD;

	mm_iommu_mapped_dec(mem);

	*pua = cpu_to_be64(0);

	return H_SUCCESS;
}

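/*
 * Unmap one hardware table entry and release the reference on the
 * preregistered memory backing it.
 */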
static long kvmppc_tce_iommu_do_unmap(struct kvm *kvm,
		struct iommu_table *tbl, unsigned long entry)
{
	enum dma_data_direction dir = DMA_NONE;
	unsigned long hpa = 0;
	long ret;

	if (WARN_ON_ONCE(iommu_tce_xchg(kvm->mm, tbl, entry, &hpa, &dir)))
		return H_TOO_HARD;

	if (dir == DMA_NONE)
		return H_SUCCESS;

	ret = kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry);
	if (ret != H_SUCCESS)
		iommu_tce_xchg(kvm->mm, tbl, entry, &hpa, &dir);

	return ret;
}

static long kvmppc_tce_iommu_unmap(struct kvm *kvm,
		struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
		unsigned long entry)
{
	unsigned long i, ret = H_SUCCESS;
	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
	unsigned long io_entry = entry * subpages;

	for (i = 0; i < subpages; ++i) {
		ret = kvmppc_tce_iommu_do_unmap(kvm, tbl, io_entry + i);
		if (ret != H_SUCCESS)
			break;
	}

	return ret;
}

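/*
 * Map one hardware table entry: translate the userspace address to a host
 * physical address via preregistered memory, install it in the table and
 * remember the userspace address for a later unmap.
 */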
long kvmppc_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
		unsigned long entry, unsigned long ua,
		enum dma_data_direction dir)
{
	long ret;
	unsigned long hpa;
	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
	struct mm_iommu_table_group_mem_t *mem;

	if (!pua)
		/* it_userspace allocation might be delayed */
		return H_TOO_HARD;

	mem = mm_iommu_lookup(kvm->mm, ua, 1ULL << tbl->it_page_shift);
	if (!mem)
		/* This only handles v2 IOMMU type, v1 is handled via ioctl() */
		return H_TOO_HARD;

	if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, tbl->it_page_shift, &hpa)))
		return H_TOO_HARD;

	if (mm_iommu_mapped_inc(mem))
		return H_TOO_HARD;

	ret = iommu_tce_xchg(kvm->mm, tbl, entry, &hpa, &dir);
	if (WARN_ON_ONCE(ret)) {
		mm_iommu_mapped_dec(mem);
		return H_TOO_HARD;
	}

	if (dir != DMA_NONE)
		kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry);

	*pua = cpu_to_be64(ua);

	return 0;
}

static long kvmppc_tce_iommu_map(struct kvm *kvm,
		struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
		unsigned long entry, unsigned long ua,
		enum dma_data_direction dir)
{
	unsigned long i, pgoff, ret = H_SUCCESS;
	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
	unsigned long io_entry = entry * subpages;

	for (i = 0, pgoff = 0; i < subpages;
			++i, pgoff += IOMMU_PAGE_SIZE(tbl)) {

		ret = kvmppc_tce_iommu_do_map(kvm, tbl,
				io_entry + i, ua + pgoff, dir);
		if (ret != H_SUCCESS)
			break;
	}

	return ret;
}

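/*
 * H_PUT_TCE handler (virtual mode): validate the TCE, update every attached
 * hardware table, then update the guest-visible table.
 */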
long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
		      unsigned long ioba, unsigned long tce)
{
	struct kvmppc_spapr_tce_table *stt;
	long ret, idx;
	struct kvmppc_spapr_tce_iommu_table *stit;
	unsigned long entry, ua = 0;
	enum dma_data_direction dir;

	/* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
	/* 	    liobn, ioba, tce); */

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, 1);
	if (ret != H_SUCCESS)
		return ret;

	ret = kvmppc_tce_validate(stt, tce);
	if (ret != H_SUCCESS)
		return ret;

	dir = iommu_tce_direction(tce);

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	if ((dir != DMA_NONE) && kvmppc_tce_to_ua(vcpu->kvm, tce, &ua, NULL)) {
		ret = H_PARAMETER;
		goto unlock_exit;
	}

	entry = ioba >> stt->page_shift;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		if (dir == DMA_NONE)
			ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt,
					stit->tbl, entry);
		else
			ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, stit->tbl,
					entry, ua, dir);

		if (ret != H_SUCCESS) {
			kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl, entry);
			goto unlock_exit;
		}
	}

	kvmppc_tce_put(stt, entry, tce);

unlock_exit:
	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_put_tce);

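/*
 * H_PUT_TCE_INDIRECT handler (virtual mode): read a list of up to 512 TCEs
 * from guest memory and apply each of them as H_PUT_TCE would.
 */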
long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_list, unsigned long npages)
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret = H_SUCCESS, idx;
	unsigned long entry, ua = 0;
	u64 __user *tces;
	u64 tce;
	struct kvmppc_spapr_tce_iommu_table *stit;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	entry = ioba >> stt->page_shift;
	/*
	 * The SPAPR spec says that the maximum size of the list is 512 TCEs,
	 * so the whole list fits within a 4K page.
	 */
	if (npages > 512)
		return H_PARAMETER;

	if (tce_list & (SZ_4K - 1))
		return H_PARAMETER;

	ret = kvmppc_ioba_validate(stt, ioba, npages);
	if (ret != H_SUCCESS)
		return ret;

	idx = srcu_read_lock(&vcpu->kvm->srcu);
	if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua, NULL)) {
		ret = H_TOO_HARD;
		goto unlock_exit;
	}
	tces = (u64 __user *) ua;

	for (i = 0; i < npages; ++i) {
		if (get_user(tce, tces + i)) {
			ret = H_TOO_HARD;
			goto unlock_exit;
		}
		tce = be64_to_cpu(tce);

		ret = kvmppc_tce_validate(stt, tce);
		if (ret != H_SUCCESS)
			goto unlock_exit;
	}

	for (i = 0; i < npages; ++i) {
		/*
		 * This looks unsafe, because we validate, then regrab
		 * the TCE from userspace which could have been changed by
		 * another thread.
		 *
		 * But it actually is safe, because the relevant checks will be
		 * re-executed in the following code.  If userspace tries to
		 * change this dodgily it will result in a messier failure mode
		 * but won't threaten the host.
		 */
		if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua, NULL)) {
			/* Do not return with the SRCU read lock still held */
			ret = H_PARAMETER;
			goto unlock_exit;
		}

		list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
			ret = kvmppc_tce_iommu_map(vcpu->kvm, stt,
					stit->tbl, entry + i, ua,
					iommu_tce_direction(tce));

			if (ret != H_SUCCESS) {
				kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl,
						entry);
				goto unlock_exit;
			}
		}

		kvmppc_tce_put(stt, entry + i, tce);
	}

unlock_exit:
	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_put_tce_indirect);

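/*
 * H_STUFF_TCE handler (virtual mode): write tce_value (which must carry no
 * permission bits) to npages consecutive entries, unmapping them from every
 * attached hardware table.
 */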
long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_value, unsigned long npages)
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret;
	struct kvmppc_spapr_tce_iommu_table *stit;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, npages);
	if (ret != H_SUCCESS)
		return ret;

	/* Check the permission bits only, to allow userspace to poison TCEs for debugging */
	if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ))
		return H_PARAMETER;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		unsigned long entry = ioba >> stt->page_shift;

		for (i = 0; i < npages; ++i) {
			ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt,
					stit->tbl, entry + i);

			if (ret == H_SUCCESS)
				continue;

			if (ret == H_TOO_HARD)
				return ret;

			WARN_ON_ONCE(1);
			kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl, entry);
		}
	}

	for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
		kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_stuff_tce);
719