/*
 * VFIO: IOMMU DMA mapping support for TCE on POWER
 *
 * Copyright (C) 2013 IBM Corp.  All rights reserved.
 *     Author: Alexey Kardashevskiy <aik@ozlabs.ru>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Derived from original vfio_iommu_type1.c:
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/err.h>
#include <linux/vfio.h>
#include <linux/vmalloc.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/mmu_context.h>

#define DRIVER_VERSION  "0.1"
#define DRIVER_AUTHOR   "aik@ozlabs.ru"
#define DRIVER_DESC     "VFIO IOMMU SPAPR TCE"

static void tce_iommu_detach_group(void *iommu_data,
		struct iommu_group *iommu_group);

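/*
 * Charge @npages to the memory locked by @mm against RLIMIT_MEMLOCK;
 * fails with -ENOMEM if the limit would be exceeded, unless the task
 * has CAP_IPC_LOCK.
 */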
static long try_increment_locked_vm(struct mm_struct *mm, long npages)
{
	long ret = 0, locked, lock_limit;

	if (WARN_ON_ONCE(!mm))
		return -EPERM;

	if (!npages)
		return 0;

	down_write(&mm->mmap_sem);
	locked = mm->locked_vm + npages;
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	if (locked > lock_limit && !capable(CAP_IPC_LOCK))
		ret = -ENOMEM;
	else
		mm->locked_vm += npages;

	pr_debug("[%d] RLIMIT_MEMLOCK +%ld %ld/%ld%s\n", current->pid,
			npages << PAGE_SHIFT,
			mm->locked_vm << PAGE_SHIFT,
			rlimit(RLIMIT_MEMLOCK),
			ret ? " - exceeded" : "");

	up_write(&mm->mmap_sem);

	return ret;
}

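/* Undo a previous locked_vm charge; warns and clamps on underflow. */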
static void decrement_locked_vm(struct mm_struct *mm, long npages)
{
	if (!mm || !npages)
		return;

	down_write(&mm->mmap_sem);
	if (WARN_ON_ONCE(npages > mm->locked_vm))
		npages = mm->locked_vm;
	mm->locked_vm -= npages;
	pr_debug("[%d] RLIMIT_MEMLOCK -%ld %ld/%ld\n", current->pid,
			npages << PAGE_SHIFT,
			mm->locked_vm << PAGE_SHIFT,
			rlimit(RLIMIT_MEMLOCK));
	up_write(&mm->mmap_sem);
}

/*
 * VFIO IOMMU fd for SPAPR_TCE IOMMU implementation
 *
 * This code handles mapping and unmapping of user data buffers
 * into DMA'ble space using the IOMMU
 */
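
/*
 * Illustrative (not exhaustive) userspace flow for the v1 interface,
 * assuming the group is already bound to a VFIO driver and the iova/size
 * values come from VFIO_IOMMU_SPAPR_TCE_GET_INFO; error handling omitted:
 *
 *	container = open("/dev/vfio/vfio", O_RDWR);
 *	group = open("/dev/vfio/<group>", O_RDWR);
 *	ioctl(group, VFIO_GROUP_SET_CONTAINER, &container);
 *	ioctl(container, VFIO_SET_IOMMU, VFIO_SPAPR_TCE_IOMMU);
 *	ioctl(container, VFIO_IOMMU_ENABLE);
 *
 *	struct vfio_iommu_type1_dma_map map = {
 *		.argsz = sizeof(map),
 *		.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
 *		.vaddr = (__u64)(uintptr_t)buf,
 *		.iova = dma32_window_start,
 *		.size = size,
 *	};
 *	ioctl(container, VFIO_IOMMU_MAP_DMA, &map);
 *
 * Both vaddr and size must be aligned to the IOMMU page size of the window.
 */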

struct tce_iommu_group {
	struct list_head next;
	struct iommu_group *grp;
};

/*
 * A container needs to remember which preregistered regions it has
 * referenced so it can do proper cleanup at the userspace process exit.
 */
struct tce_iommu_prereg {
	struct list_head next;
	struct mm_iommu_table_group_mem_t *mem;
};

/*
 * The container descriptor. A container may have multiple IOMMU groups
 * attached (see group_list); it is created without any group as the VFIO
 * API does not supply one at the moment of initialization.
 */
struct tce_container {
	struct mutex lock;
	bool enabled;
	bool v2;
	bool def_window_pending;
	unsigned long locked_pages;
	struct mm_struct *mm;
	struct iommu_table *tables[IOMMU_TABLE_GROUP_MAX_TABLES];
	struct list_head group_list;
	struct list_head prereg_list;
};

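/*
 * Bind the container to the calling process' mm on first use and take
 * a reference on it; calls from a different mm fail with -EPERM.
 */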
static long tce_iommu_mm_set(struct tce_container *container)
{
	if (container->mm) {
		if (container->mm == current->mm)
			return 0;
		return -EPERM;
	}
	BUG_ON(!current->mm);
	container->mm = current->mm;
	atomic_inc(&container->mm->mm_count);

	return 0;
}

static long tce_iommu_prereg_free(struct tce_container *container,
		struct tce_iommu_prereg *tcemem)
{
	long ret;

	ret = mm_iommu_put(container->mm, tcemem->mem);
	if (ret)
		return ret;

	list_del(&tcemem->next);
	kfree(tcemem);

	return 0;
}

static long tce_iommu_unregister_pages(struct tce_container *container,
		__u64 vaddr, __u64 size)
{
	struct mm_iommu_table_group_mem_t *mem;
	struct tce_iommu_prereg *tcemem;
	bool found = false;

	if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK))
		return -EINVAL;

	mem = mm_iommu_find(container->mm, vaddr, size >> PAGE_SHIFT);
	if (!mem)
		return -ENOENT;

	list_for_each_entry(tcemem, &container->prereg_list, next) {
		if (tcemem->mem == mem) {
			found = true;
			break;
		}
	}

	if (!found)
		return -ENOENT;

	return tce_iommu_prereg_free(container, tcemem);
}

static long tce_iommu_register_pages(struct tce_container *container,
		__u64 vaddr, __u64 size)
{
	long ret = 0;
	struct mm_iommu_table_group_mem_t *mem = NULL;
	struct tce_iommu_prereg *tcemem;
	unsigned long entries = size >> PAGE_SHIFT;

	if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK) ||
			((vaddr + size) < vaddr))
		return -EINVAL;

	mem = mm_iommu_find(container->mm, vaddr, entries);
	if (mem) {
		list_for_each_entry(tcemem, &container->prereg_list, next) {
			if (tcemem->mem == mem)
				return -EBUSY;
		}
	}

	ret = mm_iommu_get(container->mm, vaddr, entries, &mem);
	if (ret)
		return ret;

	tcemem = kzalloc(sizeof(*tcemem), GFP_KERNEL);
	if (!tcemem) {
		mm_iommu_put(container->mm, mem);
		return -ENOMEM;
	}

	tcemem->mem = mem;
	list_add(&tcemem->next, &container->prereg_list);

	container->enabled = true;

	return 0;
}

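/*
 * Allocate the per-entry cache of userspace addresses which the v2
 * interface uses to find preregistered memory on unmap; the cache is
 * accounted as locked memory against the container's mm.
 */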
static long tce_iommu_userspace_view_alloc(struct iommu_table *tbl,
		struct mm_struct *mm)
{
	unsigned long cb = _ALIGN_UP(sizeof(tbl->it_userspace[0]) *
			tbl->it_size, PAGE_SIZE);
	unsigned long *uas;
	long ret;

	BUG_ON(tbl->it_userspace);

	ret = try_increment_locked_vm(mm, cb >> PAGE_SHIFT);
	if (ret)
		return ret;

	uas = vzalloc(cb);
	if (!uas) {
		decrement_locked_vm(mm, cb >> PAGE_SHIFT);
		return -ENOMEM;
	}
	tbl->it_userspace = uas;

	return 0;
}

static void tce_iommu_userspace_view_free(struct iommu_table *tbl,
		struct mm_struct *mm)
{
	unsigned long cb = _ALIGN_UP(sizeof(tbl->it_userspace[0]) *
			tbl->it_size, PAGE_SIZE);

	if (!tbl->it_userspace)
		return;

	vfree(tbl->it_userspace);
	tbl->it_userspace = NULL;
	decrement_locked_vm(mm, cb >> PAGE_SHIFT);
}

static bool tce_page_is_contained(struct page *page, unsigned page_shift)
{
	/*
	 * Check that the TCE table granularity is not bigger than the size of
	 * a page we just found. Otherwise the hardware can get access to
	 * a bigger memory chunk than it should.
	 */
	return (PAGE_SHIFT + compound_order(compound_head(page))) >= page_shift;
}

static inline bool tce_groups_attached(struct tce_container *container)
{
	return !list_empty(&container->group_list);
}

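/*
 * Find the container table whose DMA window covers @ioba; returns the table
 * index and sets *ptbl on success, or -1 if no window matches.
 */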
static long tce_iommu_find_table(struct tce_container *container,
		phys_addr_t ioba, struct iommu_table **ptbl)
{
	long i;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = container->tables[i];

		if (tbl) {
			unsigned long entry = ioba >> tbl->it_page_shift;
			unsigned long start = tbl->it_offset;
			unsigned long end = start + tbl->it_size;

			if ((start <= entry) && (entry < end)) {
				*ptbl = tbl;
				return i;
			}
		}
	}

	return -1;
}

static int tce_iommu_find_free_table(struct tce_container *container)
{
	int i;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		if (!container->tables[i])
			return i;
	}

	return -ENOSPC;
}

static int tce_iommu_enable(struct tce_container *container)
{
	int ret = 0;
	unsigned long locked;
	struct iommu_table_group *table_group;
	struct tce_iommu_group *tcegrp;

	if (container->enabled)
		return -EBUSY;

	/*
	 * When userspace pages are mapped into the IOMMU, they are effectively
	 * locked memory, so, theoretically, we need to update the accounting
	 * of locked pages on each map and unmap.  For powerpc, the map/unmap
	 * paths can be very hot, though, and the accounting would kill
	 * performance, especially since it would be difficult or impossible
	 * to handle the accounting in real mode only.
	 *
	 * To address that, rather than precisely accounting every page, we
	 * instead account for a worst case on locked memory when the iommu is
	 * enabled and disabled.  The worst case upper bound on locked memory
	 * is the size of the whole iommu window, which is usually relatively
	 * small (compared to total memory sizes) on POWER hardware.
	 *
	 * Also, we don't have a nice way to fail on H_PUT_TCE due to ulimits;
	 * that would effectively kill the guest at random points, so it is
	 * much better to enforce the limit based on the maximum that the
	 * guest can map.
	 *
	 * Unfortunately at the moment it counts whole tables, no matter how
	 * much memory the guest has, i.e. for a 4GB guest and 4 IOMMU groups,
	 * each with a 2GB DMA window, 8GB will be counted here. The reason for
	 * this is that we cannot tell here the amount of RAM used by the guest
	 * as this information is only available from KVM and VFIO is
	 * KVM agnostic.
	 *
	 * So we do not allow enabling a container without a group attached
	 * as there is no way to know how much we should increment
	 * the locked_vm counter.
	 */
	if (!tce_groups_attached(container))
		return -ENODEV;

	tcegrp = list_first_entry(&container->group_list,
			struct tce_iommu_group, next);
	table_group = iommu_group_get_iommudata(tcegrp->grp);
	if (!table_group)
		return -ENODEV;

	if (!table_group->tce32_size)
		return -EPERM;

	ret = tce_iommu_mm_set(container);
	if (ret)
		return ret;

	locked = table_group->tce32_size >> PAGE_SHIFT;
	ret = try_increment_locked_vm(container->mm, locked);
	if (ret)
		return ret;

	container->locked_pages = locked;

	container->enabled = true;

	return ret;
}

static void tce_iommu_disable(struct tce_container *container)
{
	if (!container->enabled)
		return;

	container->enabled = false;

	BUG_ON(!container->mm);
	decrement_locked_vm(container->mm, container->locked_pages);
}

static void *tce_iommu_open(unsigned long arg)
{
	struct tce_container *container;

	if ((arg != VFIO_SPAPR_TCE_IOMMU) && (arg != VFIO_SPAPR_TCE_v2_IOMMU)) {
		pr_err("tce_vfio: Wrong IOMMU type\n");
		return ERR_PTR(-EINVAL);
	}

	container = kzalloc(sizeof(*container), GFP_KERNEL);
	if (!container)
		return ERR_PTR(-ENOMEM);

	mutex_init(&container->lock);
	INIT_LIST_HEAD_RCU(&container->group_list);
	INIT_LIST_HEAD_RCU(&container->prereg_list);

	container->v2 = arg == VFIO_SPAPR_TCE_v2_IOMMU;

	return container;
}

static int tce_iommu_clear(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long pages);
static void tce_iommu_free_table(struct tce_container *container,
		struct iommu_table *tbl);

static void tce_iommu_release(void *iommu_data)
{
	struct tce_container *container = iommu_data;
	struct tce_iommu_group *tcegrp;
	long i;

	while (tce_groups_attached(container)) {
		tcegrp = list_first_entry(&container->group_list,
				struct tce_iommu_group, next);
		tce_iommu_detach_group(iommu_data, tcegrp->grp);
	}

	/*
	 * If VFIO created a table, it was not disposed
	 * by tce_iommu_detach_group() so do it now.
	 */
	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = container->tables[i];

		if (!tbl)
			continue;

		tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
		tce_iommu_free_table(container, tbl);
	}

	while (!list_empty(&container->prereg_list)) {
		struct tce_iommu_prereg *tcemem;

		tcemem = list_first_entry(&container->prereg_list,
				struct tce_iommu_prereg, next);
		WARN_ON_ONCE(tce_iommu_prereg_free(container, tcemem));
	}

	tce_iommu_disable(container);
	if (container->mm)
		mmdrop(container->mm);
	mutex_destroy(&container->lock);

	kfree(container);
}

static void tce_iommu_unuse_page(struct tce_container *container,
		unsigned long hpa)
{
	struct page *page;

	page = pfn_to_page(hpa >> PAGE_SHIFT);
	put_page(page);
}

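/*
 * Translate a userspace address within a preregistered region into a host
 * physical address; also returns the backing region so the caller can
 * manage its mapped counter.
 */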
static int tce_iommu_prereg_ua_to_hpa(struct tce_container *container,
		unsigned long tce, unsigned long size,
		unsigned long *phpa, struct mm_iommu_table_group_mem_t **pmem)
{
	long ret = 0;
	struct mm_iommu_table_group_mem_t *mem;

	mem = mm_iommu_lookup(container->mm, tce, size);
	if (!mem)
		return -EINVAL;

	ret = mm_iommu_ua_to_hpa(mem, tce, phpa);
	if (ret)
		return -EINVAL;

	*pmem = mem;

	return 0;
}

static void tce_iommu_unuse_page_v2(struct tce_container *container,
		struct iommu_table *tbl, unsigned long entry)
{
	struct mm_iommu_table_group_mem_t *mem = NULL;
	int ret;
	unsigned long hpa = 0;
	unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);

	if (!pua)
		return;

	ret = tce_iommu_prereg_ua_to_hpa(container, *pua, IOMMU_PAGE_SIZE(tbl),
			&hpa, &mem);
	if (ret)
		pr_debug("%s: tce %lx at #%lx was not cached, ret=%d\n",
				__func__, *pua, entry, ret);
	if (mem)
		mm_iommu_mapped_dec(mem);

	*pua = 0;
}

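/*
 * Clear @pages TCE entries starting at @entry and drop the references taken
 * when they were mapped (v1 page pins, v2 preregistered memory counters).
 */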
static int tce_iommu_clear(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long pages)
{
	unsigned long oldhpa;
	long ret;
	enum dma_data_direction direction;

	for ( ; pages; --pages, ++entry) {
		direction = DMA_NONE;
		oldhpa = 0;
		ret = iommu_tce_xchg(tbl, entry, &oldhpa, &direction);
		if (ret)
			continue;

		if (direction == DMA_NONE)
			continue;

		if (container->v2) {
			tce_iommu_unuse_page_v2(container, tbl, entry);
			continue;
		}

		tce_iommu_unuse_page(container, oldhpa);
	}

	return 0;
}

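/* Pin the userspace page backing @tce and return its host physical address. */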
static int tce_iommu_use_page(unsigned long tce, unsigned long *hpa)
{
	struct page *page = NULL;
	enum dma_data_direction direction = iommu_tce_direction(tce);

	if (get_user_pages_fast(tce & PAGE_MASK, 1,
			direction != DMA_TO_DEVICE, &page) != 1)
		return -EFAULT;

	*hpa = __pa((unsigned long) page_address(page));

	return 0;
}

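/*
 * v1 map path: pin user memory page by page and program the TCE entries;
 * on failure, already programmed entries are cleared again.
 */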
static long tce_iommu_build(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long tce, unsigned long pages,
		enum dma_data_direction direction)
{
	long i, ret = 0;
	struct page *page;
	unsigned long hpa;
	enum dma_data_direction dirtmp;

	for (i = 0; i < pages; ++i) {
		unsigned long offset = tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK;

		ret = tce_iommu_use_page(tce, &hpa);
		if (ret)
			break;

		page = pfn_to_page(hpa >> PAGE_SHIFT);
		if (!tce_page_is_contained(page, tbl->it_page_shift)) {
			ret = -EPERM;
			break;
		}

		hpa |= offset;
		dirtmp = direction;
		ret = iommu_tce_xchg(tbl, entry + i, &hpa, &dirtmp);
		if (ret) {
			tce_iommu_unuse_page(container, hpa);
			pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
					__func__, entry << tbl->it_page_shift,
					tce, ret);
			break;
		}

		if (dirtmp != DMA_NONE)
			tce_iommu_unuse_page(container, hpa);

		tce += IOMMU_PAGE_SIZE(tbl);
	}

	if (ret)
		tce_iommu_clear(container, tbl, entry, i);

	return ret;
}

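/*
 * v2 map path: translate through memory preregistered with
 * VFIO_IOMMU_SPAPR_REGISTER_MEMORY instead of pinning here, and remember
 * the userspace address of every entry for later unmapping.
 */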
static long tce_iommu_build_v2(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long tce, unsigned long pages,
		enum dma_data_direction direction)
{
	long i, ret = 0;
	struct page *page;
	unsigned long hpa;
	enum dma_data_direction dirtmp;

	if (!tbl->it_userspace) {
		ret = tce_iommu_userspace_view_alloc(tbl, container->mm);
		if (ret)
			return ret;
	}

	for (i = 0; i < pages; ++i) {
		struct mm_iommu_table_group_mem_t *mem = NULL;
		unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl,
				entry + i);

		ret = tce_iommu_prereg_ua_to_hpa(container,
				tce, IOMMU_PAGE_SIZE(tbl), &hpa, &mem);
		if (ret)
			break;

		page = pfn_to_page(hpa >> PAGE_SHIFT);
		if (!tce_page_is_contained(page, tbl->it_page_shift)) {
			ret = -EPERM;
			break;
		}

		/* Preserve offset within IOMMU page */
		hpa |= tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK;
		dirtmp = direction;

		/* The registered region is being unregistered */
		if (mm_iommu_mapped_inc(mem))
			break;

		ret = iommu_tce_xchg(tbl, entry + i, &hpa, &dirtmp);
		if (ret) {
			/* dirtmp cannot be DMA_NONE here */
			tce_iommu_unuse_page_v2(container, tbl, entry + i);
			pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
					__func__, entry << tbl->it_page_shift,
					tce, ret);
			break;
		}

		if (dirtmp != DMA_NONE)
			tce_iommu_unuse_page_v2(container, tbl, entry + i);

		*pua = tce;

		tce += IOMMU_PAGE_SIZE(tbl);
	}

	if (ret)
		tce_iommu_clear(container, tbl, entry, i);

	return ret;
}

static long tce_iommu_create_table(struct tce_container *container,
			struct iommu_table_group *table_group,
			int num,
			__u32 page_shift,
			__u64 window_size,
			__u32 levels,
			struct iommu_table **ptbl)
{
	long ret, table_size;

	table_size = table_group->ops->get_table_size(page_shift, window_size,
			levels);
	if (!table_size)
		return -EINVAL;

	ret = try_increment_locked_vm(container->mm, table_size >> PAGE_SHIFT);
	if (ret)
		return ret;

	ret = table_group->ops->create_table(table_group, num,
			page_shift, window_size, levels, ptbl);

	WARN_ON(!ret && !(*ptbl)->it_ops->free);
	WARN_ON(!ret && ((*ptbl)->it_allocated_size != table_size));

	return ret;
}

static void tce_iommu_free_table(struct tce_container *container,
		struct iommu_table *tbl)
{
	unsigned long pages = tbl->it_allocated_size >> PAGE_SHIFT;

	tce_iommu_userspace_view_free(tbl, container->mm);
	tbl->it_ops->free(tbl);
	decrement_locked_vm(container->mm, pages);
}

static long tce_iommu_create_window(struct tce_container *container,
		__u32 page_shift, __u64 window_size, __u32 levels,
		__u64 *start_addr)
{
	struct tce_iommu_group *tcegrp;
	struct iommu_table_group *table_group;
	struct iommu_table *tbl = NULL;
	long ret, num;

	num = tce_iommu_find_free_table(container);
	if (num < 0)
		return num;

	/* Get the first group for ops::create_table */
	tcegrp = list_first_entry(&container->group_list,
			struct tce_iommu_group, next);
	table_group = iommu_group_get_iommudata(tcegrp->grp);
	if (!table_group)
		return -EFAULT;

	if (!(table_group->pgsizes & (1ULL << page_shift)))
		return -EINVAL;

	if (!table_group->ops->set_window || !table_group->ops->unset_window ||
			!table_group->ops->get_table_size ||
			!table_group->ops->create_table)
		return -EPERM;

	/* Create TCE table */
	ret = tce_iommu_create_table(container, table_group, num,
			page_shift, window_size, levels, &tbl);
	if (ret)
		return ret;

	BUG_ON(!tbl->it_ops->free);

	/*
	 * Program the table to every group.
	 * Groups have been tested for compatibility at the attach time.
	 */
	list_for_each_entry(tcegrp, &container->group_list, next) {
		table_group = iommu_group_get_iommudata(tcegrp->grp);

		ret = table_group->ops->set_window(table_group, num, tbl);
		if (ret)
			goto unset_exit;
	}

	container->tables[num] = tbl;

	/* Return start address assigned by platform in create_table() */
	*start_addr = tbl->it_offset << tbl->it_page_shift;

	return 0;

unset_exit:
	list_for_each_entry(tcegrp, &container->group_list, next) {
		table_group = iommu_group_get_iommudata(tcegrp->grp);
		table_group->ops->unset_window(table_group, num);
	}
	tce_iommu_free_table(container, tbl);

	return ret;
}

static long tce_iommu_remove_window(struct tce_container *container,
		__u64 start_addr)
{
	struct iommu_table_group *table_group = NULL;
	struct iommu_table *tbl;
	struct tce_iommu_group *tcegrp;
	int num;

	num = tce_iommu_find_table(container, start_addr, &tbl);
	if (num < 0)
		return -EINVAL;

	BUG_ON(!tbl->it_size);

	/* Detach groups from IOMMUs */
	list_for_each_entry(tcegrp, &container->group_list, next) {
		table_group = iommu_group_get_iommudata(tcegrp->grp);

		/*
		 * SPAPR TCE IOMMU exposes the default DMA window to
		 * the guest via dma32_window_start/size of
		 * VFIO_IOMMU_SPAPR_TCE_GET_INFO. Some platforms allow
		 * the userspace to remove this window, some do not, so
		 * here we check for the platform capability.
		 */
		if (!table_group->ops || !table_group->ops->unset_window)
			return -EPERM;

		table_group->ops->unset_window(table_group, num);
	}

	/* Free table */
	tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
	tce_iommu_free_table(container, tbl);
	container->tables[num] = NULL;

	return 0;
}

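/*
 * Create the default 32-bit DMA window whose creation was postponed at the
 * group attach time until the first mapping or window-related request.
 */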
static long tce_iommu_create_default_window(struct tce_container *container)
{
	long ret;
	__u64 start_addr = 0;
	struct tce_iommu_group *tcegrp;
	struct iommu_table_group *table_group;

	if (!container->def_window_pending)
		return 0;

	if (!tce_groups_attached(container))
		return -ENODEV;

	tcegrp = list_first_entry(&container->group_list,
			struct tce_iommu_group, next);
	table_group = iommu_group_get_iommudata(tcegrp->grp);
	if (!table_group)
		return -ENODEV;

	ret = tce_iommu_create_window(container, IOMMU_PAGE_SHIFT_4K,
			table_group->tce32_size, 1, &start_addr);
	WARN_ON_ONCE(!ret && start_addr);

	if (!ret)
		container->def_window_pending = false;

	return ret;
}

static long tce_iommu_ioctl(void *iommu_data,
				 unsigned int cmd, unsigned long arg)
{
	struct tce_container *container = iommu_data;
	unsigned long minsz, ddwsz;
	long ret;

	switch (cmd) {
	case VFIO_CHECK_EXTENSION:
		switch (arg) {
		case VFIO_SPAPR_TCE_IOMMU:
		case VFIO_SPAPR_TCE_v2_IOMMU:
			ret = 1;
			break;
		default:
			ret = vfio_spapr_iommu_eeh_ioctl(NULL, cmd, arg);
			break;
		}

		return (ret < 0) ? 0 : ret;
	}

	/*
	 * Sanity check to prevent one userspace from manipulating
	 * another userspace mm.
	 */
	BUG_ON(!container);
	if (container->mm && container->mm != current->mm)
		return -EPERM;

	switch (cmd) {
	case VFIO_IOMMU_SPAPR_TCE_GET_INFO: {
		struct vfio_iommu_spapr_tce_info info;
		struct tce_iommu_group *tcegrp;
		struct iommu_table_group *table_group;

		if (!tce_groups_attached(container))
			return -ENXIO;

		tcegrp = list_first_entry(&container->group_list,
				struct tce_iommu_group, next);
		table_group = iommu_group_get_iommudata(tcegrp->grp);

		if (!table_group)
			return -ENXIO;

		minsz = offsetofend(struct vfio_iommu_spapr_tce_info,
				dma32_window_size);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		info.dma32_window_start = table_group->tce32_start;
		info.dma32_window_size = table_group->tce32_size;
		info.flags = 0;
		memset(&info.ddw, 0, sizeof(info.ddw));

		if (table_group->max_dynamic_windows_supported &&
				container->v2) {
			info.flags |= VFIO_IOMMU_SPAPR_INFO_DDW;
			info.ddw.pgsizes = table_group->pgsizes;
			info.ddw.max_dynamic_windows_supported =
				table_group->max_dynamic_windows_supported;
			info.ddw.levels = table_group->max_levels;
		}

		ddwsz = offsetofend(struct vfio_iommu_spapr_tce_info, ddw);

		if (info.argsz >= ddwsz)
			minsz = ddwsz;

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}
	case VFIO_IOMMU_MAP_DMA: {
		struct vfio_iommu_type1_dma_map param;
		struct iommu_table *tbl = NULL;
		long num;
		enum dma_data_direction direction;

		if (!container->enabled)
			return -EPERM;

		minsz = offsetofend(struct vfio_iommu_type1_dma_map, size);

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		if (param.flags & ~(VFIO_DMA_MAP_FLAG_READ |
				VFIO_DMA_MAP_FLAG_WRITE))
			return -EINVAL;

		ret = tce_iommu_create_default_window(container);
		if (ret)
			return ret;

		num = tce_iommu_find_table(container, param.iova, &tbl);
		if (num < 0)
			return -ENXIO;

		if ((param.size & ~IOMMU_PAGE_MASK(tbl)) ||
				(param.vaddr & ~IOMMU_PAGE_MASK(tbl)))
			return -EINVAL;

		/* iova is checked by the IOMMU API */
		if (param.flags & VFIO_DMA_MAP_FLAG_READ) {
			if (param.flags & VFIO_DMA_MAP_FLAG_WRITE)
				direction = DMA_BIDIRECTIONAL;
			else
				direction = DMA_TO_DEVICE;
		} else {
			if (param.flags & VFIO_DMA_MAP_FLAG_WRITE)
				direction = DMA_FROM_DEVICE;
			else
				return -EINVAL;
		}

		ret = iommu_tce_put_param_check(tbl, param.iova, param.vaddr);
		if (ret)
			return ret;

		if (container->v2)
			ret = tce_iommu_build_v2(container, tbl,
					param.iova >> tbl->it_page_shift,
					param.vaddr,
					param.size >> tbl->it_page_shift,
					direction);
		else
			ret = tce_iommu_build(container, tbl,
					param.iova >> tbl->it_page_shift,
					param.vaddr,
					param.size >> tbl->it_page_shift,
					direction);

		iommu_flush_tce(tbl);

		return ret;
	}
	case VFIO_IOMMU_UNMAP_DMA: {
		struct vfio_iommu_type1_dma_unmap param;
		struct iommu_table *tbl = NULL;
		long num;

		if (!container->enabled)
			return -EPERM;

		minsz = offsetofend(struct vfio_iommu_type1_dma_unmap,
				size);

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		/* No flag is supported now */
		if (param.flags)
			return -EINVAL;

		ret = tce_iommu_create_default_window(container);
		if (ret)
			return ret;

		num = tce_iommu_find_table(container, param.iova, &tbl);
		if (num < 0)
			return -ENXIO;

		if (param.size & ~IOMMU_PAGE_MASK(tbl))
			return -EINVAL;

		ret = iommu_tce_clear_param_check(tbl, param.iova, 0,
				param.size >> tbl->it_page_shift);
		if (ret)
			return ret;

		ret = tce_iommu_clear(container, tbl,
				param.iova >> tbl->it_page_shift,
				param.size >> tbl->it_page_shift);
		iommu_flush_tce(tbl);

		return ret;
	}
	case VFIO_IOMMU_SPAPR_REGISTER_MEMORY: {
		struct vfio_iommu_spapr_register_memory param;

		if (!container->v2)
			break;

		minsz = offsetofend(struct vfio_iommu_spapr_register_memory,
				size);

		ret = tce_iommu_mm_set(container);
		if (ret)
			return ret;

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		/* No flag is supported now */
		if (param.flags)
			return -EINVAL;

		mutex_lock(&container->lock);
		ret = tce_iommu_register_pages(container, param.vaddr,
				param.size);
		mutex_unlock(&container->lock);

		return ret;
	}
	case VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY: {
		struct vfio_iommu_spapr_register_memory param;

		if (!container->v2)
			break;

		if (!container->mm)
			return -EPERM;

		minsz = offsetofend(struct vfio_iommu_spapr_register_memory,
				size);

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		/* No flag is supported now */
		if (param.flags)
			return -EINVAL;

		mutex_lock(&container->lock);
		ret = tce_iommu_unregister_pages(container, param.vaddr,
				param.size);
		mutex_unlock(&container->lock);

		return ret;
	}
	case VFIO_IOMMU_ENABLE:
		if (container->v2)
			break;

		mutex_lock(&container->lock);
		ret = tce_iommu_enable(container);
		mutex_unlock(&container->lock);
		return ret;

	case VFIO_IOMMU_DISABLE:
		if (container->v2)
			break;

		mutex_lock(&container->lock);
		tce_iommu_disable(container);
		mutex_unlock(&container->lock);
		return 0;

	case VFIO_EEH_PE_OP: {
		struct tce_iommu_group *tcegrp;

		ret = 0;
		list_for_each_entry(tcegrp, &container->group_list, next) {
			ret = vfio_spapr_iommu_eeh_ioctl(tcegrp->grp,
					cmd, arg);
			if (ret)
				return ret;
		}
		return ret;
	}

	case VFIO_IOMMU_SPAPR_TCE_CREATE: {
		struct vfio_iommu_spapr_tce_create create;

		if (!container->v2)
			break;

		ret = tce_iommu_mm_set(container);
		if (ret)
			return ret;

		if (!tce_groups_attached(container))
			return -ENXIO;

		minsz = offsetofend(struct vfio_iommu_spapr_tce_create,
				start_addr);

		if (copy_from_user(&create, (void __user *)arg, minsz))
			return -EFAULT;

		if (create.argsz < minsz)
			return -EINVAL;

		if (create.flags)
			return -EINVAL;

		mutex_lock(&container->lock);

		ret = tce_iommu_create_default_window(container);
		if (!ret)
			ret = tce_iommu_create_window(container,
					create.page_shift, create.window_size,
					create.levels, &create.start_addr);

		mutex_unlock(&container->lock);

		if (!ret && copy_to_user((void __user *)arg, &create, minsz))
			ret = -EFAULT;

		return ret;
	}
	case VFIO_IOMMU_SPAPR_TCE_REMOVE: {
		struct vfio_iommu_spapr_tce_remove remove;

		if (!container->v2)
			break;

		ret = tce_iommu_mm_set(container);
		if (ret)
			return ret;

		if (!tce_groups_attached(container))
			return -ENXIO;

		minsz = offsetofend(struct vfio_iommu_spapr_tce_remove,
				start_addr);

		if (copy_from_user(&remove, (void __user *)arg, minsz))
			return -EFAULT;

		if (remove.argsz < minsz)
			return -EINVAL;

		if (remove.flags)
			return -EINVAL;

		if (container->def_window_pending && !remove.start_addr) {
			container->def_window_pending = false;
			return 0;
		}

		mutex_lock(&container->lock);

		ret = tce_iommu_remove_window(container, remove.start_addr);

		mutex_unlock(&container->lock);

		return ret;
	}
	}

	return -ENOTTY;
}

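/*
 * Ownership handling: platforms without dynamic DMA window support hand
 * their existing tables over to the container, while DDW-capable platforms
 * let VFIO create and program tables itself via the table_group ops.
 */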
static void tce_iommu_release_ownership(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	int i;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = container->tables[i];

		if (!tbl)
			continue;

		tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
		tce_iommu_userspace_view_free(tbl, container->mm);
		if (tbl->it_map)
			iommu_release_ownership(tbl);

		container->tables[i] = NULL;
	}
}

static int tce_iommu_take_ownership(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	int i, j, rc = 0;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = table_group->tables[i];

		if (!tbl || !tbl->it_map)
			continue;

		rc = iommu_take_ownership(tbl);
		if (rc) {
			for (j = 0; j < i; ++j)
				iommu_release_ownership(
						table_group->tables[j]);

			return rc;
		}
	}

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
		container->tables[i] = table_group->tables[i];

	return 0;
}

1229 
1230 static void tce_iommu_release_ownership_ddw(struct tce_container *container,
1231 		struct iommu_table_group *table_group)
1232 {
1233 	long i;
1234 
1235 	if (!table_group->ops->unset_window) {
1236 		WARN_ON_ONCE(1);
1237 		return;
1238 	}
1239 
1240 	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
1241 		table_group->ops->unset_window(table_group, i);
1242 
1243 	table_group->ops->release_ownership(table_group);
1244 }
1245 
1246 static long tce_iommu_take_ownership_ddw(struct tce_container *container,
1247 		struct iommu_table_group *table_group)
1248 {
1249 	if (!table_group->ops->create_table || !table_group->ops->set_window ||
1250 			!table_group->ops->release_ownership) {
1251 		WARN_ON_ONCE(1);
1252 		return -EFAULT;
1253 	}
1254 
1255 	table_group->ops->take_ownership(table_group);
1256 
1257 	return 0;
1258 }
1259 
static int tce_iommu_attach_group(void *iommu_data,
		struct iommu_group *iommu_group)
{
	int ret;
	struct tce_container *container = iommu_data;
	struct iommu_table_group *table_group;
	struct tce_iommu_group *tcegrp = NULL;

	mutex_lock(&container->lock);

	/* pr_debug("tce_vfio: Attaching group #%u to iommu %p\n",
			iommu_group_id(iommu_group), iommu_group); */
	table_group = iommu_group_get_iommudata(iommu_group);

	if (tce_groups_attached(container) && (!table_group->ops ||
			!table_group->ops->take_ownership ||
			!table_group->ops->release_ownership)) {
		ret = -EBUSY;
		goto unlock_exit;
	}

	/* Check if new group has the same iommu_ops (i.e. compatible) */
	list_for_each_entry(tcegrp, &container->group_list, next) {
		struct iommu_table_group *table_group_tmp;

		if (tcegrp->grp == iommu_group) {
			pr_warn("tce_vfio: Group %d is already attached\n",
					iommu_group_id(iommu_group));
			ret = -EBUSY;
			goto unlock_exit;
		}
		table_group_tmp = iommu_group_get_iommudata(tcegrp->grp);
		if (table_group_tmp->ops->create_table !=
				table_group->ops->create_table) {
			pr_warn("tce_vfio: Group %d is incompatible with group %d\n",
					iommu_group_id(iommu_group),
					iommu_group_id(tcegrp->grp));
			ret = -EPERM;
			goto unlock_exit;
		}
	}

	tcegrp = kzalloc(sizeof(*tcegrp), GFP_KERNEL);
	if (!tcegrp) {
		ret = -ENOMEM;
		goto unlock_exit;
	}

	if (!table_group->ops || !table_group->ops->take_ownership ||
			!table_group->ops->release_ownership) {
		ret = tce_iommu_take_ownership(container, table_group);
	} else {
		ret = tce_iommu_take_ownership_ddw(container, table_group);
		if (!tce_groups_attached(container) && !container->tables[0])
			container->def_window_pending = true;
	}

	if (!ret) {
		tcegrp->grp = iommu_group;
		list_add(&tcegrp->next, &container->group_list);
	}

unlock_exit:
	if (ret && tcegrp)
		kfree(tcegrp);

	mutex_unlock(&container->lock);

	return ret;
}

static void tce_iommu_detach_group(void *iommu_data,
		struct iommu_group *iommu_group)
{
	struct tce_container *container = iommu_data;
	struct iommu_table_group *table_group;
	bool found = false;
	struct tce_iommu_group *tcegrp;

	mutex_lock(&container->lock);

	list_for_each_entry(tcegrp, &container->group_list, next) {
		if (tcegrp->grp == iommu_group) {
			found = true;
			break;
		}
	}

	if (!found) {
		pr_warn("tce_vfio: detaching unattached group #%u\n",
				iommu_group_id(iommu_group));
		goto unlock_exit;
	}

	list_del(&tcegrp->next);
	kfree(tcegrp);

	table_group = iommu_group_get_iommudata(iommu_group);
	BUG_ON(!table_group);

	if (!table_group->ops || !table_group->ops->release_ownership)
		tce_iommu_release_ownership(container, table_group);
	else
		tce_iommu_release_ownership_ddw(container, table_group);

unlock_exit:
	mutex_unlock(&container->lock);
}

const struct vfio_iommu_driver_ops tce_iommu_driver_ops = {
	.name		= "iommu-vfio-powerpc",
	.owner		= THIS_MODULE,
	.open		= tce_iommu_open,
	.release	= tce_iommu_release,
	.ioctl		= tce_iommu_ioctl,
	.attach_group	= tce_iommu_attach_group,
	.detach_group	= tce_iommu_detach_group,
};

static int __init tce_iommu_init(void)
{
	return vfio_register_iommu_driver(&tce_iommu_driver_ops);
}

static void __exit tce_iommu_cleanup(void)
{
	vfio_unregister_iommu_driver(&tce_iommu_driver_ops);
}

module_init(tce_iommu_init);
module_exit(tce_iommu_cleanup);

MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);