/*
 * VFIO: IOMMU DMA mapping support for TCE on POWER
 *
 * Copyright (C) 2013 IBM Corp.  All rights reserved.
 *     Author: Alexey Kardashevskiy <aik@ozlabs.ru>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Derived from original vfio_iommu_type1.c:
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/err.h>
#include <linux/vfio.h>
#include <asm/iommu.h>
#include <asm/tce.h>

#define DRIVER_VERSION  "0.1"
#define DRIVER_AUTHOR   "aik@ozlabs.ru"
#define DRIVER_DESC     "VFIO IOMMU SPAPR TCE"

static void tce_iommu_detach_group(void *iommu_data,
		struct iommu_group *iommu_group);

/*
 * VFIO IOMMU fd for SPAPR_TCE IOMMU implementation
 *
 * This code handles mapping and unmapping of user data buffers
 * into DMA'ble space using the IOMMU.
 */

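/*
 * A rough sketch of the expected userspace flow (assuming the usual VFIO
 * container and group file descriptors; error handling omitted):
 *
 *	container = open("/dev/vfio/vfio", O_RDWR);
 *	group = open("/dev/vfio/<groupid>", O_RDWR);
 *	ioctl(group, VFIO_GROUP_SET_CONTAINER, &container);
 *	ioctl(container, VFIO_SET_IOMMU, VFIO_SPAPR_TCE_IOMMU);
 *	ioctl(container, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info);
 *	ioctl(container, VFIO_IOMMU_ENABLE);
 *	ioctl(container, VFIO_IOMMU_MAP_DMA, &map);
 *	...
 *	ioctl(container, VFIO_IOMMU_UNMAP_DMA, &unmap);
 *	ioctl(container, VFIO_IOMMU_DISABLE);
 */
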
/*
 * The container descriptor supports only a single group per container.
 * This is required by the API, as the container is not supplied with
 * an IOMMU group at initialization time.
 */
struct tce_container {
	struct mutex lock;
	struct iommu_table *tbl;
	bool enabled;
};

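/*
 * Pre-account the worst case of pinned memory (the whole DMA window)
 * against RLIMIT_MEMLOCK and mark the container as enabled.
 */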
static int tce_iommu_enable(struct tce_container *container)
{
	int ret = 0;
	unsigned long locked, lock_limit, npages;
	struct iommu_table *tbl = container->tbl;

	if (!container->tbl)
		return -ENXIO;

	if (!current->mm)
		return -ESRCH; /* process exited */

	if (container->enabled)
		return -EBUSY;

	/*
	 * When userspace pages are mapped into the IOMMU, they are effectively
	 * locked memory, so, theoretically, we need to update the accounting
	 * of locked pages on each map and unmap.  For powerpc, the map/unmap
	 * paths can be very hot, though, and the accounting would kill
	 * performance, especially since it would be difficult to impossible
	 * to handle the accounting in real mode only.
	 *
	 * To address that, rather than precisely accounting every page, we
	 * instead account for a worst case on locked memory when the iommu is
	 * enabled and disabled.  The worst case upper bound on locked memory
	 * is the size of the whole iommu window, which is usually relatively
	 * small (compared to total memory sizes) on POWER hardware.
	 *
	 * Also, we don't have a nice way to fail on H_PUT_TCE due to ulimits;
	 * that would effectively kill the guest at random points, so it is
	 * much better to enforce the limit based on the maximum that the
	 * guest can map.
	 */
	down_write(&current->mm->mmap_sem);
	npages = (tbl->it_size << IOMMU_PAGE_SHIFT) >> PAGE_SHIFT;
	locked = current->mm->locked_vm + npages;
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
		pr_warn("RLIMIT_MEMLOCK (%ld) exceeded\n",
				rlimit(RLIMIT_MEMLOCK));
		ret = -ENOMEM;
	} else {
		current->mm->locked_vm += npages;
		container->enabled = true;
	}
	up_write(&current->mm->mmap_sem);

	return ret;
}

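/*
 * Drop the worst-case locked-page accounting taken in tce_iommu_enable()
 * and mark the container as disabled.
 */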
static void tce_iommu_disable(struct tce_container *container)
{
	if (!container->enabled)
		return;

	container->enabled = false;

	if (!container->tbl || !current->mm)
		return;

	down_write(&current->mm->mmap_sem);
	current->mm->locked_vm -= (container->tbl->it_size <<
			IOMMU_PAGE_SHIFT) >> PAGE_SHIFT;
	up_write(&current->mm->mmap_sem);
}

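/*
 * VFIO "open" callback: allocate an empty container for the
 * VFIO_SPAPR_TCE_IOMMU type; the group and its iommu_table are
 * attached later.
 */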
static void *tce_iommu_open(unsigned long arg)
{
	struct tce_container *container;

	if (arg != VFIO_SPAPR_TCE_IOMMU) {
		pr_err("tce_vfio: Wrong IOMMU type\n");
		return ERR_PTR(-EINVAL);
	}

	container = kzalloc(sizeof(*container), GFP_KERNEL);
	if (!container)
		return ERR_PTR(-ENOMEM);

	mutex_init(&container->lock);

	return container;
}

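/*
 * VFIO "release" callback: disable the container, detach any group
 * that is still attached and free the container.
 */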
static void tce_iommu_release(void *iommu_data)
{
	struct tce_container *container = iommu_data;

	WARN_ON(container->tbl && !container->tbl->it_group);
	tce_iommu_disable(container);

	if (container->tbl && container->tbl->it_group)
		tce_iommu_detach_group(iommu_data, container->tbl->it_group);

	mutex_destroy(&container->lock);

	kfree(container);
}

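/*
 * VFIO ioctl handler: GET_INFO reports the 32-bit DMA window of the
 * attached iommu_table, MAP_DMA/UNMAP_DMA translate into TCE put/clear
 * operations, and ENABLE/DISABLE handle the locked-page accounting.
 */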
static long tce_iommu_ioctl(void *iommu_data,
				 unsigned int cmd, unsigned long arg)
{
	struct tce_container *container = iommu_data;
	unsigned long minsz;
	long ret;

	switch (cmd) {
	case VFIO_CHECK_EXTENSION:
		return (arg == VFIO_SPAPR_TCE_IOMMU) ? 1 : 0;

	case VFIO_IOMMU_SPAPR_TCE_GET_INFO: {
		struct vfio_iommu_spapr_tce_info info;
		struct iommu_table *tbl = container->tbl;

		if (WARN_ON(!tbl))
			return -ENXIO;

		minsz = offsetofend(struct vfio_iommu_spapr_tce_info,
				dma32_window_size);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		info.dma32_window_start = tbl->it_offset << IOMMU_PAGE_SHIFT;
		info.dma32_window_size = tbl->it_size << IOMMU_PAGE_SHIFT;
		info.flags = 0;

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}
	case VFIO_IOMMU_MAP_DMA: {
		struct vfio_iommu_type1_dma_map param;
		struct iommu_table *tbl = container->tbl;
		unsigned long tce, i;

		if (!tbl)
			return -ENXIO;

		BUG_ON(!tbl->it_group);

		minsz = offsetofend(struct vfio_iommu_type1_dma_map, size);

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		if (param.flags & ~(VFIO_DMA_MAP_FLAG_READ |
				VFIO_DMA_MAP_FLAG_WRITE))
			return -EINVAL;

		if ((param.size & ~IOMMU_PAGE_MASK) ||
				(param.vaddr & ~IOMMU_PAGE_MASK))
			return -EINVAL;

		/* iova is checked by the IOMMU API */
		tce = param.vaddr;
		if (param.flags & VFIO_DMA_MAP_FLAG_READ)
			tce |= TCE_PCI_READ;
		if (param.flags & VFIO_DMA_MAP_FLAG_WRITE)
			tce |= TCE_PCI_WRITE;

		ret = iommu_tce_put_param_check(tbl, param.iova, tce);
		if (ret)
			return ret;

		for (i = 0; i < (param.size >> IOMMU_PAGE_SHIFT); ++i) {
			ret = iommu_put_tce_user_mode(tbl,
					(param.iova >> IOMMU_PAGE_SHIFT) + i,
					tce);
			if (ret)
				break;
			tce += IOMMU_PAGE_SIZE;
		}
		if (ret)
			iommu_clear_tces_and_put_pages(tbl,
					param.iova >> IOMMU_PAGE_SHIFT, i);

		iommu_flush_tce(tbl);

		return ret;
	}
	case VFIO_IOMMU_UNMAP_DMA: {
		struct vfio_iommu_type1_dma_unmap param;
		struct iommu_table *tbl = container->tbl;

		if (WARN_ON(!tbl))
			return -ENXIO;

		minsz = offsetofend(struct vfio_iommu_type1_dma_unmap,
				size);

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		/* No flags are supported yet */
		if (param.flags)
			return -EINVAL;

		if (param.size & ~IOMMU_PAGE_MASK)
			return -EINVAL;

		ret = iommu_tce_clear_param_check(tbl, param.iova, 0,
				param.size >> IOMMU_PAGE_SHIFT);
		if (ret)
			return ret;

		ret = iommu_clear_tces_and_put_pages(tbl,
				param.iova >> IOMMU_PAGE_SHIFT,
				param.size >> IOMMU_PAGE_SHIFT);
		iommu_flush_tce(tbl);

		return ret;
	}
	case VFIO_IOMMU_ENABLE:
		mutex_lock(&container->lock);
		ret = tce_iommu_enable(container);
		mutex_unlock(&container->lock);
		return ret;

	case VFIO_IOMMU_DISABLE:
		mutex_lock(&container->lock);
		tce_iommu_disable(container);
		mutex_unlock(&container->lock);
		return 0;
	}

	return -ENOTTY;
}

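/*
 * Attach an IOMMU group to the container.  Only one group per container
 * is supported; ownership of the group's iommu_table is taken away from
 * the kernel DMA API while it is attached.
 */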
static int tce_iommu_attach_group(void *iommu_data,
		struct iommu_group *iommu_group)
{
	int ret;
	struct tce_container *container = iommu_data;
	struct iommu_table *tbl = iommu_group_get_iommudata(iommu_group);

	BUG_ON(!tbl);
	mutex_lock(&container->lock);

	/* pr_debug("tce_vfio: Attaching group #%u to iommu %p\n",
			iommu_group_id(iommu_group), iommu_group); */
	if (container->tbl) {
		pr_warn("tce_vfio: Only one group per IOMMU container is allowed, existing id=%d, attaching id=%d\n",
				iommu_group_id(container->tbl->it_group),
				iommu_group_id(iommu_group));
		ret = -EBUSY;
	} else if (container->enabled) {
		pr_err("tce_vfio: attaching group #%u to enabled container\n",
				iommu_group_id(iommu_group));
		ret = -EBUSY;
	} else {
		ret = iommu_take_ownership(tbl);
		if (!ret)
			container->tbl = tbl;
	}

	mutex_unlock(&container->lock);

	return ret;
}

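/*
 * Detach the IOMMU group from the container, disabling the container
 * first if it is still enabled, and return the iommu_table to the
 * kernel DMA API.
 */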
static void tce_iommu_detach_group(void *iommu_data,
		struct iommu_group *iommu_group)
{
	struct tce_container *container = iommu_data;
	struct iommu_table *tbl = iommu_group_get_iommudata(iommu_group);

	BUG_ON(!tbl);
	mutex_lock(&container->lock);
	if (tbl != container->tbl) {
		pr_warn("tce_vfio: detaching group #%u, expected group is #%u\n",
				iommu_group_id(iommu_group),
				iommu_group_id(tbl->it_group));
	} else {
		if (container->enabled) {
			pr_warn("tce_vfio: detaching group #%u from enabled container, forcing disable\n",
					iommu_group_id(tbl->it_group));
			tce_iommu_disable(container);
		}

		/* pr_debug("tce_vfio: detaching group #%u from iommu %p\n",
				iommu_group_id(iommu_group), iommu_group); */
		container->tbl = NULL;
		iommu_release_ownership(tbl);
	}
	mutex_unlock(&container->lock);
}

const struct vfio_iommu_driver_ops tce_iommu_driver_ops = {
	.name		= "iommu-vfio-powerpc",
	.owner		= THIS_MODULE,
	.open		= tce_iommu_open,
	.release	= tce_iommu_release,
	.ioctl		= tce_iommu_ioctl,
	.attach_group	= tce_iommu_attach_group,
	.detach_group	= tce_iommu_detach_group,
};

static int __init tce_iommu_init(void)
{
	return vfio_register_iommu_driver(&tce_iommu_driver_ops);
}

static void __exit tce_iommu_cleanup(void)
{
	vfio_unregister_iommu_driver(&tce_iommu_driver_ops);
}

module_init(tce_iommu_init);
module_exit(tce_iommu_cleanup);

MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);