xref: /openbmc/linux/drivers/iommu/dma-iommu.c (revision d16e0faa)
/*
 * A fairly generic DMA-API to IOMMU-API glue layer.
 *
 * Copyright (C) 2014-2015 ARM Ltd.
 *
 * based in part on arch/arm/mm/dma-mapping.c:
 * Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>

int iommu_dma_init(void)
{
	return iova_cache_get();
}

/**
 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
 * @domain: IOMMU domain to prepare for DMA-API usage
 *
 * IOMMU drivers should normally call this from their domain_alloc
 * callback when domain->type == IOMMU_DOMAIN_DMA.
 */
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
	struct iova_domain *iovad;

	if (domain->iova_cookie)
		return -EEXIST;

	iovad = kzalloc(sizeof(*iovad), GFP_KERNEL);
	domain->iova_cookie = iovad;

	return iovad ? 0 : -ENOMEM;
}
EXPORT_SYMBOL(iommu_get_dma_cookie);

/**
 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 *
 * IOMMU drivers should normally call this from their domain_free callback.
 */
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
	struct iova_domain *iovad = domain->iova_cookie;

	if (!iovad)
		return;

	put_iova_domain(iovad);
	kfree(iovad);
	domain->iova_cookie = NULL;
}
EXPORT_SYMBOL(iommu_put_dma_cookie);
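
/*
 * Example (illustrative sketch, not part of this file): a hypothetical
 * IOMMU driver would typically pair these helpers in its domain lifecycle
 * callbacks, along the lines of:
 *
 *	static struct iommu_domain *my_domain_alloc(unsigned type)
 *	{
 *		struct my_domain *dom = kzalloc(sizeof(*dom), GFP_KERNEL);
 *
 *		if (!dom)
 *			return NULL;
 *		if (type == IOMMU_DOMAIN_DMA &&
 *		    iommu_get_dma_cookie(&dom->domain)) {
 *			kfree(dom);
 *			return NULL;
 *		}
 *		return &dom->domain;
 *	}
 *
 *	static void my_domain_free(struct iommu_domain *domain)
 *	{
 *		iommu_put_dma_cookie(domain);
 *		kfree(to_my_domain(domain));
 *	}
 *
 * "my_domain", "my_domain_alloc" and "to_my_domain" are made-up names for
 * illustration only.
 */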

/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 * @base: IOVA at which the mappable address space starts
 * @size: Size of IOVA space
 *
 * @base and @size should be exact multiples of IOMMU page granularity to
 * avoid rounding surprises. If necessary, we reserve the page at address 0
 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 * any change which could make prior IOVAs invalid will fail.
 */
int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, u64 size)
{
	struct iova_domain *iovad = domain->iova_cookie;
	unsigned long order, base_pfn, end_pfn;

	if (!iovad)
		return -ENODEV;

	/* Use the smallest supported page size for IOVA granularity */
	order = __ffs(domain->pgsize_bitmap);
	base_pfn = max_t(unsigned long, 1, base >> order);
	end_pfn = (base + size - 1) >> order;

	/* Check the domain allows at least some access to the device... */
	if (domain->geometry.force_aperture) {
		if (base > domain->geometry.aperture_end ||
		    base + size <= domain->geometry.aperture_start) {
			pr_warn("specified DMA range outside IOMMU capability\n");
			return -EFAULT;
		}
		/* ...then finally give it a kicking to make sure it fits */
		base_pfn = max_t(unsigned long, base_pfn,
				domain->geometry.aperture_start >> order);
		end_pfn = min_t(unsigned long, end_pfn,
				domain->geometry.aperture_end >> order);
	}

	/* All we can safely do with an existing domain is enlarge it */
	if (iovad->start_pfn) {
		if (1UL << order != iovad->granule ||
		    base_pfn != iovad->start_pfn ||
		    end_pfn < iovad->dma_32bit_pfn) {
			pr_warn("Incompatible range for DMA domain\n");
			return -EFAULT;
		}
		iovad->dma_32bit_pfn = end_pfn;
	} else {
		init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn);
	}
	return 0;
}
EXPORT_SYMBOL(iommu_dma_init_domain);
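
/*
 * Example (illustrative sketch): arch code wiring a device up for DMA
 * would then initialise the domain to cover the usable bus address range,
 * from something like an arch_setup_dma_ops() implementation:
 *
 *	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
 *
 *	if (iommu_dma_init_domain(domain, dma_base, size))
 *		pr_warn("Failed to initialise DMA domain\n");
 *
 * where dma_base and size are assumed to come from firmware (e.g. a DT
 * "dma-ranges" property) and to be aligned to the IOMMU page granularity
 * as documented above.
 */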

/**
 * dma_direction_to_prot - Translate DMA API directions to IOMMU API page flags
 * @dir: Direction of DMA transfer
 * @coherent: Is the DMA master cache-coherent?
 *
 * Return: corresponding IOMMU API page protection flags
 */
int dma_direction_to_prot(enum dma_data_direction dir, bool coherent)
{
	int prot = coherent ? IOMMU_CACHE : 0;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;
	default:
		return 0;
	}
}
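
/*
 * Example (illustrative sketch): arch dma_map_ops glue would typically
 * derive the prot value from the streaming direction and the device's
 * coherency before mapping, e.g.:
 *
 *	int prot = dma_direction_to_prot(dir, is_device_dma_coherent(dev));
 *	dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size,
 *						 prot);
 *
 * is_device_dma_coherent() is the arm64 helper of this era; other arches
 * would substitute their own notion of coherency.
 */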

static struct iova *__alloc_iova(struct iova_domain *iovad, size_t size,
		dma_addr_t dma_limit)
{
	unsigned long shift = iova_shift(iovad);
	unsigned long length = iova_align(iovad, size) >> shift;

	/*
	 * Enforce size-alignment to be safe - there could perhaps be an
	 * attribute to control this per-device, or at least per-domain...
	 */
	return alloc_iova(iovad, length, dma_limit >> shift, true);
}

/* The IOVA allocator knows what we mapped, so just unmap whatever that was */
static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr)
{
	struct iova_domain *iovad = domain->iova_cookie;
	unsigned long shift = iova_shift(iovad);
	unsigned long pfn = dma_addr >> shift;
	struct iova *iova = find_iova(iovad, pfn);
	size_t size;

	if (WARN_ON(!iova))
		return;

	size = iova_size(iova) << shift;
	size -= iommu_unmap(domain, pfn << shift, size);
	/* ...and if we can't, then something is horribly, horribly wrong */
	WARN_ON(size > 0);
	__free_iova(iovad, iova);
}

static void __iommu_dma_free_pages(struct page **pages, int count)
{
	while (count--)
		__free_page(pages[count]);
	kvfree(pages);
}

static struct page **__iommu_dma_alloc_pages(unsigned int count, gfp_t gfp)
{
	struct page **pages;
	unsigned int i = 0, array_size = count * sizeof(*pages);
	unsigned int order = MAX_ORDER;

	if (array_size <= PAGE_SIZE)
		pages = kzalloc(array_size, GFP_KERNEL);
	else
		pages = vzalloc(array_size);
	if (!pages)
		return NULL;

	/* IOMMU can map any pages, so highmem can also be used here */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	while (count) {
		struct page *page = NULL;
		int j;

		/*
		 * Higher-order allocations are a convenience rather
		 * than a necessity, hence using __GFP_NORETRY until
		 * falling back to single-page allocations.
		 */
		for (order = min_t(unsigned int, order, __fls(count));
		     order > 0; order--) {
			page = alloc_pages(gfp | __GFP_NORETRY, order);
			if (!page)
				continue;
			if (PageCompound(page)) {
				if (!split_huge_page(page))
					break;
				__free_pages(page, order);
			} else {
				split_page(page, order);
				break;
			}
		}
		if (!page)
			page = alloc_page(gfp);
		if (!page) {
			__iommu_dma_free_pages(pages, i);
			return NULL;
		}
		j = 1 << order;
		count -= j;
		while (j--)
			pages[i++] = page++;
	}
	return pages;
}
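
/*
 * Worked example of the strategy above (illustrative): for count == 6,
 * __fls(6) == 2, so we first attempt an order-2 (4-page) allocation; if
 * that succeeds and splits cleanly, count drops to 2 and an order-1
 * attempt follows, degrading to single pages only as allocations fail.
 * The result is physically-contiguous runs where they come cheaply, with
 * no hard dependency on them.
 */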

/**
 * iommu_dma_free - Free a buffer allocated by iommu_dma_alloc()
 * @dev: Device which owns this buffer
 * @pages: Array of buffer pages as returned by iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @handle: DMA address of buffer
 *
 * Frees both the pages associated with the buffer, and the array
 * describing them
 */
void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
		dma_addr_t *handle)
{
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), *handle);
	__iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
	*handle = DMA_ERROR_CODE;
}

/**
 * iommu_dma_alloc - Allocate and map a buffer contiguous in IOVA space
 * @dev: Device to allocate memory for. Must be a real device
 *	 attached to an iommu_dma_domain
 * @size: Size of buffer in bytes
 * @gfp: Allocation flags
 * @prot: IOMMU mapping flags
 * @handle: Out argument for allocated DMA handle
 * @flush_page: Arch callback which must ensure PAGE_SIZE bytes from the
 *		given VA/PA are visible to the given non-coherent device.
 *
 * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
 * but an IOMMU which supports smaller pages might not map the whole thing.
 *
 * Return: Array of struct page pointers describing the buffer,
 *	   or NULL on failure.
 */
struct page **iommu_dma_alloc(struct device *dev, size_t size,
		gfp_t gfp, int prot, dma_addr_t *handle,
		void (*flush_page)(struct device *, const void *, phys_addr_t))
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iova_domain *iovad = domain->iova_cookie;
	struct iova *iova;
	struct page **pages;
	struct sg_table sgt;
	dma_addr_t dma_addr;
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

	*handle = DMA_ERROR_CODE;

	pages = __iommu_dma_alloc_pages(count, gfp);
	if (!pages)
		return NULL;

	iova = __alloc_iova(iovad, size, dev->coherent_dma_mask);
	if (!iova)
		goto out_free_pages;

	size = iova_align(iovad, size);
	if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
		goto out_free_iova;

	if (!(prot & IOMMU_CACHE)) {
		struct sg_mapping_iter miter;
		/*
		 * The CPU-centric flushing implied by SG_MITER_TO_SG isn't
		 * sufficient here, so skip it by using the "wrong" direction.
		 */
		sg_miter_start(&miter, sgt.sgl, sgt.orig_nents, SG_MITER_FROM_SG);
		while (sg_miter_next(&miter))
			flush_page(dev, miter.addr, page_to_phys(miter.page));
		sg_miter_stop(&miter);
	}

	dma_addr = iova_dma_addr(iovad, iova);
	if (iommu_map_sg(domain, dma_addr, sgt.sgl, sgt.orig_nents, prot)
			< size)
		goto out_free_sg;

	*handle = dma_addr;
	sg_free_table(&sgt);
	return pages;

out_free_sg:
	sg_free_table(&sgt);
out_free_iova:
	__free_iova(iovad, iova);
out_free_pages:
	__iommu_dma_free_pages(pages, count);
	return NULL;
}
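
/*
 * Example (illustrative sketch): a non-coherent arch allocator might use
 * this pair roughly as follows, with __dma_flush_range() standing in for
 * whatever cache maintenance the architecture actually needs:
 *
 *	static void flush_page(struct device *dev, const void *virt,
 *			       phys_addr_t phys)
 *	{
 *		__dma_flush_range(virt, virt + PAGE_SIZE);
 *	}
 *
 *	pages = iommu_dma_alloc(dev, size, gfp, ioprot, &iova, flush_page);
 *	...
 *	iommu_dma_free(dev, pages, size, &iova);
 *
 * Nothing here prescribes the callback's implementation; it need only make
 * the page's contents visible to the device.
 */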

/**
 * iommu_dma_mmap - Map a buffer into provided user VMA
 * @pages: Array representing buffer from iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @vma: VMA describing requested userspace mapping
 *
 * Maps the pages of the buffer in @pages into @vma. The caller is responsible
 * for verifying the correct size and protection of @vma beforehand.
 */
int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma)
{
	unsigned long uaddr = vma->vm_start;
	unsigned int i, count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	int ret = -ENXIO;

	for (i = vma->vm_pgoff; i < count && uaddr < vma->vm_end; i++) {
		ret = vm_insert_page(vma, uaddr, pages[i]);
		if (ret)
			break;
		uaddr += PAGE_SIZE;
	}
	return ret;
}
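
/*
 * Example (illustrative sketch): an arch mmap callback might defer to this
 * after its own bookkeeping, e.g.:
 *
 *	if (off >= nr_pages || vma_pages(vma) > nr_pages - off)
 *		return -ENXIO;
 *	return iommu_dma_mmap(area->pages, size, vma);
 *
 * where the size/offset validation shown is the caller's responsibility,
 * as noted above, and "area" is assumed to be the vmalloc region tracking
 * the buffer's pages.
 */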

dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, int prot)
{
	dma_addr_t dma_addr;
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iova_domain *iovad = domain->iova_cookie;
	phys_addr_t phys = page_to_phys(page) + offset;
	size_t iova_off = iova_offset(iovad, phys);
	size_t len = iova_align(iovad, size + iova_off);
	struct iova *iova = __alloc_iova(iovad, len, dma_get_mask(dev));

	if (!iova)
		return DMA_ERROR_CODE;

	dma_addr = iova_dma_addr(iovad, iova);
	if (iommu_map(domain, dma_addr, phys - iova_off, len, prot)) {
		__free_iova(iovad, iova);
		return DMA_ERROR_CODE;
	}
	return dma_addr + iova_off;
}
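
/*
 * Worked example (illustrative): with 4K IOVA granules, mapping 0x100
 * bytes at physical address 0x80001e00 gives iova_off = 0xe00 and
 * len = 0x1000, so the whole page containing the buffer is mapped, and
 * the returned handle is the page-aligned IOVA plus 0xe00, preserving
 * the buffer's sub-page offset.
 */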

void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
		enum dma_data_direction dir, struct dma_attrs *attrs)
{
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle);
}

/*
 * Prepare a successfully-mapped scatterlist to give back to the caller.
 *
 * At this point the segments are already laid out by iommu_dma_map_sg() to
 * avoid individually crossing any boundaries, so we merely need to check a
 * segment's start address to avoid concatenating across one.
 */
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
		dma_addr_t dma_addr)
{
	struct scatterlist *s, *cur = sg;
	unsigned long seg_mask = dma_get_seg_boundary(dev);
	unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
	int i, count = 0;

	for_each_sg(sg, s, nents, i) {
		/* Restore this segment's original unaligned fields first */
		unsigned int s_iova_off = sg_dma_address(s);
		unsigned int s_length = sg_dma_len(s);
		unsigned int s_iova_len = s->length;

		s->offset += s_iova_off;
		s->length = s_length;
		sg_dma_address(s) = DMA_ERROR_CODE;
		sg_dma_len(s) = 0;

		/*
		 * Now fill in the real DMA data. If...
		 * - there is a valid output segment to append to
		 * - and this segment starts on an IOVA page boundary
		 * - but doesn't fall at a segment boundary
		 * - and wouldn't make the resulting output segment too long
		 */
		if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
		    (cur_len + s_length <= max_len)) {
			/* ...then concatenate it with the previous one */
			cur_len += s_length;
		} else {
			/* Otherwise start the next output segment */
			if (i > 0)
				cur = sg_next(cur);
			cur_len = s_length;
			count++;

			sg_dma_address(cur) = dma_addr + s_iova_off;
		}

		sg_dma_len(cur) = cur_len;
		dma_addr += s_iova_len;

		if (s_length + s_iova_off < s_iova_len)
			cur_len = 0;
	}
	return count;
}
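
/*
 * Worked example (illustrative): two 0x1000-byte segments mapped to
 * adjacent IOVA pages, with a seg_mask of 0xffff and a large max_len,
 * come out of the loop above as a single 0x2000-byte DMA segment, since
 * the second one starts on an IOVA page boundary yet does not land on a
 * 64K segment boundary.
 */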

/*
 * If mapping failed, then just restore the original list,
 * while making sure the DMA fields are invalidated.
 */
static void __invalidate_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_address(s) != DMA_ERROR_CODE)
			s->offset += sg_dma_address(s);
		if (sg_dma_len(s))
			s->length = sg_dma_len(s);
		sg_dma_address(s) = DMA_ERROR_CODE;
		sg_dma_len(s) = 0;
	}
}

/*
 * The DMA API client is passing in a scatterlist which could describe
 * any old buffer layout, but the IOMMU API requires everything to be
 * aligned to IOMMU pages. Hence the need for this complicated bit of
 * impedance-matching, to be able to hand off a suitably-aligned list,
 * but still preserve the original offsets and sizes for the caller.
 */
int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, int prot)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iova_domain *iovad = domain->iova_cookie;
	struct iova *iova;
	struct scatterlist *s, *prev = NULL;
	dma_addr_t dma_addr;
	size_t iova_len = 0;
	unsigned long mask = dma_get_seg_boundary(dev);
	int i;

	/*
	 * Work out how much IOVA space we need, and align the segments to
	 * IOVA granules for the IOMMU driver to handle. With some clever
	 * trickery we can modify the list in-place, but reversibly, by
	 * stashing the unaligned parts in the as-yet-unused DMA fields.
	 */
	for_each_sg(sg, s, nents, i) {
		size_t s_iova_off = iova_offset(iovad, s->offset);
		size_t s_length = s->length;
		size_t pad_len = (mask - iova_len + 1) & mask;

		sg_dma_address(s) = s_iova_off;
		sg_dma_len(s) = s_length;
		s->offset -= s_iova_off;
		s_length = iova_align(iovad, s_length + s_iova_off);
		s->length = s_length;

		/*
		 * Due to the alignment of our single IOVA allocation, we can
		 * depend on these assumptions about the segment boundary mask:
		 * - If mask size >= IOVA size, then the IOVA range cannot
		 *   possibly fall across a boundary, so we don't care.
		 * - If mask size < IOVA size, then the IOVA range must start
		 *   exactly on a boundary, therefore we can lay things out
		 *   based purely on segment lengths without needing to know
		 *   the actual addresses beforehand.
		 * - The mask must be a power of 2, so pad_len == 0 if
		 *   iova_len == 0, thus we cannot dereference prev the first
		 *   time through here (i.e. before it has a meaningful value).
		 */
		if (pad_len && pad_len < s_length - 1) {
			prev->length += pad_len;
			iova_len += pad_len;
		}

		iova_len += s_length;
		prev = s;
	}

	iova = __alloc_iova(iovad, iova_len, dma_get_mask(dev));
	if (!iova)
		goto out_restore_sg;

	/*
	 * We'll leave any physical concatenation to the IOMMU driver's
	 * implementation - it knows better than we do.
	 */
	dma_addr = iova_dma_addr(iovad, iova);
	if (iommu_map_sg(domain, dma_addr, sg, nents, prot) < iova_len)
		goto out_free_iova;

	return __finalise_sg(dev, sg, nents, dma_addr);

out_free_iova:
	__free_iova(iovad, iova);
out_restore_sg:
	__invalidate_sg(sg, nents);
	return 0;
}
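
/*
 * Example (illustrative sketch): arch map_sg glue would typically look
 * something like:
 *
 *	int count = iommu_dma_map_sg(dev, sgl, nents,
 *				     dma_direction_to_prot(dir, coherent));
 *
 * with a return value of 0 signalling failure, per DMA API convention.
 * The matching iommu_dma_unmap_sg() needs only the first segment's DMA
 * address, since the whole list shares one IOVA allocation.
 */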

void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, struct dma_attrs *attrs)
{
	/*
	 * The scatterlist segments are mapped into a single
	 * contiguous IOVA allocation, so this is incredibly easy.
	 */
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), sg_dma_address(sg));
}

int iommu_dma_supported(struct device *dev, u64 mask)
{
	/*
	 * 'Special' IOMMUs which don't have the same addressing capability
	 * as the CPU will have to wait until we have some way to query that
	 * before they'll be able to use this framework.
	 */
	return 1;
}

int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == DMA_ERROR_CODE;
}