/*
 * A fairly generic DMA-API to IOMMU-API glue layer.
 *
 * Copyright (C) 2014-2015 ARM Ltd.
 *
 * based in part on arch/arm/mm/dma-mapping.c:
 * Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/acpi_iort.h>
#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>

#define IOMMU_MAPPING_ERROR	0

struct iommu_dma_msi_page {
	struct list_head	list;
	dma_addr_t		iova;
	phys_addr_t		phys;
};

enum iommu_dma_cookie_type {
	IOMMU_DMA_IOVA_COOKIE,
	IOMMU_DMA_MSI_COOKIE,
};

struct iommu_dma_cookie {
	enum iommu_dma_cookie_type	type;
	union {
		/* Full allocator for IOMMU_DMA_IOVA_COOKIE */
		struct iova_domain	iovad;
		/* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
		dma_addr_t		msi_iova;
	};
	struct list_head		msi_page_list;
	spinlock_t			msi_lock;
};

static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
{
	if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
		return cookie->iovad.granule;
	return PAGE_SIZE;
}

static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
{
	struct iommu_dma_cookie *cookie;

	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
	if (cookie) {
		spin_lock_init(&cookie->msi_lock);
		INIT_LIST_HEAD(&cookie->msi_page_list);
		cookie->type = type;
	}
	return cookie;
}

int iommu_dma_init(void)
{
	return iova_cache_get();
}

/**
 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
 * @domain: IOMMU domain to prepare for DMA-API usage
 *
 * IOMMU drivers should normally call this from their domain_alloc
 * callback when domain->type == IOMMU_DOMAIN_DMA.
 */
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
	if (domain->iova_cookie)
		return -EEXIST;

	domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
	if (!domain->iova_cookie)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(iommu_get_dma_cookie);
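
/*
 * Example (illustrative only, not part of this file): a hypothetical IOMMU
 * driver would typically pair the cookie with its domain at allocation time,
 * e.g. in its domain_alloc callback:
 *
 *	static struct iommu_domain *mydrv_domain_alloc(unsigned type)
 *	{
 *		struct mydrv_domain *dom;
 *
 *		dom = kzalloc(sizeof(*dom), GFP_KERNEL);
 *		if (!dom)
 *			return NULL;
 *		if (type == IOMMU_DOMAIN_DMA &&
 *		    iommu_get_dma_cookie(&dom->domain)) {
 *			kfree(dom);
 *			return NULL;
 *		}
 *		return &dom->domain;
 *	}
 *
 * "mydrv_domain" and its embedded struct iommu_domain are assumed names for
 * the sake of the sketch.
 */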

/**
 * iommu_get_msi_cookie - Acquire just MSI remapping resources
 * @domain: IOMMU domain to prepare
 * @base: Start address of IOVA region for MSI mappings
 *
 * Users who manage their own IOVA allocation and do not want DMA API support,
 * but would still like to take advantage of automatic MSI remapping, can use
 * this to initialise their own domain appropriately. Users should reserve a
 * contiguous IOVA region, starting at @base, large enough to accommodate the
 * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
 * used by the devices attached to @domain.
 */
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
	struct iommu_dma_cookie *cookie;

	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;

	if (domain->iova_cookie)
		return -EEXIST;

	cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
	if (!cookie)
		return -ENOMEM;

	cookie->msi_iova = base;
	domain->iova_cookie = cookie;
	return 0;
}
EXPORT_SYMBOL(iommu_get_msi_cookie);
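
/*
 * Illustrative sketch (not part of this file): a user managing its own
 * unmanaged domain might carve out a doorbell region from its allocator and
 * then hand it to this helper:
 *
 *	#define MYDRV_MSI_IOVA_BASE	0x8000000
 *
 *	if (iommu_get_msi_cookie(domain, MYDRV_MSI_IOVA_BASE))
 *		goto out_free_domain;
 *
 * MYDRV_MSI_IOVA_BASE is an assumed constant; the only requirement is that
 * the region starting there is reserved from the caller's own allocator.
 */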

/**
 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
 *          iommu_get_msi_cookie()
 *
 * IOMMU drivers should normally call this from their domain_free callback.
 */
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi, *tmp;

	if (!cookie)
		return;

	if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule)
		put_iova_domain(&cookie->iovad);

	list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
		list_del(&msi->list);
		kfree(msi);
	}
	kfree(cookie);
	domain->iova_cookie = NULL;
}
EXPORT_SYMBOL(iommu_put_dma_cookie);
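
/*
 * Illustrative counterpart to the domain_alloc sketch above (same assumed
 * driver names): the cookie is torn down from the domain_free callback:
 *
 *	static void mydrv_domain_free(struct iommu_domain *domain)
 *	{
 *		iommu_put_dma_cookie(domain);
 *		kfree(to_mydrv_domain(domain));
 *	}
 */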

/**
 * iommu_dma_get_resv_regions - Reserved region driver helper
 * @dev: Device from iommu_get_resv_regions()
 * @list: Reserved region list from iommu_get_resv_regions()
 *
 * IOMMU drivers can use this to implement their .get_resv_regions callback
 * for general non-IOMMU-specific reservations. Currently, this covers GICv3
 * ITS region reservation on ACPI-based ARM platforms that may require HW MSI
 * reservation.
 */
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{
	if (!is_of_node(dev->iommu_fwspec->iommu_fwnode))
		iort_iommu_msi_get_resv_regions(dev, list);
}
EXPORT_SYMBOL(iommu_dma_get_resv_regions);

static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
		phys_addr_t start, phys_addr_t end)
{
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_dma_msi_page *msi_page;
	int i, num_pages;

	start -= iova_offset(iovad, start);
	num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);

	msi_page = kcalloc(num_pages, sizeof(*msi_page), GFP_KERNEL);
	if (!msi_page)
		return -ENOMEM;

	for (i = 0; i < num_pages; i++) {
		msi_page[i].phys = start;
		msi_page[i].iova = start;
		INIT_LIST_HEAD(&msi_page[i].list);
		list_add(&msi_page[i].list, &cookie->msi_page_list);
		start += iovad->granule;
	}

	return 0;
}

static void iova_reserve_pci_windows(struct pci_dev *dev,
		struct iova_domain *iovad)
{
	struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
	struct resource_entry *window;
	unsigned long lo, hi;

	resource_list_for_each_entry(window, &bridge->windows) {
		if (resource_type(window->res) != IORESOURCE_MEM)
			continue;

		lo = iova_pfn(iovad, window->res->start - window->offset);
		hi = iova_pfn(iovad, window->res->end - window->offset);
		reserve_iova(iovad, lo, hi);
	}
}

static int iova_reserve_iommu_regions(struct device *dev,
		struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_resv_region *region;
	LIST_HEAD(resv_regions);
	int ret = 0;

	if (dev_is_pci(dev))
		iova_reserve_pci_windows(to_pci_dev(dev), iovad);

	iommu_get_resv_regions(dev, &resv_regions);
	list_for_each_entry(region, &resv_regions, list) {
		unsigned long lo, hi;

		/* We ARE the software that manages these! */
		if (region->type == IOMMU_RESV_SW_MSI)
			continue;

		lo = iova_pfn(iovad, region->start);
		hi = iova_pfn(iovad, region->start + region->length - 1);
		reserve_iova(iovad, lo, hi);

		if (region->type == IOMMU_RESV_MSI)
			ret = cookie_init_hw_msi_region(cookie, region->start,
					region->start + region->length);
		if (ret)
			break;
	}
	iommu_put_resv_regions(dev, &resv_regions);

	return ret;
}

/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 * @base: IOVA at which the mappable address space starts
 * @size: Size of IOVA space
 * @dev: Device the domain is being initialised for
 *
 * @base and @size should be exact multiples of IOMMU page granularity to
 * avoid rounding surprises. If necessary, we reserve the page at address 0
 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 * any change which could make prior IOVAs invalid will fail.
 */
int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
		u64 size, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long order, base_pfn, end_pfn;

	if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
		return -EINVAL;

	/* Use the smallest supported page size for IOVA granularity */
	order = __ffs(domain->pgsize_bitmap);
	base_pfn = max_t(unsigned long, 1, base >> order);
	end_pfn = (base + size - 1) >> order;

	/* Check the domain allows at least some access to the device... */
	if (domain->geometry.force_aperture) {
		if (base > domain->geometry.aperture_end ||
		    base + size <= domain->geometry.aperture_start) {
			pr_warn("specified DMA range outside IOMMU capability\n");
			return -EFAULT;
		}
		/* ...then finally give it a kicking to make sure it fits */
		base_pfn = max_t(unsigned long, base_pfn,
				domain->geometry.aperture_start >> order);
	}

	/* start_pfn is always nonzero for an already-initialised domain */
	if (iovad->start_pfn) {
		if (1UL << order != iovad->granule ||
		    base_pfn != iovad->start_pfn) {
			pr_warn("Incompatible range for DMA domain\n");
			return -EFAULT;
		}

		return 0;
	}

	init_iova_domain(iovad, 1UL << order, base_pfn);
	if (!dev)
		return 0;

	return iova_reserve_iommu_regions(dev, domain);
}
EXPORT_SYMBOL(iommu_dma_init_domain);
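
/*
 * Illustrative sketch (assumed caller, not part of this file): arch code
 * wiring a device up to IOMMU DMA ops would initialise the domain over the
 * device's usable DMA window, e.g.
 *
 *	if (iommu_dma_init_domain(domain, dma_base, dma_size, dev))
 *		goto out_fallback;
 *
 * where dma_base/dma_size describe the bus address range the device may use.
 */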

/**
 * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
 *                    page flags.
 * @dir: Direction of DMA transfer
 * @coherent: Is the DMA master cache-coherent?
 * @attrs: DMA attributes for the mapping
 *
 * Return: corresponding IOMMU API page protection flags
 */
int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
		     unsigned long attrs)
{
	int prot = coherent ? IOMMU_CACHE : 0;

	if (attrs & DMA_ATTR_PRIVILEGED)
		prot |= IOMMU_PRIV;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;
	default:
		return 0;
	}
}
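
/*
 * For example, a cache-coherent device mapping a buffer DMA_BIDIRECTIONAL
 * with DMA_ATTR_PRIVILEGED set would get
 * IOMMU_CACHE | IOMMU_PRIV | IOMMU_READ | IOMMU_WRITE.
 */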

static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
		size_t size, dma_addr_t dma_limit, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long shift, iova_len, iova = 0;

	if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
		cookie->msi_iova += size;
		return cookie->msi_iova - size;
	}

	shift = iova_shift(iovad);
	iova_len = size >> shift;
	/*
	 * Freeing non-power-of-two-sized allocations back into the IOVA caches
	 * will come back to bite us badly, so we have to waste a bit of space
	 * rounding up anything cacheable to make sure that can't happen. The
	 * order of the unadjusted size will still match upon freeing.
	 */
	if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
		iova_len = roundup_pow_of_two(iova_len);

	if (domain->geometry.force_aperture)
		dma_limit = min(dma_limit, domain->geometry.aperture_end);

	/* Try to get PCI devices a SAC address */
	if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
		iova = alloc_iova_fast(iovad, iova_len,
				       DMA_BIT_MASK(32) >> shift, false);

	if (!iova)
		iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
				       true);

	return (dma_addr_t)iova << shift;
}
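
/*
 * As a concrete example of the rounding above: with a 4KB IOVA granule, a
 * 20-page (80KB) request is rounded up to 32 pages (128KB) so that, when
 * freed, it fits back into a power-of-two rcache size class; a request
 * already too big for the caches is left unrounded.
 */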

static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
		dma_addr_t iova, size_t size)
{
	struct iova_domain *iovad = &cookie->iovad;

	/* The MSI case is only ever cleaning up its most recent allocation */
	if (cookie->type == IOMMU_DMA_MSI_COOKIE)
		cookie->msi_iova -= size;
	else
		free_iova_fast(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad));
}

static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr,
		size_t size)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, dma_addr);

	dma_addr -= iova_off;
	size = iova_align(iovad, size + iova_off);

	WARN_ON(iommu_unmap(domain, dma_addr, size) != size);
	iommu_dma_free_iova(cookie, dma_addr, size);
}

static void __iommu_dma_free_pages(struct page **pages, int count)
{
	while (count--)
		__free_page(pages[count]);
	kvfree(pages);
}

static struct page **__iommu_dma_alloc_pages(unsigned int count,
		unsigned long order_mask, gfp_t gfp)
{
	struct page **pages;
	unsigned int i = 0, array_size = count * sizeof(*pages);

	order_mask &= (2U << MAX_ORDER) - 1;
	if (!order_mask)
		return NULL;

	if (array_size <= PAGE_SIZE)
		pages = kzalloc(array_size, GFP_KERNEL);
	else
		pages = vzalloc(array_size);
	if (!pages)
		return NULL;

	/* IOMMU can map any pages, so highmem can also be used here */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	while (count) {
		struct page *page = NULL;
		unsigned int order_size;

		/*
		 * Higher-order allocations are a convenience rather
		 * than a necessity, hence using __GFP_NORETRY until
		 * falling back to minimum-order allocations.
		 */
		for (order_mask &= (2U << __fls(count)) - 1;
		     order_mask; order_mask &= ~order_size) {
			unsigned int order = __fls(order_mask);

			order_size = 1U << order;
			page = alloc_pages((order_mask - order_size) ?
					   gfp | __GFP_NORETRY : gfp, order);
			if (!page)
				continue;
			if (!order)
				break;
			if (!PageCompound(page)) {
				split_page(page, order);
				break;
			} else if (!split_huge_page(page)) {
				break;
			}
			__free_pages(page, order);
		}
		if (!page) {
			__iommu_dma_free_pages(pages, i);
			return NULL;
		}
		count -= order_size;
		while (order_size--)
			pages[i++] = page++;
	}
	return pages;
}
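
/*
 * Worked example (assuming 4KB PAGE_SIZE and an IOMMU supporting 4KB and
 * 2MB mappings): for a 3MB buffer, count = 768 and order_mask = 0x201, so
 * the loop first attempts an order-9 (2MB) allocation with __GFP_NORETRY,
 * then covers the remaining 256 pages with order-0 allocations.
 */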

/**
 * iommu_dma_free - Free a buffer allocated by iommu_dma_alloc()
 * @dev: Device which owns this buffer
 * @pages: Array of buffer pages as returned by iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @handle: DMA address of buffer
 *
 * Frees both the pages associated with the buffer, and the array
 * describing them
 */
void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
		dma_addr_t *handle)
{
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), *handle, size);
	__iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
	*handle = IOMMU_MAPPING_ERROR;
}

/**
 * iommu_dma_alloc - Allocate and map a buffer contiguous in IOVA space
 * @dev: Device to allocate memory for. Must be a real device
 *	 attached to an iommu_dma_domain
 * @size: Size of buffer in bytes
 * @gfp: Allocation flags
 * @attrs: DMA attributes for this allocation
 * @prot: IOMMU mapping flags
 * @handle: Out argument for allocated DMA handle
 * @flush_page: Arch callback which must ensure PAGE_SIZE bytes from the
 *		given VA/PA are visible to the given non-coherent device.
 *
 * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
 * but an IOMMU which supports smaller pages might not map the whole thing.
 *
 * Return: Array of struct page pointers describing the buffer,
 *	   or NULL on failure.
 */
struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
		unsigned long attrs, int prot, dma_addr_t *handle,
		void (*flush_page)(struct device *, const void *, phys_addr_t))
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct page **pages;
	struct sg_table sgt;
	dma_addr_t iova;
	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;

	*handle = IOMMU_MAPPING_ERROR;

	min_size = alloc_sizes & -alloc_sizes;
	if (min_size < PAGE_SIZE) {
		min_size = PAGE_SIZE;
		alloc_sizes |= PAGE_SIZE;
	} else {
		size = ALIGN(size, min_size);
	}
	if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
		alloc_sizes = min_size;

	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	pages = __iommu_dma_alloc_pages(count, alloc_sizes >> PAGE_SHIFT, gfp);
	if (!pages)
		return NULL;

	size = iova_align(iovad, size);
	iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
	if (!iova)
		goto out_free_pages;

	if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
		goto out_free_iova;

	if (!(prot & IOMMU_CACHE)) {
		struct sg_mapping_iter miter;
		/*
		 * The CPU-centric flushing implied by SG_MITER_TO_SG isn't
		 * sufficient here, so skip it by using the "wrong" direction.
		 */
		sg_miter_start(&miter, sgt.sgl, sgt.orig_nents, SG_MITER_FROM_SG);
		while (sg_miter_next(&miter))
			flush_page(dev, miter.addr, page_to_phys(miter.page));
		sg_miter_stop(&miter);
	}

	if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, prot)
			< size)
		goto out_free_sg;

	*handle = iova;
	sg_free_table(&sgt);
	return pages;

out_free_sg:
	sg_free_table(&sgt);
out_free_iova:
	iommu_dma_free_iova(cookie, iova, size);
out_free_pages:
	__iommu_dma_free_pages(pages, count);
	return NULL;
}
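
/*
 * Illustrative sketch of a caller (assumed names): a non-coherent arch
 * backend might provide a trivial flush callback and allocate like so:
 *
 *	static void mydrv_flush_page(struct device *dev, const void *virt,
 *				     phys_addr_t phys)
 *	{
 *		arch_flush_dcache_area(virt, PAGE_SIZE);
 *	}
 *
 *	pages = iommu_dma_alloc(dev, size, gfp, attrs, ioprot, &iova,
 *				mydrv_flush_page);
 *
 * arch_flush_dcache_area() stands in for whatever cache-maintenance
 * primitive the architecture actually provides.
 */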

/**
 * iommu_dma_mmap - Map a buffer into provided user VMA
 * @pages: Array representing buffer from iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @vma: VMA describing requested userspace mapping
 *
 * Maps the pages of the buffer in @pages into @vma. The caller is responsible
 * for verifying the correct size and protection of @vma beforehand.
 */
int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma)
{
	unsigned long uaddr = vma->vm_start;
	unsigned int i, count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	int ret = -ENXIO;

	for (i = vma->vm_pgoff; i < count && uaddr < vma->vm_end; i++) {
		ret = vm_insert_page(vma, uaddr, pages[i]);
		if (ret)
			break;
		uaddr += PAGE_SIZE;
	}
	return ret;
}
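
/*
 * Illustrative sketch (assumed caller): an arch ->mmap op that remapped the
 * page array into vmalloc space at alloc time can recover it and delegate:
 *
 *	struct vm_struct *area = find_vm_area(cpu_addr);
 *
 *	if (area && area->pages)
 *		return iommu_dma_mmap(area->pages, size, vma);
 *
 * The find_vm_area() trick is one way a caller might stash the array; it is
 * an assumption about the caller, not a requirement of this interface.
 */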

static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
		size_t size, int prot)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	size_t iova_off = 0;
	dma_addr_t iova;

	if (cookie->type == IOMMU_DMA_IOVA_COOKIE) {
		iova_off = iova_offset(&cookie->iovad, phys);
		size = iova_align(&cookie->iovad, size + iova_off);
	}

	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
	if (!iova)
		return IOMMU_MAPPING_ERROR;

	if (iommu_map(domain, iova, phys - iova_off, size, prot)) {
		iommu_dma_free_iova(cookie, iova, size);
		return IOMMU_MAPPING_ERROR;
	}
	return iova + iova_off;
}

dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, int prot)
{
	return __iommu_dma_map(dev, page_to_phys(page) + offset, size, prot);
}

void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle, size);
}

/*
 * Prepare a successfully-mapped scatterlist to give back to the caller.
 *
 * At this point the segments are already laid out by iommu_dma_map_sg() to
 * avoid individually crossing any boundaries, so we merely need to check a
 * segment's start address to avoid concatenating across one.
 */
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
		dma_addr_t dma_addr)
{
	struct scatterlist *s, *cur = sg;
	unsigned long seg_mask = dma_get_seg_boundary(dev);
	unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
	int i, count = 0;

	for_each_sg(sg, s, nents, i) {
		/* Restore this segment's original unaligned fields first */
		unsigned int s_iova_off = sg_dma_address(s);
		unsigned int s_length = sg_dma_len(s);
		unsigned int s_iova_len = s->length;

		s->offset += s_iova_off;
		s->length = s_length;
		sg_dma_address(s) = IOMMU_MAPPING_ERROR;
		sg_dma_len(s) = 0;

		/*
		 * Now fill in the real DMA data. If...
		 * - there is a valid output segment to append to
		 * - and this segment starts on an IOVA page boundary
		 * - but doesn't fall at a segment boundary
		 * - and wouldn't make the resulting output segment too long
		 */
		if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
		    (cur_len + s_length <= max_len)) {
			/* ...then concatenate it with the previous one */
			cur_len += s_length;
		} else {
			/* Otherwise start the next output segment */
			if (i > 0)
				cur = sg_next(cur);
			cur_len = s_length;
			count++;

			sg_dma_address(cur) = dma_addr + s_iova_off;
		}

		sg_dma_len(cur) = cur_len;
		dma_addr += s_iova_len;

		if (s_length + s_iova_off < s_iova_len)
			cur_len = 0;
	}
	return count;
}

/*
 * If mapping failed, then just restore the original list,
 * but making sure the DMA fields are invalidated.
 */
static void __invalidate_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_address(s) != IOMMU_MAPPING_ERROR)
			s->offset += sg_dma_address(s);
		if (sg_dma_len(s))
			s->length = sg_dma_len(s);
		sg_dma_address(s) = IOMMU_MAPPING_ERROR;
		sg_dma_len(s) = 0;
	}
}

/*
 * The DMA API client is passing in a scatterlist which could describe
 * any old buffer layout, but the IOMMU API requires everything to be
 * aligned to IOMMU pages. Hence the need for this complicated bit of
 * impedance-matching, to be able to hand off a suitably-aligned list,
 * but still preserve the original offsets and sizes for the caller.
 */
int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, int prot)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct scatterlist *s, *prev = NULL;
	dma_addr_t iova;
	size_t iova_len = 0;
	unsigned long mask = dma_get_seg_boundary(dev);
	int i;

	/*
	 * Work out how much IOVA space we need, and align the segments to
	 * IOVA granules for the IOMMU driver to handle. With some clever
	 * trickery we can modify the list in-place, but reversibly, by
	 * stashing the unaligned parts in the as-yet-unused DMA fields.
	 */
	for_each_sg(sg, s, nents, i) {
		size_t s_iova_off = iova_offset(iovad, s->offset);
		size_t s_length = s->length;
		size_t pad_len = (mask - iova_len + 1) & mask;

		sg_dma_address(s) = s_iova_off;
		sg_dma_len(s) = s_length;
		s->offset -= s_iova_off;
		s_length = iova_align(iovad, s_length + s_iova_off);
		s->length = s_length;

		/*
		 * Due to the alignment of our single IOVA allocation, we can
		 * depend on these assumptions about the segment boundary mask:
		 * - If mask size >= IOVA size, then the IOVA range cannot
		 *   possibly fall across a boundary, so we don't care.
		 * - If mask size < IOVA size, then the IOVA range must start
		 *   exactly on a boundary, therefore we can lay things out
		 *   based purely on segment lengths without needing to know
		 *   the actual addresses beforehand.
		 * - The mask must be a power of 2, so pad_len == 0 if
		 *   iova_len == 0, thus we cannot dereference prev the first
		 *   time through here (i.e. before it has a meaningful value).
		 */
		if (pad_len && pad_len < s_length - 1) {
			prev->length += pad_len;
			iova_len += pad_len;
		}

		iova_len += s_length;
		prev = s;
	}

	iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
	if (!iova)
		goto out_restore_sg;

	/*
	 * We'll leave any physical concatenation to the IOMMU driver's
	 * implementation - it knows better than we do.
	 */
	if (iommu_map_sg(domain, iova, sg, nents, prot) < iova_len)
		goto out_free_iova;

	return __finalise_sg(dev, sg, nents, iova);

out_free_iova:
	iommu_dma_free_iova(cookie, iova, iova_len);
out_restore_sg:
	__invalidate_sg(sg, nents);
	return 0;
}
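
/*
 * Worked example of the reversible trickery above (4KB granule): a segment
 * with s->offset = 0x100 and s->length = 0xe00 stashes 0x100 in
 * sg_dma_address() and 0xe00 in sg_dma_len(), then presents the IOMMU driver
 * with an aligned view (offset 0x0, length 0x1000). __finalise_sg() later
 * restores the original fields and writes the real DMA address/length back
 * into the DMA fields.
 */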

void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t start, end;
	struct scatterlist *tmp;
	int i;
	/*
	 * The scatterlist segments are mapped into a single
	 * contiguous IOVA allocation, so this is incredibly easy.
	 */
	start = sg_dma_address(sg);
	for_each_sg(sg_next(sg), tmp, nents - 1, i) {
		if (sg_dma_len(tmp) == 0)
			break;
		sg = tmp;
	}
	end = sg_dma_address(sg) + sg_dma_len(sg);
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), start, end - start);
}

dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return __iommu_dma_map(dev, phys, size,
			dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO);
}

void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle, size);
}

int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == IOMMU_MAPPING_ERROR;
}

static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
		phys_addr_t msi_addr, struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi_page;
	dma_addr_t iova;
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
	size_t size = cookie_msi_granule(cookie);

	msi_addr &= ~(phys_addr_t)(size - 1);
	list_for_each_entry(msi_page, &cookie->msi_page_list, list)
		if (msi_page->phys == msi_addr)
			return msi_page;

	msi_page = kzalloc(sizeof(*msi_page), GFP_ATOMIC);
	if (!msi_page)
		return NULL;

	iova = __iommu_dma_map(dev, msi_addr, size, prot);
	if (iommu_dma_mapping_error(dev, iova))
		goto out_free_page;

	INIT_LIST_HEAD(&msi_page->list);
	msi_page->phys = msi_addr;
	msi_page->iova = iova;
	list_add(&msi_page->list, &cookie->msi_page_list);
	return msi_page;

out_free_page:
	kfree(msi_page);
	return NULL;
}

void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg)
{
	struct device *dev = msi_desc_to_dev(irq_get_msi_desc(irq));
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_cookie *cookie;
	struct iommu_dma_msi_page *msi_page;
	phys_addr_t msi_addr = (u64)msg->address_hi << 32 | msg->address_lo;
	unsigned long flags;

	if (!domain || !domain->iova_cookie)
		return;

	cookie = domain->iova_cookie;

	/*
	 * We disable IRQs to rule out a possible inversion against
	 * irq_desc_lock if, say, someone tries to retarget the affinity
	 * of an MSI from within an IPI handler.
	 */
	spin_lock_irqsave(&cookie->msi_lock, flags);
	msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
	spin_unlock_irqrestore(&cookie->msi_lock, flags);

	if (WARN_ON(!msi_page)) {
		/*
		 * We're called from a void callback, so the best we can do is
		 * 'fail' by filling the message with obviously bogus values.
		 * Since we got this far due to an IOMMU being present, it's
		 * not like the existing address would have worked anyway...
		 */
		msg->address_hi = ~0U;
		msg->address_lo = ~0U;
		msg->data = ~0U;
	} else {
		msg->address_hi = upper_32_bits(msi_page->iova);
		msg->address_lo &= cookie_msi_granule(cookie) - 1;
		msg->address_lo += lower_32_bits(msi_page->iova);
	}
}