/*
 * A fairly generic DMA-API to IOMMU-API glue layer.
 *
 * Copyright (C) 2014-2015 ARM Ltd.
 *
 * based in part on arch/arm/mm/dma-mapping.c:
 * Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/acpi_iort.h>
#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/dma-noncoherent.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>

struct iommu_dma_msi_page {
	struct list_head	list;
	dma_addr_t		iova;
	phys_addr_t		phys;
};

enum iommu_dma_cookie_type {
	IOMMU_DMA_IOVA_COOKIE,
	IOMMU_DMA_MSI_COOKIE,
};

struct iommu_dma_cookie {
	enum iommu_dma_cookie_type	type;
	union {
		/* Full allocator for IOMMU_DMA_IOVA_COOKIE */
		struct iova_domain	iovad;
		/* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
		dma_addr_t		msi_iova;
	};
	struct list_head		msi_page_list;
	spinlock_t			msi_lock;

	/* Domain for flush queue callback; NULL if flush queue not in use */
	struct iommu_domain		*fq_domain;
};

static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
{
	if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
		return cookie->iovad.granule;
	return PAGE_SIZE;
}

static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
{
	struct iommu_dma_cookie *cookie;

	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
	if (cookie) {
		spin_lock_init(&cookie->msi_lock);
		INIT_LIST_HEAD(&cookie->msi_page_list);
		cookie->type = type;
	}
	return cookie;
}

int iommu_dma_init(void)
{
	return iova_cache_get();
}

/**
 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
 * @domain: IOMMU domain to prepare for DMA-API usage
 *
 * IOMMU drivers should normally call this from their domain_alloc
 * callback when domain->type == IOMMU_DOMAIN_DMA.
 */
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
	if (domain->iova_cookie)
		return -EEXIST;

	domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
	if (!domain->iova_cookie)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(iommu_get_dma_cookie);

/**
 * iommu_get_msi_cookie - Acquire just MSI remapping resources
 * @domain: IOMMU domain to prepare
 * @base: Start address of IOVA region for MSI mappings
 *
 * Users who manage their own IOVA allocation and do not want DMA API support,
 * but would still like to take advantage of automatic MSI remapping, can use
 * this to initialise their own domain appropriately. Users should reserve a
 * contiguous IOVA region, starting at @base, large enough to accommodate the
 * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
 * used by the devices attached to @domain.
 */
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
	struct iommu_dma_cookie *cookie;

	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;

	if (domain->iova_cookie)
		return -EEXIST;

	cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
	if (!cookie)
		return -ENOMEM;

	cookie->msi_iova = base;
	domain->iova_cookie = cookie;
	return 0;
}
EXPORT_SYMBOL(iommu_get_msi_cookie);
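
/*
 * Example: an illustrative sketch (not code from this file) of a caller
 * that owns an unmanaged domain but still wants remapped MSIs. The base
 * address is made up; the caller must have set aside that IOVA region
 * itself:
 *
 *	domain = iommu_domain_alloc(&pci_bus_type);
 *	if (!domain)
 *		return -ENOMEM;
 *	if (iommu_get_msi_cookie(domain, MY_MSI_IOVA_BASE)) {
 *		iommu_domain_free(domain);
 *		return -ENOMEM;
 *	}
 *	(IOVAs from MY_MSI_IOVA_BASE upwards now belong to the cookie)
 */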

/**
 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
 *          iommu_get_msi_cookie()
 *
 * IOMMU drivers should normally call this from their domain_free callback.
 */
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi, *tmp;

	if (!cookie)
		return;

	if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule)
		put_iova_domain(&cookie->iovad);

	list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
		list_del(&msi->list);
		kfree(msi);
	}
	kfree(cookie);
	domain->iova_cookie = NULL;
}
EXPORT_SYMBOL(iommu_put_dma_cookie);
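
/*
 * Example: an illustrative sketch of the pairing the two kernel-docs above
 * describe, in a hypothetical IOMMU driver (the "my_*" names are made up).
 * The cookie is acquired when a DMA domain is allocated and released when
 * the domain is freed:
 *
 *	static struct iommu_domain *my_domain_alloc(unsigned type)
 *	{
 *		struct my_domain *md = kzalloc(sizeof(*md), GFP_KERNEL);
 *
 *		if (!md)
 *			return NULL;
 *		if (type == IOMMU_DOMAIN_DMA &&
 *		    iommu_get_dma_cookie(&md->domain)) {
 *			kfree(md);
 *			return NULL;
 *		}
 *		return &md->domain;
 *	}
 *
 *	static void my_domain_free(struct iommu_domain *domain)
 *	{
 *		iommu_put_dma_cookie(domain);
 *		kfree(container_of(domain, struct my_domain, domain));
 *	}
 */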

/**
 * iommu_dma_get_resv_regions - Reserved region driver helper
 * @dev: Device from iommu_get_resv_regions()
 * @list: Reserved region list from iommu_get_resv_regions()
 *
 * IOMMU drivers can use this to implement their .get_resv_regions callback
 * for general non-IOMMU-specific reservations. Currently, this covers GICv3
 * ITS region reservation on ACPI-based ARM platforms that may require HW MSI
 * reservation.
 */
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{
	if (!is_of_node(dev_iommu_fwspec_get(dev)->iommu_fwnode))
		iort_iommu_msi_get_resv_regions(dev, list);
}
EXPORT_SYMBOL(iommu_dma_get_resv_regions);
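
/*
 * Example: a driver with no extra reservations of its own can hand off
 * directly, or wrap the helper as sketched here (illustrative only, the
 * "my_*" names are hypothetical):
 *
 *	static void my_get_resv_regions(struct device *dev,
 *					struct list_head *head)
 *	{
 *		iommu_dma_get_resv_regions(dev, head);
 *		(then append any driver-specific regions to "head")
 *	}
 */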

static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
		phys_addr_t start, phys_addr_t end)
{
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_dma_msi_page *msi_page;
	int i, num_pages;

	start -= iova_offset(iovad, start);
	num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);

	msi_page = kcalloc(num_pages, sizeof(*msi_page), GFP_KERNEL);
	if (!msi_page)
		return -ENOMEM;

	for (i = 0; i < num_pages; i++) {
		msi_page[i].phys = start;
		msi_page[i].iova = start;
		INIT_LIST_HEAD(&msi_page[i].list);
		list_add(&msi_page[i].list, &cookie->msi_page_list);
		start += iovad->granule;
	}

	return 0;
}

static int iova_reserve_pci_windows(struct pci_dev *dev,
		struct iova_domain *iovad)
{
	struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
	struct resource_entry *window;
	unsigned long lo, hi;
	phys_addr_t start = 0, end;

	resource_list_for_each_entry(window, &bridge->windows) {
		if (resource_type(window->res) != IORESOURCE_MEM)
			continue;

		lo = iova_pfn(iovad, window->res->start - window->offset);
		hi = iova_pfn(iovad, window->res->end - window->offset);
		reserve_iova(iovad, lo, hi);
	}

	/* Get reserved DMA windows from host bridge */
	resource_list_for_each_entry(window, &bridge->dma_ranges) {
		end = window->res->start - window->offset;
resv_iova:
		if (end > start) {
			lo = iova_pfn(iovad, start);
			hi = iova_pfn(iovad, end);
			reserve_iova(iovad, lo, hi);
		} else {
			/* dma_ranges list should be sorted */
			dev_err(&dev->dev, "Failed to reserve IOVA\n");
			return -EINVAL;
		}

		start = window->res->end - window->offset + 1;
		/* If window is last entry */
		if (window->node.next == &bridge->dma_ranges &&
		    end != ~(dma_addr_t)0) {
			end = ~(dma_addr_t)0;
			goto resv_iova;
		}
	}

	return 0;
}

static int iova_reserve_iommu_regions(struct device *dev,
		struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_resv_region *region;
	LIST_HEAD(resv_regions);
	int ret = 0;

	if (dev_is_pci(dev)) {
		ret = iova_reserve_pci_windows(to_pci_dev(dev), iovad);
		if (ret)
			return ret;
	}

	iommu_get_resv_regions(dev, &resv_regions);
	list_for_each_entry(region, &resv_regions, list) {
		unsigned long lo, hi;

		/* We ARE the software that manages these! */
		if (region->type == IOMMU_RESV_SW_MSI)
			continue;

		lo = iova_pfn(iovad, region->start);
		hi = iova_pfn(iovad, region->start + region->length - 1);
		reserve_iova(iovad, lo, hi);

		if (region->type == IOMMU_RESV_MSI)
			ret = cookie_init_hw_msi_region(cookie, region->start,
					region->start + region->length);
		if (ret)
			break;
	}
	iommu_put_resv_regions(dev, &resv_regions);

	return ret;
}

static void iommu_dma_flush_iotlb_all(struct iova_domain *iovad)
{
	struct iommu_dma_cookie *cookie;
	struct iommu_domain *domain;

	cookie = container_of(iovad, struct iommu_dma_cookie, iovad);
	domain = cookie->fq_domain;
	/*
	 * The IOMMU driver supporting DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE
	 * implies that ops->flush_iotlb_all must be non-NULL.
	 */
	domain->ops->flush_iotlb_all(domain);
}

/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 * @base: IOVA at which the mappable address space starts
 * @size: Size of IOVA space
 * @dev: Device the domain is being initialised for
 *
 * @base and @size should be exact multiples of IOMMU page granularity to
 * avoid rounding surprises. If necessary, we reserve the page at address 0
 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 * any change which could make prior IOVAs invalid will fail.
 */
int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
		u64 size, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long order, base_pfn;
	int attr;

	if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
		return -EINVAL;

	/* Use the smallest supported page size for IOVA granularity */
	order = __ffs(domain->pgsize_bitmap);
	base_pfn = max_t(unsigned long, 1, base >> order);

	/* Check the domain allows at least some access to the device... */
	if (domain->geometry.force_aperture) {
		if (base > domain->geometry.aperture_end ||
		    base + size <= domain->geometry.aperture_start) {
			pr_warn("specified DMA range outside IOMMU capability\n");
			return -EFAULT;
		}
		/* ...then finally give it a kicking to make sure it fits */
		base_pfn = max_t(unsigned long, base_pfn,
				domain->geometry.aperture_start >> order);
	}

	/* start_pfn is always nonzero for an already-initialised domain */
	if (iovad->start_pfn) {
		if (1UL << order != iovad->granule ||
		    base_pfn != iovad->start_pfn) {
			pr_warn("Incompatible range for DMA domain\n");
			return -EFAULT;
		}

		return 0;
	}

	init_iova_domain(iovad, 1UL << order, base_pfn);

	if (!cookie->fq_domain && !iommu_domain_get_attr(domain,
			DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr) && attr) {
		cookie->fq_domain = domain;
		init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all, NULL);
	}

	if (!dev)
		return 0;

	return iova_reserve_iommu_regions(dev, domain);
}
EXPORT_SYMBOL(iommu_dma_init_domain);
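
/*
 * Example: an illustrative sketch of the arch/bus glue that sizes a
 * device's DMA domain (the 4GB extent here is a made-up policy, not code
 * from this file):
 *
 *	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
 *	u64 base = 0, size = SZ_4G;
 *
 *	if (iommu_dma_init_domain(domain, base, size, dev))
 *		dev_warn(dev, "failed to initialise DMA domain\n");
 */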

/**
 * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
 *                    page flags.
 * @dir: Direction of DMA transfer
 * @coherent: Is the DMA master cache-coherent?
 * @attrs: DMA attributes for the mapping
 *
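 * For example, a non-coherent DMA_TO_DEVICE mapping translates to plain
 * IOMMU_READ, while a coherent DMA_BIDIRECTIONAL mapping with
 * DMA_ATTR_PRIVILEGED translates to IOMMU_READ | IOMMU_WRITE |
 * IOMMU_CACHE | IOMMU_PRIV.
 *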
 * Return: corresponding IOMMU API page protection flags
 */
int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
		     unsigned long attrs)
{
	int prot = coherent ? IOMMU_CACHE : 0;

	if (attrs & DMA_ATTR_PRIVILEGED)
		prot |= IOMMU_PRIV;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;
	default:
		return 0;
	}
}

static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
		size_t size, dma_addr_t dma_limit, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long shift, iova_len, iova = 0;

	if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
		cookie->msi_iova += size;
		return cookie->msi_iova - size;
	}

	shift = iova_shift(iovad);
	iova_len = size >> shift;
	/*
	 * Freeing non-power-of-two-sized allocations back into the IOVA caches
	 * will come back to bite us badly, so we have to waste a bit of space
	 * rounding up anything cacheable to make sure that can't happen. The
	 * order of the unadjusted size will still match upon freeing.
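	 *
	 * For example, with 4K granules and IOVA_RANGE_CACHE_MAX_SIZE == 6,
	 * a 5-page request is padded to 8 pages so that it always matches an
	 * rcache size class, while requests of 32 pages or more lie beyond
	 * the cached range and are left untouched.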
	 */
	if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
		iova_len = roundup_pow_of_two(iova_len);

	if (dev->bus_dma_mask)
		dma_limit &= dev->bus_dma_mask;

	if (domain->geometry.force_aperture)
		dma_limit = min(dma_limit, domain->geometry.aperture_end);

	/* Try to get PCI devices a SAC address */
	if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
		iova = alloc_iova_fast(iovad, iova_len,
				       DMA_BIT_MASK(32) >> shift, false);

	if (!iova)
		iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
				       true);

	return (dma_addr_t)iova << shift;
}

static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
		dma_addr_t iova, size_t size)
{
	struct iova_domain *iovad = &cookie->iovad;

	/* The MSI case is only ever cleaning up its most recent allocation */
	if (cookie->type == IOMMU_DMA_MSI_COOKIE)
		cookie->msi_iova -= size;
	else if (cookie->fq_domain)	/* non-strict mode */
		queue_iova(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad), 0);
	else
		free_iova_fast(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad));
}

static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr,
		size_t size)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, dma_addr);

	dma_addr -= iova_off;
	size = iova_align(iovad, size + iova_off);

	WARN_ON(iommu_unmap_fast(domain, dma_addr, size) != size);
	if (!cookie->fq_domain)
		iommu_tlb_sync(domain);
	iommu_dma_free_iova(cookie, dma_addr, size);
}

static void __iommu_dma_free_pages(struct page **pages, int count)
{
	while (count--)
		__free_page(pages[count]);
	kvfree(pages);
}

static struct page **__iommu_dma_alloc_pages(struct device *dev,
		unsigned int count, unsigned long order_mask, gfp_t gfp)
{
	struct page **pages;
	unsigned int i = 0, nid = dev_to_node(dev);

	order_mask &= (2U << MAX_ORDER) - 1;
	if (!order_mask)
		return NULL;

	pages = kvzalloc(count * sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;

	/* IOMMU can map any pages, so highmem can also be used here */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	while (count) {
		struct page *page = NULL;
		unsigned int order_size;

		/*
		 * Higher-order allocations are a convenience rather
		 * than a necessity, hence using __GFP_NORETRY until
		 * falling back to minimum-order allocations.
		 */
		for (order_mask &= (2U << __fls(count)) - 1;
		     order_mask; order_mask &= ~order_size) {
			unsigned int order = __fls(order_mask);
			gfp_t alloc_flags = gfp;

			order_size = 1U << order;
			if (order_mask > order_size)
				alloc_flags |= __GFP_NORETRY;
			page = alloc_pages_node(nid, alloc_flags, order);
			if (!page)
				continue;
			if (!order)
				break;
			if (!PageCompound(page)) {
				split_page(page, order);
				break;
			} else if (!split_huge_page(page)) {
				break;
			}
			__free_pages(page, order);
		}
		if (!page) {
			__iommu_dma_free_pages(pages, i);
			return NULL;
		}
		count -= order_size;
		while (order_size--)
			pages[i++] = page++;
	}
	return pages;
}

/**
 * iommu_dma_free - Free a buffer allocated by iommu_dma_alloc()
 * @dev: Device which owns this buffer
 * @pages: Array of buffer pages as returned by iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @handle: DMA address of buffer
 *
 * Frees both the pages associated with the buffer, and the array
 * describing them.
 */
void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
		dma_addr_t *handle)
{
	__iommu_dma_unmap(iommu_get_dma_domain(dev), *handle, size);
	__iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
	*handle = DMA_MAPPING_ERROR;
}

/**
 * iommu_dma_alloc - Allocate and map a buffer contiguous in IOVA space
 * @dev: Device to allocate memory for. Must be a real device
 *	 attached to an iommu_dma_domain
 * @size: Size of buffer in bytes
 * @gfp: Allocation flags
 * @attrs: DMA attributes for this allocation
 * @prot: IOMMU mapping flags
 * @handle: Out argument for allocated DMA handle
 *
 * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
 * but an IOMMU which supports smaller pages might not map the whole thing.
 *
 * Return: Array of struct page pointers describing the buffer,
 *	   or NULL on failure.
 */
struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
		unsigned long attrs, int prot, dma_addr_t *handle)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct page **pages;
	struct sg_table sgt;
	dma_addr_t iova;
	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;

	*handle = DMA_MAPPING_ERROR;

	min_size = alloc_sizes & -alloc_sizes;
	if (min_size < PAGE_SIZE) {
		min_size = PAGE_SIZE;
		alloc_sizes |= PAGE_SIZE;
	} else {
		size = ALIGN(size, min_size);
	}
	if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
		alloc_sizes = min_size;

	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	pages = __iommu_dma_alloc_pages(dev, count, alloc_sizes >> PAGE_SHIFT,
					gfp);
	if (!pages)
		return NULL;

	size = iova_align(iovad, size);
	iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
	if (!iova)
		goto out_free_pages;

	if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
		goto out_free_iova;

	if (!(prot & IOMMU_CACHE)) {
		struct sg_mapping_iter miter;
		/*
		 * The CPU-centric flushing implied by SG_MITER_TO_SG isn't
		 * sufficient here, so skip it by using the "wrong" direction.
		 */
		sg_miter_start(&miter, sgt.sgl, sgt.orig_nents, SG_MITER_FROM_SG);
		while (sg_miter_next(&miter))
			arch_dma_prep_coherent(miter.page, PAGE_SIZE);
		sg_miter_stop(&miter);
	}

	if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, prot)
			< size)
		goto out_free_sg;

	*handle = iova;
	sg_free_table(&sgt);
	return pages;

out_free_sg:
	sg_free_table(&sgt);
out_free_iova:
	iommu_dma_free_iova(cookie, iova, size);
out_free_pages:
	__iommu_dma_free_pages(pages, count);
	return NULL;
}

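/*
 * Example: an illustrative sketch of an arch DMA backend using the
 * allocation helpers above (error handling trimmed; "ioprot" would come
 * from dma_info_to_prot(), and remapping pages[] for the CPU is the
 * caller's responsibility):
 *
 *	struct page **pages;
 *	dma_addr_t handle;
 *
 *	pages = iommu_dma_alloc(dev, size, gfp, attrs, ioprot, &handle);
 *	if (!pages)
 *		return NULL;
 *	(map pages[] into the CPU's view, hand "handle" to the device)
 *	...
 *	iommu_dma_free(dev, pages, size, &handle);
 */
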
/**
 * iommu_dma_mmap - Map a buffer into provided user VMA
 * @pages: Array representing buffer from iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @vma: VMA describing requested userspace mapping
 *
 * Maps the pages of the buffer in @pages into @vma. The caller is responsible
 * for verifying the correct size and protection of @vma beforehand.
 */
int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma)
{
	return vm_map_pages(vma, pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
}

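/*
 * Example: an illustrative sketch of a caller's mmap path, including the
 * size check it is responsible for ("buf" is a hypothetical structure
 * holding the iommu_dma_alloc() results):
 *
 *	if (vma_pages(vma) > PAGE_ALIGN(buf->size) >> PAGE_SHIFT)
 *		return -ENXIO;
 *	return iommu_dma_mmap(buf->pages, buf->size, vma);
 */
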
static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
		size_t size, int prot, struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	size_t iova_off = 0;
	dma_addr_t iova;

	if (cookie->type == IOMMU_DMA_IOVA_COOKIE) {
		iova_off = iova_offset(&cookie->iovad, phys);
		size = iova_align(&cookie->iovad, size + iova_off);
	}

	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
	if (!iova)
		return DMA_MAPPING_ERROR;

	if (iommu_map(domain, iova, phys - iova_off, size, prot)) {
		iommu_dma_free_iova(cookie, iova, size);
		return DMA_MAPPING_ERROR;
	}
	return iova + iova_off;
}

dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, int prot)
{
	return __iommu_dma_map(dev, page_to_phys(page) + offset, size, prot,
			iommu_get_dma_domain(dev));
}

void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	__iommu_dma_unmap(iommu_get_dma_domain(dev), handle, size);
}

/*
 * Prepare a successfully-mapped scatterlist to give back to the caller.
 *
 * At this point the segments are already laid out by iommu_dma_map_sg() to
 * avoid individually crossing any boundaries, so we merely need to check a
 * segment's start address to avoid concatenating across one.
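 *
 * For example, two 4K segments that were mapped back-to-back within one
 * IOVA allocation, where the second starts on an IOVA page boundary and
 * no segment boundary mask intervenes, are handed back to the caller as
 * a single 8K DMA segment, while the stashed CPU-side offsets and
 * lengths are restored untouched.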
 */
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
		dma_addr_t dma_addr)
{
	struct scatterlist *s, *cur = sg;
	unsigned long seg_mask = dma_get_seg_boundary(dev);
	unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
	int i, count = 0;

	for_each_sg(sg, s, nents, i) {
		/* Restore this segment's original unaligned fields first */
		unsigned int s_iova_off = sg_dma_address(s);
		unsigned int s_length = sg_dma_len(s);
		unsigned int s_iova_len = s->length;

		s->offset += s_iova_off;
		s->length = s_length;
		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;

		/*
		 * Now fill in the real DMA data. If...
		 * - there is a valid output segment to append to
		 * - and this segment starts on an IOVA page boundary
		 * - but doesn't fall at a segment boundary
		 * - and wouldn't make the resulting output segment too long
		 */
		if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
		    (cur_len + s_length <= max_len)) {
			/* ...then concatenate it with the previous one */
			cur_len += s_length;
		} else {
			/* Otherwise start the next output segment */
			if (i > 0)
				cur = sg_next(cur);
			cur_len = s_length;
			count++;

			sg_dma_address(cur) = dma_addr + s_iova_off;
		}

		sg_dma_len(cur) = cur_len;
		dma_addr += s_iova_len;

		if (s_length + s_iova_off < s_iova_len)
			cur_len = 0;
	}
	return count;
}

/*
 * If mapping failed, then just restore the original list,
 * but making sure the DMA fields are invalidated.
 */
static void __invalidate_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_address(s) != DMA_MAPPING_ERROR)
			s->offset += sg_dma_address(s);
		if (sg_dma_len(s))
			s->length = sg_dma_len(s);
		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;
	}
}

/*
 * The DMA API client is passing in a scatterlist which could describe
 * any old buffer layout, but the IOMMU API requires everything to be
 * aligned to IOMMU pages. Hence the need for this complicated bit of
 * impedance-matching, to be able to hand off a suitably-aligned list,
 * but still preserve the original offsets and sizes for the caller.
 */
int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, int prot)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct scatterlist *s, *prev = NULL;
	dma_addr_t iova;
	size_t iova_len = 0;
	unsigned long mask = dma_get_seg_boundary(dev);
	int i;

	/*
	 * Work out how much IOVA space we need, and align the segments to
	 * IOVA granules for the IOMMU driver to handle. With some clever
	 * trickery we can modify the list in-place, but reversibly, by
	 * stashing the unaligned parts in the as-yet-unused DMA fields.
	 */
	for_each_sg(sg, s, nents, i) {
		size_t s_iova_off = iova_offset(iovad, s->offset);
		size_t s_length = s->length;
		size_t pad_len = (mask - iova_len + 1) & mask;

		sg_dma_address(s) = s_iova_off;
		sg_dma_len(s) = s_length;
		s->offset -= s_iova_off;
		s_length = iova_align(iovad, s_length + s_iova_off);
		s->length = s_length;

		/*
		 * Due to the alignment of our single IOVA allocation, we can
		 * depend on these assumptions about the segment boundary mask:
		 * - If mask size >= IOVA size, then the IOVA range cannot
		 *   possibly fall across a boundary, so we don't care.
		 * - If mask size < IOVA size, then the IOVA range must start
		 *   exactly on a boundary, therefore we can lay things out
		 *   based purely on segment lengths without needing to know
		 *   the actual addresses beforehand.
		 * - The mask must be a power of 2, so pad_len == 0 if
		 *   iova_len == 0, thus we cannot dereference prev the first
		 *   time through here (i.e. before it has a meaningful value).
		 */
		if (pad_len && pad_len < s_length - 1) {
			prev->length += pad_len;
			iova_len += pad_len;
		}

		iova_len += s_length;
		prev = s;
	}

	iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
	if (!iova)
		goto out_restore_sg;

	/*
	 * We'll leave any physical concatenation to the IOMMU driver's
	 * implementation - it knows better than we do.
	 */
	if (iommu_map_sg(domain, iova, sg, nents, prot) < iova_len)
		goto out_free_iova;

	return __finalise_sg(dev, sg, nents, iova);

out_free_iova:
	iommu_dma_free_iova(cookie, iova, iova_len);
out_restore_sg:
	__invalidate_sg(sg, nents);
	return 0;
}

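/*
 * Example: an illustrative sketch of the usual call flow from an arch
 * DMA backend (names and wiring are hypothetical):
 *
 *	int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);
 *	int count = iommu_dma_map_sg(dev, sgl, nents, prot);
 *
 *	if (!count)
 *		return 0;	(mapping failed, sgl already restored)
 *	for_each_sg(sgl, s, count, i)
 *		program_hw(sg_dma_address(s), sg_dma_len(s));
 */
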
void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t start, end;
	struct scatterlist *tmp;
	int i;
	/*
	 * The scatterlist segments are mapped into a single
	 * contiguous IOVA allocation, so this is incredibly easy.
	 */
	start = sg_dma_address(sg);
	for_each_sg(sg_next(sg), tmp, nents - 1, i) {
		if (sg_dma_len(tmp) == 0)
			break;
		sg = tmp;
	}
	end = sg_dma_address(sg) + sg_dma_len(sg);
	__iommu_dma_unmap(iommu_get_dma_domain(dev), start, end - start);
}

dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return __iommu_dma_map(dev, phys, size,
			dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO,
			iommu_get_dma_domain(dev));
}

void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	__iommu_dma_unmap(iommu_get_dma_domain(dev), handle, size);
}

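/*
 * Example: an illustrative sketch of mapping a slave device's MMIO FIFO
 * so another device can DMA to it ("fifo_phys" is hypothetical):
 *
 *	dma_addr_t dma = iommu_dma_map_resource(dev, fifo_phys, SZ_4K,
 *						DMA_BIDIRECTIONAL, 0);
 *	if (dma == DMA_MAPPING_ERROR)
 *		return -ENOMEM;
 *	(program the DMA engine with "dma")
 *	...
 *	iommu_dma_unmap_resource(dev, dma, SZ_4K, DMA_BIDIRECTIONAL, 0);
 */
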
static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
		phys_addr_t msi_addr, struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi_page;
	dma_addr_t iova;
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
	size_t size = cookie_msi_granule(cookie);

	msi_addr &= ~(phys_addr_t)(size - 1);
	list_for_each_entry(msi_page, &cookie->msi_page_list, list)
		if (msi_page->phys == msi_addr)
			return msi_page;

	msi_page = kzalloc(sizeof(*msi_page), GFP_ATOMIC);
	if (!msi_page)
		return NULL;

	iova = __iommu_dma_map(dev, msi_addr, size, prot, domain);
	if (iova == DMA_MAPPING_ERROR)
		goto out_free_page;

	INIT_LIST_HEAD(&msi_page->list);
	msi_page->phys = msi_addr;
	msi_page->iova = iova;
	list_add(&msi_page->list, &cookie->msi_page_list);
	return msi_page;

out_free_page:
	kfree(msi_page);
	return NULL;
}

int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
{
	struct device *dev = msi_desc_to_dev(desc);
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_cookie *cookie;
	struct iommu_dma_msi_page *msi_page;
	unsigned long flags;

	if (!domain || !domain->iova_cookie) {
		desc->iommu_cookie = NULL;
		return 0;
	}

	cookie = domain->iova_cookie;

	/*
	 * We disable IRQs to rule out a possible inversion against
	 * irq_desc_lock if, say, someone tries to retarget the affinity
	 * of an MSI from within an IPI handler.
	 */
	spin_lock_irqsave(&cookie->msi_lock, flags);
	msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
	spin_unlock_irqrestore(&cookie->msi_lock, flags);

	msi_desc_set_iommu_cookie(desc, msi_page);

	if (!msi_page)
		return -ENOMEM;
	return 0;
}

void iommu_dma_compose_msi_msg(struct msi_desc *desc,
			       struct msi_msg *msg)
{
	struct device *dev = msi_desc_to_dev(desc);
	const struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	const struct iommu_dma_msi_page *msi_page;

	msi_page = msi_desc_get_iommu_cookie(desc);

	if (!domain || !domain->iova_cookie || WARN_ON(!msi_page))
		return;

	msg->address_hi = upper_32_bits(msi_page->iova);
	msg->address_lo &= cookie_msi_granule(domain->iova_cookie) - 1;
	msg->address_lo += lower_32_bits(msi_page->iova);
}
955