// SPDX-License-Identifier: GPL-2.0-only
/*
 * A fairly generic DMA-API to IOMMU-API glue layer.
 *
 * Copyright (C) 2014-2015 ARM Ltd.
 *
 * based in part on arch/arm/mm/dma-mapping.c:
 * Copyright (C) 2000-2004 Russell King
 */

#include <linux/acpi_iort.h>
#include <linux/device.h>
#include <linux/dma-contiguous.h>
#include <linux/dma-iommu.h>
#include <linux/dma-noncoherent.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
#include <linux/crash_dump.h>

struct iommu_dma_msi_page {
	struct list_head	list;
	dma_addr_t		iova;
	phys_addr_t		phys;
};

enum iommu_dma_cookie_type {
	IOMMU_DMA_IOVA_COOKIE,
	IOMMU_DMA_MSI_COOKIE,
};

struct iommu_dma_cookie {
	enum iommu_dma_cookie_type	type;
	union {
		/* Full allocator for IOMMU_DMA_IOVA_COOKIE */
		struct iova_domain	iovad;
		/* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
		dma_addr_t		msi_iova;
	};
	struct list_head		msi_page_list;

	/* Domain for flush queue callback; NULL if flush queue not in use */
	struct iommu_domain		*fq_domain;
};

static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
{
	if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
		return cookie->iovad.granule;
	return PAGE_SIZE;
}

static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
{
	struct iommu_dma_cookie *cookie;

	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
	if (cookie) {
		INIT_LIST_HEAD(&cookie->msi_page_list);
		cookie->type = type;
	}
	return cookie;
}

/**
 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
 * @domain: IOMMU domain to prepare for DMA-API usage
 *
 * IOMMU drivers should normally call this from their domain_alloc
 * callback when domain->type == IOMMU_DOMAIN_DMA.
 */
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
	if (domain->iova_cookie)
		return -EEXIST;

	domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
	if (!domain->iova_cookie)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(iommu_get_dma_cookie);
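
/*
 * Example: a minimal sketch of how an IOMMU driver might pair the cookie
 * helpers with its domain lifecycle callbacks. The my_*() helpers are
 * hypothetical driver internals, not part of this API, and error handling
 * is trimmed to the essentials.
 *
 *	static struct iommu_domain *my_domain_alloc(unsigned int type)
 *	{
 *		struct iommu_domain *domain = my_alloc_hw_domain(type);
 *
 *		if (domain && type == IOMMU_DOMAIN_DMA &&
 *		    iommu_get_dma_cookie(domain)) {
 *			my_free_hw_domain(domain);
 *			return NULL;
 *		}
 *		return domain;
 *	}
 *
 * The matching domain_free callback would call iommu_put_dma_cookie(domain)
 * before tearing down the hardware state.
 */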

/**
 * iommu_get_msi_cookie - Acquire just MSI remapping resources
 * @domain: IOMMU domain to prepare
 * @base: Start address of IOVA region for MSI mappings
 *
 * Users who manage their own IOVA allocation and do not want DMA API support,
 * but would still like to take advantage of automatic MSI remapping, can use
 * this to initialise their own domain appropriately. Users should reserve a
 * contiguous IOVA region, starting at @base, large enough to accommodate the
 * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
 * used by the devices attached to @domain.
 */
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
	struct iommu_dma_cookie *cookie;

	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;

	if (domain->iova_cookie)
		return -EEXIST;

	cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
	if (!cookie)
		return -ENOMEM;

	cookie->msi_iova = base;
	domain->iova_cookie = cookie;
	return 0;
}
EXPORT_SYMBOL(iommu_get_msi_cookie);
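
/*
 * Example: a hedged sketch of the MSI-only case. A user of an UNMANAGED
 * domain that runs its own IOVA allocator carves out a region it promises
 * never to hand out for DMA and passes its base here; MY_MSI_IOVA_BASE and
 * MY_MSI_IOVA_LENGTH are illustrative constants owned by that caller.
 *
 *	// Keep [MY_MSI_IOVA_BASE, MY_MSI_IOVA_BASE + MY_MSI_IOVA_LENGTH)
 *	// out of the caller's own allocator, then:
 *	if (iommu_get_msi_cookie(domain, MY_MSI_IOVA_BASE))
 *		goto out_free_domain;
 *	// MSI doorbells of devices attached to @domain will now be mapped
 *	// linearly from MY_MSI_IOVA_BASE, one PAGE_SIZE slot per doorbell.
 */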

/**
 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
 *          iommu_get_msi_cookie()
 *
 * IOMMU drivers should normally call this from their domain_free callback.
 */
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi, *tmp;

	if (!cookie)
		return;

	if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule)
		put_iova_domain(&cookie->iovad);

	list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
		list_del(&msi->list);
		kfree(msi);
	}
	kfree(cookie);
	domain->iova_cookie = NULL;
}
EXPORT_SYMBOL(iommu_put_dma_cookie);

/**
 * iommu_dma_get_resv_regions - Reserved region driver helper
 * @dev: Device from iommu_get_resv_regions()
 * @list: Reserved region list from iommu_get_resv_regions()
 *
 * IOMMU drivers can use this to implement their .get_resv_regions callback
 * for general non-IOMMU-specific reservations. Currently, this covers GICv3
 * ITS region reservation on ACPI-based ARM platforms that may require HW MSI
 * reservation.
 */
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{
	if (!is_of_node(dev_iommu_fwspec_get(dev)->iommu_fwnode))
		iort_iommu_msi_get_resv_regions(dev, list);
}
EXPORT_SYMBOL(iommu_dma_get_resv_regions);
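
/*
 * Example: drivers typically chain this helper from their .get_resv_regions
 * callback, adding any IOMMU-specific regions themselves. A sketch, where
 * MY_SW_MSI_BASE/MY_SW_MSI_SIZE are illustrative values chosen by the driver:
 *
 *	static void my_get_resv_regions(struct device *dev, struct list_head *head)
 *	{
 *		struct iommu_resv_region *region;
 *
 *		region = iommu_alloc_resv_region(MY_SW_MSI_BASE, MY_SW_MSI_SIZE,
 *						 IOMMU_WRITE | IOMMU_MMIO,
 *						 IOMMU_RESV_SW_MSI);
 *		if (region)
 *			list_add_tail(&region->list, head);
 *
 *		iommu_dma_get_resv_regions(dev, head);
 *	}
 */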

static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
		phys_addr_t start, phys_addr_t end)
{
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_dma_msi_page *msi_page;
	int i, num_pages;

	start -= iova_offset(iovad, start);
	num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);

	for (i = 0; i < num_pages; i++) {
		msi_page = kmalloc(sizeof(*msi_page), GFP_KERNEL);
		if (!msi_page)
			return -ENOMEM;

		msi_page->phys = start;
		msi_page->iova = start;
		INIT_LIST_HEAD(&msi_page->list);
		list_add(&msi_page->list, &cookie->msi_page_list);
		start += iovad->granule;
	}

	return 0;
}

static int iova_reserve_pci_windows(struct pci_dev *dev,
		struct iova_domain *iovad)
{
	struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
	struct resource_entry *window;
	unsigned long lo, hi;
	phys_addr_t start = 0, end;

	resource_list_for_each_entry(window, &bridge->windows) {
		if (resource_type(window->res) != IORESOURCE_MEM)
			continue;

		lo = iova_pfn(iovad, window->res->start - window->offset);
		hi = iova_pfn(iovad, window->res->end - window->offset);
		reserve_iova(iovad, lo, hi);
	}

	/* Get reserved DMA windows from host bridge */
	resource_list_for_each_entry(window, &bridge->dma_ranges) {
		end = window->res->start - window->offset;
resv_iova:
		if (end > start) {
			lo = iova_pfn(iovad, start);
			hi = iova_pfn(iovad, end);
			reserve_iova(iovad, lo, hi);
		} else {
			/* dma_ranges list should be sorted */
			dev_err(&dev->dev, "Failed to reserve IOVA\n");
			return -EINVAL;
		}

		start = window->res->end - window->offset + 1;
		/* If window is last entry */
		if (window->node.next == &bridge->dma_ranges &&
		    end != ~(phys_addr_t)0) {
			end = ~(phys_addr_t)0;
			goto resv_iova;
		}
	}

	return 0;
}

static int iova_reserve_iommu_regions(struct device *dev,
		struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_resv_region *region;
	LIST_HEAD(resv_regions);
	int ret = 0;

	if (dev_is_pci(dev)) {
		ret = iova_reserve_pci_windows(to_pci_dev(dev), iovad);
		if (ret)
			return ret;
	}

	iommu_get_resv_regions(dev, &resv_regions);
	list_for_each_entry(region, &resv_regions, list) {
		unsigned long lo, hi;

		/* We ARE the software that manages these! */
		if (region->type == IOMMU_RESV_SW_MSI)
			continue;

		lo = iova_pfn(iovad, region->start);
		hi = iova_pfn(iovad, region->start + region->length - 1);
		reserve_iova(iovad, lo, hi);

		if (region->type == IOMMU_RESV_MSI)
			ret = cookie_init_hw_msi_region(cookie, region->start,
					region->start + region->length);
		if (ret)
			break;
	}
	iommu_put_resv_regions(dev, &resv_regions);

	return ret;
}

static void iommu_dma_flush_iotlb_all(struct iova_domain *iovad)
{
	struct iommu_dma_cookie *cookie;
	struct iommu_domain *domain;

	cookie = container_of(iovad, struct iommu_dma_cookie, iovad);
	domain = cookie->fq_domain;
	/*
	 * The IOMMU driver supporting DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE
	 * implies that ops->flush_iotlb_all must be non-NULL.
	 */
	domain->ops->flush_iotlb_all(domain);
}

/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 * @base: IOVA at which the mappable address space starts
 * @size: Size of IOVA space
 * @dev: Device the domain is being initialised for
 *
 * @base and @size should be exact multiples of IOMMU page granularity to
 * avoid rounding surprises. If necessary, we reserve the page at address 0
 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 * any change which could make prior IOVAs invalid will fail.
 */
static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
		u64 size, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	unsigned long order, base_pfn;
	struct iova_domain *iovad;
	int attr;

	if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
		return -EINVAL;

	iovad = &cookie->iovad;

	/* Use the smallest supported page size for IOVA granularity */
	order = __ffs(domain->pgsize_bitmap);
	base_pfn = max_t(unsigned long, 1, base >> order);

	/* Check the domain allows at least some access to the device... */
	if (domain->geometry.force_aperture) {
		if (base > domain->geometry.aperture_end ||
		    base + size <= domain->geometry.aperture_start) {
			pr_warn("specified DMA range outside IOMMU capability\n");
			return -EFAULT;
		}
		/* ...then finally give it a kicking to make sure it fits */
		base_pfn = max_t(unsigned long, base_pfn,
				domain->geometry.aperture_start >> order);
	}

	/* start_pfn is always nonzero for an already-initialised domain */
	if (iovad->start_pfn) {
		if (1UL << order != iovad->granule ||
		    base_pfn != iovad->start_pfn) {
			pr_warn("Incompatible range for DMA domain\n");
			return -EFAULT;
		}

		return 0;
	}

	init_iova_domain(iovad, 1UL << order, base_pfn);

	if (!cookie->fq_domain && !iommu_domain_get_attr(domain,
			DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr) && attr) {
		cookie->fq_domain = domain;
		init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all, NULL);
	}

	if (!dev)
		return 0;

	return iova_reserve_iommu_regions(dev, domain);
}
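
/*
 * Example: the expected initialisation order, as a rough sketch. This is
 * normally driven from the iommu_setup_dma_ops() path once a device has been
 * attached to its default DMA domain; dma_base/size describe the usable IOVA
 * window for that device, and the warning text is illustrative only.
 *
 *	domain = iommu_get_domain_for_dev(dev);
 *	if (domain->type == IOMMU_DOMAIN_DMA &&
 *	    iommu_dma_init_domain(domain, dma_base, size, dev))
 *		pr_warn("Failed to set up IOMMU for device %s\n", dev_name(dev));
 */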

static int iommu_dma_deferred_attach(struct device *dev,
		struct iommu_domain *domain)
{
	const struct iommu_ops *ops = domain->ops;

	if (!is_kdump_kernel())
		return 0;

	if (unlikely(ops->is_attach_deferred &&
			ops->is_attach_deferred(domain, dev)))
		return iommu_attach_device(domain, dev);

	return 0;
}

/**
 * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
 *                    page flags.
 * @dir: Direction of DMA transfer
 * @coherent: Is the DMA master cache-coherent?
 * @attrs: DMA attributes for the mapping
 *
 * Return: corresponding IOMMU API page protection flags
 */
static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
		     unsigned long attrs)
{
	int prot = coherent ? IOMMU_CACHE : 0;

	if (attrs & DMA_ATTR_PRIVILEGED)
		prot |= IOMMU_PRIV;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;
	default:
		return 0;
	}
}
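
/*
 * A couple of concrete translations performed by dma_info_to_prot() above:
 *
 *	dma_info_to_prot(DMA_TO_DEVICE, true, 0)
 *		== IOMMU_READ | IOMMU_CACHE
 *	dma_info_to_prot(DMA_FROM_DEVICE, false, DMA_ATTR_PRIVILEGED)
 *		== IOMMU_WRITE | IOMMU_PRIV
 */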

static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
		size_t size, u64 dma_limit, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long shift, iova_len, iova = 0;

	if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
		cookie->msi_iova += size;
		return cookie->msi_iova - size;
	}

	shift = iova_shift(iovad);
	iova_len = size >> shift;
	/*
	 * Freeing non-power-of-two-sized allocations back into the IOVA caches
	 * will come back to bite us badly, so we have to waste a bit of space
	 * rounding up anything cacheable to make sure that can't happen. The
	 * order of the unadjusted size will still match upon freeing.
	 */
	if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
		iova_len = roundup_pow_of_two(iova_len);

	dma_limit = min_not_zero(dma_limit, dev->bus_dma_limit);

	if (domain->geometry.force_aperture)
		dma_limit = min(dma_limit, (u64)domain->geometry.aperture_end);

	/* Try to get PCI devices a SAC address */
	if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
		iova = alloc_iova_fast(iovad, iova_len,
				       DMA_BIT_MASK(32) >> shift, false);

	if (!iova)
		iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
				       true);

	return (dma_addr_t)iova << shift;
}
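
/*
 * Worked example of the rounding above, assuming a 4KB IOVA granule and an
 * IOVA_RANGE_CACHE_MAX_SIZE of 6: a 68KB request gives iova_len = 17, which
 * is below the 1 << 5 = 32 page cacheable limit, so it is rounded up to 32
 * pages (128KB) to keep the rcache free path safe. Requests of 32 pages or
 * more are left unrounded.
 */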

static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
		dma_addr_t iova, size_t size)
{
	struct iova_domain *iovad = &cookie->iovad;

	/* The MSI case is only ever cleaning up its most recent allocation */
	if (cookie->type == IOMMU_DMA_MSI_COOKIE)
		cookie->msi_iova -= size;
	else if (cookie->fq_domain)	/* non-strict mode */
		queue_iova(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad), 0);
	else
		free_iova_fast(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad));
}

static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
		size_t size)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, dma_addr);
	struct iommu_iotlb_gather iotlb_gather;
	size_t unmapped;

	dma_addr -= iova_off;
	size = iova_align(iovad, size + iova_off);
	iommu_iotlb_gather_init(&iotlb_gather);

	unmapped = iommu_unmap_fast(domain, dma_addr, size, &iotlb_gather);
	WARN_ON(unmapped != size);

	if (!cookie->fq_domain)
		iommu_tlb_sync(domain, &iotlb_gather);
	iommu_dma_free_iova(cookie, dma_addr, size);
}

static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
		size_t size, int prot, u64 dma_mask)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, phys);
	dma_addr_t iova;

	if (unlikely(iommu_dma_deferred_attach(dev, domain)))
		return DMA_MAPPING_ERROR;

	size = iova_align(iovad, size + iova_off);

	iova = iommu_dma_alloc_iova(domain, size, dma_mask, dev);
	if (!iova)
		return DMA_MAPPING_ERROR;

	if (iommu_map_atomic(domain, iova, phys - iova_off, size, prot)) {
		iommu_dma_free_iova(cookie, iova, size);
		return DMA_MAPPING_ERROR;
	}
	return iova + iova_off;
}

static void __iommu_dma_free_pages(struct page **pages, int count)
{
	while (count--)
		__free_page(pages[count]);
	kvfree(pages);
}

static struct page **__iommu_dma_alloc_pages(struct device *dev,
		unsigned int count, unsigned long order_mask, gfp_t gfp)
{
	struct page **pages;
	unsigned int i = 0, nid = dev_to_node(dev);

	order_mask &= (2U << MAX_ORDER) - 1;
	if (!order_mask)
		return NULL;

	pages = kvzalloc(count * sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;

	/* IOMMU can map any pages, so highmem can also be used here */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	while (count) {
		struct page *page = NULL;
		unsigned int order_size;

		/*
		 * Higher-order allocations are a convenience rather
		 * than a necessity, hence using __GFP_NORETRY until
		 * falling back to minimum-order allocations.
		 */
		for (order_mask &= (2U << __fls(count)) - 1;
		     order_mask; order_mask &= ~order_size) {
			unsigned int order = __fls(order_mask);
			gfp_t alloc_flags = gfp;

			order_size = 1U << order;
			if (order_mask > order_size)
				alloc_flags |= __GFP_NORETRY;
			page = alloc_pages_node(nid, alloc_flags, order);
			if (!page)
				continue;
			if (!order)
				break;
			if (!PageCompound(page)) {
				split_page(page, order);
				break;
			} else if (!split_huge_page(page)) {
				break;
			}
			__free_pages(page, order);
		}
		if (!page) {
			__iommu_dma_free_pages(pages, i);
			return NULL;
		}
		count -= order_size;
		while (order_size--)
			pages[i++] = page++;
	}
	return pages;
}

/**
 * iommu_dma_alloc_remap - Allocate and map a buffer contiguous in IOVA space
 * @dev: Device to allocate memory for. Must be a real device
 *	 attached to an iommu_dma_domain
 * @size: Size of buffer in bytes
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 * @attrs: DMA attributes for this allocation
 *
 * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
 * but an IOMMU which supports smaller pages might not map the whole thing.
 *
 * Return: Mapped virtual address, or NULL on failure.
 */
static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	bool coherent = dev_is_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
	struct page **pages;
	struct sg_table sgt;
	dma_addr_t iova;
	void *vaddr;

	*dma_handle = DMA_MAPPING_ERROR;

	if (unlikely(iommu_dma_deferred_attach(dev, domain)))
		return NULL;

	min_size = alloc_sizes & -alloc_sizes;
	if (min_size < PAGE_SIZE) {
		min_size = PAGE_SIZE;
		alloc_sizes |= PAGE_SIZE;
	} else {
		size = ALIGN(size, min_size);
	}
	if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
		alloc_sizes = min_size;

	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	pages = __iommu_dma_alloc_pages(dev, count, alloc_sizes >> PAGE_SHIFT,
					gfp);
	if (!pages)
		return NULL;

	size = iova_align(iovad, size);
	iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
	if (!iova)
		goto out_free_pages;

	if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
		goto out_free_iova;

	if (!(ioprot & IOMMU_CACHE)) {
		struct scatterlist *sg;
		int i;

		for_each_sg(sgt.sgl, sg, sgt.orig_nents, i)
			arch_dma_prep_coherent(sg_page(sg), sg->length);
	}

	if (iommu_map_sg_atomic(domain, iova, sgt.sgl, sgt.orig_nents, ioprot)
			< size)
		goto out_free_sg;

	vaddr = dma_common_pages_remap(pages, size, prot,
			__builtin_return_address(0));
	if (!vaddr)
		goto out_unmap;

	*dma_handle = iova;
	sg_free_table(&sgt);
	return vaddr;

out_unmap:
	__iommu_dma_unmap(dev, iova, size);
out_free_sg:
	sg_free_table(&sgt);
out_free_iova:
	iommu_dma_free_iova(cookie, iova, size);
out_free_pages:
	__iommu_dma_free_pages(pages, count);
	return NULL;
}

/**
 * __iommu_dma_mmap - Map a buffer into provided user VMA
 * @pages: Array representing buffer from __iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @vma: VMA describing requested userspace mapping
 *
 * Maps the pages of the buffer in @pages into @vma. The caller is responsible
 * for verifying the correct size and protection of @vma beforehand.
 */
static int __iommu_dma_mmap(struct page **pages, size_t size,
		struct vm_area_struct *vma)
{
	return vm_map_pages(vma, pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
}

static void iommu_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (dev_is_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
	arch_sync_dma_for_cpu(phys, size, dir);
}

static void iommu_dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (dev_is_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
	arch_sync_dma_for_device(phys, size, dir);
}

static void iommu_dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (dev_is_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
}

static void iommu_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (dev_is_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
}

static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	bool coherent = dev_is_dma_coherent(dev);
	int prot = dma_info_to_prot(dir, coherent, attrs);
	dma_addr_t dma_handle;

	dma_handle = __iommu_dma_map(dev, phys, size, prot, dma_get_mask(dev));
	if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
	    dma_handle != DMA_MAPPING_ERROR)
		arch_sync_dma_for_device(phys, size, dir);
	return dma_handle;
}

static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_single_for_cpu(dev, dma_handle, size, dir);
	__iommu_dma_unmap(dev, dma_handle, size);
}

/*
 * Prepare a successfully-mapped scatterlist to give back to the caller.
 *
 * At this point the segments are already laid out by iommu_dma_map_sg() to
 * avoid individually crossing any boundaries, so we merely need to check a
 * segment's start address to avoid concatenating across one.
 */
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
		dma_addr_t dma_addr)
{
	struct scatterlist *s, *cur = sg;
	unsigned long seg_mask = dma_get_seg_boundary(dev);
	unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
	int i, count = 0;

	for_each_sg(sg, s, nents, i) {
		/* Restore this segment's original unaligned fields first */
		unsigned int s_iova_off = sg_dma_address(s);
		unsigned int s_length = sg_dma_len(s);
		unsigned int s_iova_len = s->length;

		s->offset += s_iova_off;
		s->length = s_length;
		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;

		/*
		 * Now fill in the real DMA data. If...
		 * - there is a valid output segment to append to
		 * - and this segment starts on an IOVA page boundary
		 * - but doesn't fall at a segment boundary
		 * - and wouldn't make the resulting output segment too long
		 */
		if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
		    (max_len - cur_len >= s_length)) {
			/* ...then concatenate it with the previous one */
			cur_len += s_length;
		} else {
			/* Otherwise start the next output segment */
			if (i > 0)
				cur = sg_next(cur);
			cur_len = s_length;
			count++;

			sg_dma_address(cur) = dma_addr + s_iova_off;
		}

		sg_dma_len(cur) = cur_len;
		dma_addr += s_iova_len;

		if (s_length + s_iova_off < s_iova_len)
			cur_len = 0;
	}
	return count;
}

/*
 * If mapping failed, then just restore the original list,
 * while making sure the DMA fields are invalidated.
 */
static void __invalidate_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_address(s) != DMA_MAPPING_ERROR)
			s->offset += sg_dma_address(s);
		if (sg_dma_len(s))
			s->length = sg_dma_len(s);
		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;
	}
}

/*
 * The DMA API client is passing in a scatterlist which could describe
 * any old buffer layout, but the IOMMU API requires everything to be
 * aligned to IOMMU pages. Hence the need for this complicated bit of
 * impedance-matching, to be able to hand off a suitably-aligned list,
 * but still preserve the original offsets and sizes for the caller.
 */
static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct scatterlist *s, *prev = NULL;
	int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);
	dma_addr_t iova;
	size_t iova_len = 0;
	unsigned long mask = dma_get_seg_boundary(dev);
	int i;

	if (unlikely(iommu_dma_deferred_attach(dev, domain)))
		return 0;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_sg_for_device(dev, sg, nents, dir);

	/*
	 * Work out how much IOVA space we need, and align the segments to
	 * IOVA granules for the IOMMU driver to handle. With some clever
	 * trickery we can modify the list in-place, but reversibly, by
	 * stashing the unaligned parts in the as-yet-unused DMA fields.
	 */
	for_each_sg(sg, s, nents, i) {
		size_t s_iova_off = iova_offset(iovad, s->offset);
		size_t s_length = s->length;
		size_t pad_len = (mask - iova_len + 1) & mask;

		sg_dma_address(s) = s_iova_off;
		sg_dma_len(s) = s_length;
		s->offset -= s_iova_off;
		s_length = iova_align(iovad, s_length + s_iova_off);
		s->length = s_length;

		/*
		 * Due to the alignment of our single IOVA allocation, we can
		 * depend on these assumptions about the segment boundary mask:
		 * - If mask size >= IOVA size, then the IOVA range cannot
		 *   possibly fall across a boundary, so we don't care.
		 * - If mask size < IOVA size, then the IOVA range must start
		 *   exactly on a boundary, therefore we can lay things out
		 *   based purely on segment lengths without needing to know
		 *   the actual addresses beforehand.
		 * - The mask must be a power of 2, so pad_len == 0 if
		 *   iova_len == 0, thus we cannot dereference prev the first
		 *   time through here (i.e. before it has a meaningful value).
		 */
		if (pad_len && pad_len < s_length - 1) {
			prev->length += pad_len;
			iova_len += pad_len;
		}

		iova_len += s_length;
		prev = s;
	}

	iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
	if (!iova)
		goto out_restore_sg;

	/*
	 * We'll leave any physical concatenation to the IOMMU driver's
	 * implementation - it knows better than we do.
	 */
	if (iommu_map_sg_atomic(domain, iova, sg, nents, prot) < iova_len)
		goto out_free_iova;

	return __finalise_sg(dev, sg, nents, iova);

out_free_iova:
	iommu_dma_free_iova(cookie, iova, iova_len);
out_restore_sg:
	__invalidate_sg(sg, nents);
	return 0;
}
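
/*
 * Example of the net effect, as a sketch: a client mapping a three-segment
 * scatterlist gets back nr <= 3 DMA segments that all live in one contiguous
 * IOVA allocation, while each entry's offset/length still describe the
 * original CPU-side layout. program_hw() stands in for whatever the driver
 * does with the addresses.
 *
 *	nr = dma_map_sg(dev, sg, 3, DMA_TO_DEVICE);
 *	for_each_sg(sg, s, nr, i)
 *		program_hw(dev, sg_dma_address(s), sg_dma_len(s));
 */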

static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t start, end;
	struct scatterlist *tmp;
	int i;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir);

	/*
	 * The scatterlist segments are mapped into a single
	 * contiguous IOVA allocation, so this is incredibly easy.
	 */
	start = sg_dma_address(sg);
	for_each_sg(sg_next(sg), tmp, nents - 1, i) {
		if (sg_dma_len(tmp) == 0)
			break;
		sg = tmp;
	}
	end = sg_dma_address(sg) + sg_dma_len(sg);
	__iommu_dma_unmap(dev, start, end - start);
}

static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return __iommu_dma_map(dev, phys, size,
			dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO,
			dma_get_mask(dev));
}

static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	__iommu_dma_unmap(dev, handle, size);
}

static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
{
	size_t alloc_size = PAGE_ALIGN(size);
	int count = alloc_size >> PAGE_SHIFT;
	struct page *page = NULL, **pages = NULL;

	/* Non-coherent atomic allocation? Easy */
	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    dma_free_from_pool(dev, cpu_addr, alloc_size))
		return;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
		/*
		 * If the address is remapped, then it's either non-coherent
		 * or highmem CMA, or an iommu_dma_alloc_remap() construction.
		 */
		pages = dma_common_find_pages(cpu_addr);
		if (!pages)
			page = vmalloc_to_page(cpu_addr);
		dma_common_free_remap(cpu_addr, alloc_size);
	} else {
		/* Lowmem means a coherent atomic or CMA allocation */
		page = virt_to_page(cpu_addr);
	}

	if (pages)
		__iommu_dma_free_pages(pages, count);
	if (page)
		dma_free_contiguous(dev, page, alloc_size);
}

static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t handle, unsigned long attrs)
{
	__iommu_dma_unmap(dev, handle, size);
	__iommu_dma_free(dev, size, cpu_addr);
}

static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
		struct page **pagep, gfp_t gfp, unsigned long attrs)
{
	bool coherent = dev_is_dma_coherent(dev);
	size_t alloc_size = PAGE_ALIGN(size);
	int node = dev_to_node(dev);
	struct page *page = NULL;
	void *cpu_addr;

	page = dma_alloc_contiguous(dev, alloc_size, gfp);
	if (!page)
		page = alloc_pages_node(node, gfp, get_order(alloc_size));
	if (!page)
		return NULL;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) {
		pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);

		cpu_addr = dma_common_contiguous_remap(page, alloc_size,
				prot, __builtin_return_address(0));
		if (!cpu_addr)
			goto out_free_pages;

		if (!coherent)
			arch_dma_prep_coherent(page, size);
	} else {
		cpu_addr = page_address(page);
	}

	*pagep = page;
	memset(cpu_addr, 0, alloc_size);
	return cpu_addr;
out_free_pages:
	dma_free_contiguous(dev, page, alloc_size);
	return NULL;
}
102106d60728SChristoph Hellwig 
1022ee1ef05dSChristoph Hellwig static void *iommu_dma_alloc(struct device *dev, size_t size,
1023ee1ef05dSChristoph Hellwig 		dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
1024ee1ef05dSChristoph Hellwig {
1025ee1ef05dSChristoph Hellwig 	bool coherent = dev_is_dma_coherent(dev);
1026ee1ef05dSChristoph Hellwig 	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
1027ee1ef05dSChristoph Hellwig 	struct page *page = NULL;
1028ee1ef05dSChristoph Hellwig 	void *cpu_addr;
1029ee1ef05dSChristoph Hellwig 
1030ee1ef05dSChristoph Hellwig 	gfp |= __GFP_ZERO;
1031ee1ef05dSChristoph Hellwig 
1032e6475eb0SChristoph Hellwig 	if (IS_ENABLED(CONFIG_DMA_REMAP) && gfpflags_allow_blocking(gfp) &&
1033ee1ef05dSChristoph Hellwig 	    !(attrs & DMA_ATTR_FORCE_CONTIGUOUS))
1034ee1ef05dSChristoph Hellwig 		return iommu_dma_alloc_remap(dev, size, handle, gfp, attrs);
1035ee1ef05dSChristoph Hellwig 
1036e6475eb0SChristoph Hellwig 	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
1037e6475eb0SChristoph Hellwig 	    !gfpflags_allow_blocking(gfp) && !coherent)
10389420139fSChristoph Hellwig 		page = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &cpu_addr,
10399420139fSChristoph Hellwig 					       gfp, NULL);
1040ee1ef05dSChristoph Hellwig 	else
1041ee1ef05dSChristoph Hellwig 		cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs);
1042ee1ef05dSChristoph Hellwig 	if (!cpu_addr)
1043ee1ef05dSChristoph Hellwig 		return NULL;
1044ee1ef05dSChristoph Hellwig 
10456e235020STom Murphy 	*handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot,
10466e235020STom Murphy 			dev->coherent_dma_mask);
1047ee1ef05dSChristoph Hellwig 	if (*handle == DMA_MAPPING_ERROR) {
1048ee1ef05dSChristoph Hellwig 		__iommu_dma_free(dev, size, cpu_addr);
1049ee1ef05dSChristoph Hellwig 		return NULL;
1050ee1ef05dSChristoph Hellwig 	}
1051ee1ef05dSChristoph Hellwig 
1052ee1ef05dSChristoph Hellwig 	return cpu_addr;
1053ee1ef05dSChristoph Hellwig }
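
/*
 * Illustrative sketch (not part of the upstream file): a driver reaches the
 * allocator above through the generic DMA API; the gfp flags and attrs pick
 * the path taken - remapped page array, atomic pool, or (possibly CMA)
 * contiguous pages:
 *
 *	dma_addr_t iova;
 *	void *vaddr;
 *
 *	// blocking allocation: may take the iommu_dma_alloc_remap() path
 *	vaddr = dma_alloc_coherent(dev, SZ_64K, &iova, GFP_KERNEL);
 *
 *	// physically contiguous buffer: forces iommu_dma_alloc_pages()
 *	vaddr = dma_alloc_attrs(dev, SZ_64K, &iova, GFP_KERNEL,
 *				DMA_ATTR_FORCE_CONTIGUOUS);
 */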
1054ee1ef05dSChristoph Hellwig 
105506d60728SChristoph Hellwig static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
105606d60728SChristoph Hellwig 		void *cpu_addr, dma_addr_t dma_addr, size_t size,
105706d60728SChristoph Hellwig 		unsigned long attrs)
105806d60728SChristoph Hellwig {
105906d60728SChristoph Hellwig 	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
1060efd9f10bSChristoph Hellwig 	unsigned long pfn, off = vma->vm_pgoff;
106106d60728SChristoph Hellwig 	int ret;
106206d60728SChristoph Hellwig 
106333dcb37cSChristoph Hellwig 	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
106406d60728SChristoph Hellwig 
106506d60728SChristoph Hellwig 	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
106606d60728SChristoph Hellwig 		return ret;
106706d60728SChristoph Hellwig 
106806d60728SChristoph Hellwig 	if (off >= nr_pages || vma_pages(vma) > nr_pages - off)
106906d60728SChristoph Hellwig 		return -ENXIO;
107006d60728SChristoph Hellwig 
1071e6475eb0SChristoph Hellwig 	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
10725cf45379SChristoph Hellwig 		struct page **pages = dma_common_find_pages(cpu_addr);
107306d60728SChristoph Hellwig 
1074efd9f10bSChristoph Hellwig 		if (pages)
10754c360aceSRobin Murphy 			return __iommu_dma_mmap(pages, size, vma);
1076efd9f10bSChristoph Hellwig 		pfn = vmalloc_to_pfn(cpu_addr);
1077efd9f10bSChristoph Hellwig 	} else {
1078efd9f10bSChristoph Hellwig 		pfn = page_to_pfn(virt_to_page(cpu_addr));
1079efd9f10bSChristoph Hellwig 	}
1080efd9f10bSChristoph Hellwig 
1081efd9f10bSChristoph Hellwig 	return remap_pfn_range(vma, vma->vm_start, pfn + off,
1082efd9f10bSChristoph Hellwig 			       vma->vm_end - vma->vm_start,
1083efd9f10bSChristoph Hellwig 			       vma->vm_page_prot);
108406d60728SChristoph Hellwig }
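
/*
 * Illustrative sketch (not part of the upstream file): a driver would hit
 * the handler above via dma_mmap_coherent() from its own mmap file
 * operation, passing back the cpu_addr/dma_addr pair it got at alloc time:
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_dev *foo = file->private_data;
 *
 *		return dma_mmap_coherent(foo->dev, vma, foo->vaddr,
 *					 foo->iova, foo->buf_size);
 *	}
 *
 * foo_dev and its fields are hypothetical names for this example only.
 */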
108506d60728SChristoph Hellwig 
108606d60728SChristoph Hellwig static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
108706d60728SChristoph Hellwig 		void *cpu_addr, dma_addr_t dma_addr, size_t size,
108806d60728SChristoph Hellwig 		unsigned long attrs)
108906d60728SChristoph Hellwig {
10903fb3378bSChristoph Hellwig 	struct page *page;
10913fb3378bSChristoph Hellwig 	int ret;
109206d60728SChristoph Hellwig 
1093e6475eb0SChristoph Hellwig 	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
10945cf45379SChristoph Hellwig 		struct page **pages = dma_common_find_pages(cpu_addr);
10953fb3378bSChristoph Hellwig 
10963fb3378bSChristoph Hellwig 		if (pages) {
10973fb3378bSChristoph Hellwig 			return sg_alloc_table_from_pages(sgt, pages,
10983fb3378bSChristoph Hellwig 					PAGE_ALIGN(size) >> PAGE_SHIFT,
10993fb3378bSChristoph Hellwig 					0, size, GFP_KERNEL);
110006d60728SChristoph Hellwig 		}
110106d60728SChristoph Hellwig 
11023fb3378bSChristoph Hellwig 		page = vmalloc_to_page(cpu_addr);
11033fb3378bSChristoph Hellwig 	} else {
11043fb3378bSChristoph Hellwig 		page = virt_to_page(cpu_addr);
110506d60728SChristoph Hellwig 	}
110606d60728SChristoph Hellwig 
11073fb3378bSChristoph Hellwig 	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
11083fb3378bSChristoph Hellwig 	if (!ret)
11093fb3378bSChristoph Hellwig 		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
11103fb3378bSChristoph Hellwig 	return ret;
111106d60728SChristoph Hellwig }
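
/*
 * Illustrative sketch (not part of the upstream file): dma-buf exporters and
 * similar users reach the helper above through dma_get_sgtable(), typically
 * to describe a previously allocated coherent buffer to another device
 * (vaddr, iova and buf_size stand in for the values saved at alloc time):
 *
 *	struct sg_table sgt;
 *	int err;
 *
 *	err = dma_get_sgtable(dev, &sgt, vaddr, iova, buf_size);
 *	if (!err) {
 *		// map sgt for the importing device, then:
 *		sg_free_table(&sgt);
 *	}
 */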
111206d60728SChristoph Hellwig 
1113158a6d3cSYoshihiro Shimoda static unsigned long iommu_dma_get_merge_boundary(struct device *dev)
1114158a6d3cSYoshihiro Shimoda {
1115158a6d3cSYoshihiro Shimoda 	struct iommu_domain *domain = iommu_get_dma_domain(dev);
1116158a6d3cSYoshihiro Shimoda 
1117158a6d3cSYoshihiro Shimoda 	return (1UL << __ffs(domain->pgsize_bitmap)) - 1;
1118158a6d3cSYoshihiro Shimoda }
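
/*
 * Illustrative sketch (not part of the upstream file): block-layer users
 * query this through dma_get_merge_boundary() so the queue only generates
 * segments the DMA layer can merge into a single IOVA mapping:
 *
 *	unsigned long mask = dma_get_merge_boundary(dev);
 *
 *	if (mask)
 *		blk_queue_virt_boundary(q, mask);
 */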
1119158a6d3cSYoshihiro Shimoda 
112006d60728SChristoph Hellwig static const struct dma_map_ops iommu_dma_ops = {
112106d60728SChristoph Hellwig 	.alloc			= iommu_dma_alloc,
112206d60728SChristoph Hellwig 	.free			= iommu_dma_free,
112306d60728SChristoph Hellwig 	.mmap			= iommu_dma_mmap,
112406d60728SChristoph Hellwig 	.get_sgtable		= iommu_dma_get_sgtable,
112506d60728SChristoph Hellwig 	.map_page		= iommu_dma_map_page,
112606d60728SChristoph Hellwig 	.unmap_page		= iommu_dma_unmap_page,
112706d60728SChristoph Hellwig 	.map_sg			= iommu_dma_map_sg,
112806d60728SChristoph Hellwig 	.unmap_sg		= iommu_dma_unmap_sg,
112906d60728SChristoph Hellwig 	.sync_single_for_cpu	= iommu_dma_sync_single_for_cpu,
113006d60728SChristoph Hellwig 	.sync_single_for_device	= iommu_dma_sync_single_for_device,
113106d60728SChristoph Hellwig 	.sync_sg_for_cpu	= iommu_dma_sync_sg_for_cpu,
113206d60728SChristoph Hellwig 	.sync_sg_for_device	= iommu_dma_sync_sg_for_device,
113306d60728SChristoph Hellwig 	.map_resource		= iommu_dma_map_resource,
113406d60728SChristoph Hellwig 	.unmap_resource		= iommu_dma_unmap_resource,
1135158a6d3cSYoshihiro Shimoda 	.get_merge_boundary	= iommu_dma_get_merge_boundary,
113606d60728SChristoph Hellwig };
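
/*
 * Illustrative sketch (not part of the upstream file): all of the ops above
 * are reached through the generic dma_map_* wrappers once iommu_dma_ops is
 * installed on a device; e.g. a streaming scatterlist mapping ends up in
 * iommu_dma_map_sg():
 *
 *	struct scatterlist *sg;
 *	int i, nents;
 *
 *	nents = dma_map_sg(dev, sgl, count, DMA_TO_DEVICE);
 *	for_each_sg(sgl, sg, nents, i)
 *		do_hw_descriptor(sg_dma_address(sg), sg_dma_len(sg));
 *	dma_unmap_sg(dev, sgl, count, DMA_TO_DEVICE);
 *
 * do_hw_descriptor() stands in for driver-specific descriptor setup.
 */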
113706d60728SChristoph Hellwig 
113806d60728SChristoph Hellwig /*
113906d60728SChristoph Hellwig  * The IOMMU core code allocates the default DMA domain, which the underlying
114006d60728SChristoph Hellwig  * IOMMU driver needs to support via the dma-iommu layer.
114106d60728SChristoph Hellwig  */
114206d60728SChristoph Hellwig void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size)
114306d60728SChristoph Hellwig {
114406d60728SChristoph Hellwig 	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
114506d60728SChristoph Hellwig 
114606d60728SChristoph Hellwig 	if (!domain)
114706d60728SChristoph Hellwig 		goto out_err;
114806d60728SChristoph Hellwig 
114906d60728SChristoph Hellwig 	/*
115006d60728SChristoph Hellwig 	 * The IOMMU core code allocates the default DMA domain, which the
115106d60728SChristoph Hellwig 	 * underlying IOMMU driver needs to support via the dma-iommu layer.
115206d60728SChristoph Hellwig 	 */
115306d60728SChristoph Hellwig 	if (domain->type == IOMMU_DOMAIN_DMA) {
115406d60728SChristoph Hellwig 		if (iommu_dma_init_domain(domain, dma_base, size, dev))
115506d60728SChristoph Hellwig 			goto out_err;
115606d60728SChristoph Hellwig 		dev->dma_ops = &iommu_dma_ops;
115706d60728SChristoph Hellwig 	}
115806d60728SChristoph Hellwig 
115906d60728SChristoph Hellwig 	return;
116006d60728SChristoph Hellwig out_err:
116106d60728SChristoph Hellwig 	pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
116206d60728SChristoph Hellwig 		 dev_name(dev));
116344bb7e24SRobin Murphy }
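
/*
 * Illustrative sketch (not part of the upstream file): on arm64 this entry
 * point is reached from arch_setup_dma_ops() once the bus code has parsed
 * the firmware description, roughly:
 *
 *	void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 *				const struct iommu_ops *iommu, bool coherent)
 *	{
 *		dev->dma_coherent = coherent;
 *		if (iommu)
 *			iommu_setup_dma_ops(dev, dma_base, size);
 *	}
 */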
116444bb7e24SRobin Murphy 
116544bb7e24SRobin Murphy static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
116644bb7e24SRobin Murphy 		phys_addr_t msi_addr, struct iommu_domain *domain)
116744bb7e24SRobin Murphy {
116844bb7e24SRobin Murphy 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
116944bb7e24SRobin Murphy 	struct iommu_dma_msi_page *msi_page;
1170842fe519SRobin Murphy 	dma_addr_t iova;
117144bb7e24SRobin Murphy 	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
1172fdbe574eSRobin Murphy 	size_t size = cookie_msi_granule(cookie);
117344bb7e24SRobin Murphy 
1174fdbe574eSRobin Murphy 	msi_addr &= ~(phys_addr_t)(size - 1);
117544bb7e24SRobin Murphy 	list_for_each_entry(msi_page, &cookie->msi_page_list, list)
117644bb7e24SRobin Murphy 		if (msi_page->phys == msi_addr)
117744bb7e24SRobin Murphy 			return msi_page;
117844bb7e24SRobin Murphy 
1179c1864790SRobin Murphy 	msi_page = kzalloc(sizeof(*msi_page), GFP_KERNEL);
118044bb7e24SRobin Murphy 	if (!msi_page)
118144bb7e24SRobin Murphy 		return NULL;
118244bb7e24SRobin Murphy 
11838af23fadSRobin Murphy 	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
11848af23fadSRobin Murphy 	if (!iova)
118544bb7e24SRobin Murphy 		goto out_free_page;
118644bb7e24SRobin Murphy 
11878af23fadSRobin Murphy 	if (iommu_map(domain, iova, msi_addr, size, prot))
11888af23fadSRobin Murphy 		goto out_free_iova;
11898af23fadSRobin Murphy 
119044bb7e24SRobin Murphy 	INIT_LIST_HEAD(&msi_page->list);
1191a44e6657SRobin Murphy 	msi_page->phys = msi_addr;
1192a44e6657SRobin Murphy 	msi_page->iova = iova;
119344bb7e24SRobin Murphy 	list_add(&msi_page->list, &cookie->msi_page_list);
119444bb7e24SRobin Murphy 	return msi_page;
119544bb7e24SRobin Murphy 
11968af23fadSRobin Murphy out_free_iova:
11978af23fadSRobin Murphy 	iommu_dma_free_iova(cookie, iova, size);
119844bb7e24SRobin Murphy out_free_page:
119944bb7e24SRobin Murphy 	kfree(msi_page);
120044bb7e24SRobin Murphy 	return NULL;
120144bb7e24SRobin Murphy }
120244bb7e24SRobin Murphy 
1203ece6e6f0SJulien Grall int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
120444bb7e24SRobin Murphy {
1205ece6e6f0SJulien Grall 	struct device *dev = msi_desc_to_dev(desc);
120644bb7e24SRobin Murphy 	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
120744bb7e24SRobin Murphy 	struct iommu_dma_msi_page *msi_page;
1208c1864790SRobin Murphy 	static DEFINE_MUTEX(msi_prepare_lock); /* see below */
120944bb7e24SRobin Murphy 
1210ece6e6f0SJulien Grall 	if (!domain || !domain->iova_cookie) {
1211ece6e6f0SJulien Grall 		desc->iommu_cookie = NULL;
1212ece6e6f0SJulien Grall 		return 0;
1213ece6e6f0SJulien Grall 	}
121444bb7e24SRobin Murphy 
121544bb7e24SRobin Murphy 	/*
1216c1864790SRobin Murphy 	 * In fact the whole prepare operation should already be serialised by
1217c1864790SRobin Murphy 	 * irq_domain_mutex further up the callchain, but that's pretty subtle
1218c1864790SRobin Murphy 	 * on its own, so consider this locking as failsafe documentation...
121944bb7e24SRobin Murphy 	 */
1220c1864790SRobin Murphy 	mutex_lock(&msi_prepare_lock);
122144bb7e24SRobin Murphy 	msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
1222c1864790SRobin Murphy 	mutex_unlock(&msi_prepare_lock);
122344bb7e24SRobin Murphy 
1224ece6e6f0SJulien Grall 	msi_desc_set_iommu_cookie(desc, msi_page);
1225ece6e6f0SJulien Grall 
1226ece6e6f0SJulien Grall 	if (!msi_page)
1227ece6e6f0SJulien Grall 		return -ENOMEM;
1228ece6e6f0SJulien Grall 	return 0;
122944bb7e24SRobin Murphy }
1230ece6e6f0SJulien Grall 
1231ece6e6f0SJulien Grall void iommu_dma_compose_msi_msg(struct msi_desc *desc,
1232ece6e6f0SJulien Grall 			       struct msi_msg *msg)
1233ece6e6f0SJulien Grall {
1234ece6e6f0SJulien Grall 	struct device *dev = msi_desc_to_dev(desc);
1235ece6e6f0SJulien Grall 	const struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
1236ece6e6f0SJulien Grall 	const struct iommu_dma_msi_page *msi_page;
1237ece6e6f0SJulien Grall 
1238ece6e6f0SJulien Grall 	msi_page = msi_desc_get_iommu_cookie(desc);
1239ece6e6f0SJulien Grall 
1240ece6e6f0SJulien Grall 	if (!domain || !domain->iova_cookie || WARN_ON(!msi_page))
1241ece6e6f0SJulien Grall 		return;
1242ece6e6f0SJulien Grall 
1243ece6e6f0SJulien Grall 	msg->address_hi = upper_32_bits(msi_page->iova);
1244ece6e6f0SJulien Grall 	msg->address_lo &= cookie_msi_granule(domain->iova_cookie) - 1;
1245ece6e6f0SJulien Grall 	msg->address_lo += lower_32_bits(msi_page->iova);
124644bb7e24SRobin Murphy }
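
/*
 * Illustrative sketch (not part of the upstream file): an MSI irqchip whose
 * doorbell sits behind the IOMMU uses the pair of helpers above - the
 * prepare step when the interrupt is allocated (it may sleep and map a new
 * page), and the compose step when the message is written:
 *
 *	// at MSI prepare time, with the doorbell's physical address:
 *	err = iommu_dma_prepare_msi(desc, doorbell_phys);
 *
 *	// at irq_compose_msi_msg time, after filling msg with doorbell_phys:
 *	iommu_dma_compose_msi_msg(desc, msg);
 *
 * so the address the device finally writes to is the IOVA mapped for the
 * doorbell rather than its physical address.
 */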
124706d60728SChristoph Hellwig 
124806d60728SChristoph Hellwig static int iommu_dma_init(void)
124906d60728SChristoph Hellwig {
125006d60728SChristoph Hellwig 	return iova_cache_get();
12510db2e5d1SRobin Murphy }
125206d60728SChristoph Hellwig arch_initcall(iommu_dma_init);
1253