xref: /openbmc/linux/drivers/iommu/dma-iommu.c (revision e07a16e6)
1caab277bSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
20db2e5d1SRobin Murphy /*
30db2e5d1SRobin Murphy  * A fairly generic DMA-API to IOMMU-API glue layer.
40db2e5d1SRobin Murphy  *
50db2e5d1SRobin Murphy  * Copyright (C) 2014-2015 ARM Ltd.
60db2e5d1SRobin Murphy  *
70db2e5d1SRobin Murphy  * based in part on arch/arm/mm/dma-mapping.c:
80db2e5d1SRobin Murphy  * Copyright (C) 2000-2004 Russell King
90db2e5d1SRobin Murphy  */
100db2e5d1SRobin Murphy 
11f51dc892SShameer Kolothum #include <linux/acpi_iort.h>
12a17e3026SRobin Murphy #include <linux/atomic.h>
13a17e3026SRobin Murphy #include <linux/crash_dump.h>
140db2e5d1SRobin Murphy #include <linux/device.h>
15a17e3026SRobin Murphy #include <linux/dma-direct.h>
16a17e3026SRobin Murphy #include <linux/dma-map-ops.h>
175b11e9cdSRobin Murphy #include <linux/gfp.h>
180db2e5d1SRobin Murphy #include <linux/huge_mm.h>
190db2e5d1SRobin Murphy #include <linux/iommu.h>
200db2e5d1SRobin Murphy #include <linux/iova.h>
2144bb7e24SRobin Murphy #include <linux/irq.h>
22b8397a8fSRobin Murphy #include <linux/list_sort.h>
2330280eeeSLogan Gunthorpe #include <linux/memremap.h>
240db2e5d1SRobin Murphy #include <linux/mm.h>
25c1864790SRobin Murphy #include <linux/mutex.h>
265cef282eSThierry Reding #include <linux/of_iommu.h>
27fade1ec0SRobin Murphy #include <linux/pci.h>
285b11e9cdSRobin Murphy #include <linux/scatterlist.h>
29a17e3026SRobin Murphy #include <linux/spinlock.h>
30a17e3026SRobin Murphy #include <linux/swiotlb.h>
315b11e9cdSRobin Murphy #include <linux/vmalloc.h>
32509b9e74SIsaac J. Manjarres #include <trace/events/swiotlb.h>
330db2e5d1SRobin Murphy 
34f2042ed2SRobin Murphy #include "dma-iommu.h"
35f2042ed2SRobin Murphy 
3644bb7e24SRobin Murphy struct iommu_dma_msi_page {
3744bb7e24SRobin Murphy 	struct list_head	list;
3844bb7e24SRobin Murphy 	dma_addr_t		iova;
3944bb7e24SRobin Murphy 	phys_addr_t		phys;
4044bb7e24SRobin Murphy };
4144bb7e24SRobin Murphy 
42fdbe574eSRobin Murphy enum iommu_dma_cookie_type {
43fdbe574eSRobin Murphy 	IOMMU_DMA_IOVA_COOKIE,
44fdbe574eSRobin Murphy 	IOMMU_DMA_MSI_COOKIE,
45fdbe574eSRobin Murphy };
46fdbe574eSRobin Murphy 
4744bb7e24SRobin Murphy struct iommu_dma_cookie {
48fdbe574eSRobin Murphy 	enum iommu_dma_cookie_type	type;
49fdbe574eSRobin Murphy 	union {
50fdbe574eSRobin Murphy 		/* Full allocator for IOMMU_DMA_IOVA_COOKIE */
51a17e3026SRobin Murphy 		struct {
5244bb7e24SRobin Murphy 			struct iova_domain	iovad;
53a17e3026SRobin Murphy 
54a17e3026SRobin Murphy 			struct iova_fq __percpu *fq;	/* Flush queue */
55a17e3026SRobin Murphy 			/* Number of TLB flushes that have been started */
56a17e3026SRobin Murphy 			atomic64_t		fq_flush_start_cnt;
57a17e3026SRobin Murphy 			/* Number of TLB flushes that have been finished */
58a17e3026SRobin Murphy 			atomic64_t		fq_flush_finish_cnt;
59a17e3026SRobin Murphy 			/* Timer to regularly empty the flush queues */
60a17e3026SRobin Murphy 			struct timer_list	fq_timer;
61a17e3026SRobin Murphy 			/* 1 when timer is active, 0 when not */
62a17e3026SRobin Murphy 			atomic_t		fq_timer_on;
63a17e3026SRobin Murphy 		};
64fdbe574eSRobin Murphy 		/* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
65fdbe574eSRobin Murphy 		dma_addr_t		msi_iova;
66fdbe574eSRobin Murphy 	};
6744bb7e24SRobin Murphy 	struct list_head		msi_page_list;
682da274cdSZhen Lei 
692da274cdSZhen Lei 	/* Domain for flush queue callback; NULL if flush queue not in use */
702da274cdSZhen Lei 	struct iommu_domain		*fq_domain;
71ac9a5d52SYunfei Wang 	struct mutex			mutex;
7244bb7e24SRobin Murphy };
7344bb7e24SRobin Murphy 
74a8e8af35SLianbo Jiang static DEFINE_STATIC_KEY_FALSE(iommu_deferred_attach_enabled);
75af3e9579SLinus Torvalds bool iommu_dma_forcedac __read_mostly;
763542dcb1SRobin Murphy 
773542dcb1SRobin Murphy static int __init iommu_dma_forcedac_setup(char *str)
783542dcb1SRobin Murphy {
793542dcb1SRobin Murphy 	int ret = kstrtobool(str, &iommu_dma_forcedac);
803542dcb1SRobin Murphy 
813542dcb1SRobin Murphy 	if (!ret && iommu_dma_forcedac)
823542dcb1SRobin Murphy 		pr_info("Forcing DAC for PCI devices\n");
833542dcb1SRobin Murphy 	return ret;
843542dcb1SRobin Murphy }
853542dcb1SRobin Murphy early_param("iommu.forcedac", iommu_dma_forcedac_setup);
86a8e8af35SLianbo Jiang 
87a17e3026SRobin Murphy /* Number of entries per flush queue */
88a17e3026SRobin Murphy #define IOVA_FQ_SIZE	256
89a17e3026SRobin Murphy 
90a17e3026SRobin Murphy /* Timeout (in ms) after which entries are flushed from the queue */
91a17e3026SRobin Murphy #define IOVA_FQ_TIMEOUT	10
92a17e3026SRobin Murphy 
93a17e3026SRobin Murphy /* Flush queue entry for deferred flushing */
94a17e3026SRobin Murphy struct iova_fq_entry {
95a17e3026SRobin Murphy 	unsigned long iova_pfn;
96a17e3026SRobin Murphy 	unsigned long pages;
97a17e3026SRobin Murphy 	struct list_head freelist;
98a17e3026SRobin Murphy 	u64 counter; /* Flush counter when this entry was added */
99a17e3026SRobin Murphy };
100a17e3026SRobin Murphy 
101a17e3026SRobin Murphy /* Per-CPU flush queue structure */
102a17e3026SRobin Murphy struct iova_fq {
103a17e3026SRobin Murphy 	struct iova_fq_entry entries[IOVA_FQ_SIZE];
104a17e3026SRobin Murphy 	unsigned int head, tail;
105a17e3026SRobin Murphy 	spinlock_t lock;
106a17e3026SRobin Murphy };
107a17e3026SRobin Murphy 
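/*
 * Editorial note (not in the original source): a worked example of the ring
 * arithmetic used by fq_full()/fq_ring_add() below, assuming the default
 * IOVA_FQ_SIZE of 256. The ring deliberately keeps one slot unused so that
 * head == tail can mean "empty":
 *
 *	head == 0,  tail == 0    -> empty, 0 entries queued
 *	head == 5,  tail == 4    -> full, since (4 + 1) % 256 == 5
 *	head == 0,  tail == 255  -> full, since (255 + 1) % 256 == 0
 *
 * So each per-CPU queue holds at most IOVA_FQ_SIZE - 1 = 255 deferred
 * invalidations before fq_flush_iotlb() has to run synchronously.
 */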
108f7f07484SRobin Murphy #define fq_ring_for_each(i, fq) \
109f7f07484SRobin Murphy 	for ((i) = (fq)->head; (i) != (fq)->tail; (i) = ((i) + 1) % IOVA_FQ_SIZE)
110f7f07484SRobin Murphy 
111f7f07484SRobin Murphy static inline bool fq_full(struct iova_fq *fq)
112f7f07484SRobin Murphy {
113f7f07484SRobin Murphy 	assert_spin_locked(&fq->lock);
114f7f07484SRobin Murphy 	return (((fq->tail + 1) % IOVA_FQ_SIZE) == fq->head);
115f7f07484SRobin Murphy }
116f7f07484SRobin Murphy 
117a17e3026SRobin Murphy static inline unsigned int fq_ring_add(struct iova_fq *fq)
118f7f07484SRobin Murphy {
119a17e3026SRobin Murphy 	unsigned int idx = fq->tail;
120f7f07484SRobin Murphy 
121f7f07484SRobin Murphy 	assert_spin_locked(&fq->lock);
122f7f07484SRobin Murphy 
123f7f07484SRobin Murphy 	fq->tail = (idx + 1) % IOVA_FQ_SIZE;
124f7f07484SRobin Murphy 
125f7f07484SRobin Murphy 	return idx;
126f7f07484SRobin Murphy }
127f7f07484SRobin Murphy 
128a17e3026SRobin Murphy static void fq_ring_free(struct iommu_dma_cookie *cookie, struct iova_fq *fq)
129f7f07484SRobin Murphy {
130a17e3026SRobin Murphy 	u64 counter = atomic64_read(&cookie->fq_flush_finish_cnt);
131a17e3026SRobin Murphy 	unsigned int idx;
132f7f07484SRobin Murphy 
133f7f07484SRobin Murphy 	assert_spin_locked(&fq->lock);
134f7f07484SRobin Murphy 
135f7f07484SRobin Murphy 	fq_ring_for_each(idx, fq) {
136f7f07484SRobin Murphy 
137f7f07484SRobin Murphy 		if (fq->entries[idx].counter >= counter)
138f7f07484SRobin Murphy 			break;
139f7f07484SRobin Murphy 
140f7f07484SRobin Murphy 		put_pages_list(&fq->entries[idx].freelist);
141a17e3026SRobin Murphy 		free_iova_fast(&cookie->iovad,
142f7f07484SRobin Murphy 			       fq->entries[idx].iova_pfn,
143f7f07484SRobin Murphy 			       fq->entries[idx].pages);
144f7f07484SRobin Murphy 
145f7f07484SRobin Murphy 		fq->head = (fq->head + 1) % IOVA_FQ_SIZE;
146f7f07484SRobin Murphy 	}
147f7f07484SRobin Murphy }
148f7f07484SRobin Murphy 
149a17e3026SRobin Murphy static void fq_flush_iotlb(struct iommu_dma_cookie *cookie)
150f7f07484SRobin Murphy {
151a17e3026SRobin Murphy 	atomic64_inc(&cookie->fq_flush_start_cnt);
152a17e3026SRobin Murphy 	cookie->fq_domain->ops->flush_iotlb_all(cookie->fq_domain);
153a17e3026SRobin Murphy 	atomic64_inc(&cookie->fq_flush_finish_cnt);
154f7f07484SRobin Murphy }
155f7f07484SRobin Murphy 
156f7f07484SRobin Murphy static void fq_flush_timeout(struct timer_list *t)
157f7f07484SRobin Murphy {
158a17e3026SRobin Murphy 	struct iommu_dma_cookie *cookie = from_timer(cookie, t, fq_timer);
159f7f07484SRobin Murphy 	int cpu;
160f7f07484SRobin Murphy 
161a17e3026SRobin Murphy 	atomic_set(&cookie->fq_timer_on, 0);
162a17e3026SRobin Murphy 	fq_flush_iotlb(cookie);
163f7f07484SRobin Murphy 
164f7f07484SRobin Murphy 	for_each_possible_cpu(cpu) {
165f7f07484SRobin Murphy 		unsigned long flags;
166f7f07484SRobin Murphy 		struct iova_fq *fq;
167f7f07484SRobin Murphy 
168a17e3026SRobin Murphy 		fq = per_cpu_ptr(cookie->fq, cpu);
169f7f07484SRobin Murphy 		spin_lock_irqsave(&fq->lock, flags);
170a17e3026SRobin Murphy 		fq_ring_free(cookie, fq);
171f7f07484SRobin Murphy 		spin_unlock_irqrestore(&fq->lock, flags);
172f7f07484SRobin Murphy 	}
173f7f07484SRobin Murphy }
174f7f07484SRobin Murphy 
175a17e3026SRobin Murphy static void queue_iova(struct iommu_dma_cookie *cookie,
176f7f07484SRobin Murphy 		unsigned long pfn, unsigned long pages,
177f7f07484SRobin Murphy 		struct list_head *freelist)
178f7f07484SRobin Murphy {
179f7f07484SRobin Murphy 	struct iova_fq *fq;
180f7f07484SRobin Murphy 	unsigned long flags;
181a17e3026SRobin Murphy 	unsigned int idx;
182f7f07484SRobin Murphy 
183f7f07484SRobin Murphy 	/*
184f7f07484SRobin Murphy 	 * Order against the IOMMU driver's pagetable update from unmapping
185a17e3026SRobin Murphy 	 * @pte, to guarantee that fq_flush_iotlb() observes that if called
186f7f07484SRobin Murphy 	 * from a different CPU before we release the lock below. Full barrier
187f7f07484SRobin Murphy 	 * so it also pairs with iommu_dma_init_fq() to avoid seeing partially
188f7f07484SRobin Murphy 	 * written fq state here.
189f7f07484SRobin Murphy 	 */
190f7f07484SRobin Murphy 	smp_mb();
191f7f07484SRobin Murphy 
192a17e3026SRobin Murphy 	fq = raw_cpu_ptr(cookie->fq);
193f7f07484SRobin Murphy 	spin_lock_irqsave(&fq->lock, flags);
194f7f07484SRobin Murphy 
195f7f07484SRobin Murphy 	/*
196f7f07484SRobin Murphy 	 * First remove all entries from the flush queue that have already been
197f7f07484SRobin Murphy 	 * flushed out on another CPU. This makes the fq_full() check below less
198f7f07484SRobin Murphy 	 * likely to be true.
199f7f07484SRobin Murphy 	 */
200a17e3026SRobin Murphy 	fq_ring_free(cookie, fq);
201f7f07484SRobin Murphy 
202f7f07484SRobin Murphy 	if (fq_full(fq)) {
203a17e3026SRobin Murphy 		fq_flush_iotlb(cookie);
204a17e3026SRobin Murphy 		fq_ring_free(cookie, fq);
205f7f07484SRobin Murphy 	}
206f7f07484SRobin Murphy 
207f7f07484SRobin Murphy 	idx = fq_ring_add(fq);
208f7f07484SRobin Murphy 
209f7f07484SRobin Murphy 	fq->entries[idx].iova_pfn = pfn;
210f7f07484SRobin Murphy 	fq->entries[idx].pages    = pages;
211a17e3026SRobin Murphy 	fq->entries[idx].counter  = atomic64_read(&cookie->fq_flush_start_cnt);
212f7f07484SRobin Murphy 	list_splice(freelist, &fq->entries[idx].freelist);
213f7f07484SRobin Murphy 
214f7f07484SRobin Murphy 	spin_unlock_irqrestore(&fq->lock, flags);
215f7f07484SRobin Murphy 
216f7f07484SRobin Murphy 	/* Avoid false sharing as much as possible. */
217a17e3026SRobin Murphy 	if (!atomic_read(&cookie->fq_timer_on) &&
218a17e3026SRobin Murphy 	    !atomic_xchg(&cookie->fq_timer_on, 1))
219a17e3026SRobin Murphy 		mod_timer(&cookie->fq_timer,
220f7f07484SRobin Murphy 			  jiffies + msecs_to_jiffies(IOVA_FQ_TIMEOUT));
221f7f07484SRobin Murphy }
222f7f07484SRobin Murphy 
223a17e3026SRobin Murphy static void iommu_dma_free_fq(struct iommu_dma_cookie *cookie)
224f7f07484SRobin Murphy {
225f7f07484SRobin Murphy 	int cpu, idx;
226f7f07484SRobin Murphy 
227a17e3026SRobin Murphy 	if (!cookie->fq)
228f7f07484SRobin Murphy 		return;
229f7f07484SRobin Murphy 
230a17e3026SRobin Murphy 	del_timer_sync(&cookie->fq_timer);
231a17e3026SRobin Murphy 	/* The IOVAs will be torn down separately, so just free our queued pages */
232f7f07484SRobin Murphy 	for_each_possible_cpu(cpu) {
233a17e3026SRobin Murphy 		struct iova_fq *fq = per_cpu_ptr(cookie->fq, cpu);
234f7f07484SRobin Murphy 
235f7f07484SRobin Murphy 		fq_ring_for_each(idx, fq)
236f7f07484SRobin Murphy 			put_pages_list(&fq->entries[idx].freelist);
237f7f07484SRobin Murphy 	}
238f7f07484SRobin Murphy 
239a17e3026SRobin Murphy 	free_percpu(cookie->fq);
240f7f07484SRobin Murphy }
241f7f07484SRobin Murphy 
242a17e3026SRobin Murphy /* sysfs updates are serialised by the mutex of the group owning @domain */
243a17e3026SRobin Murphy int iommu_dma_init_fq(struct iommu_domain *domain)
244f7f07484SRobin Murphy {
245a17e3026SRobin Murphy 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
246f7f07484SRobin Murphy 	struct iova_fq __percpu *queue;
247f7f07484SRobin Murphy 	int i, cpu;
248f7f07484SRobin Murphy 
249a17e3026SRobin Murphy 	if (cookie->fq_domain)
250a17e3026SRobin Murphy 		return 0;
251a17e3026SRobin Murphy 
252a17e3026SRobin Murphy 	atomic64_set(&cookie->fq_flush_start_cnt,  0);
253a17e3026SRobin Murphy 	atomic64_set(&cookie->fq_flush_finish_cnt, 0);
254f7f07484SRobin Murphy 
255f7f07484SRobin Murphy 	queue = alloc_percpu(struct iova_fq);
256a17e3026SRobin Murphy 	if (!queue) {
257a17e3026SRobin Murphy 		pr_warn("iova flush queue initialization failed\n");
258f7f07484SRobin Murphy 		return -ENOMEM;
259a17e3026SRobin Murphy 	}
260f7f07484SRobin Murphy 
261f7f07484SRobin Murphy 	for_each_possible_cpu(cpu) {
262f7f07484SRobin Murphy 		struct iova_fq *fq = per_cpu_ptr(queue, cpu);
263f7f07484SRobin Murphy 
264f7f07484SRobin Murphy 		fq->head = 0;
265f7f07484SRobin Murphy 		fq->tail = 0;
266f7f07484SRobin Murphy 
267f7f07484SRobin Murphy 		spin_lock_init(&fq->lock);
268f7f07484SRobin Murphy 
269f7f07484SRobin Murphy 		for (i = 0; i < IOVA_FQ_SIZE; i++)
270f7f07484SRobin Murphy 			INIT_LIST_HEAD(&fq->entries[i].freelist);
271f7f07484SRobin Murphy 	}
272f7f07484SRobin Murphy 
273a17e3026SRobin Murphy 	cookie->fq = queue;
274f7f07484SRobin Murphy 
275a17e3026SRobin Murphy 	timer_setup(&cookie->fq_timer, fq_flush_timeout, 0);
276a17e3026SRobin Murphy 	atomic_set(&cookie->fq_timer_on, 0);
277a17e3026SRobin Murphy 	/*
278a17e3026SRobin Murphy 	 * Prevent incomplete fq state being observable. Pairs with path from
279a17e3026SRobin Murphy 	 * __iommu_dma_unmap() through iommu_dma_free_iova() to queue_iova()
280a17e3026SRobin Murphy 	 */
281a17e3026SRobin Murphy 	smp_wmb();
282a17e3026SRobin Murphy 	WRITE_ONCE(cookie->fq_domain, domain);
283f7f07484SRobin Murphy 	return 0;
284f7f07484SRobin Murphy }
285f7f07484SRobin Murphy 
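/*
 * Editorial sketch (not part of the file): how a caller typically decides
 * between strict and deferred invalidation. This mirrors the logic in
 * iommu_dma_init_domain() further down; the capability check and the
 * fallback to IOMMU_DOMAIN_DMA are the points to note.
 *
 *	if (device_iommu_capable(dev, IOMMU_CAP_DEFERRED_FLUSH) &&
 *	    !iommu_dma_init_fq(domain))
 *		domain->type = IOMMU_DOMAIN_DMA_FQ;
 *	else
 *		domain->type = IOMMU_DOMAIN_DMA;	(strict invalidation)
 */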
286fdbe574eSRobin Murphy static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
287fdbe574eSRobin Murphy {
288fdbe574eSRobin Murphy 	if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
289fdbe574eSRobin Murphy 		return cookie->iovad.granule;
290fdbe574eSRobin Murphy 	return PAGE_SIZE;
291fdbe574eSRobin Murphy }
292fdbe574eSRobin Murphy 
293fdbe574eSRobin Murphy static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
294fdbe574eSRobin Murphy {
295fdbe574eSRobin Murphy 	struct iommu_dma_cookie *cookie;
296fdbe574eSRobin Murphy 
297fdbe574eSRobin Murphy 	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
298fdbe574eSRobin Murphy 	if (cookie) {
299fdbe574eSRobin Murphy 		INIT_LIST_HEAD(&cookie->msi_page_list);
300fdbe574eSRobin Murphy 		cookie->type = type;
301fdbe574eSRobin Murphy 	}
302fdbe574eSRobin Murphy 	return cookie;
30344bb7e24SRobin Murphy }
30444bb7e24SRobin Murphy 
3050db2e5d1SRobin Murphy /**
3060db2e5d1SRobin Murphy  * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
3070db2e5d1SRobin Murphy  * @domain: IOMMU domain to prepare for DMA-API usage
3080db2e5d1SRobin Murphy  */
3090db2e5d1SRobin Murphy int iommu_get_dma_cookie(struct iommu_domain *domain)
3100db2e5d1SRobin Murphy {
3110db2e5d1SRobin Murphy 	if (domain->iova_cookie)
3120db2e5d1SRobin Murphy 		return -EEXIST;
3130db2e5d1SRobin Murphy 
314fdbe574eSRobin Murphy 	domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
315fdbe574eSRobin Murphy 	if (!domain->iova_cookie)
31644bb7e24SRobin Murphy 		return -ENOMEM;
3170db2e5d1SRobin Murphy 
318ac9a5d52SYunfei Wang 	mutex_init(&domain->iova_cookie->mutex);
31944bb7e24SRobin Murphy 	return 0;
3200db2e5d1SRobin Murphy }
3210db2e5d1SRobin Murphy 
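/*
 * Editorial sketch: the cookie is normally acquired by the IOMMU core when a
 * DMA-API default domain is created, roughly along these lines (names
 * abbreviated, error handling elided):
 *
 *	domain = ops->domain_alloc(IOMMU_DOMAIN_DMA);
 *	if (domain && iommu_get_dma_cookie(domain))
 *		... free the domain and fail ...
 *
 * Individual device drivers do not call this themselves.
 */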
3220db2e5d1SRobin Murphy /**
323fdbe574eSRobin Murphy  * iommu_get_msi_cookie - Acquire just MSI remapping resources
324fdbe574eSRobin Murphy  * @domain: IOMMU domain to prepare
325fdbe574eSRobin Murphy  * @base: Start address of IOVA region for MSI mappings
326fdbe574eSRobin Murphy  *
327fdbe574eSRobin Murphy  * Users who manage their own IOVA allocation and do not want DMA API support,
328fdbe574eSRobin Murphy  * but would still like to take advantage of automatic MSI remapping, can use
329fdbe574eSRobin Murphy  * this to initialise their own domain appropriately. Users should reserve a
330fdbe574eSRobin Murphy  * contiguous IOVA region, starting at @base, large enough to accommodate the
331fdbe574eSRobin Murphy  * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
332fdbe574eSRobin Murphy  * used by the devices attached to @domain.
333fdbe574eSRobin Murphy  */
334fdbe574eSRobin Murphy int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
335fdbe574eSRobin Murphy {
336fdbe574eSRobin Murphy 	struct iommu_dma_cookie *cookie;
337fdbe574eSRobin Murphy 
338fdbe574eSRobin Murphy 	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
339fdbe574eSRobin Murphy 		return -EINVAL;
340fdbe574eSRobin Murphy 
341fdbe574eSRobin Murphy 	if (domain->iova_cookie)
342fdbe574eSRobin Murphy 		return -EEXIST;
343fdbe574eSRobin Murphy 
344fdbe574eSRobin Murphy 	cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
345fdbe574eSRobin Murphy 	if (!cookie)
346fdbe574eSRobin Murphy 		return -ENOMEM;
347fdbe574eSRobin Murphy 
348fdbe574eSRobin Murphy 	cookie->msi_iova = base;
349fdbe574eSRobin Murphy 	domain->iova_cookie = cookie;
350fdbe574eSRobin Murphy 	return 0;
351fdbe574eSRobin Murphy }
352fdbe574eSRobin Murphy EXPORT_SYMBOL(iommu_get_msi_cookie);
353fdbe574eSRobin Murphy 
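/*
 * Editorial sketch: a user of an UNMANAGED domain (VFIO-style) that does its
 * own IOVA management but still wants MSIs remapped might do something like
 * this, where msi_base is an IOVA region the caller has reserved itself:
 *
 *	dma_addr_t msi_base = ...;
 *	if (iommu_get_msi_cookie(domain, msi_base))
 *		... fall back or fail ...
 *
 * The "..." values are placeholders, not constants taken from this file.
 */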
354fdbe574eSRobin Murphy /**
3550db2e5d1SRobin Murphy  * iommu_put_dma_cookie - Release a domain's DMA mapping resources
356fdbe574eSRobin Murphy  * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
357fdbe574eSRobin Murphy  *          iommu_get_msi_cookie()
3580db2e5d1SRobin Murphy  */
3590db2e5d1SRobin Murphy void iommu_put_dma_cookie(struct iommu_domain *domain)
3600db2e5d1SRobin Murphy {
36144bb7e24SRobin Murphy 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
36244bb7e24SRobin Murphy 	struct iommu_dma_msi_page *msi, *tmp;
3630db2e5d1SRobin Murphy 
36444bb7e24SRobin Murphy 	if (!cookie)
3650db2e5d1SRobin Murphy 		return;
3660db2e5d1SRobin Murphy 
367f7f07484SRobin Murphy 	if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule) {
368a17e3026SRobin Murphy 		iommu_dma_free_fq(cookie);
36944bb7e24SRobin Murphy 		put_iova_domain(&cookie->iovad);
370f7f07484SRobin Murphy 	}
37144bb7e24SRobin Murphy 
37244bb7e24SRobin Murphy 	list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
37344bb7e24SRobin Murphy 		list_del(&msi->list);
37444bb7e24SRobin Murphy 		kfree(msi);
37544bb7e24SRobin Murphy 	}
37644bb7e24SRobin Murphy 	kfree(cookie);
3770db2e5d1SRobin Murphy 	domain->iova_cookie = NULL;
3780db2e5d1SRobin Murphy }
3790db2e5d1SRobin Murphy 
380273df963SRobin Murphy /**
381273df963SRobin Murphy  * iommu_dma_get_resv_regions - Reserved region driver helper
382273df963SRobin Murphy  * @dev: Device from iommu_get_resv_regions()
383273df963SRobin Murphy  * @list: Reserved region list from iommu_get_resv_regions()
384273df963SRobin Murphy  *
385273df963SRobin Murphy  * IOMMU drivers can use this to implement their .get_resv_regions callback
386cd2c9fcfSShameer Kolothum  * for general non-IOMMU-specific reservations. Currently, this covers GICv3
387cd2c9fcfSShameer Kolothum  * ITS region reservation on ACPI based ARM platforms that may require HW MSI
388cd2c9fcfSShameer Kolothum  * reservation.
389273df963SRobin Murphy  */
390273df963SRobin Murphy void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
391fade1ec0SRobin Murphy {
392fade1ec0SRobin Murphy 
39398cc4f71SJoerg Roedel 	if (!is_of_node(dev_iommu_fwspec_get(dev)->iommu_fwnode))
39455be25b8SShameer Kolothum 		iort_iommu_get_resv_regions(dev, list);
395f51dc892SShameer Kolothum 
3965cef282eSThierry Reding 	if (dev->of_node)
3975cef282eSThierry Reding 		of_iommu_get_resv_regions(dev, list);
398fade1ec0SRobin Murphy }
399273df963SRobin Murphy EXPORT_SYMBOL(iommu_dma_get_resv_regions);
400fade1ec0SRobin Murphy 
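/*
 * Editorial sketch: an IOMMU driver's .get_resv_regions callback usually adds
 * its own software-MSI window and then chains to the helper above, e.g.
 * (MY_MSI_BASE/MY_MSI_SIZE and the function name are illustrative):
 *
 *	static void my_iommu_get_resv_regions(struct device *dev,
 *					      struct list_head *head)
 *	{
 *		struct iommu_resv_region *region;
 *
 *		region = iommu_alloc_resv_region(MY_MSI_BASE, MY_MSI_SIZE,
 *						 IOMMU_WRITE | IOMMU_NOEXEC |
 *						 IOMMU_MMIO, IOMMU_RESV_SW_MSI,
 *						 GFP_KERNEL);
 *		if (region)
 *			list_add_tail(&region->list, head);
 *		iommu_dma_get_resv_regions(dev, head);
 *	}
 */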
4017c1b058cSRobin Murphy static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
4027c1b058cSRobin Murphy 		phys_addr_t start, phys_addr_t end)
4037c1b058cSRobin Murphy {
4047c1b058cSRobin Murphy 	struct iova_domain *iovad = &cookie->iovad;
4057c1b058cSRobin Murphy 	struct iommu_dma_msi_page *msi_page;
4067c1b058cSRobin Murphy 	int i, num_pages;
4077c1b058cSRobin Murphy 
4087c1b058cSRobin Murphy 	start -= iova_offset(iovad, start);
4097c1b058cSRobin Murphy 	num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);
4107c1b058cSRobin Murphy 
41165ac74f1SMarc Zyngier 	for (i = 0; i < num_pages; i++) {
41265ac74f1SMarc Zyngier 		msi_page = kmalloc(sizeof(*msi_page), GFP_KERNEL);
4137c1b058cSRobin Murphy 		if (!msi_page)
4147c1b058cSRobin Murphy 			return -ENOMEM;
4157c1b058cSRobin Murphy 
41665ac74f1SMarc Zyngier 		msi_page->phys = start;
41765ac74f1SMarc Zyngier 		msi_page->iova = start;
41865ac74f1SMarc Zyngier 		INIT_LIST_HEAD(&msi_page->list);
41965ac74f1SMarc Zyngier 		list_add(&msi_page->list, &cookie->msi_page_list);
4207c1b058cSRobin Murphy 		start += iovad->granule;
4217c1b058cSRobin Murphy 	}
4227c1b058cSRobin Murphy 
4237c1b058cSRobin Murphy 	return 0;
4247c1b058cSRobin Murphy }
4257c1b058cSRobin Murphy 
426b8397a8fSRobin Murphy static int iommu_dma_ranges_sort(void *priv, const struct list_head *a,
427b8397a8fSRobin Murphy 		const struct list_head *b)
428b8397a8fSRobin Murphy {
429b8397a8fSRobin Murphy 	struct resource_entry *res_a = list_entry(a, typeof(*res_a), node);
430b8397a8fSRobin Murphy 	struct resource_entry *res_b = list_entry(b, typeof(*res_b), node);
431b8397a8fSRobin Murphy 
432b8397a8fSRobin Murphy 	return res_a->res->start > res_b->res->start;
433b8397a8fSRobin Murphy }
434b8397a8fSRobin Murphy 
435aadad097SSrinath Mannam static int iova_reserve_pci_windows(struct pci_dev *dev,
436cd2c9fcfSShameer Kolothum 		struct iova_domain *iovad)
437cd2c9fcfSShameer Kolothum {
438cd2c9fcfSShameer Kolothum 	struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
439cd2c9fcfSShameer Kolothum 	struct resource_entry *window;
440cd2c9fcfSShameer Kolothum 	unsigned long lo, hi;
441aadad097SSrinath Mannam 	phys_addr_t start = 0, end;
442cd2c9fcfSShameer Kolothum 
443cd2c9fcfSShameer Kolothum 	resource_list_for_each_entry(window, &bridge->windows) {
444cd2c9fcfSShameer Kolothum 		if (resource_type(window->res) != IORESOURCE_MEM)
445cd2c9fcfSShameer Kolothum 			continue;
446cd2c9fcfSShameer Kolothum 
447cd2c9fcfSShameer Kolothum 		lo = iova_pfn(iovad, window->res->start - window->offset);
448cd2c9fcfSShameer Kolothum 		hi = iova_pfn(iovad, window->res->end - window->offset);
449cd2c9fcfSShameer Kolothum 		reserve_iova(iovad, lo, hi);
450cd2c9fcfSShameer Kolothum 	}
451aadad097SSrinath Mannam 
452aadad097SSrinath Mannam 	/* Get reserved DMA windows from host bridge */
453b8397a8fSRobin Murphy 	list_sort(NULL, &bridge->dma_ranges, iommu_dma_ranges_sort);
454aadad097SSrinath Mannam 	resource_list_for_each_entry(window, &bridge->dma_ranges) {
455aadad097SSrinath Mannam 		end = window->res->start - window->offset;
456aadad097SSrinath Mannam resv_iova:
457aadad097SSrinath Mannam 		if (end > start) {
458aadad097SSrinath Mannam 			lo = iova_pfn(iovad, start);
459aadad097SSrinath Mannam 			hi = iova_pfn(iovad, end);
460aadad097SSrinath Mannam 			reserve_iova(iovad, lo, hi);
461571f3160SSrinath Mannam 		} else if (end < start) {
462b8397a8fSRobin Murphy 			/* DMA ranges should be non-overlapping */
463571f3160SSrinath Mannam 			dev_err(&dev->dev,
4647154cbd3SJoerg Roedel 				"Failed to reserve IOVA [%pa-%pa]\n",
4657154cbd3SJoerg Roedel 				&start, &end);
466aadad097SSrinath Mannam 			return -EINVAL;
467aadad097SSrinath Mannam 		}
468aadad097SSrinath Mannam 
469aadad097SSrinath Mannam 		start = window->res->end - window->offset + 1;
470aadad097SSrinath Mannam 		/* If window is last entry */
471aadad097SSrinath Mannam 		if (window->node.next == &bridge->dma_ranges &&
47229fcea8cSArnd Bergmann 		    end != ~(phys_addr_t)0) {
47329fcea8cSArnd Bergmann 			end = ~(phys_addr_t)0;
474aadad097SSrinath Mannam 			goto resv_iova;
475aadad097SSrinath Mannam 		}
476aadad097SSrinath Mannam 	}
477aadad097SSrinath Mannam 
478aadad097SSrinath Mannam 	return 0;
479cd2c9fcfSShameer Kolothum }
480cd2c9fcfSShameer Kolothum 
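/*
 * Editorial example (assumed numbers): if the host bridge advertises a single
 * dma-ranges entry covering 0x40000000..0x7fffffff, the loop above reserves
 * the two gaps that must never be handed out as IOVAs:
 *
 *	[0x00000000 .. 0x3fffffff]		(before the first range)
 *	[0x80000000 .. ~(phys_addr_t)0]		(after the last range)
 *
 * leaving only the bridge's usable DMA window available to the IOVA
 * allocator.
 */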
4817c1b058cSRobin Murphy static int iova_reserve_iommu_regions(struct device *dev,
4827c1b058cSRobin Murphy 		struct iommu_domain *domain)
4837c1b058cSRobin Murphy {
4847c1b058cSRobin Murphy 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
4857c1b058cSRobin Murphy 	struct iova_domain *iovad = &cookie->iovad;
4867c1b058cSRobin Murphy 	struct iommu_resv_region *region;
4877c1b058cSRobin Murphy 	LIST_HEAD(resv_regions);
4887c1b058cSRobin Murphy 	int ret = 0;
4897c1b058cSRobin Murphy 
490aadad097SSrinath Mannam 	if (dev_is_pci(dev)) {
491aadad097SSrinath Mannam 		ret = iova_reserve_pci_windows(to_pci_dev(dev), iovad);
492aadad097SSrinath Mannam 		if (ret)
493aadad097SSrinath Mannam 			return ret;
494aadad097SSrinath Mannam 	}
495cd2c9fcfSShameer Kolothum 
4967c1b058cSRobin Murphy 	iommu_get_resv_regions(dev, &resv_regions);
4977c1b058cSRobin Murphy 	list_for_each_entry(region, &resv_regions, list) {
4987c1b058cSRobin Murphy 		unsigned long lo, hi;
4997c1b058cSRobin Murphy 
5007c1b058cSRobin Murphy 		/* We ARE the software that manages these! */
5017c1b058cSRobin Murphy 		if (region->type == IOMMU_RESV_SW_MSI)
5027c1b058cSRobin Murphy 			continue;
5037c1b058cSRobin Murphy 
5047c1b058cSRobin Murphy 		lo = iova_pfn(iovad, region->start);
5057c1b058cSRobin Murphy 		hi = iova_pfn(iovad, region->start + region->length - 1);
5067c1b058cSRobin Murphy 		reserve_iova(iovad, lo, hi);
5077c1b058cSRobin Murphy 
5087c1b058cSRobin Murphy 		if (region->type == IOMMU_RESV_MSI)
5097c1b058cSRobin Murphy 			ret = cookie_init_hw_msi_region(cookie, region->start,
5107c1b058cSRobin Murphy 					region->start + region->length);
5117c1b058cSRobin Murphy 		if (ret)
5127c1b058cSRobin Murphy 			break;
5137c1b058cSRobin Murphy 	}
5147c1b058cSRobin Murphy 	iommu_put_resv_regions(dev, &resv_regions);
5157c1b058cSRobin Murphy 
5167c1b058cSRobin Murphy 	return ret;
5177c1b058cSRobin Murphy }
5187c1b058cSRobin Murphy 
51982c3cefbSLu Baolu static bool dev_is_untrusted(struct device *dev)
52082c3cefbSLu Baolu {
52182c3cefbSLu Baolu 	return dev_is_pci(dev) && to_pci_dev(dev)->untrusted;
52282c3cefbSLu Baolu }
52382c3cefbSLu Baolu 
524861370f4SCatalin Marinas static bool dev_use_swiotlb(struct device *dev, size_t size,
525861370f4SCatalin Marinas 			    enum dma_data_direction dir)
5262e727bffSDavid Stevens {
527861370f4SCatalin Marinas 	return IS_ENABLED(CONFIG_SWIOTLB) &&
528861370f4SCatalin Marinas 		(dev_is_untrusted(dev) ||
529861370f4SCatalin Marinas 		 dma_kmalloc_needs_bounce(dev, size, dir));
530861370f4SCatalin Marinas }
531861370f4SCatalin Marinas 
532861370f4SCatalin Marinas static bool dev_use_sg_swiotlb(struct device *dev, struct scatterlist *sg,
533861370f4SCatalin Marinas 			       int nents, enum dma_data_direction dir)
534861370f4SCatalin Marinas {
535861370f4SCatalin Marinas 	struct scatterlist *s;
536861370f4SCatalin Marinas 	int i;
537861370f4SCatalin Marinas 
538861370f4SCatalin Marinas 	if (!IS_ENABLED(CONFIG_SWIOTLB))
539861370f4SCatalin Marinas 		return false;
540861370f4SCatalin Marinas 
541861370f4SCatalin Marinas 	if (dev_is_untrusted(dev))
542861370f4SCatalin Marinas 		return true;
543861370f4SCatalin Marinas 
544861370f4SCatalin Marinas 	/*
545861370f4SCatalin Marinas 	 * If kmalloc() buffers are not DMA-safe for this device and
546861370f4SCatalin Marinas 	 * direction, check the individual lengths in the sg list. If any
547861370f4SCatalin Marinas 	 * element is deemed unsafe, use the swiotlb for bouncing.
548861370f4SCatalin Marinas 	 */
549861370f4SCatalin Marinas 	if (!dma_kmalloc_safe(dev, dir)) {
550861370f4SCatalin Marinas 		for_each_sg(sg, s, nents, i)
551861370f4SCatalin Marinas 			if (!dma_kmalloc_size_aligned(s->length))
552861370f4SCatalin Marinas 				return true;
553861370f4SCatalin Marinas 	}
554861370f4SCatalin Marinas 
555861370f4SCatalin Marinas 	return false;
5562e727bffSDavid Stevens }
5572e727bffSDavid Stevens 
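/*
 * Editorial note: besides the "untrusted PCI device" case, bouncing is also
 * used when small kmalloc() buffers are not DMA-safe. As an assumed example,
 * on a non-coherent arm64 system with 128-byte cache lines, a 32-byte
 * kmalloc() buffer mapped for DMA_FROM_DEVICE may share cache lines with
 * unrelated data, so dma_kmalloc_needs_bounce() asks for a swiotlb bounce
 * rather than risking corruption when the cache is invalidated.
 */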
5580db2e5d1SRobin Murphy /**
5590db2e5d1SRobin Murphy  * iommu_dma_init_domain - Initialise a DMA mapping domain
5600db2e5d1SRobin Murphy  * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
5610db2e5d1SRobin Murphy  * @base: IOVA at which the mappable address space starts
562ac6d7046SJean-Philippe Brucker  * @limit: Last address of the IOVA space
563fade1ec0SRobin Murphy  * @dev: Device the domain is being initialised for
5640db2e5d1SRobin Murphy  *
565ac6d7046SJean-Philippe Brucker  * @base and @limit + 1 should be exact multiples of IOMMU page granularity to
5660db2e5d1SRobin Murphy  * avoid rounding surprises. If necessary, we reserve the page at address 0
5670db2e5d1SRobin Murphy  * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
5680db2e5d1SRobin Murphy  * any change which could make prior IOVAs invalid will fail.
5690db2e5d1SRobin Murphy  */
57006d60728SChristoph Hellwig static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
571ac6d7046SJean-Philippe Brucker 				 dma_addr_t limit, struct device *dev)
5720db2e5d1SRobin Murphy {
573fdbe574eSRobin Murphy 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
574c61a4633SShaokun Zhang 	unsigned long order, base_pfn;
5756b0c54e7SYunsheng Lin 	struct iova_domain *iovad;
57632e92d9fSJohn Garry 	int ret;
5770db2e5d1SRobin Murphy 
578fdbe574eSRobin Murphy 	if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
579fdbe574eSRobin Murphy 		return -EINVAL;
5800db2e5d1SRobin Murphy 
5816b0c54e7SYunsheng Lin 	iovad = &cookie->iovad;
5826b0c54e7SYunsheng Lin 
5830db2e5d1SRobin Murphy 	/* Use the smallest supported page size for IOVA granularity */
584d16e0faaSRobin Murphy 	order = __ffs(domain->pgsize_bitmap);
5850db2e5d1SRobin Murphy 	base_pfn = max_t(unsigned long, 1, base >> order);
5860db2e5d1SRobin Murphy 
5870db2e5d1SRobin Murphy 	/* Check the domain allows at least some access to the device... */
5880db2e5d1SRobin Murphy 	if (domain->geometry.force_aperture) {
5890db2e5d1SRobin Murphy 		if (base > domain->geometry.aperture_end ||
590ac6d7046SJean-Philippe Brucker 		    limit < domain->geometry.aperture_start) {
5910db2e5d1SRobin Murphy 			pr_warn("specified DMA range outside IOMMU capability\n");
5920db2e5d1SRobin Murphy 			return -EFAULT;
5930db2e5d1SRobin Murphy 		}
5940db2e5d1SRobin Murphy 		/* ...then finally give it a kicking to make sure it fits */
5950db2e5d1SRobin Murphy 		base_pfn = max_t(unsigned long, base_pfn,
5960db2e5d1SRobin Murphy 				domain->geometry.aperture_start >> order);
5970db2e5d1SRobin Murphy 	}
5980db2e5d1SRobin Murphy 
599f51d7bb7SRobin Murphy 	/* start_pfn is always nonzero for an already-initialised domain */
600ac9a5d52SYunfei Wang 	mutex_lock(&cookie->mutex);
6010db2e5d1SRobin Murphy 	if (iovad->start_pfn) {
6020db2e5d1SRobin Murphy 		if (1UL << order != iovad->granule ||
603f51d7bb7SRobin Murphy 		    base_pfn != iovad->start_pfn) {
6040db2e5d1SRobin Murphy 			pr_warn("Incompatible range for DMA domain\n");
605ac9a5d52SYunfei Wang 			ret = -EFAULT;
606ac9a5d52SYunfei Wang 			goto done_unlock;
6070db2e5d1SRobin Murphy 		}
6087c1b058cSRobin Murphy 
609ac9a5d52SYunfei Wang 		ret = 0;
610ac9a5d52SYunfei Wang 		goto done_unlock;
6110db2e5d1SRobin Murphy 	}
6127c1b058cSRobin Murphy 
613aa3ac946SZhen Lei 	init_iova_domain(iovad, 1UL << order, base_pfn);
61432e92d9fSJohn Garry 	ret = iova_domain_init_rcaches(iovad);
61532e92d9fSJohn Garry 	if (ret)
616ac9a5d52SYunfei Wang 		goto done_unlock;
6172da274cdSZhen Lei 
618c208916fSRobin Murphy 	/* If the FQ fails we can simply fall back to strict mode */
619a4fdd976SRobin Murphy 	if (domain->type == IOMMU_DOMAIN_DMA_FQ &&
620a4fdd976SRobin Murphy 	    (!device_iommu_capable(dev, IOMMU_CAP_DEFERRED_FLUSH) || iommu_dma_init_fq(domain)))
621c208916fSRobin Murphy 		domain->type = IOMMU_DOMAIN_DMA;
6227c1b058cSRobin Murphy 
623ac9a5d52SYunfei Wang 	ret = iova_reserve_iommu_regions(dev, domain);
624ac9a5d52SYunfei Wang 
625ac9a5d52SYunfei Wang done_unlock:
626ac9a5d52SYunfei Wang 	mutex_unlock(&cookie->mutex);
627ac9a5d52SYunfei Wang 	return ret;
6287c1b058cSRobin Murphy }
6290db2e5d1SRobin Murphy 
6300db2e5d1SRobin Murphy /**
631737c85caSMitchel Humpherys  * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
632737c85caSMitchel Humpherys  *                    page flags.
6330db2e5d1SRobin Murphy  * @dir: Direction of DMA transfer
6340db2e5d1SRobin Murphy  * @coherent: Is the DMA master cache-coherent?
635737c85caSMitchel Humpherys  * @attrs: DMA attributes for the mapping
6360db2e5d1SRobin Murphy  *
6370db2e5d1SRobin Murphy  * Return: corresponding IOMMU API page protection flags
6380db2e5d1SRobin Murphy  */
63906d60728SChristoph Hellwig static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
640737c85caSMitchel Humpherys 		     unsigned long attrs)
6410db2e5d1SRobin Murphy {
6420db2e5d1SRobin Murphy 	int prot = coherent ? IOMMU_CACHE : 0;
6430db2e5d1SRobin Murphy 
644737c85caSMitchel Humpherys 	if (attrs & DMA_ATTR_PRIVILEGED)
645737c85caSMitchel Humpherys 		prot |= IOMMU_PRIV;
646737c85caSMitchel Humpherys 
6470db2e5d1SRobin Murphy 	switch (dir) {
6480db2e5d1SRobin Murphy 	case DMA_BIDIRECTIONAL:
6490db2e5d1SRobin Murphy 		return prot | IOMMU_READ | IOMMU_WRITE;
6500db2e5d1SRobin Murphy 	case DMA_TO_DEVICE:
6510db2e5d1SRobin Murphy 		return prot | IOMMU_READ;
6520db2e5d1SRobin Murphy 	case DMA_FROM_DEVICE:
6530db2e5d1SRobin Murphy 		return prot | IOMMU_WRITE;
6540db2e5d1SRobin Murphy 	default:
6550db2e5d1SRobin Murphy 		return 0;
6560db2e5d1SRobin Murphy 	}
6570db2e5d1SRobin Murphy }
6580db2e5d1SRobin Murphy 
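/*
 * Editorial example: for a non-coherent device reading a buffer with the
 * privileged attribute set, dma_info_to_prot(DMA_TO_DEVICE, false,
 * DMA_ATTR_PRIVILEGED) returns IOMMU_READ | IOMMU_PRIV; IOMMU_CACHE is only
 * added when the master is cache-coherent.
 */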
659842fe519SRobin Murphy static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
660bd036d2fSRobin Murphy 		size_t size, u64 dma_limit, struct device *dev)
6610db2e5d1SRobin Murphy {
662a44e6657SRobin Murphy 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
663a44e6657SRobin Murphy 	struct iova_domain *iovad = &cookie->iovad;
664791c2b17SRobin Murphy 	unsigned long shift, iova_len, iova;
6650db2e5d1SRobin Murphy 
666a44e6657SRobin Murphy 	if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
667a44e6657SRobin Murphy 		cookie->msi_iova += size;
668a44e6657SRobin Murphy 		return cookie->msi_iova - size;
669a44e6657SRobin Murphy 	}
670a44e6657SRobin Murphy 
671a44e6657SRobin Murphy 	shift = iova_shift(iovad);
672a44e6657SRobin Murphy 	iova_len = size >> shift;
673a44e6657SRobin Murphy 
674a7ba70f1SNicolas Saenz Julienne 	dma_limit = min_not_zero(dma_limit, dev->bus_dma_limit);
67503bfdc31SRobin Murphy 
676c987ff0dSRobin Murphy 	if (domain->geometry.force_aperture)
677bd036d2fSRobin Murphy 		dma_limit = min(dma_limit, (u64)domain->geometry.aperture_end);
678122fac03SRobin Murphy 
679791c2b17SRobin Murphy 	/*
680791c2b17SRobin Murphy 	 * Try to use all the 32-bit PCI addresses first. The original SAC vs.
681791c2b17SRobin Murphy 	 * DAC reasoning loses relevance with PCIe, but enough hardware and
682791c2b17SRobin Murphy 	 * firmware bugs are still lurking out there that it's safest not to
683791c2b17SRobin Murphy 	 * venture into the 64-bit space until necessary.
684791c2b17SRobin Murphy 	 *
685791c2b17SRobin Murphy 	 * If your device goes wrong after seeing the notice then likely either
686791c2b17SRobin Murphy 	 * its driver is not setting DMA masks accurately, the hardware has
687791c2b17SRobin Murphy 	 * some inherent bug in handling >32-bit addresses, or not all the
688791c2b17SRobin Murphy 	 * expected address bits are wired up between the device and the IOMMU.
689791c2b17SRobin Murphy 	 */
690791c2b17SRobin Murphy 	if (dma_limit > DMA_BIT_MASK(32) && dev->iommu->pci_32bit_workaround) {
691538d5b33STomasz Nowicki 		iova = alloc_iova_fast(iovad, iova_len,
692538d5b33STomasz Nowicki 				       DMA_BIT_MASK(32) >> shift, false);
693791c2b17SRobin Murphy 		if (iova)
694791c2b17SRobin Murphy 			goto done;
695122fac03SRobin Murphy 
696791c2b17SRobin Murphy 		dev->iommu->pci_32bit_workaround = false;
697791c2b17SRobin Murphy 		dev_notice(dev, "Using %d-bit DMA addresses\n", bits_per(dma_limit));
698791c2b17SRobin Murphy 	}
699bb65a64cSRobin Murphy 
700791c2b17SRobin Murphy 	iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift, true);
701791c2b17SRobin Murphy done:
702bb65a64cSRobin Murphy 	return (dma_addr_t)iova << shift;
7030db2e5d1SRobin Murphy }
7040db2e5d1SRobin Murphy 
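/*
 * Editorial example (assumed 4KiB IOVA granule, shift == 12): mapping
 * size == 0x3000 gives iova_len == 3 pages. For a PCI device with the
 * 32-bit workaround still enabled and a 64-bit dma_limit, the first attempt
 * is alloc_iova_fast(iovad, 3, DMA_BIT_MASK(32) >> 12, false); only if that
 * fails does the allocator disable the workaround, print the "Using %d-bit
 * DMA addresses" notice and retry with the full limit.
 */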
705842fe519SRobin Murphy static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
706452e69b5SRobin Murphy 		dma_addr_t iova, size_t size, struct iommu_iotlb_gather *gather)
7070db2e5d1SRobin Murphy {
708842fe519SRobin Murphy 	struct iova_domain *iovad = &cookie->iovad;
7090db2e5d1SRobin Murphy 
710a44e6657SRobin Murphy 	/* The MSI case is only ever cleaning up its most recent allocation */
711bb65a64cSRobin Murphy 	if (cookie->type == IOMMU_DMA_MSI_COOKIE)
712a44e6657SRobin Murphy 		cookie->msi_iova -= size;
713452e69b5SRobin Murphy 	else if (gather && gather->queued)
714a17e3026SRobin Murphy 		queue_iova(cookie, iova_pfn(iovad, iova),
7152a2b8eaaSTom Murphy 				size >> iova_shift(iovad),
71687f60cc6SMatthew Wilcox (Oracle) 				&gather->freelist);
717bb65a64cSRobin Murphy 	else
7181cc896edSRobin Murphy 		free_iova_fast(iovad, iova_pfn(iovad, iova),
7191cc896edSRobin Murphy 				size >> iova_shift(iovad));
720842fe519SRobin Murphy }
721842fe519SRobin Murphy 
722b61d271eSRobin Murphy static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
723842fe519SRobin Murphy 		size_t size)
724842fe519SRobin Murphy {
725b61d271eSRobin Murphy 	struct iommu_domain *domain = iommu_get_dma_domain(dev);
726a44e6657SRobin Murphy 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
727a44e6657SRobin Murphy 	struct iova_domain *iovad = &cookie->iovad;
728842fe519SRobin Murphy 	size_t iova_off = iova_offset(iovad, dma_addr);
729a7d20dc1SWill Deacon 	struct iommu_iotlb_gather iotlb_gather;
730a7d20dc1SWill Deacon 	size_t unmapped;
731842fe519SRobin Murphy 
732842fe519SRobin Murphy 	dma_addr -= iova_off;
733842fe519SRobin Murphy 	size = iova_align(iovad, size + iova_off);
734a7d20dc1SWill Deacon 	iommu_iotlb_gather_init(&iotlb_gather);
735452e69b5SRobin Murphy 	iotlb_gather.queued = READ_ONCE(cookie->fq_domain);
736842fe519SRobin Murphy 
737a7d20dc1SWill Deacon 	unmapped = iommu_unmap_fast(domain, dma_addr, size, &iotlb_gather);
738a7d20dc1SWill Deacon 	WARN_ON(unmapped != size);
739a7d20dc1SWill Deacon 
740452e69b5SRobin Murphy 	if (!iotlb_gather.queued)
741aae4c8e2STom Murphy 		iommu_iotlb_sync(domain, &iotlb_gather);
742452e69b5SRobin Murphy 	iommu_dma_free_iova(cookie, dma_addr, size, &iotlb_gather);
7430db2e5d1SRobin Murphy }
7440db2e5d1SRobin Murphy 
74592aec09cSChristoph Hellwig static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
746bd036d2fSRobin Murphy 		size_t size, int prot, u64 dma_mask)
74792aec09cSChristoph Hellwig {
748b61d271eSRobin Murphy 	struct iommu_domain *domain = iommu_get_dma_domain(dev);
74992aec09cSChristoph Hellwig 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
7508af23fadSRobin Murphy 	struct iova_domain *iovad = &cookie->iovad;
7518af23fadSRobin Murphy 	size_t iova_off = iova_offset(iovad, phys);
75292aec09cSChristoph Hellwig 	dma_addr_t iova;
75392aec09cSChristoph Hellwig 
754a8e8af35SLianbo Jiang 	if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
7553ab65729SLianbo Jiang 	    iommu_deferred_attach(dev, domain))
756795bbbb9STom Murphy 		return DMA_MAPPING_ERROR;
757795bbbb9STom Murphy 
7588af23fadSRobin Murphy 	size = iova_align(iovad, size + iova_off);
75992aec09cSChristoph Hellwig 
7606e235020STom Murphy 	iova = iommu_dma_alloc_iova(domain, size, dma_mask, dev);
76192aec09cSChristoph Hellwig 	if (!iova)
76292aec09cSChristoph Hellwig 		return DMA_MAPPING_ERROR;
76392aec09cSChristoph Hellwig 
7644dc6376aSJason Gunthorpe 	if (iommu_map(domain, iova, phys - iova_off, size, prot, GFP_ATOMIC)) {
7652a2b8eaaSTom Murphy 		iommu_dma_free_iova(cookie, iova, size, NULL);
76692aec09cSChristoph Hellwig 		return DMA_MAPPING_ERROR;
76792aec09cSChristoph Hellwig 	}
76892aec09cSChristoph Hellwig 	return iova + iova_off;
76992aec09cSChristoph Hellwig }
77092aec09cSChristoph Hellwig 
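/*
 * Editorial example of the alignment arithmetic above (assumed 4KiB granule):
 * for phys == 0x80000840 and size == 0x1000, iova_off == 0x840, the mapped
 * size becomes iova_align(iovad, 0x1840) == 0x2000 (two granules), the
 * mapping is created at phys - iova_off == 0x80000000, and the returned DMA
 * address is iova + 0x840 so the caller keeps its original sub-page offset.
 */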
7710db2e5d1SRobin Murphy static void __iommu_dma_free_pages(struct page **pages, int count)
7720db2e5d1SRobin Murphy {
7730db2e5d1SRobin Murphy 	while (count--)
7740db2e5d1SRobin Murphy 		__free_page(pages[count]);
7750db2e5d1SRobin Murphy 	kvfree(pages);
7760db2e5d1SRobin Murphy }
7770db2e5d1SRobin Murphy 
778c4b17afbSGanapatrao Kulkarni static struct page **__iommu_dma_alloc_pages(struct device *dev,
779c4b17afbSGanapatrao Kulkarni 		unsigned int count, unsigned long order_mask, gfp_t gfp)
7800db2e5d1SRobin Murphy {
7810db2e5d1SRobin Murphy 	struct page **pages;
782c4b17afbSGanapatrao Kulkarni 	unsigned int i = 0, nid = dev_to_node(dev);
7833b6b7e19SRobin Murphy 
78423baf831SKirill A. Shutemov 	order_mask &= GENMASK(MAX_ORDER, 0);
7853b6b7e19SRobin Murphy 	if (!order_mask)
7863b6b7e19SRobin Murphy 		return NULL;
7870db2e5d1SRobin Murphy 
788ab6f4b00SGustavo A. R. Silva 	pages = kvcalloc(count, sizeof(*pages), GFP_KERNEL);
7890db2e5d1SRobin Murphy 	if (!pages)
7900db2e5d1SRobin Murphy 		return NULL;
7910db2e5d1SRobin Murphy 
7920db2e5d1SRobin Murphy 	/* IOMMU can map any pages, so highmem can also be used here */
7930db2e5d1SRobin Murphy 	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;
7940db2e5d1SRobin Murphy 
7950db2e5d1SRobin Murphy 	while (count) {
7960db2e5d1SRobin Murphy 		struct page *page = NULL;
7973b6b7e19SRobin Murphy 		unsigned int order_size;
7980db2e5d1SRobin Murphy 
7990db2e5d1SRobin Murphy 		/*
8000db2e5d1SRobin Murphy 		 * Higher-order allocations are a convenience rather
8010db2e5d1SRobin Murphy 		 * than a necessity, hence using __GFP_NORETRY until
8023b6b7e19SRobin Murphy 		 * falling back to minimum-order allocations.
8030db2e5d1SRobin Murphy 		 */
80461883d3cSKirill A. Shutemov 		for (order_mask &= GENMASK(__fls(count), 0);
8053b6b7e19SRobin Murphy 		     order_mask; order_mask &= ~order_size) {
8063b6b7e19SRobin Murphy 			unsigned int order = __fls(order_mask);
807c4b17afbSGanapatrao Kulkarni 			gfp_t alloc_flags = gfp;
8083b6b7e19SRobin Murphy 
8093b6b7e19SRobin Murphy 			order_size = 1U << order;
810c4b17afbSGanapatrao Kulkarni 			if (order_mask > order_size)
811c4b17afbSGanapatrao Kulkarni 				alloc_flags |= __GFP_NORETRY;
812c4b17afbSGanapatrao Kulkarni 			page = alloc_pages_node(nid, alloc_flags, order);
8130db2e5d1SRobin Murphy 			if (!page)
8140db2e5d1SRobin Murphy 				continue;
8154604393cSRobin Murphy 			if (order)
8160db2e5d1SRobin Murphy 				split_page(page, order);
8170db2e5d1SRobin Murphy 			break;
8180db2e5d1SRobin Murphy 		}
8190db2e5d1SRobin Murphy 		if (!page) {
8200db2e5d1SRobin Murphy 			__iommu_dma_free_pages(pages, i);
8210db2e5d1SRobin Murphy 			return NULL;
8220db2e5d1SRobin Murphy 		}
8233b6b7e19SRobin Murphy 		count -= order_size;
8243b6b7e19SRobin Murphy 		while (order_size--)
8250db2e5d1SRobin Murphy 			pages[i++] = page++;
8260db2e5d1SRobin Murphy 	}
8270db2e5d1SRobin Murphy 	return pages;
8280db2e5d1SRobin Murphy }
8290db2e5d1SRobin Murphy 
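/*
 * Editorial example of the order_mask loop above: suppose count == 9 pages
 * and the domain supports 4KiB and 16KiB mappings, i.e. order_mask == 0b101
 * (orders 0 and 2). The first pass clamps to __fls(9) == 3, picks order 2,
 * allocates and splits 4 pages (count 9 -> 5); the second pass again picks
 * order 2 (count 5 -> 1); the last page comes from an order-0 allocation.
 * If a higher-order allocation ever fails, that order is dropped from the
 * mask and the loop retries with smaller orders instead of giving up.
 */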
8308230ce9aSChristoph Hellwig /*
8318230ce9aSChristoph Hellwig  * If size is less than PAGE_SIZE, then a full CPU page will be allocated,
8320db2e5d1SRobin Murphy  * but an IOMMU which supports smaller pages might not map the whole thing.
8330db2e5d1SRobin Murphy  */
8348230ce9aSChristoph Hellwig static struct page **__iommu_dma_alloc_noncontiguous(struct device *dev,
8358230ce9aSChristoph Hellwig 		size_t size, struct sg_table *sgt, gfp_t gfp, pgprot_t prot,
836e8d39a90SChristoph Hellwig 		unsigned long attrs)
8370db2e5d1SRobin Murphy {
83843c5bf11SRobin Murphy 	struct iommu_domain *domain = iommu_get_dma_domain(dev);
839842fe519SRobin Murphy 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
840842fe519SRobin Murphy 	struct iova_domain *iovad = &cookie->iovad;
84121b95aafSChristoph Hellwig 	bool coherent = dev_is_dma_coherent(dev);
84221b95aafSChristoph Hellwig 	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
84321b95aafSChristoph Hellwig 	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
8440db2e5d1SRobin Murphy 	struct page **pages;
845842fe519SRobin Murphy 	dma_addr_t iova;
846a3884774SYunfei Wang 	ssize_t ret;
8470db2e5d1SRobin Murphy 
848a8e8af35SLianbo Jiang 	if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
8493ab65729SLianbo Jiang 	    iommu_deferred_attach(dev, domain))
850795bbbb9STom Murphy 		return NULL;
851795bbbb9STom Murphy 
8523b6b7e19SRobin Murphy 	min_size = alloc_sizes & -alloc_sizes;
8533b6b7e19SRobin Murphy 	if (min_size < PAGE_SIZE) {
8543b6b7e19SRobin Murphy 		min_size = PAGE_SIZE;
8553b6b7e19SRobin Murphy 		alloc_sizes |= PAGE_SIZE;
8563b6b7e19SRobin Murphy 	} else {
8573b6b7e19SRobin Murphy 		size = ALIGN(size, min_size);
8583b6b7e19SRobin Murphy 	}
85900085f1eSKrzysztof Kozlowski 	if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
8603b6b7e19SRobin Murphy 		alloc_sizes = min_size;
8613b6b7e19SRobin Murphy 
8623b6b7e19SRobin Murphy 	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
863c4b17afbSGanapatrao Kulkarni 	pages = __iommu_dma_alloc_pages(dev, count, alloc_sizes >> PAGE_SHIFT,
864c4b17afbSGanapatrao Kulkarni 					gfp);
8650db2e5d1SRobin Murphy 	if (!pages)
8660db2e5d1SRobin Murphy 		return NULL;
8670db2e5d1SRobin Murphy 
868842fe519SRobin Murphy 	size = iova_align(iovad, size);
869842fe519SRobin Murphy 	iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
8700db2e5d1SRobin Murphy 	if (!iova)
8710db2e5d1SRobin Murphy 		goto out_free_pages;
8720db2e5d1SRobin Murphy 
87396d57808SJason Gunthorpe 	/*
87496d57808SJason Gunthorpe 	 * Remove the zone/policy flags from the GFP - these are applied to the
87596d57808SJason Gunthorpe 	 * __iommu_dma_alloc_pages() but are not used for the supporting
87696d57808SJason Gunthorpe 	 * internal allocations that follow.
87796d57808SJason Gunthorpe 	 */
87896d57808SJason Gunthorpe 	gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM | __GFP_COMP);
87996d57808SJason Gunthorpe 
88096d57808SJason Gunthorpe 	if (sg_alloc_table_from_pages(sgt, pages, count, 0, size, gfp))
8810db2e5d1SRobin Murphy 		goto out_free_iova;
8820db2e5d1SRobin Murphy 
88321b95aafSChristoph Hellwig 	if (!(ioprot & IOMMU_CACHE)) {
88423f88e0aSChristoph Hellwig 		struct scatterlist *sg;
88523f88e0aSChristoph Hellwig 		int i;
88623f88e0aSChristoph Hellwig 
8878230ce9aSChristoph Hellwig 		for_each_sg(sgt->sgl, sg, sgt->orig_nents, i)
88823f88e0aSChristoph Hellwig 			arch_dma_prep_coherent(sg_page(sg), sg->length);
8890db2e5d1SRobin Murphy 	}
8900db2e5d1SRobin Murphy 
891f2b2c051SJason Gunthorpe 	ret = iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, ioprot,
89296d57808SJason Gunthorpe 			   gfp);
893a3884774SYunfei Wang 	if (ret < 0 || ret < size)
8940db2e5d1SRobin Murphy 		goto out_free_sg;
8950db2e5d1SRobin Murphy 
8968230ce9aSChristoph Hellwig 	sgt->sgl->dma_address = iova;
897e817ee5fSChristoph Hellwig 	sgt->sgl->dma_length = size;
8988230ce9aSChristoph Hellwig 	return pages;
8990db2e5d1SRobin Murphy 
9000db2e5d1SRobin Murphy out_free_sg:
9018230ce9aSChristoph Hellwig 	sg_free_table(sgt);
9020db2e5d1SRobin Murphy out_free_iova:
9032a2b8eaaSTom Murphy 	iommu_dma_free_iova(cookie, iova, size, NULL);
9040db2e5d1SRobin Murphy out_free_pages:
9050db2e5d1SRobin Murphy 	__iommu_dma_free_pages(pages, count);
9060db2e5d1SRobin Murphy 	return NULL;
9070db2e5d1SRobin Murphy }
9080db2e5d1SRobin Murphy 
9098230ce9aSChristoph Hellwig static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
9108230ce9aSChristoph Hellwig 		dma_addr_t *dma_handle, gfp_t gfp, pgprot_t prot,
9118230ce9aSChristoph Hellwig 		unsigned long attrs)
9128230ce9aSChristoph Hellwig {
9138230ce9aSChristoph Hellwig 	struct page **pages;
9148230ce9aSChristoph Hellwig 	struct sg_table sgt;
9158230ce9aSChristoph Hellwig 	void *vaddr;
9168230ce9aSChristoph Hellwig 
9178230ce9aSChristoph Hellwig 	pages = __iommu_dma_alloc_noncontiguous(dev, size, &sgt, gfp, prot,
9188230ce9aSChristoph Hellwig 						attrs);
9198230ce9aSChristoph Hellwig 	if (!pages)
9208230ce9aSChristoph Hellwig 		return NULL;
9218230ce9aSChristoph Hellwig 	*dma_handle = sgt.sgl->dma_address;
9228230ce9aSChristoph Hellwig 	sg_free_table(&sgt);
9238230ce9aSChristoph Hellwig 	vaddr = dma_common_pages_remap(pages, size, prot,
9248230ce9aSChristoph Hellwig 			__builtin_return_address(0));
9258230ce9aSChristoph Hellwig 	if (!vaddr)
9268230ce9aSChristoph Hellwig 		goto out_unmap;
9278230ce9aSChristoph Hellwig 	return vaddr;
9288230ce9aSChristoph Hellwig 
9298230ce9aSChristoph Hellwig out_unmap:
9308230ce9aSChristoph Hellwig 	__iommu_dma_unmap(dev, *dma_handle, size);
9318230ce9aSChristoph Hellwig 	__iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
9328230ce9aSChristoph Hellwig 	return NULL;
9338230ce9aSChristoph Hellwig }
9348230ce9aSChristoph Hellwig 
935e817ee5fSChristoph Hellwig static struct sg_table *iommu_dma_alloc_noncontiguous(struct device *dev,
936e817ee5fSChristoph Hellwig 		size_t size, enum dma_data_direction dir, gfp_t gfp,
937e817ee5fSChristoph Hellwig 		unsigned long attrs)
938e817ee5fSChristoph Hellwig {
939e817ee5fSChristoph Hellwig 	struct dma_sgt_handle *sh;
940e817ee5fSChristoph Hellwig 
941e817ee5fSChristoph Hellwig 	sh = kmalloc(sizeof(*sh), gfp);
942e817ee5fSChristoph Hellwig 	if (!sh)
943e817ee5fSChristoph Hellwig 		return NULL;
944e817ee5fSChristoph Hellwig 
945e817ee5fSChristoph Hellwig 	sh->pages = __iommu_dma_alloc_noncontiguous(dev, size, &sh->sgt, gfp,
946e817ee5fSChristoph Hellwig 						    PAGE_KERNEL, attrs);
947e817ee5fSChristoph Hellwig 	if (!sh->pages) {
948e817ee5fSChristoph Hellwig 		kfree(sh);
949e817ee5fSChristoph Hellwig 		return NULL;
950e817ee5fSChristoph Hellwig 	}
951e817ee5fSChristoph Hellwig 	return &sh->sgt;
952e817ee5fSChristoph Hellwig }
953e817ee5fSChristoph Hellwig 
954e817ee5fSChristoph Hellwig static void iommu_dma_free_noncontiguous(struct device *dev, size_t size,
955e817ee5fSChristoph Hellwig 		struct sg_table *sgt, enum dma_data_direction dir)
956e817ee5fSChristoph Hellwig {
957e817ee5fSChristoph Hellwig 	struct dma_sgt_handle *sh = sgt_handle(sgt);
958e817ee5fSChristoph Hellwig 
959e817ee5fSChristoph Hellwig 	__iommu_dma_unmap(dev, sgt->sgl->dma_address, size);
960e817ee5fSChristoph Hellwig 	__iommu_dma_free_pages(sh->pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
961e817ee5fSChristoph Hellwig 	sg_free_table(&sh->sgt);
9620fbea680SEzequiel Garcia 	kfree(sh);
963e817ee5fSChristoph Hellwig }
964e817ee5fSChristoph Hellwig 
96506d60728SChristoph Hellwig static void iommu_dma_sync_single_for_cpu(struct device *dev,
96606d60728SChristoph Hellwig 		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
9670db2e5d1SRobin Murphy {
96806d60728SChristoph Hellwig 	phys_addr_t phys;
9690db2e5d1SRobin Murphy 
970861370f4SCatalin Marinas 	if (dev_is_dma_coherent(dev) && !dev_use_swiotlb(dev, size, dir))
97106d60728SChristoph Hellwig 		return;
97206d60728SChristoph Hellwig 
97306d60728SChristoph Hellwig 	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
97482612d66STom Murphy 	if (!dev_is_dma_coherent(dev))
97556e35f9cSChristoph Hellwig 		arch_sync_dma_for_cpu(phys, size, dir);
97682612d66STom Murphy 
9777fd856aaSClaire Chang 	if (is_swiotlb_buffer(dev, phys))
97880808d27SChristoph Hellwig 		swiotlb_sync_single_for_cpu(dev, phys, size, dir);
9791cc896edSRobin Murphy }
9801cc896edSRobin Murphy 
98106d60728SChristoph Hellwig static void iommu_dma_sync_single_for_device(struct device *dev,
98206d60728SChristoph Hellwig 		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
98351f8cc9eSRobin Murphy {
98406d60728SChristoph Hellwig 	phys_addr_t phys;
98506d60728SChristoph Hellwig 
986861370f4SCatalin Marinas 	if (dev_is_dma_coherent(dev) && !dev_use_swiotlb(dev, size, dir))
98706d60728SChristoph Hellwig 		return;
98806d60728SChristoph Hellwig 
98906d60728SChristoph Hellwig 	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
9907fd856aaSClaire Chang 	if (is_swiotlb_buffer(dev, phys))
99180808d27SChristoph Hellwig 		swiotlb_sync_single_for_device(dev, phys, size, dir);
99282612d66STom Murphy 
99382612d66STom Murphy 	if (!dev_is_dma_coherent(dev))
99456e35f9cSChristoph Hellwig 		arch_sync_dma_for_device(phys, size, dir);
99551f8cc9eSRobin Murphy }
99651f8cc9eSRobin Murphy 
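/*
 * Editorial sketch of the driver-facing pattern behind these hooks: a driver
 * that inspects a device-written buffer hands ownership back and forth via
 * the streaming API (process_rx_buffer() is illustrative, not a real helper):
 *
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	process_rx_buffer(buf, len);
 *	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 *
 * For bounced (swiotlb) buffers the copy in/out of the bounce buffer happens
 * here as well, which is why the swiotlb calls sit next to the cache
 * maintenance above.
 */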
99706d60728SChristoph Hellwig static void iommu_dma_sync_sg_for_cpu(struct device *dev,
99806d60728SChristoph Hellwig 		struct scatterlist *sgl, int nelems,
99906d60728SChristoph Hellwig 		enum dma_data_direction dir)
10000db2e5d1SRobin Murphy {
100106d60728SChristoph Hellwig 	struct scatterlist *sg;
100206d60728SChristoph Hellwig 	int i;
100306d60728SChristoph Hellwig 
1004861370f4SCatalin Marinas 	if (sg_dma_is_swiotlb(sgl))
100508ae5d4aSDavid Stevens 		for_each_sg(sgl, sg, nelems, i)
100608ae5d4aSDavid Stevens 			iommu_dma_sync_single_for_cpu(dev, sg_dma_address(sg),
100780808d27SChristoph Hellwig 						      sg->length, dir);
100808ae5d4aSDavid Stevens 	else if (!dev_is_dma_coherent(dev))
100908ae5d4aSDavid Stevens 		for_each_sg(sgl, sg, nelems, i)
101008ae5d4aSDavid Stevens 			arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
101106d60728SChristoph Hellwig }
101206d60728SChristoph Hellwig 
101306d60728SChristoph Hellwig static void iommu_dma_sync_sg_for_device(struct device *dev,
101406d60728SChristoph Hellwig 		struct scatterlist *sgl, int nelems,
101506d60728SChristoph Hellwig 		enum dma_data_direction dir)
101606d60728SChristoph Hellwig {
101706d60728SChristoph Hellwig 	struct scatterlist *sg;
101806d60728SChristoph Hellwig 	int i;
101906d60728SChristoph Hellwig 
1020861370f4SCatalin Marinas 	if (sg_dma_is_swiotlb(sgl))
102108ae5d4aSDavid Stevens 		for_each_sg(sgl, sg, nelems, i)
102208ae5d4aSDavid Stevens 			iommu_dma_sync_single_for_device(dev,
102308ae5d4aSDavid Stevens 							 sg_dma_address(sg),
102480808d27SChristoph Hellwig 							 sg->length, dir);
102508ae5d4aSDavid Stevens 	else if (!dev_is_dma_coherent(dev))
102608ae5d4aSDavid Stevens 		for_each_sg(sgl, sg, nelems, i)
102756e35f9cSChristoph Hellwig 			arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
102806d60728SChristoph Hellwig }
102906d60728SChristoph Hellwig 
103006d60728SChristoph Hellwig static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
103106d60728SChristoph Hellwig 		unsigned long offset, size_t size, enum dma_data_direction dir,
103206d60728SChristoph Hellwig 		unsigned long attrs)
103306d60728SChristoph Hellwig {
103406d60728SChristoph Hellwig 	phys_addr_t phys = page_to_phys(page) + offset;
103506d60728SChristoph Hellwig 	bool coherent = dev_is_dma_coherent(dev);
10369b49bbc2SDavid Stevens 	int prot = dma_info_to_prot(dir, coherent, attrs);
10379b49bbc2SDavid Stevens 	struct iommu_domain *domain = iommu_get_dma_domain(dev);
10389b49bbc2SDavid Stevens 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
10399b49bbc2SDavid Stevens 	struct iova_domain *iovad = &cookie->iovad;
10409b49bbc2SDavid Stevens 	dma_addr_t iova, dma_mask = dma_get_mask(dev);
104106d60728SChristoph Hellwig 
10429b49bbc2SDavid Stevens 	/*
10439b49bbc2SDavid Stevens 	 * If both the physical buffer start address and size are
10449b49bbc2SDavid Stevens 	 * aligned to the IOVA granule, we don't need a bounce buffer.
10459b49bbc2SDavid Stevens 	 */
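	/*
	 * A worked example of the check below (illustrative): testing
	 * iova_offset(iovad, phys | size) covers both conditions at once,
	 * since any low-order bit set in either value survives the OR. With
	 * 4K granules, phys = 0x41000 and size = 0x1000 give offset 0 (no
	 * bounce), while phys = 0x41004 or size = 0xffc leave a nonzero
	 * offset and force the transaction through a bounce buffer.
	 */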
1046861370f4SCatalin Marinas 	if (dev_use_swiotlb(dev, size, dir) &&
1047861370f4SCatalin Marinas 	    iova_offset(iovad, phys | size)) {
10489b49bbc2SDavid Stevens 		void *padding_start;
10492cbc61a1SDavid Stevens 		size_t padding_size, aligned_size;
10509b49bbc2SDavid Stevens 
1051f316ba0aSMario Limonciello 		if (!is_swiotlb_active(dev)) {
1052f316ba0aSMario Limonciello 			dev_warn_once(dev, "DMA bounce buffers are inactive, unable to map unaligned transaction.\n");
1053f316ba0aSMario Limonciello 			return DMA_MAPPING_ERROR;
1054f316ba0aSMario Limonciello 		}
1055f316ba0aSMario Limonciello 
1056509b9e74SIsaac J. Manjarres 		trace_swiotlb_bounced(dev, phys, size);
1057509b9e74SIsaac J. Manjarres 
10589b49bbc2SDavid Stevens 		aligned_size = iova_align(iovad, size);
1059e81e99baSDavid Stevens 		phys = swiotlb_tbl_map_single(dev, phys, size, aligned_size,
1060e81e99baSDavid Stevens 					      iova_mask(iovad), dir, attrs);
10619b49bbc2SDavid Stevens 
10629b49bbc2SDavid Stevens 		if (phys == DMA_MAPPING_ERROR)
10639b49bbc2SDavid Stevens 			return DMA_MAPPING_ERROR;
10649b49bbc2SDavid Stevens 
10659b49bbc2SDavid Stevens 	/* Clean up the padding area. */
10669b49bbc2SDavid Stevens 		padding_start = phys_to_virt(phys);
10679b49bbc2SDavid Stevens 		padding_size = aligned_size;
10689b49bbc2SDavid Stevens 
10699b49bbc2SDavid Stevens 		if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
10709b49bbc2SDavid Stevens 		    (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)) {
10719b49bbc2SDavid Stevens 			padding_start += size;
10729b49bbc2SDavid Stevens 			padding_size -= size;
10739b49bbc2SDavid Stevens 		}
10749b49bbc2SDavid Stevens 
10759b49bbc2SDavid Stevens 		memset(padding_start, 0, padding_size);
10769b49bbc2SDavid Stevens 	}
10779b49bbc2SDavid Stevens 
10789b49bbc2SDavid Stevens 	if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
107956e35f9cSChristoph Hellwig 		arch_sync_dma_for_device(phys, size, dir);
10809b49bbc2SDavid Stevens 
10812cbc61a1SDavid Stevens 	iova = __iommu_dma_map(dev, phys, size, prot, dma_mask);
10829b49bbc2SDavid Stevens 	if (iova == DMA_MAPPING_ERROR && is_swiotlb_buffer(dev, phys))
10839b49bbc2SDavid Stevens 		swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
10849b49bbc2SDavid Stevens 	return iova;
108506d60728SChristoph Hellwig }
108606d60728SChristoph Hellwig 
108706d60728SChristoph Hellwig static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
108806d60728SChristoph Hellwig 		size_t size, enum dma_data_direction dir, unsigned long attrs)
108906d60728SChristoph Hellwig {
10909b49bbc2SDavid Stevens 	struct iommu_domain *domain = iommu_get_dma_domain(dev);
10919b49bbc2SDavid Stevens 	phys_addr_t phys;
10929b49bbc2SDavid Stevens 
10939b49bbc2SDavid Stevens 	phys = iommu_iova_to_phys(domain, dma_handle);
10949b49bbc2SDavid Stevens 	if (WARN_ON(!phys))
10959b49bbc2SDavid Stevens 		return;
10969b49bbc2SDavid Stevens 
10979b49bbc2SDavid Stevens 	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && !dev_is_dma_coherent(dev))
10989b49bbc2SDavid Stevens 		arch_sync_dma_for_cpu(phys, size, dir);
10999b49bbc2SDavid Stevens 
11009b49bbc2SDavid Stevens 	__iommu_dma_unmap(dev, dma_handle, size);
11019b49bbc2SDavid Stevens 
11029b49bbc2SDavid Stevens 	if (unlikely(is_swiotlb_buffer(dev, phys)))
11039b49bbc2SDavid Stevens 		swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
11040db2e5d1SRobin Murphy }
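
/*
 * Illustrative sketch (not part of this file): drivers reach the pair
 * above via dma_map_page()/dma_unmap_page(). Mapping can fail, e.g. on
 * IOVA exhaustion or an inactive bounce pool, so the result must be
 * checked with dma_mapping_error() before use. Names are hypothetical.
 */
static int __maybe_unused example_map_one(struct device *dev,
		struct page *page, size_t len, dma_addr_t *out)
{
	dma_addr_t dma = dma_map_page(dev, page, 0, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, dma))
		return -ENOMEM;	/* nothing was mapped, nothing to undo */
	*out = dma;
	return 0;
}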
11050db2e5d1SRobin Murphy 
11060db2e5d1SRobin Murphy /*
11070db2e5d1SRobin Murphy  * Prepare a successfully-mapped scatterlist to give back to the caller.
1108809eac54SRobin Murphy  *
1109809eac54SRobin Murphy  * At this point the segments are already laid out by iommu_dma_map_sg() to
1110809eac54SRobin Murphy  * avoid individually crossing any boundaries, so we merely need to check a
1111809eac54SRobin Murphy  * segment's start address to avoid concatenating across one.
11120db2e5d1SRobin Murphy  */
11130db2e5d1SRobin Murphy static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
11140db2e5d1SRobin Murphy 		dma_addr_t dma_addr)
11150db2e5d1SRobin Murphy {
1116809eac54SRobin Murphy 	struct scatterlist *s, *cur = sg;
1117809eac54SRobin Murphy 	unsigned long seg_mask = dma_get_seg_boundary(dev);
1118809eac54SRobin Murphy 	unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
1119809eac54SRobin Murphy 	int i, count = 0;
11200db2e5d1SRobin Murphy 
11210db2e5d1SRobin Murphy 	for_each_sg(sg, s, nents, i) {
1122809eac54SRobin Murphy 		/* Restore this segment's original unaligned fields first */
112330280eeeSLogan Gunthorpe 		dma_addr_t s_dma_addr = sg_dma_address(s);
1124809eac54SRobin Murphy 		unsigned int s_iova_off = sg_dma_address(s);
11250db2e5d1SRobin Murphy 		unsigned int s_length = sg_dma_len(s);
1126809eac54SRobin Murphy 		unsigned int s_iova_len = s->length;
11270db2e5d1SRobin Murphy 
1128cad34be7SChristoph Hellwig 		sg_dma_address(s) = DMA_MAPPING_ERROR;
1129809eac54SRobin Murphy 		sg_dma_len(s) = 0;
1130809eac54SRobin Murphy 
1131cb147bbeSRobin Murphy 		if (sg_dma_is_bus_address(s)) {
113230280eeeSLogan Gunthorpe 			if (i > 0)
113330280eeeSLogan Gunthorpe 				cur = sg_next(cur);
113430280eeeSLogan Gunthorpe 
113530280eeeSLogan Gunthorpe 			sg_dma_unmark_bus_address(s);
113630280eeeSLogan Gunthorpe 			sg_dma_address(cur) = s_dma_addr;
113730280eeeSLogan Gunthorpe 			sg_dma_len(cur) = s_length;
113830280eeeSLogan Gunthorpe 			sg_dma_mark_bus_address(cur);
113930280eeeSLogan Gunthorpe 			count++;
114030280eeeSLogan Gunthorpe 			cur_len = 0;
114130280eeeSLogan Gunthorpe 			continue;
114230280eeeSLogan Gunthorpe 		}
114330280eeeSLogan Gunthorpe 
114430280eeeSLogan Gunthorpe 		s->offset += s_iova_off;
114530280eeeSLogan Gunthorpe 		s->length = s_length;
114630280eeeSLogan Gunthorpe 
1147809eac54SRobin Murphy 		/*
1148809eac54SRobin Murphy 		 * Now fill in the real DMA data. If...
1149809eac54SRobin Murphy 		 * - there is a valid output segment to append to
1150809eac54SRobin Murphy 		 * - and this segment starts on an IOVA page boundary
1151809eac54SRobin Murphy 		 * - but doesn't fall at a segment boundary
1152809eac54SRobin Murphy 		 * - and wouldn't make the resulting output segment too long
1153809eac54SRobin Murphy 		 */
1154809eac54SRobin Murphy 		if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
1155ab2cbeb0SRobin Murphy 		    (max_len - cur_len >= s_length)) {
1156809eac54SRobin Murphy 			/* ...then concatenate it with the previous one */
1157809eac54SRobin Murphy 			cur_len += s_length;
1158809eac54SRobin Murphy 		} else {
1159809eac54SRobin Murphy 			/* Otherwise start the next output segment */
1160809eac54SRobin Murphy 			if (i > 0)
1161809eac54SRobin Murphy 				cur = sg_next(cur);
1162809eac54SRobin Murphy 			cur_len = s_length;
1163809eac54SRobin Murphy 			count++;
1164809eac54SRobin Murphy 
1165809eac54SRobin Murphy 			sg_dma_address(cur) = dma_addr + s_iova_off;
11660db2e5d1SRobin Murphy 		}
1167809eac54SRobin Murphy 
1168809eac54SRobin Murphy 		sg_dma_len(cur) = cur_len;
1169809eac54SRobin Murphy 		dma_addr += s_iova_len;
1170809eac54SRobin Murphy 
1171809eac54SRobin Murphy 		if (s_length + s_iova_off < s_iova_len)
1172809eac54SRobin Murphy 			cur_len = 0;
1173809eac54SRobin Murphy 	}
1174809eac54SRobin Murphy 	return count;
11750db2e5d1SRobin Murphy }
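
/*
 * Worked example of the stash/restore scheme above (illustrative): for a
 * segment with s->offset = 0x200 and s->length = 0x300 under 4K granules,
 * iommu_dma_map_sg() stashes 0x200 in sg_dma_address(s) and 0x300 in
 * sg_dma_len(s), then widens the segment to a whole granule. If the IOVA
 * allocation lands at 0x80000000, __finalise_sg() restores the CPU-side
 * offset/length and hands back sg_dma_address(s) = 0x80000200 with
 * sg_dma_len(s) = 0x300.
 */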
11760db2e5d1SRobin Murphy 
11770db2e5d1SRobin Murphy /*
11780db2e5d1SRobin Murphy  * If mapping failed, restore the original list, making sure
11790db2e5d1SRobin Murphy  * the DMA fields are invalidated.
11800db2e5d1SRobin Murphy  */
11810db2e5d1SRobin Murphy static void __invalidate_sg(struct scatterlist *sg, int nents)
11820db2e5d1SRobin Murphy {
11830db2e5d1SRobin Murphy 	struct scatterlist *s;
11840db2e5d1SRobin Murphy 	int i;
11850db2e5d1SRobin Murphy 
11860db2e5d1SRobin Murphy 	for_each_sg(sg, s, nents, i) {
1187cb147bbeSRobin Murphy 		if (sg_dma_is_bus_address(s)) {
118830280eeeSLogan Gunthorpe 			sg_dma_unmark_bus_address(s);
118930280eeeSLogan Gunthorpe 		} else {
1190cad34be7SChristoph Hellwig 			if (sg_dma_address(s) != DMA_MAPPING_ERROR)
119107b48ac4SRobin Murphy 				s->offset += sg_dma_address(s);
11920db2e5d1SRobin Murphy 			if (sg_dma_len(s))
11930db2e5d1SRobin Murphy 				s->length = sg_dma_len(s);
119430280eeeSLogan Gunthorpe 		}
1195cad34be7SChristoph Hellwig 		sg_dma_address(s) = DMA_MAPPING_ERROR;
11960db2e5d1SRobin Murphy 		sg_dma_len(s) = 0;
11970db2e5d1SRobin Murphy 	}
11980db2e5d1SRobin Murphy }
11990db2e5d1SRobin Murphy 
120082612d66STom Murphy static void iommu_dma_unmap_sg_swiotlb(struct device *dev, struct scatterlist *sg,
120182612d66STom Murphy 		int nents, enum dma_data_direction dir, unsigned long attrs)
120282612d66STom Murphy {
120382612d66STom Murphy 	struct scatterlist *s;
120482612d66STom Murphy 	int i;
120582612d66STom Murphy 
120682612d66STom Murphy 	for_each_sg(sg, s, nents, i)
12079b49bbc2SDavid Stevens 		iommu_dma_unmap_page(dev, sg_dma_address(s),
120882612d66STom Murphy 				sg_dma_len(s), dir, attrs);
120982612d66STom Murphy }
121082612d66STom Murphy 
121182612d66STom Murphy static int iommu_dma_map_sg_swiotlb(struct device *dev, struct scatterlist *sg,
121282612d66STom Murphy 		int nents, enum dma_data_direction dir, unsigned long attrs)
121382612d66STom Murphy {
121482612d66STom Murphy 	struct scatterlist *s;
121582612d66STom Murphy 	int i;
121682612d66STom Murphy 
1217861370f4SCatalin Marinas 	sg_dma_mark_swiotlb(sg);
1218861370f4SCatalin Marinas 
121982612d66STom Murphy 	for_each_sg(sg, s, nents, i) {
12209b49bbc2SDavid Stevens 		sg_dma_address(s) = iommu_dma_map_page(dev, sg_page(s),
12219b49bbc2SDavid Stevens 				s->offset, s->length, dir, attrs);
122282612d66STom Murphy 		if (sg_dma_address(s) == DMA_MAPPING_ERROR)
122382612d66STom Murphy 			goto out_unmap;
122482612d66STom Murphy 		sg_dma_len(s) = s->length;
122582612d66STom Murphy 	}
122682612d66STom Murphy 
122782612d66STom Murphy 	return nents;
122882612d66STom Murphy 
122982612d66STom Murphy out_unmap:
123082612d66STom Murphy 	iommu_dma_unmap_sg_swiotlb(dev, sg, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
1231dabb16f6SLogan Gunthorpe 	return -EIO;
123282612d66STom Murphy }
123382612d66STom Murphy 
12340db2e5d1SRobin Murphy /*
12350db2e5d1SRobin Murphy  * The DMA API client is passing in a scatterlist which could describe
12360db2e5d1SRobin Murphy  * any old buffer layout, but the IOMMU API requires everything to be
12370db2e5d1SRobin Murphy  * aligned to IOMMU pages. Hence the need for this complicated bit of
12380db2e5d1SRobin Murphy  * impedance-matching, to be able to hand off a suitably-aligned list,
12390db2e5d1SRobin Murphy  * but still preserve the original offsets and sizes for the caller.
12400db2e5d1SRobin Murphy  */
124106d60728SChristoph Hellwig static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
124206d60728SChristoph Hellwig 		int nents, enum dma_data_direction dir, unsigned long attrs)
12430db2e5d1SRobin Murphy {
124443c5bf11SRobin Murphy 	struct iommu_domain *domain = iommu_get_dma_domain(dev);
1245842fe519SRobin Murphy 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
1246842fe519SRobin Murphy 	struct iova_domain *iovad = &cookie->iovad;
12470db2e5d1SRobin Murphy 	struct scatterlist *s, *prev = NULL;
124806d60728SChristoph Hellwig 	int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);
124930280eeeSLogan Gunthorpe 	struct pci_p2pdma_map_state p2pdma_state = {};
125030280eeeSLogan Gunthorpe 	enum pci_p2pdma_map_type map;
1251842fe519SRobin Murphy 	dma_addr_t iova;
12520db2e5d1SRobin Murphy 	size_t iova_len = 0;
1253809eac54SRobin Murphy 	unsigned long mask = dma_get_seg_boundary(dev);
1254dabb16f6SLogan Gunthorpe 	ssize_t ret;
12550db2e5d1SRobin Murphy 	int i;
12560db2e5d1SRobin Murphy 
1257dabb16f6SLogan Gunthorpe 	if (static_branch_unlikely(&iommu_deferred_attach_enabled)) {
1258dabb16f6SLogan Gunthorpe 		ret = iommu_deferred_attach(dev, domain);
1259ac315f96SLogan Gunthorpe 		if (ret)
1260dabb16f6SLogan Gunthorpe 			goto out;
1261dabb16f6SLogan Gunthorpe 	}
1262795bbbb9STom Murphy 
1263861370f4SCatalin Marinas 	if (dev_use_sg_swiotlb(dev, sg, nents, dir))
126482612d66STom Murphy 		return iommu_dma_map_sg_swiotlb(dev, sg, nents, dir, attrs);
126582612d66STom Murphy 
12660db2e5d1SRobin Murphy 	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
12670db2e5d1SRobin Murphy 		iommu_dma_sync_sg_for_device(dev, sg, nents, dir);
12680db2e5d1SRobin Murphy 
12690db2e5d1SRobin Murphy 	/*
12700db2e5d1SRobin Murphy 	 * Work out how much IOVA space we need, and align the segments to
12710db2e5d1SRobin Murphy 	 * IOVA granules for the IOMMU driver to handle. With some clever
12720db2e5d1SRobin Murphy 	 * trickery we can modify the list in-place, but reversibly, by
1273809eac54SRobin Murphy 	 * stashing the unaligned parts in the as-yet-unused DMA fields.
12740db2e5d1SRobin Murphy 	 */
12750db2e5d1SRobin Murphy 	for_each_sg(sg, s, nents, i) {
1276809eac54SRobin Murphy 		size_t s_iova_off = iova_offset(iovad, s->offset);
12770db2e5d1SRobin Murphy 		size_t s_length = s->length;
1278809eac54SRobin Murphy 		size_t pad_len = (mask - iova_len + 1) & mask;
12790db2e5d1SRobin Murphy 
128030280eeeSLogan Gunthorpe 		if (is_pci_p2pdma_page(sg_page(s))) {
128130280eeeSLogan Gunthorpe 			map = pci_p2pdma_map_segment(&p2pdma_state, dev, s);
128230280eeeSLogan Gunthorpe 			switch (map) {
128330280eeeSLogan Gunthorpe 			case PCI_P2PDMA_MAP_BUS_ADDR:
128430280eeeSLogan Gunthorpe 				/*
128530280eeeSLogan Gunthorpe 				 * iommu_map_sg() will skip this segment as
128630280eeeSLogan Gunthorpe 				 * it is marked as a bus address;
128730280eeeSLogan Gunthorpe 				 * __finalise_sg() will copy the DMA address
128830280eeeSLogan Gunthorpe 				 * into the output segment.
128930280eeeSLogan Gunthorpe 				 */
129030280eeeSLogan Gunthorpe 				continue;
129130280eeeSLogan Gunthorpe 			case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
129230280eeeSLogan Gunthorpe 				/*
129330280eeeSLogan Gunthorpe 				 * P2P transfers through the host bridge are
129430280eeeSLogan Gunthorpe 				 * mapped with regular IOVAs, so do nothing
129530280eeeSLogan Gunthorpe 				 * here and fall through to the code below.
129630280eeeSLogan Gunthorpe 				 */
129730280eeeSLogan Gunthorpe 				break;
129830280eeeSLogan Gunthorpe 			default:
129930280eeeSLogan Gunthorpe 				ret = -EREMOTEIO;
130030280eeeSLogan Gunthorpe 				goto out_restore_sg;
130130280eeeSLogan Gunthorpe 			}
130230280eeeSLogan Gunthorpe 		}
130330280eeeSLogan Gunthorpe 
1304809eac54SRobin Murphy 		sg_dma_address(s) = s_iova_off;
13050db2e5d1SRobin Murphy 		sg_dma_len(s) = s_length;
1306809eac54SRobin Murphy 		s->offset -= s_iova_off;
1307809eac54SRobin Murphy 		s_length = iova_align(iovad, s_length + s_iova_off);
13080db2e5d1SRobin Murphy 		s->length = s_length;
13090db2e5d1SRobin Murphy 
13100db2e5d1SRobin Murphy 		/*
1311809eac54SRobin Murphy 		 * Due to the alignment of our single IOVA allocation, we can
1312809eac54SRobin Murphy 		 * depend on these assumptions about the segment boundary mask:
1313809eac54SRobin Murphy 		 * - If mask size >= IOVA size, then the IOVA range cannot
1314809eac54SRobin Murphy 		 *   possibly fall across a boundary, so we don't care.
1315809eac54SRobin Murphy 		 * - If mask size < IOVA size, then the IOVA range must start
1316809eac54SRobin Murphy 		 *   exactly on a boundary, therefore we can lay things out
1317809eac54SRobin Murphy 		 *   based purely on segment lengths without needing to know
1318809eac54SRobin Murphy 		 *   the actual addresses beforehand.
1319809eac54SRobin Murphy 		 * - The mask must be a power of 2, so pad_len == 0 if
1320809eac54SRobin Murphy 		 *   iova_len == 0, thus we cannot dereference prev the first
1321809eac54SRobin Murphy 		 *   time through here (i.e. before it has a meaningful value).
13220db2e5d1SRobin Murphy 		 */
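		/*
		 * Worked example (illustrative): with a 64K boundary mask
		 * (0xffff) and iova_len = 0x9000 accumulated so far,
		 * pad_len = (0xffff - 0x9000 + 1) & 0xffff = 0x7000. A
		 * following segment too long to fit in that window is pushed
		 * to start exactly on the 64K boundary by padding the
		 * previous segment; a shorter one is left where it is.
		 */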
1323809eac54SRobin Murphy 		if (pad_len && pad_len < s_length - 1) {
13240db2e5d1SRobin Murphy 			prev->length += pad_len;
13250db2e5d1SRobin Murphy 			iova_len += pad_len;
13260db2e5d1SRobin Murphy 		}
13270db2e5d1SRobin Murphy 
13280db2e5d1SRobin Murphy 		iova_len += s_length;
13290db2e5d1SRobin Murphy 		prev = s;
13300db2e5d1SRobin Murphy 	}
13310db2e5d1SRobin Murphy 
133230280eeeSLogan Gunthorpe 	if (!iova_len)
133330280eeeSLogan Gunthorpe 		return __finalise_sg(dev, sg, nents, 0);
133430280eeeSLogan Gunthorpe 
1335842fe519SRobin Murphy 	iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
1336dabb16f6SLogan Gunthorpe 	if (!iova) {
1337dabb16f6SLogan Gunthorpe 		ret = -ENOMEM;
13380db2e5d1SRobin Murphy 		goto out_restore_sg;
1339dabb16f6SLogan Gunthorpe 	}
13400db2e5d1SRobin Murphy 
13410db2e5d1SRobin Murphy 	/*
13420db2e5d1SRobin Murphy 	 * We'll leave any physical concatenation to the IOMMU driver's
13430db2e5d1SRobin Murphy 	 * implementation - it knows better than we do.
13440db2e5d1SRobin Murphy 	 */
1345f2b2c051SJason Gunthorpe 	ret = iommu_map_sg(domain, iova, sg, nents, prot, GFP_ATOMIC);
1346a3884774SYunfei Wang 	if (ret < 0 || ret < iova_len)
13470db2e5d1SRobin Murphy 		goto out_free_iova;
13480db2e5d1SRobin Murphy 
1349842fe519SRobin Murphy 	return __finalise_sg(dev, sg, nents, iova);
13500db2e5d1SRobin Murphy 
13510db2e5d1SRobin Murphy out_free_iova:
13522a2b8eaaSTom Murphy 	iommu_dma_free_iova(cookie, iova, iova_len, NULL);
13530db2e5d1SRobin Murphy out_restore_sg:
13540db2e5d1SRobin Murphy 	__invalidate_sg(sg, nents);
1355dabb16f6SLogan Gunthorpe out:
135630280eeeSLogan Gunthorpe 	if (ret != -ENOMEM && ret != -EREMOTEIO)
1357dabb16f6SLogan Gunthorpe 		return -EINVAL;
1358dabb16f6SLogan Gunthorpe 	return ret;
13590db2e5d1SRobin Murphy }
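
/*
 * Illustrative sketch (not part of this file): callers reach the
 * function above through dma_map_sgtable(), which returns 0 or a
 * negative errno (-EREMOTEIO for rejected P2PDMA segments, -ENOMEM on
 * IOVA exhaustion, -EINVAL otherwise) and updates sgt->nents to the
 * possibly smaller number of coalesced DMA segments. Names below are
 * hypothetical.
 */
static int __maybe_unused example_map_table(struct device *dev,
		struct sg_table *sgt)
{
	struct scatterlist *sg;
	int i, ret;

	ret = dma_map_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
	if (ret)
		return ret;
	/* Walk the coalesced DMA segments, not the original CPU pages */
	for_each_sgtable_dma_sg(sgt, sg, i)
		pr_debug("seg %d: %pad + %u\n", i,
			 &sg_dma_address(sg), sg_dma_len(sg));
	return 0;
}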
13600db2e5d1SRobin Murphy 
136106d60728SChristoph Hellwig static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
136206d60728SChristoph Hellwig 		int nents, enum dma_data_direction dir, unsigned long attrs)
13630db2e5d1SRobin Murphy {
136430280eeeSLogan Gunthorpe 	dma_addr_t end = 0, start;
1365842fe519SRobin Murphy 	struct scatterlist *tmp;
1366842fe519SRobin Murphy 	int i;
136706d60728SChristoph Hellwig 
1368861370f4SCatalin Marinas 	if (sg_dma_is_swiotlb(sg)) {
136982612d66STom Murphy 		iommu_dma_unmap_sg_swiotlb(dev, sg, nents, dir, attrs);
137082612d66STom Murphy 		return;
137182612d66STom Murphy 	}
137282612d66STom Murphy 
1373ee9d4097SDavid Stevens 	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
1374ee9d4097SDavid Stevens 		iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir);
1375ee9d4097SDavid Stevens 
13760db2e5d1SRobin Murphy 	/*
13770db2e5d1SRobin Murphy 	 * The scatterlist segments are mapped into a single
137830280eeeSLogan Gunthorpe 	 * contiguous IOVA allocation, so only the start and end
137930280eeeSLogan Gunthorpe 	 * points need to be determined.
13800db2e5d1SRobin Murphy 	 */
138130280eeeSLogan Gunthorpe 	for_each_sg(sg, tmp, nents, i) {
1382cb147bbeSRobin Murphy 		if (sg_dma_is_bus_address(tmp)) {
138330280eeeSLogan Gunthorpe 			sg_dma_unmark_bus_address(tmp);
138430280eeeSLogan Gunthorpe 			continue;
138530280eeeSLogan Gunthorpe 		}
138630280eeeSLogan Gunthorpe 
1387842fe519SRobin Murphy 		if (sg_dma_len(tmp) == 0)
1388842fe519SRobin Murphy 			break;
138930280eeeSLogan Gunthorpe 
139030280eeeSLogan Gunthorpe 		start = sg_dma_address(tmp);
139130280eeeSLogan Gunthorpe 		break;
1392842fe519SRobin Murphy 	}
139330280eeeSLogan Gunthorpe 
139430280eeeSLogan Gunthorpe 	nents -= i;
139530280eeeSLogan Gunthorpe 	for_each_sg(tmp, tmp, nents, i) {
1396cb147bbeSRobin Murphy 		if (sg_dma_is_bus_address(tmp)) {
139730280eeeSLogan Gunthorpe 			sg_dma_unmark_bus_address(tmp);
139830280eeeSLogan Gunthorpe 			continue;
139930280eeeSLogan Gunthorpe 		}
140030280eeeSLogan Gunthorpe 
140130280eeeSLogan Gunthorpe 		if (sg_dma_len(tmp) == 0)
140230280eeeSLogan Gunthorpe 			break;
140330280eeeSLogan Gunthorpe 
140430280eeeSLogan Gunthorpe 		end = sg_dma_address(tmp) + sg_dma_len(tmp);
140530280eeeSLogan Gunthorpe 	}
140630280eeeSLogan Gunthorpe 
140730280eeeSLogan Gunthorpe 	if (end)
1408b61d271eSRobin Murphy 		__iommu_dma_unmap(dev, start, end - start);
14090db2e5d1SRobin Murphy }
14100db2e5d1SRobin Murphy 
141106d60728SChristoph Hellwig static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
141251f8cc9eSRobin Murphy 		size_t size, enum dma_data_direction dir, unsigned long attrs)
141351f8cc9eSRobin Murphy {
141451f8cc9eSRobin Murphy 	return __iommu_dma_map(dev, phys, size,
14156e235020STom Murphy 			dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO,
14166e235020STom Murphy 			dma_get_mask(dev));
141751f8cc9eSRobin Murphy }
141851f8cc9eSRobin Murphy 
141906d60728SChristoph Hellwig static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
142051f8cc9eSRobin Murphy 		size_t size, enum dma_data_direction dir, unsigned long attrs)
142151f8cc9eSRobin Murphy {
1422b61d271eSRobin Murphy 	__iommu_dma_unmap(dev, handle, size);
142351f8cc9eSRobin Murphy }
142451f8cc9eSRobin Murphy 
14258553f6e6SRobin Murphy static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
1426bcf4b9c4SRobin Murphy {
1427bcf4b9c4SRobin Murphy 	size_t alloc_size = PAGE_ALIGN(size);
1428bcf4b9c4SRobin Murphy 	int count = alloc_size >> PAGE_SHIFT;
1429bcf4b9c4SRobin Murphy 	struct page *page = NULL, **pages = NULL;
1430bcf4b9c4SRobin Murphy 
1431bcf4b9c4SRobin Murphy 	/* Non-coherent atomic allocation? Easy */
1432e6475eb0SChristoph Hellwig 	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
1433c84dc6e6SDavid Rientjes 	    dma_free_from_pool(dev, cpu_addr, alloc_size))
1434bcf4b9c4SRobin Murphy 		return;
1435bcf4b9c4SRobin Murphy 
1436f5ff79fdSChristoph Hellwig 	if (is_vmalloc_addr(cpu_addr)) {
1437bcf4b9c4SRobin Murphy 		/*
1438bcf4b9c4SRobin Murphy 		 * If the address is remapped, then it's either non-coherent
1439bcf4b9c4SRobin Murphy 		 * or highmem CMA, or an iommu_dma_alloc_remap() construction.
1440bcf4b9c4SRobin Murphy 		 */
14415cf45379SChristoph Hellwig 		pages = dma_common_find_pages(cpu_addr);
1442bcf4b9c4SRobin Murphy 		if (!pages)
1443bcf4b9c4SRobin Murphy 			page = vmalloc_to_page(cpu_addr);
144451231740SChristoph Hellwig 		dma_common_free_remap(cpu_addr, alloc_size);
1445bcf4b9c4SRobin Murphy 	} else {
1446bcf4b9c4SRobin Murphy 		/* Lowmem means a coherent atomic or CMA allocation */
1447bcf4b9c4SRobin Murphy 		page = virt_to_page(cpu_addr);
1448bcf4b9c4SRobin Murphy 	}
1449bcf4b9c4SRobin Murphy 
1450bcf4b9c4SRobin Murphy 	if (pages)
1451bcf4b9c4SRobin Murphy 		__iommu_dma_free_pages(pages, count);
1452591fcf3bSNicolin Chen 	if (page)
1453591fcf3bSNicolin Chen 		dma_free_contiguous(dev, page, alloc_size);
1454bcf4b9c4SRobin Murphy }
1455bcf4b9c4SRobin Murphy 
14568553f6e6SRobin Murphy static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
14578553f6e6SRobin Murphy 		dma_addr_t handle, unsigned long attrs)
14588553f6e6SRobin Murphy {
14598553f6e6SRobin Murphy 	__iommu_dma_unmap(dev, handle, size);
14608553f6e6SRobin Murphy 	__iommu_dma_free(dev, size, cpu_addr);
14618553f6e6SRobin Murphy }
14628553f6e6SRobin Murphy 
1463ee1ef05dSChristoph Hellwig static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
1464ee1ef05dSChristoph Hellwig 		struct page **pagep, gfp_t gfp, unsigned long attrs)
146506d60728SChristoph Hellwig {
146606d60728SChristoph Hellwig 	bool coherent = dev_is_dma_coherent(dev);
14679ad5d6edSRobin Murphy 	size_t alloc_size = PAGE_ALIGN(size);
146890ae409fSChristoph Hellwig 	int node = dev_to_node(dev);
14699a4ab94aSChristoph Hellwig 	struct page *page = NULL;
14709ad5d6edSRobin Murphy 	void *cpu_addr;
147106d60728SChristoph Hellwig 
1472591fcf3bSNicolin Chen 	page = dma_alloc_contiguous(dev, alloc_size, gfp);
147306d60728SChristoph Hellwig 	if (!page)
147490ae409fSChristoph Hellwig 		page = alloc_pages_node(node, gfp, get_order(alloc_size));
147590ae409fSChristoph Hellwig 	if (!page)
147606d60728SChristoph Hellwig 		return NULL;
147706d60728SChristoph Hellwig 
1478f5ff79fdSChristoph Hellwig 	if (!coherent || PageHighMem(page)) {
147933dcb37cSChristoph Hellwig 		pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
14808680aa5aSRobin Murphy 
14819ad5d6edSRobin Murphy 		cpu_addr = dma_common_contiguous_remap(page, alloc_size,
148251231740SChristoph Hellwig 				prot, __builtin_return_address(0));
14839ad5d6edSRobin Murphy 		if (!cpu_addr)
1484ee1ef05dSChristoph Hellwig 			goto out_free_pages;
1485072bebc0SRobin Murphy 
148606d60728SChristoph Hellwig 		if (!coherent)
14879ad5d6edSRobin Murphy 			arch_dma_prep_coherent(page, size);
14888680aa5aSRobin Murphy 	} else {
14899ad5d6edSRobin Murphy 		cpu_addr = page_address(page);
14908680aa5aSRobin Murphy 	}
1491ee1ef05dSChristoph Hellwig 
1492ee1ef05dSChristoph Hellwig 	*pagep = page;
14939ad5d6edSRobin Murphy 	memset(cpu_addr, 0, alloc_size);
14949ad5d6edSRobin Murphy 	return cpu_addr;
1495072bebc0SRobin Murphy out_free_pages:
1496591fcf3bSNicolin Chen 	dma_free_contiguous(dev, page, alloc_size);
1497072bebc0SRobin Murphy 	return NULL;
149806d60728SChristoph Hellwig }
149906d60728SChristoph Hellwig 
1500ee1ef05dSChristoph Hellwig static void *iommu_dma_alloc(struct device *dev, size_t size,
1501ee1ef05dSChristoph Hellwig 		dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
1502ee1ef05dSChristoph Hellwig {
1503ee1ef05dSChristoph Hellwig 	bool coherent = dev_is_dma_coherent(dev);
1504ee1ef05dSChristoph Hellwig 	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
1505ee1ef05dSChristoph Hellwig 	struct page *page = NULL;
1506ee1ef05dSChristoph Hellwig 	void *cpu_addr;
1507ee1ef05dSChristoph Hellwig 
1508ee1ef05dSChristoph Hellwig 	gfp |= __GFP_ZERO;
1509ee1ef05dSChristoph Hellwig 
1510f5ff79fdSChristoph Hellwig 	if (gfpflags_allow_blocking(gfp) &&
1511e8d39a90SChristoph Hellwig 	    !(attrs & DMA_ATTR_FORCE_CONTIGUOUS)) {
1512e8d39a90SChristoph Hellwig 		return iommu_dma_alloc_remap(dev, size, handle, gfp,
1513e8d39a90SChristoph Hellwig 				dma_pgprot(dev, PAGE_KERNEL, attrs), attrs);
1514e8d39a90SChristoph Hellwig 	}
1515ee1ef05dSChristoph Hellwig 
1516e6475eb0SChristoph Hellwig 	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
1517e6475eb0SChristoph Hellwig 	    !gfpflags_allow_blocking(gfp) && !coherent)
15189420139fSChristoph Hellwig 		page = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &cpu_addr,
15199420139fSChristoph Hellwig 					       gfp, NULL);
1520ee1ef05dSChristoph Hellwig 	else
1521ee1ef05dSChristoph Hellwig 		cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs);
1522ee1ef05dSChristoph Hellwig 	if (!cpu_addr)
1523ee1ef05dSChristoph Hellwig 		return NULL;
1524ee1ef05dSChristoph Hellwig 
15256e235020STom Murphy 	*handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot,
15266e235020STom Murphy 			dev->coherent_dma_mask);
1527ee1ef05dSChristoph Hellwig 	if (*handle == DMA_MAPPING_ERROR) {
1528ee1ef05dSChristoph Hellwig 		__iommu_dma_free(dev, size, cpu_addr);
1529ee1ef05dSChristoph Hellwig 		return NULL;
1530ee1ef05dSChristoph Hellwig 	}
1531ee1ef05dSChristoph Hellwig 
1532ee1ef05dSChristoph Hellwig 	return cpu_addr;
1533ee1ef05dSChristoph Hellwig }
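
/*
 * Illustrative sketch (not part of this file): the alloc/free pair above
 * backs dma_alloc_coherent()/dma_free_coherent() for devices using these
 * ops. The returned CPU address and DMA handle name the same (already
 * zeroed) buffer, so no explicit sync is needed between CPU and device.
 */
static void __maybe_unused example_coherent_roundtrip(struct device *dev)
{
	dma_addr_t dma;
	void *cpu = dma_alloc_coherent(dev, PAGE_SIZE, &dma, GFP_KERNEL);

	if (!cpu)
		return;
	/* CPU writes are visible to the device without further syncing */
	memset(cpu, 0xa5, PAGE_SIZE);
	dma_free_coherent(dev, PAGE_SIZE, cpu, dma);
}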
1534ee1ef05dSChristoph Hellwig 
153506d60728SChristoph Hellwig static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
153606d60728SChristoph Hellwig 		void *cpu_addr, dma_addr_t dma_addr, size_t size,
153706d60728SChristoph Hellwig 		unsigned long attrs)
153806d60728SChristoph Hellwig {
153906d60728SChristoph Hellwig 	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
1540efd9f10bSChristoph Hellwig 	unsigned long pfn, off = vma->vm_pgoff;
154106d60728SChristoph Hellwig 	int ret;
154206d60728SChristoph Hellwig 
154333dcb37cSChristoph Hellwig 	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
154406d60728SChristoph Hellwig 
154506d60728SChristoph Hellwig 	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
154606d60728SChristoph Hellwig 		return ret;
154706d60728SChristoph Hellwig 
154806d60728SChristoph Hellwig 	if (off >= nr_pages || vma_pages(vma) > nr_pages - off)
154906d60728SChristoph Hellwig 		return -ENXIO;
155006d60728SChristoph Hellwig 
1551f5ff79fdSChristoph Hellwig 	if (is_vmalloc_addr(cpu_addr)) {
15525cf45379SChristoph Hellwig 		struct page **pages = dma_common_find_pages(cpu_addr);
155306d60728SChristoph Hellwig 
1554efd9f10bSChristoph Hellwig 		if (pages)
155571fe89ceSChristoph Hellwig 			return vm_map_pages(vma, pages, nr_pages);
1556efd9f10bSChristoph Hellwig 		pfn = vmalloc_to_pfn(cpu_addr);
1557efd9f10bSChristoph Hellwig 	} else {
1558efd9f10bSChristoph Hellwig 		pfn = page_to_pfn(virt_to_page(cpu_addr));
1559efd9f10bSChristoph Hellwig 	}
1560efd9f10bSChristoph Hellwig 
1561efd9f10bSChristoph Hellwig 	return remap_pfn_range(vma, vma->vm_start, pfn + off,
1562efd9f10bSChristoph Hellwig 			       vma->vm_end - vma->vm_start,
1563efd9f10bSChristoph Hellwig 			       vma->vm_page_prot);
156406d60728SChristoph Hellwig }
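
/*
 * Illustrative sketch (not part of this file): a driver exposes such a
 * buffer to userspace from its own mmap handler via dma_mmap_coherent(),
 * which ends up in the callback above; vma->vm_pgoff is treated as an
 * offset into the buffer. Names are hypothetical.
 */
static int __maybe_unused example_mmap(struct device *dev,
		struct vm_area_struct *vma, void *cpu_addr,
		dma_addr_t dma, size_t size)
{
	return dma_mmap_coherent(dev, vma, cpu_addr, dma, size);
}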
156506d60728SChristoph Hellwig 
156606d60728SChristoph Hellwig static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
156706d60728SChristoph Hellwig 		void *cpu_addr, dma_addr_t dma_addr, size_t size,
156806d60728SChristoph Hellwig 		unsigned long attrs)
156906d60728SChristoph Hellwig {
15703fb3378bSChristoph Hellwig 	struct page *page;
15713fb3378bSChristoph Hellwig 	int ret;
157206d60728SChristoph Hellwig 
1573f5ff79fdSChristoph Hellwig 	if (is_vmalloc_addr(cpu_addr)) {
15745cf45379SChristoph Hellwig 		struct page **pages = dma_common_find_pages(cpu_addr);
15753fb3378bSChristoph Hellwig 
15763fb3378bSChristoph Hellwig 		if (pages) {
15773fb3378bSChristoph Hellwig 			return sg_alloc_table_from_pages(sgt, pages,
15783fb3378bSChristoph Hellwig 					PAGE_ALIGN(size) >> PAGE_SHIFT,
15793fb3378bSChristoph Hellwig 					0, size, GFP_KERNEL);
158006d60728SChristoph Hellwig 		}
158106d60728SChristoph Hellwig 
15823fb3378bSChristoph Hellwig 		page = vmalloc_to_page(cpu_addr);
15833fb3378bSChristoph Hellwig 	} else {
15843fb3378bSChristoph Hellwig 		page = virt_to_page(cpu_addr);
158506d60728SChristoph Hellwig 	}
158606d60728SChristoph Hellwig 
15873fb3378bSChristoph Hellwig 	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
15883fb3378bSChristoph Hellwig 	if (!ret)
15893fb3378bSChristoph Hellwig 		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
15903fb3378bSChristoph Hellwig 	return ret;
159106d60728SChristoph Hellwig }
159206d60728SChristoph Hellwig 
1593158a6d3cSYoshihiro Shimoda static unsigned long iommu_dma_get_merge_boundary(struct device *dev)
1594158a6d3cSYoshihiro Shimoda {
1595158a6d3cSYoshihiro Shimoda 	struct iommu_domain *domain = iommu_get_dma_domain(dev);
1596158a6d3cSYoshihiro Shimoda 
1597158a6d3cSYoshihiro Shimoda 	return (1UL << __ffs(domain->pgsize_bitmap)) - 1;
1598158a6d3cSYoshihiro Shimoda }
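
/*
 * Example (illustrative): a domain with pgsize_bitmap = 0x40201000
 * (4K, 2M and 1G pages) has __ffs() = 12, so the merge boundary is
 * 0xfff. Consumers query it via dma_get_merge_boundary(); the block
 * layer, for instance, can use it as a virt_boundary mask so requests
 * only merge where the IOMMU can remap them contiguously.
 */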
1599158a6d3cSYoshihiro Shimoda 
16006d9870b7SJohn Garry static size_t iommu_dma_opt_mapping_size(void)
16016d9870b7SJohn Garry {
16026d9870b7SJohn Garry 	return iova_rcache_range();
16036d9870b7SJohn Garry }
16046d9870b7SJohn Garry 
1605e07a16e6SNicolin Chen static size_t iommu_dma_max_mapping_size(struct device *dev)
1606e07a16e6SNicolin Chen {
1607e07a16e6SNicolin Chen 	if (dev_is_untrusted(dev))
1608e07a16e6SNicolin Chen 		return swiotlb_max_mapping_size(dev);
1609e07a16e6SNicolin Chen 
1610e07a16e6SNicolin Chen 	return SIZE_MAX;
1611e07a16e6SNicolin Chen }
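
/*
 * Illustrative note: drivers query these limits with
 * dma_opt_mapping_size() and dma_max_mapping_size(). An untrusted
 * device has its mappings bounced through swiotlb, so its hard limit
 * drops to swiotlb_max_mapping_size(); otherwise mappings may be
 * arbitrarily large, though staying within the rcache range keeps
 * IOVA allocation fast.
 */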
1612e07a16e6SNicolin Chen 
161306d60728SChristoph Hellwig static const struct dma_map_ops iommu_dma_ops = {
161430280eeeSLogan Gunthorpe 	.flags			= DMA_F_PCI_P2PDMA_SUPPORTED,
161506d60728SChristoph Hellwig 	.alloc			= iommu_dma_alloc,
161606d60728SChristoph Hellwig 	.free			= iommu_dma_free,
1617efa70f2fSChristoph Hellwig 	.alloc_pages		= dma_common_alloc_pages,
1618efa70f2fSChristoph Hellwig 	.free_pages		= dma_common_free_pages,
1619e817ee5fSChristoph Hellwig 	.alloc_noncontiguous	= iommu_dma_alloc_noncontiguous,
1620e817ee5fSChristoph Hellwig 	.free_noncontiguous	= iommu_dma_free_noncontiguous,
162106d60728SChristoph Hellwig 	.mmap			= iommu_dma_mmap,
162206d60728SChristoph Hellwig 	.get_sgtable		= iommu_dma_get_sgtable,
162306d60728SChristoph Hellwig 	.map_page		= iommu_dma_map_page,
162406d60728SChristoph Hellwig 	.unmap_page		= iommu_dma_unmap_page,
162506d60728SChristoph Hellwig 	.map_sg			= iommu_dma_map_sg,
162606d60728SChristoph Hellwig 	.unmap_sg		= iommu_dma_unmap_sg,
162706d60728SChristoph Hellwig 	.sync_single_for_cpu	= iommu_dma_sync_single_for_cpu,
162806d60728SChristoph Hellwig 	.sync_single_for_device	= iommu_dma_sync_single_for_device,
162906d60728SChristoph Hellwig 	.sync_sg_for_cpu	= iommu_dma_sync_sg_for_cpu,
163006d60728SChristoph Hellwig 	.sync_sg_for_device	= iommu_dma_sync_sg_for_device,
163106d60728SChristoph Hellwig 	.map_resource		= iommu_dma_map_resource,
163206d60728SChristoph Hellwig 	.unmap_resource		= iommu_dma_unmap_resource,
1633158a6d3cSYoshihiro Shimoda 	.get_merge_boundary	= iommu_dma_get_merge_boundary,
16346d9870b7SJohn Garry 	.opt_mapping_size	= iommu_dma_opt_mapping_size,
1635e07a16e6SNicolin Chen 	.max_mapping_size       = iommu_dma_max_mapping_size,
163606d60728SChristoph Hellwig };
163706d60728SChristoph Hellwig 
163806d60728SChristoph Hellwig /*
163906d60728SChristoph Hellwig  * The IOMMU core code allocates the default DMA domain, which the underlying
164006d60728SChristoph Hellwig  * IOMMU driver needs to support via the dma-iommu layer.
164106d60728SChristoph Hellwig  */
1642ac6d7046SJean-Philippe Brucker void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit)
164306d60728SChristoph Hellwig {
164406d60728SChristoph Hellwig 	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
164506d60728SChristoph Hellwig 
164606d60728SChristoph Hellwig 	if (!domain)
164706d60728SChristoph Hellwig 		goto out_err;
164806d60728SChristoph Hellwig 
1653bf3aed46SRobin Murphy 	if (iommu_is_dma_domain(domain)) {
1654ac6d7046SJean-Philippe Brucker 		if (iommu_dma_init_domain(domain, dma_base, dma_limit, dev))
165506d60728SChristoph Hellwig 			goto out_err;
165606d60728SChristoph Hellwig 		dev->dma_ops = &iommu_dma_ops;
165706d60728SChristoph Hellwig 	}
165806d60728SChristoph Hellwig 
165906d60728SChristoph Hellwig 	return;
166006d60728SChristoph Hellwig out_err:
166106d60728SChristoph Hellwig 	 pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
166206d60728SChristoph Hellwig 		 dev_name(dev));
166344bb7e24SRobin Murphy }
16648ce4904bSJean-Philippe Brucker EXPORT_SYMBOL_GPL(iommu_setup_dma_ops);
166544bb7e24SRobin Murphy 
166644bb7e24SRobin Murphy static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
166744bb7e24SRobin Murphy 		phys_addr_t msi_addr, struct iommu_domain *domain)
166844bb7e24SRobin Murphy {
166944bb7e24SRobin Murphy 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
167044bb7e24SRobin Murphy 	struct iommu_dma_msi_page *msi_page;
1671842fe519SRobin Murphy 	dma_addr_t iova;
167244bb7e24SRobin Murphy 	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
1673fdbe574eSRobin Murphy 	size_t size = cookie_msi_granule(cookie);
167444bb7e24SRobin Murphy 
1675fdbe574eSRobin Murphy 	msi_addr &= ~(phys_addr_t)(size - 1);
167644bb7e24SRobin Murphy 	list_for_each_entry(msi_page, &cookie->msi_page_list, list)
167744bb7e24SRobin Murphy 		if (msi_page->phys == msi_addr)
167844bb7e24SRobin Murphy 			return msi_page;
167944bb7e24SRobin Murphy 
1680c1864790SRobin Murphy 	msi_page = kzalloc(sizeof(*msi_page), GFP_KERNEL);
168144bb7e24SRobin Murphy 	if (!msi_page)
168244bb7e24SRobin Murphy 		return NULL;
168344bb7e24SRobin Murphy 
16848af23fadSRobin Murphy 	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
16858af23fadSRobin Murphy 	if (!iova)
168644bb7e24SRobin Murphy 		goto out_free_page;
168744bb7e24SRobin Murphy 
16881369459bSJason Gunthorpe 	if (iommu_map(domain, iova, msi_addr, size, prot, GFP_KERNEL))
16898af23fadSRobin Murphy 		goto out_free_iova;
16908af23fadSRobin Murphy 
169144bb7e24SRobin Murphy 	INIT_LIST_HEAD(&msi_page->list);
1692a44e6657SRobin Murphy 	msi_page->phys = msi_addr;
1693a44e6657SRobin Murphy 	msi_page->iova = iova;
169444bb7e24SRobin Murphy 	list_add(&msi_page->list, &cookie->msi_page_list);
169544bb7e24SRobin Murphy 	return msi_page;
169644bb7e24SRobin Murphy 
16978af23fadSRobin Murphy out_free_iova:
16982a2b8eaaSTom Murphy 	iommu_dma_free_iova(cookie, iova, size, NULL);
169944bb7e24SRobin Murphy out_free_page:
170044bb7e24SRobin Murphy 	kfree(msi_page);
170144bb7e24SRobin Murphy 	return NULL;
170244bb7e24SRobin Murphy }
170344bb7e24SRobin Murphy 
1704fa49364cSRobin Murphy /**
1705fa49364cSRobin Murphy  * iommu_dma_prepare_msi() - Map the MSI page in the IOMMU domain
1706fa49364cSRobin Murphy  * @desc: MSI descriptor, will store the MSI page
1707fa49364cSRobin Murphy  * @msi_addr: MSI target address to be mapped
1708fa49364cSRobin Murphy  *
1709fa49364cSRobin Murphy  * Return: 0 on success or negative error code if the mapping failed.
1710fa49364cSRobin Murphy  */
1711ece6e6f0SJulien Grall int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
171244bb7e24SRobin Murphy {
1713ece6e6f0SJulien Grall 	struct device *dev = msi_desc_to_dev(desc);
171444bb7e24SRobin Murphy 	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
171544bb7e24SRobin Murphy 	struct iommu_dma_msi_page *msi_page;
1716c1864790SRobin Murphy 	static DEFINE_MUTEX(msi_prepare_lock); /* see below */
171744bb7e24SRobin Murphy 
1718ece6e6f0SJulien Grall 	if (!domain || !domain->iova_cookie) {
1719ece6e6f0SJulien Grall 		desc->iommu_cookie = NULL;
1720ece6e6f0SJulien Grall 		return 0;
1721ece6e6f0SJulien Grall 	}
172244bb7e24SRobin Murphy 
172344bb7e24SRobin Murphy 	/*
1724c1864790SRobin Murphy 	 * In fact the whole prepare operation should already be serialised by
1725c1864790SRobin Murphy 	 * irq_domain_mutex further up the callchain, but that's pretty subtle
1726c1864790SRobin Murphy 	 * on its own, so consider this locking as failsafe documentation...
172744bb7e24SRobin Murphy 	 */
1728c1864790SRobin Murphy 	mutex_lock(&msi_prepare_lock);
172944bb7e24SRobin Murphy 	msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
1730c1864790SRobin Murphy 	mutex_unlock(&msi_prepare_lock);
173144bb7e24SRobin Murphy 
1732ece6e6f0SJulien Grall 	msi_desc_set_iommu_cookie(desc, msi_page);
1733ece6e6f0SJulien Grall 
1734ece6e6f0SJulien Grall 	if (!msi_page)
1735ece6e6f0SJulien Grall 		return -ENOMEM;
1736ece6e6f0SJulien Grall 	return 0;
173744bb7e24SRobin Murphy }
1738ece6e6f0SJulien Grall 
1739fa49364cSRobin Murphy /**
1740fa49364cSRobin Murphy  * iommu_dma_compose_msi_msg() - Apply translation to an MSI message
1741fa49364cSRobin Murphy  * @desc: MSI descriptor prepared by iommu_dma_prepare_msi()
1742fa49364cSRobin Murphy  * @msg: MSI message containing target physical address
1743fa49364cSRobin Murphy  */
1744fa49364cSRobin Murphy void iommu_dma_compose_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
1745ece6e6f0SJulien Grall {
1746ece6e6f0SJulien Grall 	struct device *dev = msi_desc_to_dev(desc);
1747ece6e6f0SJulien Grall 	const struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
1748ece6e6f0SJulien Grall 	const struct iommu_dma_msi_page *msi_page;
1749ece6e6f0SJulien Grall 
1750ece6e6f0SJulien Grall 	msi_page = msi_desc_get_iommu_cookie(desc);
1751ece6e6f0SJulien Grall 
1752ece6e6f0SJulien Grall 	if (!domain || !domain->iova_cookie || WARN_ON(!msi_page))
1753ece6e6f0SJulien Grall 		return;
1754ece6e6f0SJulien Grall 
1755ece6e6f0SJulien Grall 	msg->address_hi = upper_32_bits(msi_page->iova);
1756ece6e6f0SJulien Grall 	msg->address_lo &= cookie_msi_granule(domain->iova_cookie) - 1;
1757ece6e6f0SJulien Grall 	msg->address_lo += lower_32_bits(msi_page->iova);
175844bb7e24SRobin Murphy }
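
/*
 * Worked example (illustrative): with a 4K MSI granule, a doorbell page
 * at physical 0x08020000 remapped to IOVA 0xfee00000 turns an original
 * address_lo of 0x08020040 into (0x08020040 & 0xfff) + 0xfee00000 =
 * 0xfee00040, with address_hi = 0: the offset within the granule is
 * preserved while the page itself is translated.
 */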
175906d60728SChristoph Hellwig 
176006d60728SChristoph Hellwig static int iommu_dma_init(void)
176106d60728SChristoph Hellwig {
1762a8e8af35SLianbo Jiang 	if (is_kdump_kernel())
1763a8e8af35SLianbo Jiang 		static_branch_enable(&iommu_deferred_attach_enabled);
1764a8e8af35SLianbo Jiang 
176506d60728SChristoph Hellwig 	return iova_cache_get();
17660db2e5d1SRobin Murphy }
176706d60728SChristoph Hellwig arch_initcall(iommu_dma_init);
1768