xref: /openbmc/linux/drivers/iommu/dma-iommu.c (revision 4dc6376a)
1caab277bSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
20db2e5d1SRobin Murphy /*
30db2e5d1SRobin Murphy  * A fairly generic DMA-API to IOMMU-API glue layer.
40db2e5d1SRobin Murphy  *
50db2e5d1SRobin Murphy  * Copyright (C) 2014-2015 ARM Ltd.
60db2e5d1SRobin Murphy  *
70db2e5d1SRobin Murphy  * based in part on arch/arm/mm/dma-mapping.c:
80db2e5d1SRobin Murphy  * Copyright (C) 2000-2004 Russell King
90db2e5d1SRobin Murphy  */
100db2e5d1SRobin Murphy 
11f51dc892SShameer Kolothum #include <linux/acpi_iort.h>
12a17e3026SRobin Murphy #include <linux/atomic.h>
13a17e3026SRobin Murphy #include <linux/crash_dump.h>
140db2e5d1SRobin Murphy #include <linux/device.h>
15a17e3026SRobin Murphy #include <linux/dma-direct.h>
16a17e3026SRobin Murphy #include <linux/dma-map-ops.h>
175b11e9cdSRobin Murphy #include <linux/gfp.h>
180db2e5d1SRobin Murphy #include <linux/huge_mm.h>
190db2e5d1SRobin Murphy #include <linux/iommu.h>
200db2e5d1SRobin Murphy #include <linux/iova.h>
2144bb7e24SRobin Murphy #include <linux/irq.h>
22b8397a8fSRobin Murphy #include <linux/list_sort.h>
2330280eeeSLogan Gunthorpe #include <linux/memremap.h>
240db2e5d1SRobin Murphy #include <linux/mm.h>
25c1864790SRobin Murphy #include <linux/mutex.h>
26fade1ec0SRobin Murphy #include <linux/pci.h>
275b11e9cdSRobin Murphy #include <linux/scatterlist.h>
28a17e3026SRobin Murphy #include <linux/spinlock.h>
29a17e3026SRobin Murphy #include <linux/swiotlb.h>
305b11e9cdSRobin Murphy #include <linux/vmalloc.h>
310db2e5d1SRobin Murphy 
32f2042ed2SRobin Murphy #include "dma-iommu.h"
33f2042ed2SRobin Murphy 
3444bb7e24SRobin Murphy struct iommu_dma_msi_page {
3544bb7e24SRobin Murphy 	struct list_head	list;
3644bb7e24SRobin Murphy 	dma_addr_t		iova;
3744bb7e24SRobin Murphy 	phys_addr_t		phys;
3844bb7e24SRobin Murphy };
3944bb7e24SRobin Murphy 
40fdbe574eSRobin Murphy enum iommu_dma_cookie_type {
41fdbe574eSRobin Murphy 	IOMMU_DMA_IOVA_COOKIE,
42fdbe574eSRobin Murphy 	IOMMU_DMA_MSI_COOKIE,
43fdbe574eSRobin Murphy };
44fdbe574eSRobin Murphy 
4544bb7e24SRobin Murphy struct iommu_dma_cookie {
46fdbe574eSRobin Murphy 	enum iommu_dma_cookie_type	type;
47fdbe574eSRobin Murphy 	union {
48fdbe574eSRobin Murphy 		/* Full allocator for IOMMU_DMA_IOVA_COOKIE */
49a17e3026SRobin Murphy 		struct {
5044bb7e24SRobin Murphy 			struct iova_domain	iovad;
51a17e3026SRobin Murphy 
52a17e3026SRobin Murphy 			struct iova_fq __percpu *fq;	/* Flush queue */
53a17e3026SRobin Murphy 			/* Number of TLB flushes that have been started */
54a17e3026SRobin Murphy 			atomic64_t		fq_flush_start_cnt;
55a17e3026SRobin Murphy 			/* Number of TLB flushes that have been finished */
56a17e3026SRobin Murphy 			atomic64_t		fq_flush_finish_cnt;
57a17e3026SRobin Murphy 			/* Timer to regularly empty the flush queues */
58a17e3026SRobin Murphy 			struct timer_list	fq_timer;
59a17e3026SRobin Murphy 			/* 1 when timer is active, 0 when not */
60a17e3026SRobin Murphy 			atomic_t		fq_timer_on;
61a17e3026SRobin Murphy 		};
62fdbe574eSRobin Murphy 		/* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
63fdbe574eSRobin Murphy 		dma_addr_t		msi_iova;
64fdbe574eSRobin Murphy 	};
6544bb7e24SRobin Murphy 	struct list_head		msi_page_list;
662da274cdSZhen Lei 
672da274cdSZhen Lei 	/* Domain for flush queue callback; NULL if flush queue not in use */
682da274cdSZhen Lei 	struct iommu_domain		*fq_domain;
69ac9a5d52SYunfei Wang 	struct mutex			mutex;
7044bb7e24SRobin Murphy };
7144bb7e24SRobin Murphy 
72a8e8af35SLianbo Jiang static DEFINE_STATIC_KEY_FALSE(iommu_deferred_attach_enabled);
73af3e9579SLinus Torvalds bool iommu_dma_forcedac __read_mostly;
743542dcb1SRobin Murphy 
753542dcb1SRobin Murphy static int __init iommu_dma_forcedac_setup(char *str)
763542dcb1SRobin Murphy {
773542dcb1SRobin Murphy 	int ret = kstrtobool(str, &iommu_dma_forcedac);
783542dcb1SRobin Murphy 
793542dcb1SRobin Murphy 	if (!ret && iommu_dma_forcedac)
803542dcb1SRobin Murphy 		pr_info("Forcing DAC for PCI devices\n");
813542dcb1SRobin Murphy 	return ret;
823542dcb1SRobin Murphy }
833542dcb1SRobin Murphy early_param("iommu.forcedac", iommu_dma_forcedac_setup);
84a8e8af35SLianbo Jiang 
85a17e3026SRobin Murphy /* Number of entries per flush queue */
86a17e3026SRobin Murphy #define IOVA_FQ_SIZE	256
87a17e3026SRobin Murphy 
88a17e3026SRobin Murphy /* Timeout (in ms) after which entries are flushed from the queue */
89a17e3026SRobin Murphy #define IOVA_FQ_TIMEOUT	10
90a17e3026SRobin Murphy 
91a17e3026SRobin Murphy /* Flush queue entry for deferred flushing */
92a17e3026SRobin Murphy struct iova_fq_entry {
93a17e3026SRobin Murphy 	unsigned long iova_pfn;
94a17e3026SRobin Murphy 	unsigned long pages;
95a17e3026SRobin Murphy 	struct list_head freelist;
96a17e3026SRobin Murphy 	u64 counter; /* Flush counter when this entry was added */
97a17e3026SRobin Murphy };
98a17e3026SRobin Murphy 
99a17e3026SRobin Murphy /* Per-CPU flush queue structure */
100a17e3026SRobin Murphy struct iova_fq {
101a17e3026SRobin Murphy 	struct iova_fq_entry entries[IOVA_FQ_SIZE];
102a17e3026SRobin Murphy 	unsigned int head, tail;
103a17e3026SRobin Murphy 	spinlock_t lock;
104a17e3026SRobin Murphy };
105a17e3026SRobin Murphy 
106f7f07484SRobin Murphy #define fq_ring_for_each(i, fq) \
107f7f07484SRobin Murphy 	for ((i) = (fq)->head; (i) != (fq)->tail; (i) = ((i) + 1) % IOVA_FQ_SIZE)
108f7f07484SRobin Murphy 
109f7f07484SRobin Murphy static inline bool fq_full(struct iova_fq *fq)
110f7f07484SRobin Murphy {
111f7f07484SRobin Murphy 	assert_spin_locked(&fq->lock);
112f7f07484SRobin Murphy 	return (((fq->tail + 1) % IOVA_FQ_SIZE) == fq->head);
113f7f07484SRobin Murphy }
114f7f07484SRobin Murphy 
115a17e3026SRobin Murphy static inline unsigned int fq_ring_add(struct iova_fq *fq)
116f7f07484SRobin Murphy {
117a17e3026SRobin Murphy 	unsigned int idx = fq->tail;
118f7f07484SRobin Murphy 
119f7f07484SRobin Murphy 	assert_spin_locked(&fq->lock);
120f7f07484SRobin Murphy 
121f7f07484SRobin Murphy 	fq->tail = (idx + 1) % IOVA_FQ_SIZE;
122f7f07484SRobin Murphy 
123f7f07484SRobin Murphy 	return idx;
124f7f07484SRobin Murphy }
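
/*
 * Example of the ring arithmetic above (editorial illustration): with
 * IOVA_FQ_SIZE == 256, fq_full() treats ((tail + 1) % 256) == head as full,
 * so each per-CPU queue holds at most 255 deferred entries; fq_ring_add()
 * then hands back the old tail slot and advances tail with the same modulo
 * wrap.
 */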
125f7f07484SRobin Murphy 
126a17e3026SRobin Murphy static void fq_ring_free(struct iommu_dma_cookie *cookie, struct iova_fq *fq)
127f7f07484SRobin Murphy {
128a17e3026SRobin Murphy 	u64 counter = atomic64_read(&cookie->fq_flush_finish_cnt);
129a17e3026SRobin Murphy 	unsigned int idx;
130f7f07484SRobin Murphy 
131f7f07484SRobin Murphy 	assert_spin_locked(&fq->lock);
132f7f07484SRobin Murphy 
133f7f07484SRobin Murphy 	fq_ring_for_each(idx, fq) {
134f7f07484SRobin Murphy 
135f7f07484SRobin Murphy 		if (fq->entries[idx].counter >= counter)
136f7f07484SRobin Murphy 			break;
137f7f07484SRobin Murphy 
138f7f07484SRobin Murphy 		put_pages_list(&fq->entries[idx].freelist);
139a17e3026SRobin Murphy 		free_iova_fast(&cookie->iovad,
140f7f07484SRobin Murphy 			       fq->entries[idx].iova_pfn,
141f7f07484SRobin Murphy 			       fq->entries[idx].pages);
142f7f07484SRobin Murphy 
143f7f07484SRobin Murphy 		fq->head = (fq->head + 1) % IOVA_FQ_SIZE;
144f7f07484SRobin Murphy 	}
145f7f07484SRobin Murphy }
146f7f07484SRobin Murphy 
147a17e3026SRobin Murphy static void fq_flush_iotlb(struct iommu_dma_cookie *cookie)
148f7f07484SRobin Murphy {
149a17e3026SRobin Murphy 	atomic64_inc(&cookie->fq_flush_start_cnt);
150a17e3026SRobin Murphy 	cookie->fq_domain->ops->flush_iotlb_all(cookie->fq_domain);
151a17e3026SRobin Murphy 	atomic64_inc(&cookie->fq_flush_finish_cnt);
152f7f07484SRobin Murphy }
153f7f07484SRobin Murphy 
154f7f07484SRobin Murphy static void fq_flush_timeout(struct timer_list *t)
155f7f07484SRobin Murphy {
156a17e3026SRobin Murphy 	struct iommu_dma_cookie *cookie = from_timer(cookie, t, fq_timer);
157f7f07484SRobin Murphy 	int cpu;
158f7f07484SRobin Murphy 
159a17e3026SRobin Murphy 	atomic_set(&cookie->fq_timer_on, 0);
160a17e3026SRobin Murphy 	fq_flush_iotlb(cookie);
161f7f07484SRobin Murphy 
162f7f07484SRobin Murphy 	for_each_possible_cpu(cpu) {
163f7f07484SRobin Murphy 		unsigned long flags;
164f7f07484SRobin Murphy 		struct iova_fq *fq;
165f7f07484SRobin Murphy 
166a17e3026SRobin Murphy 		fq = per_cpu_ptr(cookie->fq, cpu);
167f7f07484SRobin Murphy 		spin_lock_irqsave(&fq->lock, flags);
168a17e3026SRobin Murphy 		fq_ring_free(cookie, fq);
169f7f07484SRobin Murphy 		spin_unlock_irqrestore(&fq->lock, flags);
170f7f07484SRobin Murphy 	}
171f7f07484SRobin Murphy }
172f7f07484SRobin Murphy 
173a17e3026SRobin Murphy static void queue_iova(struct iommu_dma_cookie *cookie,
174f7f07484SRobin Murphy 		unsigned long pfn, unsigned long pages,
175f7f07484SRobin Murphy 		struct list_head *freelist)
176f7f07484SRobin Murphy {
177f7f07484SRobin Murphy 	struct iova_fq *fq;
178f7f07484SRobin Murphy 	unsigned long flags;
179a17e3026SRobin Murphy 	unsigned int idx;
180f7f07484SRobin Murphy 
181f7f07484SRobin Murphy 	/*
182f7f07484SRobin Murphy 	 * Order against the IOMMU driver's pagetable update from unmapping
183a17e3026SRobin Murphy 	 * the IOVAs being queued, so that fq_flush_iotlb() observes it if called
184f7f07484SRobin Murphy 	 * from a different CPU before we release the lock below. Full barrier
185f7f07484SRobin Murphy 	 * so it also pairs with iommu_dma_init_fq() to avoid seeing partially
186f7f07484SRobin Murphy 	 * written fq state here.
187f7f07484SRobin Murphy 	 */
188f7f07484SRobin Murphy 	smp_mb();
189f7f07484SRobin Murphy 
190a17e3026SRobin Murphy 	fq = raw_cpu_ptr(cookie->fq);
191f7f07484SRobin Murphy 	spin_lock_irqsave(&fq->lock, flags);
192f7f07484SRobin Murphy 
193f7f07484SRobin Murphy 	/*
194f7f07484SRobin Murphy 	 * First remove all entries from the flush queue that have already been
195f7f07484SRobin Murphy 	 * flushed out on another CPU. This makes the fq_full() check below less
196f7f07484SRobin Murphy 	 * likely to be true.
197f7f07484SRobin Murphy 	 */
198a17e3026SRobin Murphy 	fq_ring_free(cookie, fq);
199f7f07484SRobin Murphy 
200f7f07484SRobin Murphy 	if (fq_full(fq)) {
201a17e3026SRobin Murphy 		fq_flush_iotlb(cookie);
202a17e3026SRobin Murphy 		fq_ring_free(cookie, fq);
203f7f07484SRobin Murphy 	}
204f7f07484SRobin Murphy 
205f7f07484SRobin Murphy 	idx = fq_ring_add(fq);
206f7f07484SRobin Murphy 
207f7f07484SRobin Murphy 	fq->entries[idx].iova_pfn = pfn;
208f7f07484SRobin Murphy 	fq->entries[idx].pages    = pages;
209a17e3026SRobin Murphy 	fq->entries[idx].counter  = atomic64_read(&cookie->fq_flush_start_cnt);
210f7f07484SRobin Murphy 	list_splice(freelist, &fq->entries[idx].freelist);
211f7f07484SRobin Murphy 
212f7f07484SRobin Murphy 	spin_unlock_irqrestore(&fq->lock, flags);
213f7f07484SRobin Murphy 
214f7f07484SRobin Murphy 	/* Avoid false sharing as much as possible. */
215a17e3026SRobin Murphy 	if (!atomic_read(&cookie->fq_timer_on) &&
216a17e3026SRobin Murphy 	    !atomic_xchg(&cookie->fq_timer_on, 1))
217a17e3026SRobin Murphy 		mod_timer(&cookie->fq_timer,
218f7f07484SRobin Murphy 			  jiffies + msecs_to_jiffies(IOVA_FQ_TIMEOUT));
219f7f07484SRobin Murphy }
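
/*
 * Putting the pieces above together (sketch of the deferred-unmap flow):
 * __iommu_dma_unmap() collects the freed pagetable pages in its gather,
 * iommu_dma_free_iova() calls queue_iova(), and the entry then sits in the
 * per-CPU ring until either fq_flush_timeout() fires (IOVA_FQ_TIMEOUT ms
 * later) or the ring fills up and queue_iova() flushes synchronously via
 * fq_flush_iotlb().
 */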
220f7f07484SRobin Murphy 
221a17e3026SRobin Murphy static void iommu_dma_free_fq(struct iommu_dma_cookie *cookie)
222f7f07484SRobin Murphy {
223f7f07484SRobin Murphy 	int cpu, idx;
224f7f07484SRobin Murphy 
225a17e3026SRobin Murphy 	if (!cookie->fq)
226f7f07484SRobin Murphy 		return;
227f7f07484SRobin Murphy 
228a17e3026SRobin Murphy 	del_timer_sync(&cookie->fq_timer);
229a17e3026SRobin Murphy 	/* The IOVAs will be torn down separately, so just free our queued pages */
230f7f07484SRobin Murphy 	for_each_possible_cpu(cpu) {
231a17e3026SRobin Murphy 		struct iova_fq *fq = per_cpu_ptr(cookie->fq, cpu);
232f7f07484SRobin Murphy 
233f7f07484SRobin Murphy 		fq_ring_for_each(idx, fq)
234f7f07484SRobin Murphy 			put_pages_list(&fq->entries[idx].freelist);
235f7f07484SRobin Murphy 	}
236f7f07484SRobin Murphy 
237a17e3026SRobin Murphy 	free_percpu(cookie->fq);
238f7f07484SRobin Murphy }
239f7f07484SRobin Murphy 
240a17e3026SRobin Murphy /* sysfs updates are serialised by the mutex of the group owning @domain */
241a17e3026SRobin Murphy int iommu_dma_init_fq(struct iommu_domain *domain)
242f7f07484SRobin Murphy {
243a17e3026SRobin Murphy 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
244f7f07484SRobin Murphy 	struct iova_fq __percpu *queue;
245f7f07484SRobin Murphy 	int i, cpu;
246f7f07484SRobin Murphy 
247a17e3026SRobin Murphy 	if (cookie->fq_domain)
248a17e3026SRobin Murphy 		return 0;
249a17e3026SRobin Murphy 
250a17e3026SRobin Murphy 	atomic64_set(&cookie->fq_flush_start_cnt,  0);
251a17e3026SRobin Murphy 	atomic64_set(&cookie->fq_flush_finish_cnt, 0);
252f7f07484SRobin Murphy 
253f7f07484SRobin Murphy 	queue = alloc_percpu(struct iova_fq);
254a17e3026SRobin Murphy 	if (!queue) {
255a17e3026SRobin Murphy 		pr_warn("iova flush queue initialization failed\n");
256f7f07484SRobin Murphy 		return -ENOMEM;
257a17e3026SRobin Murphy 	}
258f7f07484SRobin Murphy 
259f7f07484SRobin Murphy 	for_each_possible_cpu(cpu) {
260f7f07484SRobin Murphy 		struct iova_fq *fq = per_cpu_ptr(queue, cpu);
261f7f07484SRobin Murphy 
262f7f07484SRobin Murphy 		fq->head = 0;
263f7f07484SRobin Murphy 		fq->tail = 0;
264f7f07484SRobin Murphy 
265f7f07484SRobin Murphy 		spin_lock_init(&fq->lock);
266f7f07484SRobin Murphy 
267f7f07484SRobin Murphy 		for (i = 0; i < IOVA_FQ_SIZE; i++)
268f7f07484SRobin Murphy 			INIT_LIST_HEAD(&fq->entries[i].freelist);
269f7f07484SRobin Murphy 	}
270f7f07484SRobin Murphy 
271a17e3026SRobin Murphy 	cookie->fq = queue;
272f7f07484SRobin Murphy 
273a17e3026SRobin Murphy 	timer_setup(&cookie->fq_timer, fq_flush_timeout, 0);
274a17e3026SRobin Murphy 	atomic_set(&cookie->fq_timer_on, 0);
275a17e3026SRobin Murphy 	/*
276a17e3026SRobin Murphy 	 * Prevent incomplete fq state being observable. Pairs with path from
277a17e3026SRobin Murphy 	 * __iommu_dma_unmap() through iommu_dma_free_iova() to queue_iova()
278a17e3026SRobin Murphy 	 */
279a17e3026SRobin Murphy 	smp_wmb();
280a17e3026SRobin Murphy 	WRITE_ONCE(cookie->fq_domain, domain);
281f7f07484SRobin Murphy 	return 0;
282f7f07484SRobin Murphy }
283f7f07484SRobin Murphy 
284fdbe574eSRobin Murphy static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
285fdbe574eSRobin Murphy {
286fdbe574eSRobin Murphy 	if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
287fdbe574eSRobin Murphy 		return cookie->iovad.granule;
288fdbe574eSRobin Murphy 	return PAGE_SIZE;
289fdbe574eSRobin Murphy }
290fdbe574eSRobin Murphy 
291fdbe574eSRobin Murphy static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
292fdbe574eSRobin Murphy {
293fdbe574eSRobin Murphy 	struct iommu_dma_cookie *cookie;
294fdbe574eSRobin Murphy 
295fdbe574eSRobin Murphy 	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
296fdbe574eSRobin Murphy 	if (cookie) {
297fdbe574eSRobin Murphy 		INIT_LIST_HEAD(&cookie->msi_page_list);
298fdbe574eSRobin Murphy 		cookie->type = type;
299fdbe574eSRobin Murphy 	}
300fdbe574eSRobin Murphy 	return cookie;
30144bb7e24SRobin Murphy }
30244bb7e24SRobin Murphy 
3030db2e5d1SRobin Murphy /**
3040db2e5d1SRobin Murphy  * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
3050db2e5d1SRobin Murphy  * @domain: IOMMU domain to prepare for DMA-API usage
3060db2e5d1SRobin Murphy  */
3070db2e5d1SRobin Murphy int iommu_get_dma_cookie(struct iommu_domain *domain)
3080db2e5d1SRobin Murphy {
3090db2e5d1SRobin Murphy 	if (domain->iova_cookie)
3100db2e5d1SRobin Murphy 		return -EEXIST;
3110db2e5d1SRobin Murphy 
312fdbe574eSRobin Murphy 	domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
313fdbe574eSRobin Murphy 	if (!domain->iova_cookie)
31444bb7e24SRobin Murphy 		return -ENOMEM;
3150db2e5d1SRobin Murphy 
316ac9a5d52SYunfei Wang 	mutex_init(&domain->iova_cookie->mutex);
31744bb7e24SRobin Murphy 	return 0;
3180db2e5d1SRobin Murphy }
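
/*
 * Typical use (sketch, not a normative call sequence): the core IOMMU code
 * calls iommu_get_dma_cookie() when a DMA-API-managed domain is allocated;
 * the cookie only gains a usable IOVA allocator later, when
 * iommu_dma_init_domain() further down in this file runs for a device
 * attaching to that domain.
 */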
3190db2e5d1SRobin Murphy 
3200db2e5d1SRobin Murphy /**
321fdbe574eSRobin Murphy  * iommu_get_msi_cookie - Acquire just MSI remapping resources
322fdbe574eSRobin Murphy  * @domain: IOMMU domain to prepare
323fdbe574eSRobin Murphy  * @base: Start address of IOVA region for MSI mappings
324fdbe574eSRobin Murphy  *
325fdbe574eSRobin Murphy  * Users who manage their own IOVA allocation and do not want DMA API support,
326fdbe574eSRobin Murphy  * but would still like to take advantage of automatic MSI remapping, can use
327fdbe574eSRobin Murphy  * this to initialise their own domain appropriately. Users should reserve a
328fdbe574eSRobin Murphy  * contiguous IOVA region, starting at @base, large enough to accommodate the
329fdbe574eSRobin Murphy  * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
330fdbe574eSRobin Murphy  * used by the devices attached to @domain.
331fdbe574eSRobin Murphy  */
332fdbe574eSRobin Murphy int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
333fdbe574eSRobin Murphy {
334fdbe574eSRobin Murphy 	struct iommu_dma_cookie *cookie;
335fdbe574eSRobin Murphy 
336fdbe574eSRobin Murphy 	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
337fdbe574eSRobin Murphy 		return -EINVAL;
338fdbe574eSRobin Murphy 
339fdbe574eSRobin Murphy 	if (domain->iova_cookie)
340fdbe574eSRobin Murphy 		return -EEXIST;
341fdbe574eSRobin Murphy 
342fdbe574eSRobin Murphy 	cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
343fdbe574eSRobin Murphy 	if (!cookie)
344fdbe574eSRobin Murphy 		return -ENOMEM;
345fdbe574eSRobin Murphy 
346fdbe574eSRobin Murphy 	cookie->msi_iova = base;
347fdbe574eSRobin Murphy 	domain->iova_cookie = cookie;
348fdbe574eSRobin Murphy 	return 0;
349fdbe574eSRobin Murphy }
350fdbe574eSRobin Murphy EXPORT_SYMBOL(iommu_get_msi_cookie);
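
/*
 * Illustrative call sequence for the MSI-only cookie (msi_base and msi_size
 * are hypothetical names; the caller's own allocator is assumed):
 *
 *	dom = iommu_domain_alloc(bus);		// IOMMU_DOMAIN_UNMANAGED
 *	// reserve [msi_base, msi_base + msi_size) in the caller's allocator
 *	ret = iommu_get_msi_cookie(dom, msi_base);
 *
 * After this, MSI doorbell pages for devices attached to @dom are handed out
 * linearly from @msi_base in cookie-granule (PAGE_SIZE) steps.
 */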
351fdbe574eSRobin Murphy 
352fdbe574eSRobin Murphy /**
3530db2e5d1SRobin Murphy  * iommu_put_dma_cookie - Release a domain's DMA mapping resources
354fdbe574eSRobin Murphy  * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
355fdbe574eSRobin Murphy  *          iommu_get_msi_cookie()
3560db2e5d1SRobin Murphy  */
3570db2e5d1SRobin Murphy void iommu_put_dma_cookie(struct iommu_domain *domain)
3580db2e5d1SRobin Murphy {
35944bb7e24SRobin Murphy 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
36044bb7e24SRobin Murphy 	struct iommu_dma_msi_page *msi, *tmp;
3610db2e5d1SRobin Murphy 
36244bb7e24SRobin Murphy 	if (!cookie)
3630db2e5d1SRobin Murphy 		return;
3640db2e5d1SRobin Murphy 
365f7f07484SRobin Murphy 	if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule) {
366a17e3026SRobin Murphy 		iommu_dma_free_fq(cookie);
36744bb7e24SRobin Murphy 		put_iova_domain(&cookie->iovad);
368f7f07484SRobin Murphy 	}
36944bb7e24SRobin Murphy 
37044bb7e24SRobin Murphy 	list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
37144bb7e24SRobin Murphy 		list_del(&msi->list);
37244bb7e24SRobin Murphy 		kfree(msi);
37344bb7e24SRobin Murphy 	}
37444bb7e24SRobin Murphy 	kfree(cookie);
3750db2e5d1SRobin Murphy 	domain->iova_cookie = NULL;
3760db2e5d1SRobin Murphy }
3770db2e5d1SRobin Murphy 
378273df963SRobin Murphy /**
379273df963SRobin Murphy  * iommu_dma_get_resv_regions - Reserved region driver helper
380273df963SRobin Murphy  * @dev: Device from iommu_get_resv_regions()
381273df963SRobin Murphy  * @list: Reserved region list from iommu_get_resv_regions()
382273df963SRobin Murphy  *
383273df963SRobin Murphy  * IOMMU drivers can use this to implement their .get_resv_regions callback
384cd2c9fcfSShameer Kolothum  * for general non-IOMMU-specific reservations. Currently, this covers GICv3
385cd2c9fcfSShameer Kolothum  * ITS region reservation on ACPI based ARM platforms that may require HW MSI
386cd2c9fcfSShameer Kolothum  * reservation.
387273df963SRobin Murphy  */
388273df963SRobin Murphy void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
389fade1ec0SRobin Murphy {
390fade1ec0SRobin Murphy 
39198cc4f71SJoerg Roedel 	if (!is_of_node(dev_iommu_fwspec_get(dev)->iommu_fwnode))
39255be25b8SShameer Kolothum 		iort_iommu_get_resv_regions(dev, list);
393f51dc892SShameer Kolothum 
394fade1ec0SRobin Murphy }
395273df963SRobin Murphy EXPORT_SYMBOL(iommu_dma_get_resv_regions);
396fade1ec0SRobin Murphy 
3977c1b058cSRobin Murphy static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
3987c1b058cSRobin Murphy 		phys_addr_t start, phys_addr_t end)
3997c1b058cSRobin Murphy {
4007c1b058cSRobin Murphy 	struct iova_domain *iovad = &cookie->iovad;
4017c1b058cSRobin Murphy 	struct iommu_dma_msi_page *msi_page;
4027c1b058cSRobin Murphy 	int i, num_pages;
4037c1b058cSRobin Murphy 
4047c1b058cSRobin Murphy 	start -= iova_offset(iovad, start);
4057c1b058cSRobin Murphy 	num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);
4067c1b058cSRobin Murphy 
40765ac74f1SMarc Zyngier 	for (i = 0; i < num_pages; i++) {
40865ac74f1SMarc Zyngier 		msi_page = kmalloc(sizeof(*msi_page), GFP_KERNEL);
4097c1b058cSRobin Murphy 		if (!msi_page)
4107c1b058cSRobin Murphy 			return -ENOMEM;
4117c1b058cSRobin Murphy 
41265ac74f1SMarc Zyngier 		msi_page->phys = start;
41365ac74f1SMarc Zyngier 		msi_page->iova = start;
41465ac74f1SMarc Zyngier 		INIT_LIST_HEAD(&msi_page->list);
41565ac74f1SMarc Zyngier 		list_add(&msi_page->list, &cookie->msi_page_list);
4167c1b058cSRobin Murphy 		start += iovad->granule;
4177c1b058cSRobin Murphy 	}
4187c1b058cSRobin Murphy 
4197c1b058cSRobin Murphy 	return 0;
4207c1b058cSRobin Murphy }
4217c1b058cSRobin Murphy 
422b8397a8fSRobin Murphy static int iommu_dma_ranges_sort(void *priv, const struct list_head *a,
423b8397a8fSRobin Murphy 		const struct list_head *b)
424b8397a8fSRobin Murphy {
425b8397a8fSRobin Murphy 	struct resource_entry *res_a = list_entry(a, typeof(*res_a), node);
426b8397a8fSRobin Murphy 	struct resource_entry *res_b = list_entry(b, typeof(*res_b), node);
427b8397a8fSRobin Murphy 
428b8397a8fSRobin Murphy 	return res_a->res->start > res_b->res->start;
429b8397a8fSRobin Murphy }
430b8397a8fSRobin Murphy 
431aadad097SSrinath Mannam static int iova_reserve_pci_windows(struct pci_dev *dev,
432cd2c9fcfSShameer Kolothum 		struct iova_domain *iovad)
433cd2c9fcfSShameer Kolothum {
434cd2c9fcfSShameer Kolothum 	struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
435cd2c9fcfSShameer Kolothum 	struct resource_entry *window;
436cd2c9fcfSShameer Kolothum 	unsigned long lo, hi;
437aadad097SSrinath Mannam 	phys_addr_t start = 0, end;
438cd2c9fcfSShameer Kolothum 
439cd2c9fcfSShameer Kolothum 	resource_list_for_each_entry(window, &bridge->windows) {
440cd2c9fcfSShameer Kolothum 		if (resource_type(window->res) != IORESOURCE_MEM)
441cd2c9fcfSShameer Kolothum 			continue;
442cd2c9fcfSShameer Kolothum 
443cd2c9fcfSShameer Kolothum 		lo = iova_pfn(iovad, window->res->start - window->offset);
444cd2c9fcfSShameer Kolothum 		hi = iova_pfn(iovad, window->res->end - window->offset);
445cd2c9fcfSShameer Kolothum 		reserve_iova(iovad, lo, hi);
446cd2c9fcfSShameer Kolothum 	}
447aadad097SSrinath Mannam 
448aadad097SSrinath Mannam 	/* Get reserved DMA windows from host bridge */
449b8397a8fSRobin Murphy 	list_sort(NULL, &bridge->dma_ranges, iommu_dma_ranges_sort);
450aadad097SSrinath Mannam 	resource_list_for_each_entry(window, &bridge->dma_ranges) {
451aadad097SSrinath Mannam 		end = window->res->start - window->offset;
452aadad097SSrinath Mannam resv_iova:
453aadad097SSrinath Mannam 		if (end > start) {
454aadad097SSrinath Mannam 			lo = iova_pfn(iovad, start);
455aadad097SSrinath Mannam 			hi = iova_pfn(iovad, end);
456aadad097SSrinath Mannam 			reserve_iova(iovad, lo, hi);
457571f3160SSrinath Mannam 		} else if (end < start) {
458b8397a8fSRobin Murphy 			/* DMA ranges should be non-overlapping */
459571f3160SSrinath Mannam 			dev_err(&dev->dev,
4607154cbd3SJoerg Roedel 				"Failed to reserve IOVA [%pa-%pa]\n",
4617154cbd3SJoerg Roedel 				&start, &end);
462aadad097SSrinath Mannam 			return -EINVAL;
463aadad097SSrinath Mannam 		}
464aadad097SSrinath Mannam 
465aadad097SSrinath Mannam 		start = window->res->end - window->offset + 1;
466aadad097SSrinath Mannam 		/* If window is last entry */
467aadad097SSrinath Mannam 		if (window->node.next == &bridge->dma_ranges &&
46829fcea8cSArnd Bergmann 		    end != ~(phys_addr_t)0) {
46929fcea8cSArnd Bergmann 			end = ~(phys_addr_t)0;
470aadad097SSrinath Mannam 			goto resv_iova;
471aadad097SSrinath Mannam 		}
472aadad097SSrinath Mannam 	}
473aadad097SSrinath Mannam 
474aadad097SSrinath Mannam 	return 0;
475cd2c9fcfSShameer Kolothum }
476cd2c9fcfSShameer Kolothum 
4777c1b058cSRobin Murphy static int iova_reserve_iommu_regions(struct device *dev,
4787c1b058cSRobin Murphy 		struct iommu_domain *domain)
4797c1b058cSRobin Murphy {
4807c1b058cSRobin Murphy 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
4817c1b058cSRobin Murphy 	struct iova_domain *iovad = &cookie->iovad;
4827c1b058cSRobin Murphy 	struct iommu_resv_region *region;
4837c1b058cSRobin Murphy 	LIST_HEAD(resv_regions);
4847c1b058cSRobin Murphy 	int ret = 0;
4857c1b058cSRobin Murphy 
486aadad097SSrinath Mannam 	if (dev_is_pci(dev)) {
487aadad097SSrinath Mannam 		ret = iova_reserve_pci_windows(to_pci_dev(dev), iovad);
488aadad097SSrinath Mannam 		if (ret)
489aadad097SSrinath Mannam 			return ret;
490aadad097SSrinath Mannam 	}
491cd2c9fcfSShameer Kolothum 
4927c1b058cSRobin Murphy 	iommu_get_resv_regions(dev, &resv_regions);
4937c1b058cSRobin Murphy 	list_for_each_entry(region, &resv_regions, list) {
4947c1b058cSRobin Murphy 		unsigned long lo, hi;
4957c1b058cSRobin Murphy 
4967c1b058cSRobin Murphy 		/* We ARE the software that manages these! */
4977c1b058cSRobin Murphy 		if (region->type == IOMMU_RESV_SW_MSI)
4987c1b058cSRobin Murphy 			continue;
4997c1b058cSRobin Murphy 
5007c1b058cSRobin Murphy 		lo = iova_pfn(iovad, region->start);
5017c1b058cSRobin Murphy 		hi = iova_pfn(iovad, region->start + region->length - 1);
5027c1b058cSRobin Murphy 		reserve_iova(iovad, lo, hi);
5037c1b058cSRobin Murphy 
5047c1b058cSRobin Murphy 		if (region->type == IOMMU_RESV_MSI)
5057c1b058cSRobin Murphy 			ret = cookie_init_hw_msi_region(cookie, region->start,
5067c1b058cSRobin Murphy 					region->start + region->length);
5077c1b058cSRobin Murphy 		if (ret)
5087c1b058cSRobin Murphy 			break;
5097c1b058cSRobin Murphy 	}
5107c1b058cSRobin Murphy 	iommu_put_resv_regions(dev, &resv_regions);
5117c1b058cSRobin Murphy 
5127c1b058cSRobin Murphy 	return ret;
5137c1b058cSRobin Murphy }
5147c1b058cSRobin Murphy 
51582c3cefbSLu Baolu static bool dev_is_untrusted(struct device *dev)
51682c3cefbSLu Baolu {
51782c3cefbSLu Baolu 	return dev_is_pci(dev) && to_pci_dev(dev)->untrusted;
51882c3cefbSLu Baolu }
51982c3cefbSLu Baolu 
5202e727bffSDavid Stevens static bool dev_use_swiotlb(struct device *dev)
5212e727bffSDavid Stevens {
5222e727bffSDavid Stevens 	return IS_ENABLED(CONFIG_SWIOTLB) && dev_is_untrusted(dev);
5232e727bffSDavid Stevens }
5242e727bffSDavid Stevens 
5250db2e5d1SRobin Murphy /**
5260db2e5d1SRobin Murphy  * iommu_dma_init_domain - Initialise a DMA mapping domain
5270db2e5d1SRobin Murphy  * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
5280db2e5d1SRobin Murphy  * @base: IOVA at which the mappable address space starts
529ac6d7046SJean-Philippe Brucker  * @limit: Last address of the IOVA space
530fade1ec0SRobin Murphy  * @dev: Device the domain is being initialised for
5310db2e5d1SRobin Murphy  *
532ac6d7046SJean-Philippe Brucker  * @base and @limit + 1 should be exact multiples of IOMMU page granularity to
5330db2e5d1SRobin Murphy  * avoid rounding surprises. If necessary, we reserve the page at address 0
5340db2e5d1SRobin Murphy  * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
5350db2e5d1SRobin Murphy  * any change which could make prior IOVAs invalid will fail.
5360db2e5d1SRobin Murphy  */
53706d60728SChristoph Hellwig static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
538ac6d7046SJean-Philippe Brucker 				 dma_addr_t limit, struct device *dev)
5390db2e5d1SRobin Murphy {
540fdbe574eSRobin Murphy 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
541c61a4633SShaokun Zhang 	unsigned long order, base_pfn;
5426b0c54e7SYunsheng Lin 	struct iova_domain *iovad;
54332e92d9fSJohn Garry 	int ret;
5440db2e5d1SRobin Murphy 
545fdbe574eSRobin Murphy 	if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
546fdbe574eSRobin Murphy 		return -EINVAL;
5470db2e5d1SRobin Murphy 
5486b0c54e7SYunsheng Lin 	iovad = &cookie->iovad;
5496b0c54e7SYunsheng Lin 
5500db2e5d1SRobin Murphy 	/* Use the smallest supported page size for IOVA granularity */
551d16e0faaSRobin Murphy 	order = __ffs(domain->pgsize_bitmap);
5520db2e5d1SRobin Murphy 	base_pfn = max_t(unsigned long, 1, base >> order);
5530db2e5d1SRobin Murphy 
5540db2e5d1SRobin Murphy 	/* Check the domain allows at least some access to the device... */
5550db2e5d1SRobin Murphy 	if (domain->geometry.force_aperture) {
5560db2e5d1SRobin Murphy 		if (base > domain->geometry.aperture_end ||
557ac6d7046SJean-Philippe Brucker 		    limit < domain->geometry.aperture_start) {
5580db2e5d1SRobin Murphy 			pr_warn("specified DMA range outside IOMMU capability\n");
5590db2e5d1SRobin Murphy 			return -EFAULT;
5600db2e5d1SRobin Murphy 		}
5610db2e5d1SRobin Murphy 		/* ...then finally give it a kicking to make sure it fits */
5620db2e5d1SRobin Murphy 		base_pfn = max_t(unsigned long, base_pfn,
5630db2e5d1SRobin Murphy 				domain->geometry.aperture_start >> order);
5640db2e5d1SRobin Murphy 	}
5650db2e5d1SRobin Murphy 
566f51d7bb7SRobin Murphy 	/* start_pfn is always nonzero for an already-initialised domain */
567ac9a5d52SYunfei Wang 	mutex_lock(&cookie->mutex);
5680db2e5d1SRobin Murphy 	if (iovad->start_pfn) {
5690db2e5d1SRobin Murphy 		if (1UL << order != iovad->granule ||
570f51d7bb7SRobin Murphy 		    base_pfn != iovad->start_pfn) {
5710db2e5d1SRobin Murphy 			pr_warn("Incompatible range for DMA domain\n");
572ac9a5d52SYunfei Wang 			ret = -EFAULT;
573ac9a5d52SYunfei Wang 			goto done_unlock;
5740db2e5d1SRobin Murphy 		}
5757c1b058cSRobin Murphy 
576ac9a5d52SYunfei Wang 		ret = 0;
577ac9a5d52SYunfei Wang 		goto done_unlock;
5780db2e5d1SRobin Murphy 	}
5797c1b058cSRobin Murphy 
580aa3ac946SZhen Lei 	init_iova_domain(iovad, 1UL << order, base_pfn);
58132e92d9fSJohn Garry 	ret = iova_domain_init_rcaches(iovad);
58232e92d9fSJohn Garry 	if (ret)
583ac9a5d52SYunfei Wang 		goto done_unlock;
5842da274cdSZhen Lei 
585c208916fSRobin Murphy 	/* If the FQ fails we can simply fall back to strict mode */
586452e69b5SRobin Murphy 	if (domain->type == IOMMU_DOMAIN_DMA_FQ && iommu_dma_init_fq(domain))
587c208916fSRobin Murphy 		domain->type = IOMMU_DOMAIN_DMA;
5887c1b058cSRobin Murphy 
589ac9a5d52SYunfei Wang 	ret = iova_reserve_iommu_regions(dev, domain);
590ac9a5d52SYunfei Wang 
591ac9a5d52SYunfei Wang done_unlock:
592ac9a5d52SYunfei Wang 	mutex_unlock(&cookie->mutex);
593ac9a5d52SYunfei Wang 	return ret;
5947c1b058cSRobin Murphy }
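
/*
 * Worked example for the setup above: if the domain's pgsize_bitmap has SZ_4K
 * as its smallest bit, order is 12; with @base == 0 the max_t() keeps base_pfn
 * at 1, so IOVA page 0 is never handed out and a zero dma_addr_t can never be
 * a valid mapping.
 */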
5950db2e5d1SRobin Murphy 
5960db2e5d1SRobin Murphy /**
597737c85caSMitchel Humpherys  * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
598737c85caSMitchel Humpherys  *                    page flags.
5990db2e5d1SRobin Murphy  * @dir: Direction of DMA transfer
6000db2e5d1SRobin Murphy  * @coherent: Is the DMA master cache-coherent?
601737c85caSMitchel Humpherys  * @attrs: DMA attributes for the mapping
6020db2e5d1SRobin Murphy  *
6030db2e5d1SRobin Murphy  * Return: corresponding IOMMU API page protection flags
6040db2e5d1SRobin Murphy  */
60506d60728SChristoph Hellwig static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
606737c85caSMitchel Humpherys 		     unsigned long attrs)
6070db2e5d1SRobin Murphy {
6080db2e5d1SRobin Murphy 	int prot = coherent ? IOMMU_CACHE : 0;
6090db2e5d1SRobin Murphy 
610737c85caSMitchel Humpherys 	if (attrs & DMA_ATTR_PRIVILEGED)
611737c85caSMitchel Humpherys 		prot |= IOMMU_PRIV;
612737c85caSMitchel Humpherys 
6130db2e5d1SRobin Murphy 	switch (dir) {
6140db2e5d1SRobin Murphy 	case DMA_BIDIRECTIONAL:
6150db2e5d1SRobin Murphy 		return prot | IOMMU_READ | IOMMU_WRITE;
6160db2e5d1SRobin Murphy 	case DMA_TO_DEVICE:
6170db2e5d1SRobin Murphy 		return prot | IOMMU_READ;
6180db2e5d1SRobin Murphy 	case DMA_FROM_DEVICE:
6190db2e5d1SRobin Murphy 		return prot | IOMMU_WRITE;
6200db2e5d1SRobin Murphy 	default:
6210db2e5d1SRobin Murphy 		return 0;
6220db2e5d1SRobin Murphy 	}
6230db2e5d1SRobin Murphy }
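
/*
 * For instance, a DMA_TO_DEVICE transfer on a cache-coherent master yields
 * IOMMU_READ | IOMMU_CACHE, and passing DMA_ATTR_PRIVILEGED in @attrs would
 * additionally set IOMMU_PRIV.
 */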
6240db2e5d1SRobin Murphy 
625842fe519SRobin Murphy static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
626bd036d2fSRobin Murphy 		size_t size, u64 dma_limit, struct device *dev)
6270db2e5d1SRobin Murphy {
628a44e6657SRobin Murphy 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
629a44e6657SRobin Murphy 	struct iova_domain *iovad = &cookie->iovad;
630bb65a64cSRobin Murphy 	unsigned long shift, iova_len, iova = 0;
6310db2e5d1SRobin Murphy 
632a44e6657SRobin Murphy 	if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
633a44e6657SRobin Murphy 		cookie->msi_iova += size;
634a44e6657SRobin Murphy 		return cookie->msi_iova - size;
635a44e6657SRobin Murphy 	}
636a44e6657SRobin Murphy 
637a44e6657SRobin Murphy 	shift = iova_shift(iovad);
638a44e6657SRobin Murphy 	iova_len = size >> shift;
639a44e6657SRobin Murphy 
640a7ba70f1SNicolas Saenz Julienne 	dma_limit = min_not_zero(dma_limit, dev->bus_dma_limit);
64103bfdc31SRobin Murphy 
642c987ff0dSRobin Murphy 	if (domain->geometry.force_aperture)
643bd036d2fSRobin Murphy 		dma_limit = min(dma_limit, (u64)domain->geometry.aperture_end);
644122fac03SRobin Murphy 
645122fac03SRobin Murphy 	/* Try to get PCI devices a SAC address */
6463542dcb1SRobin Murphy 	if (dma_limit > DMA_BIT_MASK(32) && !iommu_dma_forcedac && dev_is_pci(dev))
647538d5b33STomasz Nowicki 		iova = alloc_iova_fast(iovad, iova_len,
648538d5b33STomasz Nowicki 				       DMA_BIT_MASK(32) >> shift, false);
649122fac03SRobin Murphy 
650bb65a64cSRobin Murphy 	if (!iova)
651538d5b33STomasz Nowicki 		iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
652538d5b33STomasz Nowicki 				       true);
653bb65a64cSRobin Murphy 
654bb65a64cSRobin Murphy 	return (dma_addr_t)iova << shift;
6550db2e5d1SRobin Murphy }
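
/*
 * Example of the allocation policy above: a PCI device with a 64-bit DMA mask
 * is first given a try at an IOVA below 4GB (a SAC address) unless
 * iommu.forcedac was set on the command line; only when that attempt fails,
 * or was skipped, is the full @dma_limit used.
 */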
6560db2e5d1SRobin Murphy 
657842fe519SRobin Murphy static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
658452e69b5SRobin Murphy 		dma_addr_t iova, size_t size, struct iommu_iotlb_gather *gather)
6590db2e5d1SRobin Murphy {
660842fe519SRobin Murphy 	struct iova_domain *iovad = &cookie->iovad;
6610db2e5d1SRobin Murphy 
662a44e6657SRobin Murphy 	/* The MSI case is only ever cleaning up its most recent allocation */
663bb65a64cSRobin Murphy 	if (cookie->type == IOMMU_DMA_MSI_COOKIE)
664a44e6657SRobin Murphy 		cookie->msi_iova -= size;
665452e69b5SRobin Murphy 	else if (gather && gather->queued)
666a17e3026SRobin Murphy 		queue_iova(cookie, iova_pfn(iovad, iova),
6672a2b8eaaSTom Murphy 				size >> iova_shift(iovad),
66887f60cc6SMatthew Wilcox (Oracle) 				&gather->freelist);
669bb65a64cSRobin Murphy 	else
6701cc896edSRobin Murphy 		free_iova_fast(iovad, iova_pfn(iovad, iova),
6711cc896edSRobin Murphy 				size >> iova_shift(iovad));
672842fe519SRobin Murphy }
673842fe519SRobin Murphy 
674b61d271eSRobin Murphy static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
675842fe519SRobin Murphy 		size_t size)
676842fe519SRobin Murphy {
677b61d271eSRobin Murphy 	struct iommu_domain *domain = iommu_get_dma_domain(dev);
678a44e6657SRobin Murphy 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
679a44e6657SRobin Murphy 	struct iova_domain *iovad = &cookie->iovad;
680842fe519SRobin Murphy 	size_t iova_off = iova_offset(iovad, dma_addr);
681a7d20dc1SWill Deacon 	struct iommu_iotlb_gather iotlb_gather;
682a7d20dc1SWill Deacon 	size_t unmapped;
683842fe519SRobin Murphy 
684842fe519SRobin Murphy 	dma_addr -= iova_off;
685842fe519SRobin Murphy 	size = iova_align(iovad, size + iova_off);
686a7d20dc1SWill Deacon 	iommu_iotlb_gather_init(&iotlb_gather);
687452e69b5SRobin Murphy 	iotlb_gather.queued = READ_ONCE(cookie->fq_domain);
688842fe519SRobin Murphy 
689a7d20dc1SWill Deacon 	unmapped = iommu_unmap_fast(domain, dma_addr, size, &iotlb_gather);
690a7d20dc1SWill Deacon 	WARN_ON(unmapped != size);
691a7d20dc1SWill Deacon 
692452e69b5SRobin Murphy 	if (!iotlb_gather.queued)
693aae4c8e2STom Murphy 		iommu_iotlb_sync(domain, &iotlb_gather);
694452e69b5SRobin Murphy 	iommu_dma_free_iova(cookie, dma_addr, size, &iotlb_gather);
6950db2e5d1SRobin Murphy }
6960db2e5d1SRobin Murphy 
69792aec09cSChristoph Hellwig static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
698bd036d2fSRobin Murphy 		size_t size, int prot, u64 dma_mask)
69992aec09cSChristoph Hellwig {
700b61d271eSRobin Murphy 	struct iommu_domain *domain = iommu_get_dma_domain(dev);
70192aec09cSChristoph Hellwig 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
7028af23fadSRobin Murphy 	struct iova_domain *iovad = &cookie->iovad;
7038af23fadSRobin Murphy 	size_t iova_off = iova_offset(iovad, phys);
70492aec09cSChristoph Hellwig 	dma_addr_t iova;
70592aec09cSChristoph Hellwig 
706a8e8af35SLianbo Jiang 	if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
7073ab65729SLianbo Jiang 	    iommu_deferred_attach(dev, domain))
708795bbbb9STom Murphy 		return DMA_MAPPING_ERROR;
709795bbbb9STom Murphy 
7108af23fadSRobin Murphy 	size = iova_align(iovad, size + iova_off);
71192aec09cSChristoph Hellwig 
7126e235020STom Murphy 	iova = iommu_dma_alloc_iova(domain, size, dma_mask, dev);
71392aec09cSChristoph Hellwig 	if (!iova)
71492aec09cSChristoph Hellwig 		return DMA_MAPPING_ERROR;
71592aec09cSChristoph Hellwig 
716*4dc6376aSJason Gunthorpe 	if (iommu_map(domain, iova, phys - iova_off, size, prot, GFP_ATOMIC)) {
7172a2b8eaaSTom Murphy 		iommu_dma_free_iova(cookie, iova, size, NULL);
71892aec09cSChristoph Hellwig 		return DMA_MAPPING_ERROR;
71992aec09cSChristoph Hellwig 	}
72092aec09cSChristoph Hellwig 	return iova + iova_off;
72192aec09cSChristoph Hellwig }
72292aec09cSChristoph Hellwig 
7230db2e5d1SRobin Murphy static void __iommu_dma_free_pages(struct page **pages, int count)
7240db2e5d1SRobin Murphy {
7250db2e5d1SRobin Murphy 	while (count--)
7260db2e5d1SRobin Murphy 		__free_page(pages[count]);
7270db2e5d1SRobin Murphy 	kvfree(pages);
7280db2e5d1SRobin Murphy }
7290db2e5d1SRobin Murphy 
730c4b17afbSGanapatrao Kulkarni static struct page **__iommu_dma_alloc_pages(struct device *dev,
731c4b17afbSGanapatrao Kulkarni 		unsigned int count, unsigned long order_mask, gfp_t gfp)
7320db2e5d1SRobin Murphy {
7330db2e5d1SRobin Murphy 	struct page **pages;
734c4b17afbSGanapatrao Kulkarni 	unsigned int i = 0, nid = dev_to_node(dev);
7353b6b7e19SRobin Murphy 
7363b6b7e19SRobin Murphy 	order_mask &= (2U << MAX_ORDER) - 1;
7373b6b7e19SRobin Murphy 	if (!order_mask)
7383b6b7e19SRobin Murphy 		return NULL;
7390db2e5d1SRobin Murphy 
740ab6f4b00SGustavo A. R. Silva 	pages = kvcalloc(count, sizeof(*pages), GFP_KERNEL);
7410db2e5d1SRobin Murphy 	if (!pages)
7420db2e5d1SRobin Murphy 		return NULL;
7430db2e5d1SRobin Murphy 
7440db2e5d1SRobin Murphy 	/* IOMMU can map any pages, so highmem can also be used here */
7450db2e5d1SRobin Murphy 	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;
7460db2e5d1SRobin Murphy 
7470db2e5d1SRobin Murphy 	while (count) {
7480db2e5d1SRobin Murphy 		struct page *page = NULL;
7493b6b7e19SRobin Murphy 		unsigned int order_size;
7500db2e5d1SRobin Murphy 
7510db2e5d1SRobin Murphy 		/*
7520db2e5d1SRobin Murphy 		 * Higher-order allocations are a convenience rather
7530db2e5d1SRobin Murphy 		 * than a necessity, hence using __GFP_NORETRY until
7543b6b7e19SRobin Murphy 		 * falling back to minimum-order allocations.
7550db2e5d1SRobin Murphy 		 */
7563b6b7e19SRobin Murphy 		for (order_mask &= (2U << __fls(count)) - 1;
7573b6b7e19SRobin Murphy 		     order_mask; order_mask &= ~order_size) {
7583b6b7e19SRobin Murphy 			unsigned int order = __fls(order_mask);
759c4b17afbSGanapatrao Kulkarni 			gfp_t alloc_flags = gfp;
7603b6b7e19SRobin Murphy 
7613b6b7e19SRobin Murphy 			order_size = 1U << order;
762c4b17afbSGanapatrao Kulkarni 			if (order_mask > order_size)
763c4b17afbSGanapatrao Kulkarni 				alloc_flags |= __GFP_NORETRY;
764c4b17afbSGanapatrao Kulkarni 			page = alloc_pages_node(nid, alloc_flags, order);
7650db2e5d1SRobin Murphy 			if (!page)
7660db2e5d1SRobin Murphy 				continue;
7674604393cSRobin Murphy 			if (order)
7680db2e5d1SRobin Murphy 				split_page(page, order);
7690db2e5d1SRobin Murphy 			break;
7700db2e5d1SRobin Murphy 		}
7710db2e5d1SRobin Murphy 		if (!page) {
7720db2e5d1SRobin Murphy 			__iommu_dma_free_pages(pages, i);
7730db2e5d1SRobin Murphy 			return NULL;
7740db2e5d1SRobin Murphy 		}
7753b6b7e19SRobin Murphy 		count -= order_size;
7763b6b7e19SRobin Murphy 		while (order_size--)
7770db2e5d1SRobin Murphy 			pages[i++] = page++;
7780db2e5d1SRobin Murphy 	}
7790db2e5d1SRobin Murphy 	return pages;
7800db2e5d1SRobin Murphy }
7810db2e5d1SRobin Murphy 
7828230ce9aSChristoph Hellwig /*
7838230ce9aSChristoph Hellwig  * If size is less than PAGE_SIZE, then a full CPU page will be allocated,
7840db2e5d1SRobin Murphy  * but an IOMMU which supports smaller pages might not map the whole thing.
7850db2e5d1SRobin Murphy  */
7868230ce9aSChristoph Hellwig static struct page **__iommu_dma_alloc_noncontiguous(struct device *dev,
7878230ce9aSChristoph Hellwig 		size_t size, struct sg_table *sgt, gfp_t gfp, pgprot_t prot,
788e8d39a90SChristoph Hellwig 		unsigned long attrs)
7890db2e5d1SRobin Murphy {
79043c5bf11SRobin Murphy 	struct iommu_domain *domain = iommu_get_dma_domain(dev);
791842fe519SRobin Murphy 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
792842fe519SRobin Murphy 	struct iova_domain *iovad = &cookie->iovad;
79321b95aafSChristoph Hellwig 	bool coherent = dev_is_dma_coherent(dev);
79421b95aafSChristoph Hellwig 	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
79521b95aafSChristoph Hellwig 	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
7960db2e5d1SRobin Murphy 	struct page **pages;
797842fe519SRobin Murphy 	dma_addr_t iova;
798a3884774SYunfei Wang 	ssize_t ret;
7990db2e5d1SRobin Murphy 
800a8e8af35SLianbo Jiang 	if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
8013ab65729SLianbo Jiang 	    iommu_deferred_attach(dev, domain))
802795bbbb9STom Murphy 		return NULL;
803795bbbb9STom Murphy 
8043b6b7e19SRobin Murphy 	min_size = alloc_sizes & -alloc_sizes;
8053b6b7e19SRobin Murphy 	if (min_size < PAGE_SIZE) {
8063b6b7e19SRobin Murphy 		min_size = PAGE_SIZE;
8073b6b7e19SRobin Murphy 		alloc_sizes |= PAGE_SIZE;
8083b6b7e19SRobin Murphy 	} else {
8093b6b7e19SRobin Murphy 		size = ALIGN(size, min_size);
8103b6b7e19SRobin Murphy 	}
81100085f1eSKrzysztof Kozlowski 	if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
8123b6b7e19SRobin Murphy 		alloc_sizes = min_size;
8133b6b7e19SRobin Murphy 
8143b6b7e19SRobin Murphy 	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
815c4b17afbSGanapatrao Kulkarni 	pages = __iommu_dma_alloc_pages(dev, count, alloc_sizes >> PAGE_SHIFT,
816c4b17afbSGanapatrao Kulkarni 					gfp);
8170db2e5d1SRobin Murphy 	if (!pages)
8180db2e5d1SRobin Murphy 		return NULL;
8190db2e5d1SRobin Murphy 
820842fe519SRobin Murphy 	size = iova_align(iovad, size);
821842fe519SRobin Murphy 	iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
8220db2e5d1SRobin Murphy 	if (!iova)
8230db2e5d1SRobin Murphy 		goto out_free_pages;
8240db2e5d1SRobin Murphy 
8258230ce9aSChristoph Hellwig 	if (sg_alloc_table_from_pages(sgt, pages, count, 0, size, GFP_KERNEL))
8260db2e5d1SRobin Murphy 		goto out_free_iova;
8270db2e5d1SRobin Murphy 
82821b95aafSChristoph Hellwig 	if (!(ioprot & IOMMU_CACHE)) {
82923f88e0aSChristoph Hellwig 		struct scatterlist *sg;
83023f88e0aSChristoph Hellwig 		int i;
83123f88e0aSChristoph Hellwig 
8328230ce9aSChristoph Hellwig 		for_each_sg(sgt->sgl, sg, sgt->orig_nents, i)
83323f88e0aSChristoph Hellwig 			arch_dma_prep_coherent(sg_page(sg), sg->length);
8340db2e5d1SRobin Murphy 	}
8350db2e5d1SRobin Murphy 
836a3884774SYunfei Wang 	ret = iommu_map_sg_atomic(domain, iova, sgt->sgl, sgt->orig_nents, ioprot);
837a3884774SYunfei Wang 	if (ret < 0 || ret < size)
8380db2e5d1SRobin Murphy 		goto out_free_sg;
8390db2e5d1SRobin Murphy 
8408230ce9aSChristoph Hellwig 	sgt->sgl->dma_address = iova;
841e817ee5fSChristoph Hellwig 	sgt->sgl->dma_length = size;
8428230ce9aSChristoph Hellwig 	return pages;
8430db2e5d1SRobin Murphy 
8440db2e5d1SRobin Murphy out_free_sg:
8458230ce9aSChristoph Hellwig 	sg_free_table(sgt);
8460db2e5d1SRobin Murphy out_free_iova:
8472a2b8eaaSTom Murphy 	iommu_dma_free_iova(cookie, iova, size, NULL);
8480db2e5d1SRobin Murphy out_free_pages:
8490db2e5d1SRobin Murphy 	__iommu_dma_free_pages(pages, count);
8500db2e5d1SRobin Murphy 	return NULL;
8510db2e5d1SRobin Murphy }
8520db2e5d1SRobin Murphy 
8538230ce9aSChristoph Hellwig static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
8548230ce9aSChristoph Hellwig 		dma_addr_t *dma_handle, gfp_t gfp, pgprot_t prot,
8558230ce9aSChristoph Hellwig 		unsigned long attrs)
8568230ce9aSChristoph Hellwig {
8578230ce9aSChristoph Hellwig 	struct page **pages;
8588230ce9aSChristoph Hellwig 	struct sg_table sgt;
8598230ce9aSChristoph Hellwig 	void *vaddr;
8608230ce9aSChristoph Hellwig 
8618230ce9aSChristoph Hellwig 	pages = __iommu_dma_alloc_noncontiguous(dev, size, &sgt, gfp, prot,
8628230ce9aSChristoph Hellwig 						attrs);
8638230ce9aSChristoph Hellwig 	if (!pages)
8648230ce9aSChristoph Hellwig 		return NULL;
8658230ce9aSChristoph Hellwig 	*dma_handle = sgt.sgl->dma_address;
8668230ce9aSChristoph Hellwig 	sg_free_table(&sgt);
8678230ce9aSChristoph Hellwig 	vaddr = dma_common_pages_remap(pages, size, prot,
8688230ce9aSChristoph Hellwig 			__builtin_return_address(0));
8698230ce9aSChristoph Hellwig 	if (!vaddr)
8708230ce9aSChristoph Hellwig 		goto out_unmap;
8718230ce9aSChristoph Hellwig 	return vaddr;
8728230ce9aSChristoph Hellwig 
8738230ce9aSChristoph Hellwig out_unmap:
8748230ce9aSChristoph Hellwig 	__iommu_dma_unmap(dev, *dma_handle, size);
8758230ce9aSChristoph Hellwig 	__iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
8768230ce9aSChristoph Hellwig 	return NULL;
8778230ce9aSChristoph Hellwig }
8788230ce9aSChristoph Hellwig 
879e817ee5fSChristoph Hellwig static struct sg_table *iommu_dma_alloc_noncontiguous(struct device *dev,
880e817ee5fSChristoph Hellwig 		size_t size, enum dma_data_direction dir, gfp_t gfp,
881e817ee5fSChristoph Hellwig 		unsigned long attrs)
882e817ee5fSChristoph Hellwig {
883e817ee5fSChristoph Hellwig 	struct dma_sgt_handle *sh;
884e817ee5fSChristoph Hellwig 
885e817ee5fSChristoph Hellwig 	sh = kmalloc(sizeof(*sh), gfp);
886e817ee5fSChristoph Hellwig 	if (!sh)
887e817ee5fSChristoph Hellwig 		return NULL;
888e817ee5fSChristoph Hellwig 
889e817ee5fSChristoph Hellwig 	sh->pages = __iommu_dma_alloc_noncontiguous(dev, size, &sh->sgt, gfp,
890e817ee5fSChristoph Hellwig 						    PAGE_KERNEL, attrs);
891e817ee5fSChristoph Hellwig 	if (!sh->pages) {
892e817ee5fSChristoph Hellwig 		kfree(sh);
893e817ee5fSChristoph Hellwig 		return NULL;
894e817ee5fSChristoph Hellwig 	}
895e817ee5fSChristoph Hellwig 	return &sh->sgt;
896e817ee5fSChristoph Hellwig }
897e817ee5fSChristoph Hellwig 
898e817ee5fSChristoph Hellwig static void iommu_dma_free_noncontiguous(struct device *dev, size_t size,
899e817ee5fSChristoph Hellwig 		struct sg_table *sgt, enum dma_data_direction dir)
900e817ee5fSChristoph Hellwig {
901e817ee5fSChristoph Hellwig 	struct dma_sgt_handle *sh = sgt_handle(sgt);
902e817ee5fSChristoph Hellwig 
903e817ee5fSChristoph Hellwig 	__iommu_dma_unmap(dev, sgt->sgl->dma_address, size);
904e817ee5fSChristoph Hellwig 	__iommu_dma_free_pages(sh->pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
905e817ee5fSChristoph Hellwig 	sg_free_table(&sh->sgt);
9060fbea680SEzequiel Garcia 	kfree(sh);
907e817ee5fSChristoph Hellwig }
908e817ee5fSChristoph Hellwig 
90906d60728SChristoph Hellwig static void iommu_dma_sync_single_for_cpu(struct device *dev,
91006d60728SChristoph Hellwig 		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
9110db2e5d1SRobin Murphy {
91206d60728SChristoph Hellwig 	phys_addr_t phys;
9130db2e5d1SRobin Murphy 
9142e727bffSDavid Stevens 	if (dev_is_dma_coherent(dev) && !dev_use_swiotlb(dev))
91506d60728SChristoph Hellwig 		return;
91606d60728SChristoph Hellwig 
91706d60728SChristoph Hellwig 	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
91882612d66STom Murphy 	if (!dev_is_dma_coherent(dev))
91956e35f9cSChristoph Hellwig 		arch_sync_dma_for_cpu(phys, size, dir);
92082612d66STom Murphy 
9217fd856aaSClaire Chang 	if (is_swiotlb_buffer(dev, phys))
92280808d27SChristoph Hellwig 		swiotlb_sync_single_for_cpu(dev, phys, size, dir);
9231cc896edSRobin Murphy }
9241cc896edSRobin Murphy 
92506d60728SChristoph Hellwig static void iommu_dma_sync_single_for_device(struct device *dev,
92606d60728SChristoph Hellwig 		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
92751f8cc9eSRobin Murphy {
92806d60728SChristoph Hellwig 	phys_addr_t phys;
92906d60728SChristoph Hellwig 
9302e727bffSDavid Stevens 	if (dev_is_dma_coherent(dev) && !dev_use_swiotlb(dev))
93106d60728SChristoph Hellwig 		return;
93206d60728SChristoph Hellwig 
93306d60728SChristoph Hellwig 	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
9347fd856aaSClaire Chang 	if (is_swiotlb_buffer(dev, phys))
93580808d27SChristoph Hellwig 		swiotlb_sync_single_for_device(dev, phys, size, dir);
93682612d66STom Murphy 
93782612d66STom Murphy 	if (!dev_is_dma_coherent(dev))
93856e35f9cSChristoph Hellwig 		arch_sync_dma_for_device(phys, size, dir);
93951f8cc9eSRobin Murphy }
94051f8cc9eSRobin Murphy 
94106d60728SChristoph Hellwig static void iommu_dma_sync_sg_for_cpu(struct device *dev,
94206d60728SChristoph Hellwig 		struct scatterlist *sgl, int nelems,
94306d60728SChristoph Hellwig 		enum dma_data_direction dir)
9440db2e5d1SRobin Murphy {
94506d60728SChristoph Hellwig 	struct scatterlist *sg;
94606d60728SChristoph Hellwig 	int i;
94706d60728SChristoph Hellwig 
9482e727bffSDavid Stevens 	if (dev_use_swiotlb(dev))
94908ae5d4aSDavid Stevens 		for_each_sg(sgl, sg, nelems, i)
95008ae5d4aSDavid Stevens 			iommu_dma_sync_single_for_cpu(dev, sg_dma_address(sg),
95180808d27SChristoph Hellwig 						      sg->length, dir);
95208ae5d4aSDavid Stevens 	else if (!dev_is_dma_coherent(dev))
95308ae5d4aSDavid Stevens 		for_each_sg(sgl, sg, nelems, i)
95408ae5d4aSDavid Stevens 			arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
95506d60728SChristoph Hellwig }
95606d60728SChristoph Hellwig 
95706d60728SChristoph Hellwig static void iommu_dma_sync_sg_for_device(struct device *dev,
95806d60728SChristoph Hellwig 		struct scatterlist *sgl, int nelems,
95906d60728SChristoph Hellwig 		enum dma_data_direction dir)
96006d60728SChristoph Hellwig {
96106d60728SChristoph Hellwig 	struct scatterlist *sg;
96206d60728SChristoph Hellwig 	int i;
96306d60728SChristoph Hellwig 
9642e727bffSDavid Stevens 	if (dev_use_swiotlb(dev))
96508ae5d4aSDavid Stevens 		for_each_sg(sgl, sg, nelems, i)
96608ae5d4aSDavid Stevens 			iommu_dma_sync_single_for_device(dev,
96708ae5d4aSDavid Stevens 							 sg_dma_address(sg),
96880808d27SChristoph Hellwig 							 sg->length, dir);
96908ae5d4aSDavid Stevens 	else if (!dev_is_dma_coherent(dev))
97008ae5d4aSDavid Stevens 		for_each_sg(sgl, sg, nelems, i)
97156e35f9cSChristoph Hellwig 			arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
97206d60728SChristoph Hellwig }
97306d60728SChristoph Hellwig 
97406d60728SChristoph Hellwig static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
97506d60728SChristoph Hellwig 		unsigned long offset, size_t size, enum dma_data_direction dir,
97606d60728SChristoph Hellwig 		unsigned long attrs)
97706d60728SChristoph Hellwig {
97806d60728SChristoph Hellwig 	phys_addr_t phys = page_to_phys(page) + offset;
97906d60728SChristoph Hellwig 	bool coherent = dev_is_dma_coherent(dev);
9809b49bbc2SDavid Stevens 	int prot = dma_info_to_prot(dir, coherent, attrs);
9819b49bbc2SDavid Stevens 	struct iommu_domain *domain = iommu_get_dma_domain(dev);
9829b49bbc2SDavid Stevens 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
9839b49bbc2SDavid Stevens 	struct iova_domain *iovad = &cookie->iovad;
9849b49bbc2SDavid Stevens 	dma_addr_t iova, dma_mask = dma_get_mask(dev);
98506d60728SChristoph Hellwig 
9869b49bbc2SDavid Stevens 	/*
9879b49bbc2SDavid Stevens 	 * If both the physical buffer start address and size are
9889b49bbc2SDavid Stevens 	 * aligned to the IOVA granule, we don't need to use a bounce page.
9899b49bbc2SDavid Stevens 	 */
9902e727bffSDavid Stevens 	if (dev_use_swiotlb(dev) && iova_offset(iovad, phys | size)) {
9919b49bbc2SDavid Stevens 		void *padding_start;
9922cbc61a1SDavid Stevens 		size_t padding_size, aligned_size;
9939b49bbc2SDavid Stevens 
994f316ba0aSMario Limonciello 		if (!is_swiotlb_active(dev)) {
995f316ba0aSMario Limonciello 			dev_warn_once(dev, "DMA bounce buffers are inactive, unable to map unaligned transaction.\n");
996f316ba0aSMario Limonciello 			return DMA_MAPPING_ERROR;
997f316ba0aSMario Limonciello 		}
998f316ba0aSMario Limonciello 
9999b49bbc2SDavid Stevens 		aligned_size = iova_align(iovad, size);
1000e81e99baSDavid Stevens 		phys = swiotlb_tbl_map_single(dev, phys, size, aligned_size,
1001e81e99baSDavid Stevens 					      iova_mask(iovad), dir, attrs);
10029b49bbc2SDavid Stevens 
10039b49bbc2SDavid Stevens 		if (phys == DMA_MAPPING_ERROR)
10049b49bbc2SDavid Stevens 			return DMA_MAPPING_ERROR;
10059b49bbc2SDavid Stevens 
10069b49bbc2SDavid Stevens 		/* Cleanup the padding area. */
10079b49bbc2SDavid Stevens 		padding_start = phys_to_virt(phys);
10089b49bbc2SDavid Stevens 		padding_size = aligned_size;
10099b49bbc2SDavid Stevens 
10109b49bbc2SDavid Stevens 		if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
10119b49bbc2SDavid Stevens 		    (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)) {
10129b49bbc2SDavid Stevens 			padding_start += size;
10139b49bbc2SDavid Stevens 			padding_size -= size;
10149b49bbc2SDavid Stevens 		}
10159b49bbc2SDavid Stevens 
10169b49bbc2SDavid Stevens 		memset(padding_start, 0, padding_size);
10179b49bbc2SDavid Stevens 	}
10189b49bbc2SDavid Stevens 
10199b49bbc2SDavid Stevens 	if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
102056e35f9cSChristoph Hellwig 		arch_sync_dma_for_device(phys, size, dir);
10219b49bbc2SDavid Stevens 
10222cbc61a1SDavid Stevens 	iova = __iommu_dma_map(dev, phys, size, prot, dma_mask);
10239b49bbc2SDavid Stevens 	if (iova == DMA_MAPPING_ERROR && is_swiotlb_buffer(dev, phys))
10249b49bbc2SDavid Stevens 		swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
10259b49bbc2SDavid Stevens 	return iova;
102606d60728SChristoph Hellwig }
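
/*
 * Concrete case for the bounce path above (illustrative): an untrusted PCI
 * device mapping a 512-byte buffer that starts mid-page makes
 * iova_offset(iovad, phys | size) non-zero, so the data is bounced into a
 * granule-aligned swiotlb slot, the padding around it is zeroed, and the
 * IOMMU mapping covers only the bounce copy rather than the original buffer.
 */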
102706d60728SChristoph Hellwig 
102806d60728SChristoph Hellwig static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
102906d60728SChristoph Hellwig 		size_t size, enum dma_data_direction dir, unsigned long attrs)
103006d60728SChristoph Hellwig {
10319b49bbc2SDavid Stevens 	struct iommu_domain *domain = iommu_get_dma_domain(dev);
10329b49bbc2SDavid Stevens 	phys_addr_t phys;
10339b49bbc2SDavid Stevens 
10349b49bbc2SDavid Stevens 	phys = iommu_iova_to_phys(domain, dma_handle);
10359b49bbc2SDavid Stevens 	if (WARN_ON(!phys))
10369b49bbc2SDavid Stevens 		return;
10379b49bbc2SDavid Stevens 
10389b49bbc2SDavid Stevens 	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && !dev_is_dma_coherent(dev))
10399b49bbc2SDavid Stevens 		arch_sync_dma_for_cpu(phys, size, dir);
10409b49bbc2SDavid Stevens 
10419b49bbc2SDavid Stevens 	__iommu_dma_unmap(dev, dma_handle, size);
10429b49bbc2SDavid Stevens 
10439b49bbc2SDavid Stevens 	if (unlikely(is_swiotlb_buffer(dev, phys)))
10449b49bbc2SDavid Stevens 		swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
10450db2e5d1SRobin Murphy }
10460db2e5d1SRobin Murphy 
10470db2e5d1SRobin Murphy /*
10480db2e5d1SRobin Murphy  * Prepare a successfully-mapped scatterlist to give back to the caller.
1049809eac54SRobin Murphy  *
1050809eac54SRobin Murphy  * At this point the segments are already laid out by iommu_dma_map_sg() to
1051809eac54SRobin Murphy  * avoid individually crossing any boundaries, so we merely need to check a
1052809eac54SRobin Murphy  * segment's start address to avoid concatenating across one.
10530db2e5d1SRobin Murphy  */
10540db2e5d1SRobin Murphy static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
10550db2e5d1SRobin Murphy 		dma_addr_t dma_addr)
10560db2e5d1SRobin Murphy {
1057809eac54SRobin Murphy 	struct scatterlist *s, *cur = sg;
1058809eac54SRobin Murphy 	unsigned long seg_mask = dma_get_seg_boundary(dev);
1059809eac54SRobin Murphy 	unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
1060809eac54SRobin Murphy 	int i, count = 0;
10610db2e5d1SRobin Murphy 
10620db2e5d1SRobin Murphy 	for_each_sg(sg, s, nents, i) {
1063809eac54SRobin Murphy 		/* Restore this segment's original unaligned fields first */
106430280eeeSLogan Gunthorpe 		dma_addr_t s_dma_addr = sg_dma_address(s);
1065809eac54SRobin Murphy 		unsigned int s_iova_off = sg_dma_address(s);
10660db2e5d1SRobin Murphy 		unsigned int s_length = sg_dma_len(s);
1067809eac54SRobin Murphy 		unsigned int s_iova_len = s->length;
10680db2e5d1SRobin Murphy 
1069cad34be7SChristoph Hellwig 		sg_dma_address(s) = DMA_MAPPING_ERROR;
1070809eac54SRobin Murphy 		sg_dma_len(s) = 0;
1071809eac54SRobin Murphy 
107230280eeeSLogan Gunthorpe 		if (sg_is_dma_bus_address(s)) {
107330280eeeSLogan Gunthorpe 			if (i > 0)
107430280eeeSLogan Gunthorpe 				cur = sg_next(cur);
107530280eeeSLogan Gunthorpe 
107630280eeeSLogan Gunthorpe 			sg_dma_unmark_bus_address(s);
107730280eeeSLogan Gunthorpe 			sg_dma_address(cur) = s_dma_addr;
107830280eeeSLogan Gunthorpe 			sg_dma_len(cur) = s_length;
107930280eeeSLogan Gunthorpe 			sg_dma_mark_bus_address(cur);
108030280eeeSLogan Gunthorpe 			count++;
108130280eeeSLogan Gunthorpe 			cur_len = 0;
108230280eeeSLogan Gunthorpe 			continue;
108330280eeeSLogan Gunthorpe 		}
108430280eeeSLogan Gunthorpe 
108530280eeeSLogan Gunthorpe 		s->offset += s_iova_off;
108630280eeeSLogan Gunthorpe 		s->length = s_length;
108730280eeeSLogan Gunthorpe 
1088809eac54SRobin Murphy 		/*
1089809eac54SRobin Murphy 		 * Now fill in the real DMA data. If...
1090809eac54SRobin Murphy 		 * - there is a valid output segment to append to
1091809eac54SRobin Murphy 		 * - and this segment starts on an IOVA page boundary
1092809eac54SRobin Murphy 		 * - but doesn't fall at a segment boundary
1093809eac54SRobin Murphy 		 * - and wouldn't make the resulting output segment too long
1094809eac54SRobin Murphy 		 */
1095809eac54SRobin Murphy 		if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
1096ab2cbeb0SRobin Murphy 		    (max_len - cur_len >= s_length)) {
1097809eac54SRobin Murphy 			/* ...then concatenate it with the previous one */
1098809eac54SRobin Murphy 			cur_len += s_length;
1099809eac54SRobin Murphy 		} else {
1100809eac54SRobin Murphy 			/* Otherwise start the next output segment */
1101809eac54SRobin Murphy 			if (i > 0)
1102809eac54SRobin Murphy 				cur = sg_next(cur);
1103809eac54SRobin Murphy 			cur_len = s_length;
1104809eac54SRobin Murphy 			count++;
1105809eac54SRobin Murphy 
1106809eac54SRobin Murphy 			sg_dma_address(cur) = dma_addr + s_iova_off;
11070db2e5d1SRobin Murphy 		}
1108809eac54SRobin Murphy 
1109809eac54SRobin Murphy 		sg_dma_len(cur) = cur_len;
1110809eac54SRobin Murphy 		dma_addr += s_iova_len;
1111809eac54SRobin Murphy 
1112809eac54SRobin Murphy 		if (s_length + s_iova_off < s_iova_len)
1113809eac54SRobin Murphy 			cur_len = 0;
1114809eac54SRobin Murphy 	}
1115809eac54SRobin Murphy 	return count;
11160db2e5d1SRobin Murphy }
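/*
 * Worked example with hypothetical numbers: assume a 4K IOVA granule and a
 * segment that was originally {offset = 0x1200, length = 0x800}. The map
 * path below padded it to {offset = 0x1000, length = 0x1000} and stashed
 * s_iova_off = 0x200 in its DMA address field. __finalise_sg() restores the
 * original offset/length, reports sg_dma_address = dma_addr + 0x200, and
 * advances dma_addr by the aligned 0x1000.
 */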
11170db2e5d1SRobin Murphy 
11180db2e5d1SRobin Murphy /*
11190db2e5d1SRobin Murphy  * If mapping failed, then just restore the original list,
11200db2e5d1SRobin Murphy  * while making sure the DMA fields are invalidated.
11210db2e5d1SRobin Murphy  */
11220db2e5d1SRobin Murphy static void __invalidate_sg(struct scatterlist *sg, int nents)
11230db2e5d1SRobin Murphy {
11240db2e5d1SRobin Murphy 	struct scatterlist *s;
11250db2e5d1SRobin Murphy 	int i;
11260db2e5d1SRobin Murphy 
11270db2e5d1SRobin Murphy 	for_each_sg(sg, s, nents, i) {
112830280eeeSLogan Gunthorpe 		if (sg_is_dma_bus_address(s)) {
112930280eeeSLogan Gunthorpe 			sg_dma_unmark_bus_address(s);
113030280eeeSLogan Gunthorpe 		} else {
1131cad34be7SChristoph Hellwig 			if (sg_dma_address(s) != DMA_MAPPING_ERROR)
113207b48ac4SRobin Murphy 				s->offset += sg_dma_address(s);
11330db2e5d1SRobin Murphy 			if (sg_dma_len(s))
11340db2e5d1SRobin Murphy 				s->length = sg_dma_len(s);
113530280eeeSLogan Gunthorpe 		}
1136cad34be7SChristoph Hellwig 		sg_dma_address(s) = DMA_MAPPING_ERROR;
11370db2e5d1SRobin Murphy 		sg_dma_len(s) = 0;
11380db2e5d1SRobin Murphy 	}
11390db2e5d1SRobin Murphy }
11400db2e5d1SRobin Murphy 
114182612d66STom Murphy static void iommu_dma_unmap_sg_swiotlb(struct device *dev, struct scatterlist *sg,
114282612d66STom Murphy 		int nents, enum dma_data_direction dir, unsigned long attrs)
114382612d66STom Murphy {
114482612d66STom Murphy 	struct scatterlist *s;
114582612d66STom Murphy 	int i;
114682612d66STom Murphy 
114782612d66STom Murphy 	for_each_sg(sg, s, nents, i)
11489b49bbc2SDavid Stevens 		iommu_dma_unmap_page(dev, sg_dma_address(s),
114982612d66STom Murphy 				sg_dma_len(s), dir, attrs);
115082612d66STom Murphy }
115182612d66STom Murphy 
115282612d66STom Murphy static int iommu_dma_map_sg_swiotlb(struct device *dev, struct scatterlist *sg,
115382612d66STom Murphy 		int nents, enum dma_data_direction dir, unsigned long attrs)
115482612d66STom Murphy {
115582612d66STom Murphy 	struct scatterlist *s;
115682612d66STom Murphy 	int i;
115782612d66STom Murphy 
115882612d66STom Murphy 	for_each_sg(sg, s, nents, i) {
11599b49bbc2SDavid Stevens 		sg_dma_address(s) = iommu_dma_map_page(dev, sg_page(s),
11609b49bbc2SDavid Stevens 				s->offset, s->length, dir, attrs);
116182612d66STom Murphy 		if (sg_dma_address(s) == DMA_MAPPING_ERROR)
116282612d66STom Murphy 			goto out_unmap;
116382612d66STom Murphy 		sg_dma_len(s) = s->length;
116482612d66STom Murphy 	}
116582612d66STom Murphy 
116682612d66STom Murphy 	return nents;
116782612d66STom Murphy 
116882612d66STom Murphy out_unmap:
116982612d66STom Murphy 	iommu_dma_unmap_sg_swiotlb(dev, sg, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
1170dabb16f6SLogan Gunthorpe 	return -EIO;
117182612d66STom Murphy }
117282612d66STom Murphy 
11730db2e5d1SRobin Murphy /*
11740db2e5d1SRobin Murphy  * The DMA API client is passing in a scatterlist which could describe
11750db2e5d1SRobin Murphy  * any old buffer layout, but the IOMMU API requires everything to be
11760db2e5d1SRobin Murphy  * aligned to IOMMU pages. Hence the need for this complicated bit of
11770db2e5d1SRobin Murphy  * impedance-matching, to be able to hand off a suitably-aligned list,
11780db2e5d1SRobin Murphy  * but still preserve the original offsets and sizes for the caller.
11790db2e5d1SRobin Murphy  */
118006d60728SChristoph Hellwig static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
118106d60728SChristoph Hellwig 		int nents, enum dma_data_direction dir, unsigned long attrs)
11820db2e5d1SRobin Murphy {
118343c5bf11SRobin Murphy 	struct iommu_domain *domain = iommu_get_dma_domain(dev);
1184842fe519SRobin Murphy 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
1185842fe519SRobin Murphy 	struct iova_domain *iovad = &cookie->iovad;
11860db2e5d1SRobin Murphy 	struct scatterlist *s, *prev = NULL;
118706d60728SChristoph Hellwig 	int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);
118830280eeeSLogan Gunthorpe 	struct pci_p2pdma_map_state p2pdma_state = {};
118930280eeeSLogan Gunthorpe 	enum pci_p2pdma_map_type map;
1190842fe519SRobin Murphy 	dma_addr_t iova;
11910db2e5d1SRobin Murphy 	size_t iova_len = 0;
1192809eac54SRobin Murphy 	unsigned long mask = dma_get_seg_boundary(dev);
1193dabb16f6SLogan Gunthorpe 	ssize_t ret;
11940db2e5d1SRobin Murphy 	int i;
11950db2e5d1SRobin Murphy 
1196dabb16f6SLogan Gunthorpe 	if (static_branch_unlikely(&iommu_deferred_attach_enabled)) {
1197dabb16f6SLogan Gunthorpe 		ret = iommu_deferred_attach(dev, domain);
1198ac315f96SLogan Gunthorpe 		if (ret)
1199dabb16f6SLogan Gunthorpe 			goto out;
1200dabb16f6SLogan Gunthorpe 	}
1201795bbbb9STom Murphy 
12022e727bffSDavid Stevens 	if (dev_use_swiotlb(dev))
120382612d66STom Murphy 		return iommu_dma_map_sg_swiotlb(dev, sg, nents, dir, attrs);
120482612d66STom Murphy 
12050db2e5d1SRobin Murphy 	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
12060db2e5d1SRobin Murphy 		iommu_dma_sync_sg_for_device(dev, sg, nents, dir);
12070db2e5d1SRobin Murphy 
12080db2e5d1SRobin Murphy 	/*
12090db2e5d1SRobin Murphy 	 * Work out how much IOVA space we need, and align the segments to
12100db2e5d1SRobin Murphy 	 * IOVA granules for the IOMMU driver to handle. With some clever
12110db2e5d1SRobin Murphy 	 * trickery we can modify the list in-place, but reversibly, by
1212809eac54SRobin Murphy 	 * stashing the unaligned parts in the as-yet-unused DMA fields.
12130db2e5d1SRobin Murphy 	 */
12140db2e5d1SRobin Murphy 	for_each_sg(sg, s, nents, i) {
1215809eac54SRobin Murphy 		size_t s_iova_off = iova_offset(iovad, s->offset);
12160db2e5d1SRobin Murphy 		size_t s_length = s->length;
1217809eac54SRobin Murphy 		size_t pad_len = (mask - iova_len + 1) & mask;
12180db2e5d1SRobin Murphy 
121930280eeeSLogan Gunthorpe 		if (is_pci_p2pdma_page(sg_page(s))) {
122030280eeeSLogan Gunthorpe 			map = pci_p2pdma_map_segment(&p2pdma_state, dev, s);
122130280eeeSLogan Gunthorpe 			switch (map) {
122230280eeeSLogan Gunthorpe 			case PCI_P2PDMA_MAP_BUS_ADDR:
122330280eeeSLogan Gunthorpe 				/*
122430280eeeSLogan Gunthorpe 				 * iommu_map_sg() will skip this segment as
122530280eeeSLogan Gunthorpe 				 * it is marked as a bus address;
122630280eeeSLogan Gunthorpe 				 * __finalise_sg() will copy the DMA address
122730280eeeSLogan Gunthorpe 				 * into the output segment.
122830280eeeSLogan Gunthorpe 				 */
122930280eeeSLogan Gunthorpe 				continue;
123030280eeeSLogan Gunthorpe 			case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
123130280eeeSLogan Gunthorpe 				/*
123230280eeeSLogan Gunthorpe 				 * Segments mapped through the host bridge
123330280eeeSLogan Gunthorpe 				 * use regular IOVAs, so do nothing here
123430280eeeSLogan Gunthorpe 				 * and continue below.
123530280eeeSLogan Gunthorpe 				 */
123630280eeeSLogan Gunthorpe 				break;
123730280eeeSLogan Gunthorpe 			default:
123830280eeeSLogan Gunthorpe 				ret = -EREMOTEIO;
123930280eeeSLogan Gunthorpe 				goto out_restore_sg;
124030280eeeSLogan Gunthorpe 			}
124130280eeeSLogan Gunthorpe 		}
124230280eeeSLogan Gunthorpe 
1243809eac54SRobin Murphy 		sg_dma_address(s) = s_iova_off;
12440db2e5d1SRobin Murphy 		sg_dma_len(s) = s_length;
1245809eac54SRobin Murphy 		s->offset -= s_iova_off;
1246809eac54SRobin Murphy 		s_length = iova_align(iovad, s_length + s_iova_off);
12470db2e5d1SRobin Murphy 		s->length = s_length;
12480db2e5d1SRobin Murphy 
12490db2e5d1SRobin Murphy 		/*
1250809eac54SRobin Murphy 		 * Due to the alignment of our single IOVA allocation, we can
1251809eac54SRobin Murphy 		 * depend on these assumptions about the segment boundary mask:
1252809eac54SRobin Murphy 		 * - If mask size >= IOVA size, then the IOVA range cannot
1253809eac54SRobin Murphy 		 *   possibly fall across a boundary, so we don't care.
1254809eac54SRobin Murphy 		 * - If mask size < IOVA size, then the IOVA range must start
1255809eac54SRobin Murphy 		 *   exactly on a boundary, therefore we can lay things out
1256809eac54SRobin Murphy 		 *   based purely on segment lengths without needing to know
1257809eac54SRobin Murphy 		 *   the actual addresses beforehand.
1258809eac54SRobin Murphy 		 * - The mask must be a power of 2, so pad_len == 0 if
1259809eac54SRobin Murphy 		 *   iova_len == 0, thus we cannot dereference prev the first
1260809eac54SRobin Murphy 		 *   time through here (i.e. before it has a meaningful value).
12610db2e5d1SRobin Murphy 		 */
1262809eac54SRobin Murphy 		if (pad_len && pad_len < s_length - 1) {
12630db2e5d1SRobin Murphy 			prev->length += pad_len;
12640db2e5d1SRobin Murphy 			iova_len += pad_len;
12650db2e5d1SRobin Murphy 		}
12660db2e5d1SRobin Murphy 
12670db2e5d1SRobin Murphy 		iova_len += s_length;
12680db2e5d1SRobin Murphy 		prev = s;
12690db2e5d1SRobin Murphy 	}
12700db2e5d1SRobin Murphy 
127130280eeeSLogan Gunthorpe 	if (!iova_len)
127230280eeeSLogan Gunthorpe 		return __finalise_sg(dev, sg, nents, 0);
127330280eeeSLogan Gunthorpe 
1274842fe519SRobin Murphy 	iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
1275dabb16f6SLogan Gunthorpe 	if (!iova) {
1276dabb16f6SLogan Gunthorpe 		ret = -ENOMEM;
12770db2e5d1SRobin Murphy 		goto out_restore_sg;
1278dabb16f6SLogan Gunthorpe 	}
12790db2e5d1SRobin Murphy 
12800db2e5d1SRobin Murphy 	/*
12810db2e5d1SRobin Murphy 	 * We'll leave any physical concatenation to the IOMMU driver's
12820db2e5d1SRobin Murphy 	 * implementation - it knows better than we do.
12830db2e5d1SRobin Murphy 	 */
1284dabb16f6SLogan Gunthorpe 	ret = iommu_map_sg_atomic(domain, iova, sg, nents, prot);
1285a3884774SYunfei Wang 	if (ret < 0 || ret < iova_len)
12860db2e5d1SRobin Murphy 		goto out_free_iova;
12870db2e5d1SRobin Murphy 
1288842fe519SRobin Murphy 	return __finalise_sg(dev, sg, nents, iova);
12890db2e5d1SRobin Murphy 
12900db2e5d1SRobin Murphy out_free_iova:
12912a2b8eaaSTom Murphy 	iommu_dma_free_iova(cookie, iova, iova_len, NULL);
12920db2e5d1SRobin Murphy out_restore_sg:
12930db2e5d1SRobin Murphy 	__invalidate_sg(sg, nents);
1294dabb16f6SLogan Gunthorpe out:
129530280eeeSLogan Gunthorpe 	if (ret != -ENOMEM && ret != -EREMOTEIO)
1296dabb16f6SLogan Gunthorpe 		return -EINVAL;
1297dabb16f6SLogan Gunthorpe 	return ret;
12980db2e5d1SRobin Murphy }
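/*
 * Illustrative sketch (hypothetical driver code): scatterlist mappings
 * arrive here via the generic DMA API; page0/page1 are assumed to exist.
 *
 *	struct scatterlist sgl[2];
 *	int nents;
 *
 *	sg_init_table(sgl, 2);
 *	sg_set_page(&sgl[0], page0, PAGE_SIZE, 0);
 *	sg_set_page(&sgl[1], page1, 0x800, 0x200);
 *	nents = dma_map_sg(dev, sgl, 2, DMA_FROM_DEVICE);
 *	if (!nents)
 *		return -EIO;
 *	... hardware fills the buffers ...
 *	dma_unmap_sg(dev, sgl, 2, DMA_FROM_DEVICE);
 */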
12990db2e5d1SRobin Murphy 
130006d60728SChristoph Hellwig static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
130106d60728SChristoph Hellwig 		int nents, enum dma_data_direction dir, unsigned long attrs)
13020db2e5d1SRobin Murphy {
130330280eeeSLogan Gunthorpe 	dma_addr_t end = 0, start;
1304842fe519SRobin Murphy 	struct scatterlist *tmp;
1305842fe519SRobin Murphy 	int i;
130606d60728SChristoph Hellwig 
13072e727bffSDavid Stevens 	if (dev_use_swiotlb(dev)) {
130882612d66STom Murphy 		iommu_dma_unmap_sg_swiotlb(dev, sg, nents, dir, attrs);
130982612d66STom Murphy 		return;
131082612d66STom Murphy 	}
131182612d66STom Murphy 
1312ee9d4097SDavid Stevens 	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
1313ee9d4097SDavid Stevens 		iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir);
1314ee9d4097SDavid Stevens 
13150db2e5d1SRobin Murphy 	/*
13160db2e5d1SRobin Murphy 	 * The scatterlist segments are mapped into a single
131730280eeeSLogan Gunthorpe 	 * contiguous IOVA allocation, so only the start and end
131830280eeeSLogan Gunthorpe 	 * points need to be determined.
13190db2e5d1SRobin Murphy 	 */
132030280eeeSLogan Gunthorpe 	for_each_sg(sg, tmp, nents, i) {
132130280eeeSLogan Gunthorpe 		if (sg_is_dma_bus_address(tmp)) {
132230280eeeSLogan Gunthorpe 			sg_dma_unmark_bus_address(tmp);
132330280eeeSLogan Gunthorpe 			continue;
132430280eeeSLogan Gunthorpe 		}
132530280eeeSLogan Gunthorpe 
1326842fe519SRobin Murphy 		if (sg_dma_len(tmp) == 0)
1327842fe519SRobin Murphy 			break;
132830280eeeSLogan Gunthorpe 
132930280eeeSLogan Gunthorpe 		start = sg_dma_address(tmp);
133030280eeeSLogan Gunthorpe 		break;
1331842fe519SRobin Murphy 	}
133230280eeeSLogan Gunthorpe 
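	/*
	 * Walk the remaining entries to find the end of the mapped range,
	 * unmarking any P2PDMA bus-address segments along the way.
	 */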
133330280eeeSLogan Gunthorpe 	nents -= i;
133430280eeeSLogan Gunthorpe 	for_each_sg(tmp, tmp, nents, i) {
133530280eeeSLogan Gunthorpe 		if (sg_is_dma_bus_address(tmp)) {
133630280eeeSLogan Gunthorpe 			sg_dma_unmark_bus_address(tmp);
133730280eeeSLogan Gunthorpe 			continue;
133830280eeeSLogan Gunthorpe 		}
133930280eeeSLogan Gunthorpe 
134030280eeeSLogan Gunthorpe 		if (sg_dma_len(tmp) == 0)
134130280eeeSLogan Gunthorpe 			break;
134230280eeeSLogan Gunthorpe 
134330280eeeSLogan Gunthorpe 		end = sg_dma_address(tmp) + sg_dma_len(tmp);
134430280eeeSLogan Gunthorpe 	}
134530280eeeSLogan Gunthorpe 
134630280eeeSLogan Gunthorpe 	if (end)
1347b61d271eSRobin Murphy 		__iommu_dma_unmap(dev, start, end - start);
13480db2e5d1SRobin Murphy }
13490db2e5d1SRobin Murphy 
135006d60728SChristoph Hellwig static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
135151f8cc9eSRobin Murphy 		size_t size, enum dma_data_direction dir, unsigned long attrs)
135251f8cc9eSRobin Murphy {
135351f8cc9eSRobin Murphy 	return __iommu_dma_map(dev, phys, size,
13546e235020STom Murphy 			dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO,
13556e235020STom Murphy 			dma_get_mask(dev));
135651f8cc9eSRobin Murphy }
135751f8cc9eSRobin Murphy 
135806d60728SChristoph Hellwig static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
135951f8cc9eSRobin Murphy 		size_t size, enum dma_data_direction dir, unsigned long attrs)
136051f8cc9eSRobin Murphy {
1361b61d271eSRobin Murphy 	__iommu_dma_unmap(dev, handle, size);
136251f8cc9eSRobin Murphy }
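/*
 * Illustrative sketch: the two resource helpers above back dma_map_resource(),
 * e.g. for device-to-device DMA to an MMIO region (bar_phys is hypothetical):
 *
 *	dma_addr_t dma = dma_map_resource(dev, bar_phys, SZ_4K,
 *					  DMA_BIDIRECTIONAL, 0);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	...
 *	dma_unmap_resource(dev, dma, SZ_4K, DMA_BIDIRECTIONAL, 0);
 */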
136351f8cc9eSRobin Murphy 
13648553f6e6SRobin Murphy static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
1365bcf4b9c4SRobin Murphy {
1366bcf4b9c4SRobin Murphy 	size_t alloc_size = PAGE_ALIGN(size);
1367bcf4b9c4SRobin Murphy 	int count = alloc_size >> PAGE_SHIFT;
1368bcf4b9c4SRobin Murphy 	struct page *page = NULL, **pages = NULL;
1369bcf4b9c4SRobin Murphy 
1370bcf4b9c4SRobin Murphy 	/* Non-coherent atomic allocation? Easy */
1371e6475eb0SChristoph Hellwig 	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
1372c84dc6e6SDavid Rientjes 	    dma_free_from_pool(dev, cpu_addr, alloc_size))
1373bcf4b9c4SRobin Murphy 		return;
1374bcf4b9c4SRobin Murphy 
1375f5ff79fdSChristoph Hellwig 	if (is_vmalloc_addr(cpu_addr)) {
1376bcf4b9c4SRobin Murphy 		/*
1377bcf4b9c4SRobin Murphy 		 * If the address is remapped, then it's either non-coherent
1378bcf4b9c4SRobin Murphy 		 * or highmem CMA, or an iommu_dma_alloc_remap() construction.
1379bcf4b9c4SRobin Murphy 		 */
13805cf45379SChristoph Hellwig 		pages = dma_common_find_pages(cpu_addr);
1381bcf4b9c4SRobin Murphy 		if (!pages)
1382bcf4b9c4SRobin Murphy 			page = vmalloc_to_page(cpu_addr);
138351231740SChristoph Hellwig 		dma_common_free_remap(cpu_addr, alloc_size);
1384bcf4b9c4SRobin Murphy 	} else {
1385bcf4b9c4SRobin Murphy 		/* Lowmem means a coherent atomic or CMA allocation */
1386bcf4b9c4SRobin Murphy 		page = virt_to_page(cpu_addr);
1387bcf4b9c4SRobin Murphy 	}
1388bcf4b9c4SRobin Murphy 
1389bcf4b9c4SRobin Murphy 	if (pages)
1390bcf4b9c4SRobin Murphy 		__iommu_dma_free_pages(pages, count);
1391591fcf3bSNicolin Chen 	if (page)
1392591fcf3bSNicolin Chen 		dma_free_contiguous(dev, page, alloc_size);
1393bcf4b9c4SRobin Murphy }
1394bcf4b9c4SRobin Murphy 
13958553f6e6SRobin Murphy static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
13968553f6e6SRobin Murphy 		dma_addr_t handle, unsigned long attrs)
13978553f6e6SRobin Murphy {
13988553f6e6SRobin Murphy 	__iommu_dma_unmap(dev, handle, size);
13998553f6e6SRobin Murphy 	__iommu_dma_free(dev, size, cpu_addr);
14008553f6e6SRobin Murphy }
14018553f6e6SRobin Murphy 
1402ee1ef05dSChristoph Hellwig static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
1403ee1ef05dSChristoph Hellwig 		struct page **pagep, gfp_t gfp, unsigned long attrs)
140406d60728SChristoph Hellwig {
140506d60728SChristoph Hellwig 	bool coherent = dev_is_dma_coherent(dev);
14069ad5d6edSRobin Murphy 	size_t alloc_size = PAGE_ALIGN(size);
140790ae409fSChristoph Hellwig 	int node = dev_to_node(dev);
14089a4ab94aSChristoph Hellwig 	struct page *page = NULL;
14099ad5d6edSRobin Murphy 	void *cpu_addr;
141006d60728SChristoph Hellwig 
1411591fcf3bSNicolin Chen 	page = dma_alloc_contiguous(dev, alloc_size, gfp);
141206d60728SChristoph Hellwig 	if (!page)
141390ae409fSChristoph Hellwig 		page = alloc_pages_node(node, gfp, get_order(alloc_size));
141490ae409fSChristoph Hellwig 	if (!page)
141506d60728SChristoph Hellwig 		return NULL;
141606d60728SChristoph Hellwig 
1417f5ff79fdSChristoph Hellwig 	if (!coherent || PageHighMem(page)) {
141833dcb37cSChristoph Hellwig 		pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
14198680aa5aSRobin Murphy 
14209ad5d6edSRobin Murphy 		cpu_addr = dma_common_contiguous_remap(page, alloc_size,
142151231740SChristoph Hellwig 				prot, __builtin_return_address(0));
14229ad5d6edSRobin Murphy 		if (!cpu_addr)
1423ee1ef05dSChristoph Hellwig 			goto out_free_pages;
1424072bebc0SRobin Murphy 
142506d60728SChristoph Hellwig 		if (!coherent)
14269ad5d6edSRobin Murphy 			arch_dma_prep_coherent(page, size);
14278680aa5aSRobin Murphy 	} else {
14289ad5d6edSRobin Murphy 		cpu_addr = page_address(page);
14298680aa5aSRobin Murphy 	}
1430ee1ef05dSChristoph Hellwig 
1431ee1ef05dSChristoph Hellwig 	*pagep = page;
14329ad5d6edSRobin Murphy 	memset(cpu_addr, 0, alloc_size);
14339ad5d6edSRobin Murphy 	return cpu_addr;
1434072bebc0SRobin Murphy out_free_pages:
1435591fcf3bSNicolin Chen 	dma_free_contiguous(dev, page, alloc_size);
1436072bebc0SRobin Murphy 	return NULL;
143706d60728SChristoph Hellwig }
143806d60728SChristoph Hellwig 
1439ee1ef05dSChristoph Hellwig static void *iommu_dma_alloc(struct device *dev, size_t size,
1440ee1ef05dSChristoph Hellwig 		dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
1441ee1ef05dSChristoph Hellwig {
1442ee1ef05dSChristoph Hellwig 	bool coherent = dev_is_dma_coherent(dev);
1443ee1ef05dSChristoph Hellwig 	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
1444ee1ef05dSChristoph Hellwig 	struct page *page = NULL;
1445ee1ef05dSChristoph Hellwig 	void *cpu_addr;
1446ee1ef05dSChristoph Hellwig 
1447ee1ef05dSChristoph Hellwig 	gfp |= __GFP_ZERO;
1448ee1ef05dSChristoph Hellwig 
1449f5ff79fdSChristoph Hellwig 	if (gfpflags_allow_blocking(gfp) &&
1450e8d39a90SChristoph Hellwig 	    !(attrs & DMA_ATTR_FORCE_CONTIGUOUS)) {
1451e8d39a90SChristoph Hellwig 		return iommu_dma_alloc_remap(dev, size, handle, gfp,
1452e8d39a90SChristoph Hellwig 				dma_pgprot(dev, PAGE_KERNEL, attrs), attrs);
1453e8d39a90SChristoph Hellwig 	}
1454ee1ef05dSChristoph Hellwig 
1455e6475eb0SChristoph Hellwig 	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
1456e6475eb0SChristoph Hellwig 	    !gfpflags_allow_blocking(gfp) && !coherent)
14579420139fSChristoph Hellwig 		page = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &cpu_addr,
14589420139fSChristoph Hellwig 					       gfp, NULL);
1459ee1ef05dSChristoph Hellwig 	else
1460ee1ef05dSChristoph Hellwig 		cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs);
1461ee1ef05dSChristoph Hellwig 	if (!cpu_addr)
1462ee1ef05dSChristoph Hellwig 		return NULL;
1463ee1ef05dSChristoph Hellwig 
14646e235020STom Murphy 	*handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot,
14656e235020STom Murphy 			dev->coherent_dma_mask);
1466ee1ef05dSChristoph Hellwig 	if (*handle == DMA_MAPPING_ERROR) {
1467ee1ef05dSChristoph Hellwig 		__iommu_dma_free(dev, size, cpu_addr);
1468ee1ef05dSChristoph Hellwig 		return NULL;
1469ee1ef05dSChristoph Hellwig 	}
1470ee1ef05dSChristoph Hellwig 
1471ee1ef05dSChristoph Hellwig 	return cpu_addr;
1472ee1ef05dSChristoph Hellwig }
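/*
 * Illustrative sketch: a coherent allocation from driver code ends up in
 * iommu_dma_alloc() above (size and names hypothetical):
 *
 *	dma_addr_t handle;
 *	void *buf = dma_alloc_coherent(dev, SZ_64K, &handle, GFP_KERNEL);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, SZ_64K, buf, handle);
 */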
1473ee1ef05dSChristoph Hellwig 
147406d60728SChristoph Hellwig static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
147506d60728SChristoph Hellwig 		void *cpu_addr, dma_addr_t dma_addr, size_t size,
147606d60728SChristoph Hellwig 		unsigned long attrs)
147706d60728SChristoph Hellwig {
147806d60728SChristoph Hellwig 	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
1479efd9f10bSChristoph Hellwig 	unsigned long pfn, off = vma->vm_pgoff;
148006d60728SChristoph Hellwig 	int ret;
148106d60728SChristoph Hellwig 
148233dcb37cSChristoph Hellwig 	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
148306d60728SChristoph Hellwig 
148406d60728SChristoph Hellwig 	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
148506d60728SChristoph Hellwig 		return ret;
148606d60728SChristoph Hellwig 
148706d60728SChristoph Hellwig 	if (off >= nr_pages || vma_pages(vma) > nr_pages - off)
148806d60728SChristoph Hellwig 		return -ENXIO;
148906d60728SChristoph Hellwig 
1490f5ff79fdSChristoph Hellwig 	if (is_vmalloc_addr(cpu_addr)) {
14915cf45379SChristoph Hellwig 		struct page **pages = dma_common_find_pages(cpu_addr);
149206d60728SChristoph Hellwig 
1493efd9f10bSChristoph Hellwig 		if (pages)
149471fe89ceSChristoph Hellwig 			return vm_map_pages(vma, pages, nr_pages);
1495efd9f10bSChristoph Hellwig 		pfn = vmalloc_to_pfn(cpu_addr);
1496efd9f10bSChristoph Hellwig 	} else {
1497efd9f10bSChristoph Hellwig 		pfn = page_to_pfn(virt_to_page(cpu_addr));
1498efd9f10bSChristoph Hellwig 	}
1499efd9f10bSChristoph Hellwig 
1500efd9f10bSChristoph Hellwig 	return remap_pfn_range(vma, vma->vm_start, pfn + off,
1501efd9f10bSChristoph Hellwig 			       vma->vm_end - vma->vm_start,
1502efd9f10bSChristoph Hellwig 			       vma->vm_page_prot);
150306d60728SChristoph Hellwig }
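/*
 * Illustrative sketch: a driver's mmap file operation would typically
 * forward to the handler above via dma_mmap_coherent(); buf and handle
 * come from a prior dma_alloc_coherent() and are hypothetical here.
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return dma_mmap_coherent(dev, vma, buf, handle, SZ_64K);
 *	}
 */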
150406d60728SChristoph Hellwig 
150506d60728SChristoph Hellwig static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
150606d60728SChristoph Hellwig 		void *cpu_addr, dma_addr_t dma_addr, size_t size,
150706d60728SChristoph Hellwig 		unsigned long attrs)
150806d60728SChristoph Hellwig {
15093fb3378bSChristoph Hellwig 	struct page *page;
15103fb3378bSChristoph Hellwig 	int ret;
151106d60728SChristoph Hellwig 
1512f5ff79fdSChristoph Hellwig 	if (is_vmalloc_addr(cpu_addr)) {
15135cf45379SChristoph Hellwig 		struct page **pages = dma_common_find_pages(cpu_addr);
15143fb3378bSChristoph Hellwig 
15153fb3378bSChristoph Hellwig 		if (pages) {
15163fb3378bSChristoph Hellwig 			return sg_alloc_table_from_pages(sgt, pages,
15173fb3378bSChristoph Hellwig 					PAGE_ALIGN(size) >> PAGE_SHIFT,
15183fb3378bSChristoph Hellwig 					0, size, GFP_KERNEL);
151906d60728SChristoph Hellwig 		}
152006d60728SChristoph Hellwig 
15213fb3378bSChristoph Hellwig 		page = vmalloc_to_page(cpu_addr);
15223fb3378bSChristoph Hellwig 	} else {
15233fb3378bSChristoph Hellwig 		page = virt_to_page(cpu_addr);
152406d60728SChristoph Hellwig 	}
152506d60728SChristoph Hellwig 
15263fb3378bSChristoph Hellwig 	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
15273fb3378bSChristoph Hellwig 	if (!ret)
15283fb3378bSChristoph Hellwig 		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
15293fb3378bSChristoph Hellwig 	return ret;
153006d60728SChristoph Hellwig }
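/*
 * Illustrative sketch: exporters such as dma-buf implementations obtain a
 * scatterlist view of a coherent buffer with dma_get_sgtable(), which
 * resolves to the function above (buf/handle hypothetical):
 *
 *	struct sg_table sgt;
 *
 *	if (dma_get_sgtable(dev, &sgt, buf, handle, SZ_64K))
 *		return -ENOMEM;
 *	...
 *	sg_free_table(&sgt);
 */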
153106d60728SChristoph Hellwig 
1532158a6d3cSYoshihiro Shimoda static unsigned long iommu_dma_get_merge_boundary(struct device *dev)
1533158a6d3cSYoshihiro Shimoda {
1534158a6d3cSYoshihiro Shimoda 	struct iommu_domain *domain = iommu_get_dma_domain(dev);
1535158a6d3cSYoshihiro Shimoda 
1536158a6d3cSYoshihiro Shimoda 	return (1UL << __ffs(domain->pgsize_bitmap)) - 1;
1537158a6d3cSYoshihiro Shimoda }
1538158a6d3cSYoshihiro Shimoda 
15396d9870b7SJohn Garry static size_t iommu_dma_opt_mapping_size(void)
15406d9870b7SJohn Garry {
15416d9870b7SJohn Garry 	return iova_rcache_range();
15426d9870b7SJohn Garry }
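/*
 * Illustrative sketch: consumers reach these two hooks through the generic
 * DMA helpers, e.g. a block driver sizing its requests (q is a hypothetical
 * request queue):
 *
 *	blk_queue_virt_boundary(q, dma_get_merge_boundary(dev));
 *	max_bytes = min_t(size_t, max_bytes, dma_opt_mapping_size(dev));
 */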
15436d9870b7SJohn Garry 
154406d60728SChristoph Hellwig static const struct dma_map_ops iommu_dma_ops = {
154530280eeeSLogan Gunthorpe 	.flags			= DMA_F_PCI_P2PDMA_SUPPORTED,
154606d60728SChristoph Hellwig 	.alloc			= iommu_dma_alloc,
154706d60728SChristoph Hellwig 	.free			= iommu_dma_free,
1548efa70f2fSChristoph Hellwig 	.alloc_pages		= dma_common_alloc_pages,
1549efa70f2fSChristoph Hellwig 	.free_pages		= dma_common_free_pages,
1550e817ee5fSChristoph Hellwig 	.alloc_noncontiguous	= iommu_dma_alloc_noncontiguous,
1551e817ee5fSChristoph Hellwig 	.free_noncontiguous	= iommu_dma_free_noncontiguous,
155206d60728SChristoph Hellwig 	.mmap			= iommu_dma_mmap,
155306d60728SChristoph Hellwig 	.get_sgtable		= iommu_dma_get_sgtable,
155406d60728SChristoph Hellwig 	.map_page		= iommu_dma_map_page,
155506d60728SChristoph Hellwig 	.unmap_page		= iommu_dma_unmap_page,
155606d60728SChristoph Hellwig 	.map_sg			= iommu_dma_map_sg,
155706d60728SChristoph Hellwig 	.unmap_sg		= iommu_dma_unmap_sg,
155806d60728SChristoph Hellwig 	.sync_single_for_cpu	= iommu_dma_sync_single_for_cpu,
155906d60728SChristoph Hellwig 	.sync_single_for_device	= iommu_dma_sync_single_for_device,
156006d60728SChristoph Hellwig 	.sync_sg_for_cpu	= iommu_dma_sync_sg_for_cpu,
156106d60728SChristoph Hellwig 	.sync_sg_for_device	= iommu_dma_sync_sg_for_device,
156206d60728SChristoph Hellwig 	.map_resource		= iommu_dma_map_resource,
156306d60728SChristoph Hellwig 	.unmap_resource		= iommu_dma_unmap_resource,
1564158a6d3cSYoshihiro Shimoda 	.get_merge_boundary	= iommu_dma_get_merge_boundary,
15656d9870b7SJohn Garry 	.opt_mapping_size	= iommu_dma_opt_mapping_size,
156606d60728SChristoph Hellwig };
156706d60728SChristoph Hellwig 
156806d60728SChristoph Hellwig /*
156906d60728SChristoph Hellwig  * Set up IOMMU-backed DMA ops for a device whose default domain is a DMA
157006d60728SChristoph Hellwig  * domain that the underlying IOMMU driver supports via the dma-iommu layer.
157106d60728SChristoph Hellwig  */
1572ac6d7046SJean-Philippe Brucker void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit)
157306d60728SChristoph Hellwig {
157406d60728SChristoph Hellwig 	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
157506d60728SChristoph Hellwig 
157606d60728SChristoph Hellwig 	if (!domain)
157706d60728SChristoph Hellwig 		goto out_err;
157806d60728SChristoph Hellwig 
157906d60728SChristoph Hellwig 	/*
158006d60728SChristoph Hellwig 	 * The IOMMU core code allocates the default DMA domain, which the
158106d60728SChristoph Hellwig 	 * underlying IOMMU driver needs to support via the dma-iommu layer.
158206d60728SChristoph Hellwig 	 */
1583bf3aed46SRobin Murphy 	if (iommu_is_dma_domain(domain)) {
1584ac6d7046SJean-Philippe Brucker 		if (iommu_dma_init_domain(domain, dma_base, dma_limit, dev))
158506d60728SChristoph Hellwig 			goto out_err;
158606d60728SChristoph Hellwig 		dev->dma_ops = &iommu_dma_ops;
158706d60728SChristoph Hellwig 	}
158806d60728SChristoph Hellwig 
158906d60728SChristoph Hellwig 	return;
159006d60728SChristoph Hellwig out_err:
159106d60728SChristoph Hellwig 	pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
159206d60728SChristoph Hellwig 		 dev_name(dev));
159344bb7e24SRobin Murphy }
15948ce4904bSJean-Philippe Brucker EXPORT_SYMBOL_GPL(iommu_setup_dma_ops);
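/*
 * Illustrative sketch: architecture code is expected to call this from its
 * arch_setup_dma_ops() path once the device is attached to its IOMMU,
 * roughly:
 *
 *	iommu_setup_dma_ops(dev, dma_base, dma_base + size - 1);
 */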
159544bb7e24SRobin Murphy 
159644bb7e24SRobin Murphy static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
159744bb7e24SRobin Murphy 		phys_addr_t msi_addr, struct iommu_domain *domain)
159844bb7e24SRobin Murphy {
159944bb7e24SRobin Murphy 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
160044bb7e24SRobin Murphy 	struct iommu_dma_msi_page *msi_page;
1601842fe519SRobin Murphy 	dma_addr_t iova;
160244bb7e24SRobin Murphy 	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
1603fdbe574eSRobin Murphy 	size_t size = cookie_msi_granule(cookie);
160444bb7e24SRobin Murphy 
1605fdbe574eSRobin Murphy 	msi_addr &= ~(phys_addr_t)(size - 1);
160644bb7e24SRobin Murphy 	list_for_each_entry(msi_page, &cookie->msi_page_list, list)
160744bb7e24SRobin Murphy 		if (msi_page->phys == msi_addr)
160844bb7e24SRobin Murphy 			return msi_page;
160944bb7e24SRobin Murphy 
1610c1864790SRobin Murphy 	msi_page = kzalloc(sizeof(*msi_page), GFP_KERNEL);
161144bb7e24SRobin Murphy 	if (!msi_page)
161244bb7e24SRobin Murphy 		return NULL;
161344bb7e24SRobin Murphy 
16148af23fadSRobin Murphy 	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
16158af23fadSRobin Murphy 	if (!iova)
161644bb7e24SRobin Murphy 		goto out_free_page;
161744bb7e24SRobin Murphy 
16181369459bSJason Gunthorpe 	if (iommu_map(domain, iova, msi_addr, size, prot, GFP_KERNEL))
16198af23fadSRobin Murphy 		goto out_free_iova;
16208af23fadSRobin Murphy 
162144bb7e24SRobin Murphy 	INIT_LIST_HEAD(&msi_page->list);
1622a44e6657SRobin Murphy 	msi_page->phys = msi_addr;
1623a44e6657SRobin Murphy 	msi_page->iova = iova;
162444bb7e24SRobin Murphy 	list_add(&msi_page->list, &cookie->msi_page_list);
162544bb7e24SRobin Murphy 	return msi_page;
162644bb7e24SRobin Murphy 
16278af23fadSRobin Murphy out_free_iova:
16282a2b8eaaSTom Murphy 	iommu_dma_free_iova(cookie, iova, size, NULL);
162944bb7e24SRobin Murphy out_free_page:
163044bb7e24SRobin Murphy 	kfree(msi_page);
163144bb7e24SRobin Murphy 	return NULL;
163244bb7e24SRobin Murphy }
163344bb7e24SRobin Murphy 
1634fa49364cSRobin Murphy /**
1635fa49364cSRobin Murphy  * iommu_dma_prepare_msi() - Map the MSI page in the IOMMU domain
1636fa49364cSRobin Murphy  * @desc: MSI descriptor, will store the MSI page
1637fa49364cSRobin Murphy  * @msi_addr: MSI target address to be mapped
1638fa49364cSRobin Murphy  *
1639fa49364cSRobin Murphy  * Return: 0 on success or negative error code if the mapping failed.
1640fa49364cSRobin Murphy  */
1641ece6e6f0SJulien Grall int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
164244bb7e24SRobin Murphy {
1643ece6e6f0SJulien Grall 	struct device *dev = msi_desc_to_dev(desc);
164444bb7e24SRobin Murphy 	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
164544bb7e24SRobin Murphy 	struct iommu_dma_msi_page *msi_page;
1646c1864790SRobin Murphy 	static DEFINE_MUTEX(msi_prepare_lock); /* see below */
164744bb7e24SRobin Murphy 
1648ece6e6f0SJulien Grall 	if (!domain || !domain->iova_cookie) {
1649ece6e6f0SJulien Grall 		desc->iommu_cookie = NULL;
1650ece6e6f0SJulien Grall 		return 0;
1651ece6e6f0SJulien Grall 	}
165244bb7e24SRobin Murphy 
165344bb7e24SRobin Murphy 	/*
1654c1864790SRobin Murphy 	 * In fact the whole prepare operation should already be serialised by
1655c1864790SRobin Murphy 	 * irq_domain_mutex further up the callchain, but that's pretty subtle
1656c1864790SRobin Murphy 	 * on its own, so consider this locking as failsafe documentation...
165744bb7e24SRobin Murphy 	 */
1658c1864790SRobin Murphy 	mutex_lock(&msi_prepare_lock);
165944bb7e24SRobin Murphy 	msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
1660c1864790SRobin Murphy 	mutex_unlock(&msi_prepare_lock);
166144bb7e24SRobin Murphy 
1662ece6e6f0SJulien Grall 	msi_desc_set_iommu_cookie(desc, msi_page);
1663ece6e6f0SJulien Grall 
1664ece6e6f0SJulien Grall 	if (!msi_page)
1665ece6e6f0SJulien Grall 		return -ENOMEM;
1666ece6e6f0SJulien Grall 	return 0;
166744bb7e24SRobin Murphy }
1668ece6e6f0SJulien Grall 
1669fa49364cSRobin Murphy /**
1670fa49364cSRobin Murphy  * iommu_dma_compose_msi_msg() - Apply translation to an MSI message
1671fa49364cSRobin Murphy  * @desc: MSI descriptor prepared by iommu_dma_prepare_msi()
1672fa49364cSRobin Murphy  * @msg: MSI message containing target physical address
1673fa49364cSRobin Murphy  */
1674fa49364cSRobin Murphy void iommu_dma_compose_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
1675ece6e6f0SJulien Grall {
1676ece6e6f0SJulien Grall 	struct device *dev = msi_desc_to_dev(desc);
1677ece6e6f0SJulien Grall 	const struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
1678ece6e6f0SJulien Grall 	const struct iommu_dma_msi_page *msi_page;
1679ece6e6f0SJulien Grall 
1680ece6e6f0SJulien Grall 	msi_page = msi_desc_get_iommu_cookie(desc);
1681ece6e6f0SJulien Grall 
1682ece6e6f0SJulien Grall 	if (!domain || !domain->iova_cookie || WARN_ON(!msi_page))
1683ece6e6f0SJulien Grall 		return;
1684ece6e6f0SJulien Grall 
1685ece6e6f0SJulien Grall 	msg->address_hi = upper_32_bits(msi_page->iova);
1686ece6e6f0SJulien Grall 	msg->address_lo &= cookie_msi_granule(domain->iova_cookie) - 1;
1687ece6e6f0SJulien Grall 	msg->address_lo += lower_32_bits(msi_page->iova);
168844bb7e24SRobin Murphy }
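/*
 * Illustrative sketch: an MSI irqchip driver uses the pair above by
 * preparing once at IRQ allocation time and then rewriting every composed
 * message (doorbell_phys is hypothetical; desc/msg come from the MSI core):
 *
 *	ret = iommu_dma_prepare_msi(desc, doorbell_phys);
 *	if (ret)
 *		return ret;
 *	...
 *	iommu_dma_compose_msi_msg(desc, msg);
 */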
168906d60728SChristoph Hellwig 
169006d60728SChristoph Hellwig static int iommu_dma_init(void)
169106d60728SChristoph Hellwig {
1692a8e8af35SLianbo Jiang 	if (is_kdump_kernel())
1693a8e8af35SLianbo Jiang 		static_branch_enable(&iommu_deferred_attach_enabled);
1694a8e8af35SLianbo Jiang 
169506d60728SChristoph Hellwig 	return iova_cache_get();
16960db2e5d1SRobin Murphy }
169706d60728SChristoph Hellwig arch_initcall(iommu_dma_init);
1698