/*
 * A fairly generic DMA-API to IOMMU-API glue layer.
 *
 * Copyright (C) 2014-2015 ARM Ltd.
 *
 * based in part on arch/arm/mm/dma-mapping.c:
 * Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>

int iommu_dma_init(void)
{
	return iova_cache_get();
}

/**
 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
 * @domain: IOMMU domain to prepare for DMA-API usage
 *
 * IOMMU drivers should normally call this from their domain_alloc
 * callback when domain->type == IOMMU_DOMAIN_DMA.
 */
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
	struct iova_domain *iovad;

	if (domain->iova_cookie)
		return -EEXIST;

	iovad = kzalloc(sizeof(*iovad), GFP_KERNEL);
	domain->iova_cookie = iovad;

	return iovad ? 0 : -ENOMEM;
}
EXPORT_SYMBOL(iommu_get_dma_cookie);
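
/*
 * Example (illustration only, not compiled): a minimal sketch of how an
 * IOMMU driver's domain_alloc callback might acquire the cookie. The
 * "my_domain"/"my_iommu_domain_alloc" names are hypothetical.
 */
#if 0
static struct iommu_domain *my_iommu_domain_alloc(unsigned type)
{
	struct my_domain *dom;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;

	dom = kzalloc(sizeof(*dom), GFP_KERNEL);
	if (!dom)
		return NULL;

	/* DMA-API domains need their IOVA cookie allocated up front */
	if (type == IOMMU_DOMAIN_DMA && iommu_get_dma_cookie(&dom->domain)) {
		kfree(dom);
		return NULL;
	}
	return &dom->domain;
}
#endif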

/**
 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 *
 * IOMMU drivers should normally call this from their domain_free callback.
 */
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
	struct iova_domain *iovad = domain->iova_cookie;

	if (!iovad)
		return;

	if (iovad->granule)
		put_iova_domain(iovad);
	kfree(iovad);
	domain->iova_cookie = NULL;
}
EXPORT_SYMBOL(iommu_put_dma_cookie);
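
/*
 * Example (illustration only, not compiled): the matching domain_free
 * callback releases the cookie before freeing the driver's own state;
 * "my_domain"/"my_iommu_domain_free" are again hypothetical.
 */
#if 0
static void my_iommu_domain_free(struct iommu_domain *domain)
{
	struct my_domain *dom = container_of(domain, struct my_domain, domain);

	/* Harmless if no cookie was ever allocated for this domain */
	iommu_put_dma_cookie(domain);
	kfree(dom);
}
#endif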

/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 * @base: IOVA at which the mappable address space starts
 * @size: Size of IOVA space
 *
 * @base and @size should be exact multiples of IOMMU page granularity to
 * avoid rounding surprises. If necessary, we reserve the page at address 0
 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 * any change which could make prior IOVAs invalid will fail.
 */
int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, u64 size)
{
	struct iova_domain *iovad = domain->iova_cookie;
	unsigned long order, base_pfn, end_pfn;

	if (!iovad)
		return -ENODEV;

	/* Use the smallest supported page size for IOVA granularity */
	order = __ffs(domain->pgsize_bitmap);
	base_pfn = max_t(unsigned long, 1, base >> order);
	end_pfn = (base + size - 1) >> order;

	/* Check the domain allows at least some access to the device... */
	if (domain->geometry.force_aperture) {
		if (base > domain->geometry.aperture_end ||
		    base + size <= domain->geometry.aperture_start) {
			pr_warn("specified DMA range outside IOMMU capability\n");
			return -EFAULT;
		}
		/* ...then finally give it a kicking to make sure it fits */
		base_pfn = max_t(unsigned long, base_pfn,
				domain->geometry.aperture_start >> order);
		end_pfn = min_t(unsigned long, end_pfn,
				domain->geometry.aperture_end >> order);
	}

	/* All we can safely do with an existing domain is enlarge it */
	if (iovad->start_pfn) {
		if (1UL << order != iovad->granule ||
		    base_pfn != iovad->start_pfn ||
		    end_pfn < iovad->dma_32bit_pfn) {
			pr_warn("Incompatible range for DMA domain\n");
			return -EFAULT;
		}
		iovad->dma_32bit_pfn = end_pfn;
	} else {
		init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn);
	}
	return 0;
}
EXPORT_SYMBOL(iommu_dma_init_domain);
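
/*
 * Example (illustration only, not compiled): arch or bus code would normally
 * initialise the DMA domain once the device and its usable IOVA window are
 * known, e.g. from an of_dma_configure()-style path. A minimal sketch, with
 * a hypothetical "my_setup_dma_domain" wrapper:
 */
#if 0
static int my_setup_dma_domain(struct device *dev, dma_addr_t dma_base, u64 size)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

	if (!domain || domain->type != IOMMU_DOMAIN_DMA)
		return -EINVAL;

	/* @dma_base and @size are assumed granule-aligned, per the comment above */
	return iommu_dma_init_domain(domain, dma_base, size);
}
#endif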

/**
 * dma_direction_to_prot - Translate DMA API directions to IOMMU API page flags
 * @dir: Direction of DMA transfer
 * @coherent: Is the DMA master cache-coherent?
 *
 * Return: corresponding IOMMU API page protection flags
 */
int dma_direction_to_prot(enum dma_data_direction dir, bool coherent)
{
	int prot = coherent ? IOMMU_CACHE : 0;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;
	default:
		return 0;
	}
}
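
/*
 * For example, a coherent master doing DMA_BIDIRECTIONAL transfers gets
 * IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE, while a non-coherent one reading
 * from memory (DMA_TO_DEVICE) gets IOMMU_READ only. A typical caller in an
 * arch map_page implementation might look like the sketch below (illustration
 * only, not compiled; assumes an arm64-style is_device_dma_coherent() helper
 * and a hypothetical "my_map_page" name):
 */
#if 0
static dma_addr_t my_map_page(struct device *dev, struct page *page,
			      unsigned long offset, size_t size,
			      enum dma_data_direction dir, unsigned long attrs)
{
	int prot = dma_direction_to_prot(dir, is_device_dma_coherent(dev));

	return iommu_dma_map_page(dev, page, offset, size, prot);
}
#endif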

static struct iova *__alloc_iova(struct iommu_domain *domain, size_t size,
		dma_addr_t dma_limit)
{
	struct iova_domain *iovad = domain->iova_cookie;
	unsigned long shift = iova_shift(iovad);
	unsigned long length = iova_align(iovad, size) >> shift;

	if (domain->geometry.force_aperture)
		dma_limit = min(dma_limit, domain->geometry.aperture_end);
	/*
	 * Enforce size-alignment to be safe - there could perhaps be an
	 * attribute to control this per-device, or at least per-domain...
	 */
	return alloc_iova(iovad, length, dma_limit >> shift, true);
}

/* The IOVA allocator knows what we mapped, so just unmap whatever that was */
static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr)
{
	struct iova_domain *iovad = domain->iova_cookie;
	unsigned long shift = iova_shift(iovad);
	unsigned long pfn = dma_addr >> shift;
	struct iova *iova = find_iova(iovad, pfn);
	size_t size;

	if (WARN_ON(!iova))
		return;

	size = iova_size(iova) << shift;
	size -= iommu_unmap(domain, pfn << shift, size);
	/* ...and if we can't, then something is horribly, horribly wrong */
	WARN_ON(size > 0);
	__free_iova(iovad, iova);
}

static void __iommu_dma_free_pages(struct page **pages, int count)
{
	while (count--)
		__free_page(pages[count]);
	kvfree(pages);
}

static struct page **__iommu_dma_alloc_pages(unsigned int count,
		unsigned long order_mask, gfp_t gfp)
{
	struct page **pages;
	unsigned int i = 0, array_size = count * sizeof(*pages);

	order_mask &= (2U << MAX_ORDER) - 1;
	if (!order_mask)
		return NULL;

	if (array_size <= PAGE_SIZE)
		pages = kzalloc(array_size, GFP_KERNEL);
	else
		pages = vzalloc(array_size);
	if (!pages)
		return NULL;
	/* IOMMU can map any pages, so highmem can also be used here */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	while (count) {
		struct page *page = NULL;
		unsigned int order_size;

		/*
		 * Higher-order allocations are a convenience rather
		 * than a necessity, hence using __GFP_NORETRY until
		 * falling back to minimum-order allocations.
		 */
		for (order_mask &= (2U << __fls(count)) - 1;
		     order_mask; order_mask &= ~order_size) {
			unsigned int order = __fls(order_mask);

			order_size = 1U << order;
			page = alloc_pages((order_mask - order_size) ?
					   gfp | __GFP_NORETRY : gfp, order);
			if (!page)
				continue;
			if (!order)
				break;
			if (!PageCompound(page)) {
				split_page(page, order);
				break;
			} else if (!split_huge_page(page)) {
				break;
			}
			__free_pages(page, order);
		}
		if (!page) {
			__iommu_dma_free_pages(pages, i);
			return NULL;
		}
		count -= order_size;
		while (order_size--)
			pages[i++] = page++;
	}
	return pages;
}
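
/*
 * Worked example for the allocator above: with 4K CPU pages and a domain
 * whose pgsize_bitmap supports 4K and 2M mappings, the caller passes an
 * order_mask with bits 0 and 9 set. For a 520-page request, __fls(520) = 9,
 * so the first pass tries order 9 (512 pages) with __GFP_NORETRY; once that
 * succeeds, 8 pages remain and only order 0 is still eligible, so the rest
 * are filled with single pages. If the order-9 attempt fails, those 512
 * pages are simply gathered at order 0 instead.
 */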

/**
 * iommu_dma_free - Free a buffer allocated by iommu_dma_alloc()
 * @dev: Device which owns this buffer
 * @pages: Array of buffer pages as returned by iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @handle: DMA address of buffer
 *
 * Frees both the pages associated with the buffer, and the array
 * describing them
 */
void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
		dma_addr_t *handle)
{
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), *handle);
	__iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
	*handle = DMA_ERROR_CODE;
}

/**
 * iommu_dma_alloc - Allocate and map a buffer contiguous in IOVA space
 * @dev: Device to allocate memory for. Must be a real device
 *	 attached to an iommu_dma_domain
 * @size: Size of buffer in bytes
 * @gfp: Allocation flags
 * @attrs: DMA attributes for this allocation
 * @prot: IOMMU mapping flags
 * @handle: Out argument for allocated DMA handle
 * @flush_page: Arch callback which must ensure PAGE_SIZE bytes from the
 *		given VA/PA are visible to the given non-coherent device.
 *
 * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
 * but an IOMMU which supports smaller pages might not map the whole thing.
 *
 * Return: Array of struct page pointers describing the buffer,
 *	   or NULL on failure.
 */
struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
		unsigned long attrs, int prot, dma_addr_t *handle,
		void (*flush_page)(struct device *, const void *, phys_addr_t))
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iova_domain *iovad = domain->iova_cookie;
	struct iova *iova;
	struct page **pages;
	struct sg_table sgt;
	dma_addr_t dma_addr;
	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;

	*handle = DMA_ERROR_CODE;

	min_size = alloc_sizes & -alloc_sizes;
	if (min_size < PAGE_SIZE) {
		min_size = PAGE_SIZE;
		alloc_sizes |= PAGE_SIZE;
	} else {
		size = ALIGN(size, min_size);
	}
	if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
		alloc_sizes = min_size;

	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	pages = __iommu_dma_alloc_pages(count, alloc_sizes >> PAGE_SHIFT, gfp);
	if (!pages)
		return NULL;

	iova = __alloc_iova(domain, size, dev->coherent_dma_mask);
	if (!iova)
		goto out_free_pages;

	size = iova_align(iovad, size);
	if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
		goto out_free_iova;

	if (!(prot & IOMMU_CACHE)) {
		struct sg_mapping_iter miter;
		/*
		 * The CPU-centric flushing implied by SG_MITER_TO_SG isn't
		 * sufficient here, so skip it by using the "wrong" direction.
		 */
		sg_miter_start(&miter, sgt.sgl, sgt.orig_nents, SG_MITER_FROM_SG);
		while (sg_miter_next(&miter))
			flush_page(dev, miter.addr, page_to_phys(miter.page));
		sg_miter_stop(&miter);
	}

	dma_addr = iova_dma_addr(iovad, iova);
	if (iommu_map_sg(domain, dma_addr, sgt.sgl, sgt.orig_nents, prot)
			< size)
		goto out_free_sg;

	*handle = dma_addr;
	sg_free_table(&sgt);
	return pages;

out_free_sg:
	sg_free_table(&sgt);
out_free_iova:
	__free_iova(iovad, iova);
out_free_pages:
	__iommu_dma_free_pages(pages, count);
	return NULL;
}
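
/*
 * Example (illustration only, not compiled): a rough sketch of how an arch
 * dma_map_ops alloc hook for a non-coherent device might drive
 * iommu_dma_alloc(). The "my_*" names and the "my_arch_flush_dcache_area"
 * cache-maintenance helper are hypothetical; real callers (e.g. arm64) also
 * keep the page array around so that iommu_dma_mmap()/iommu_dma_free() can
 * find it later.
 */
#if 0
static void my_flush_page(struct device *dev, const void *virt, phys_addr_t phys)
{
	my_arch_flush_dcache_area(virt, PAGE_SIZE);
}

static void *my_alloc(struct device *dev, size_t size, dma_addr_t *handle,
		      gfp_t gfp, unsigned long attrs)
{
	int prot = dma_direction_to_prot(DMA_BIDIRECTIONAL, false);
	struct page **pages;

	pages = iommu_dma_alloc(dev, size, gfp, attrs, prot, handle,
				my_flush_page);
	if (!pages)
		return NULL;

	/* Non-cacheable CPU view of the (discontiguous) buffer */
	return vmap(pages, PAGE_ALIGN(size) >> PAGE_SHIFT, VM_MAP,
		    pgprot_writecombine(PAGE_KERNEL));
}
#endif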

/**
 * iommu_dma_mmap - Map a buffer into provided user VMA
 * @pages: Array representing buffer from iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @vma: VMA describing requested userspace mapping
 *
 * Maps the pages of the buffer in @pages into @vma. The caller is responsible
 * for verifying the correct size and protection of @vma beforehand.
 */
int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma)
{
	unsigned long uaddr = vma->vm_start;
	unsigned int i, count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	int ret = -ENXIO;

	for (i = vma->vm_pgoff; i < count && uaddr < vma->vm_end; i++) {
		ret = vm_insert_page(vma, uaddr, pages[i]);
		if (ret)
			break;
		uaddr += PAGE_SIZE;
	}
	return ret;
}
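
/*
 * Example (illustration only, not compiled): a hypothetical arch mmap hook,
 * showing the size/offset checks the caller is expected to perform before
 * handing the VMA to iommu_dma_mmap(). Recovering @pages from the original
 * allocation is left out here.
 */
#if 0
static int my_mmap(struct device *dev, struct vm_area_struct *vma,
		   struct page **pages, size_t size)
{
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;

	if (vma->vm_pgoff >= nr_pages || nr_vma_pages > nr_pages - vma->vm_pgoff)
		return -ENXIO;

	return iommu_dma_mmap(pages, size, vma);
}
#endif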

dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, int prot)
{
	dma_addr_t dma_addr;
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iova_domain *iovad = domain->iova_cookie;
	phys_addr_t phys = page_to_phys(page) + offset;
	size_t iova_off = iova_offset(iovad, phys);
	size_t len = iova_align(iovad, size + iova_off);
	struct iova *iova = __alloc_iova(domain, len, dma_get_mask(dev));

	if (!iova)
		return DMA_ERROR_CODE;

	dma_addr = iova_dma_addr(iovad, iova);
	if (iommu_map(domain, dma_addr, phys - iova_off, len, prot)) {
		__free_iova(iovad, iova);
		return DMA_ERROR_CODE;
	}
	return dma_addr + iova_off;
}

void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle);
}

/*
 * Prepare a successfully-mapped scatterlist to give back to the caller.
 *
 * At this point the segments are already laid out by iommu_dma_map_sg() to
 * avoid individually crossing any boundaries, so we merely need to check a
 * segment's start address to avoid concatenating across one.
 */
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
		dma_addr_t dma_addr)
{
	struct scatterlist *s, *cur = sg;
	unsigned long seg_mask = dma_get_seg_boundary(dev);
	unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
	int i, count = 0;

	for_each_sg(sg, s, nents, i) {
		/* Restore this segment's original unaligned fields first */
		unsigned int s_iova_off = sg_dma_address(s);
		unsigned int s_length = sg_dma_len(s);
		unsigned int s_iova_len = s->length;

		s->offset += s_iova_off;
		s->length = s_length;
		sg_dma_address(s) = DMA_ERROR_CODE;
		sg_dma_len(s) = 0;

		/*
		 * Now fill in the real DMA data. If...
		 * - there is a valid output segment to append to
		 * - and this segment starts on an IOVA page boundary
		 * - but doesn't fall at a segment boundary
		 * - and wouldn't make the resulting output segment too long
		 */
		if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
		    (cur_len + s_length <= max_len)) {
			/* ...then concatenate it with the previous one */
			cur_len += s_length;
		} else {
			/* Otherwise start the next output segment */
			if (i > 0)
				cur = sg_next(cur);
			cur_len = s_length;
			count++;

			sg_dma_address(cur) = dma_addr + s_iova_off;
		}

		sg_dma_len(cur) = cur_len;
		dma_addr += s_iova_len;

		if (s_length + s_iova_off < s_iova_len)
			cur_len = 0;
	}
	return count;
}
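
/*
 * For instance, with a 4K IOVA granule, two page-aligned segments of 4K and
 * 8K mapped back-to-back at IOVA D come back as a single 12K DMA segment at
 * D (subject to the device's max segment size and boundary mask), whereas a
 * segment whose padded IOVA length exceeds its real length forces the next
 * one to start a fresh DMA segment.
 */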

/*
 * If mapping failed, then just restore the original list,
 * but making sure the DMA fields are invalidated.
 */
static void __invalidate_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_address(s) != DMA_ERROR_CODE)
			s->offset += sg_dma_address(s);
		if (sg_dma_len(s))
			s->length = sg_dma_len(s);
		sg_dma_address(s) = DMA_ERROR_CODE;
		sg_dma_len(s) = 0;
	}
}

/*
 * The DMA API client is passing in a scatterlist which could describe
 * any old buffer layout, but the IOMMU API requires everything to be
 * aligned to IOMMU pages. Hence the need for this complicated bit of
 * impedance-matching, to be able to hand off a suitably-aligned list,
 * but still preserve the original offsets and sizes for the caller.
 */
int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, int prot)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iova_domain *iovad = domain->iova_cookie;
	struct iova *iova;
	struct scatterlist *s, *prev = NULL;
	dma_addr_t dma_addr;
	size_t iova_len = 0;
	unsigned long mask = dma_get_seg_boundary(dev);
	int i;

	/*
	 * Work out how much IOVA space we need, and align the segments to
	 * IOVA granules for the IOMMU driver to handle. With some clever
	 * trickery we can modify the list in-place, but reversibly, by
	 * stashing the unaligned parts in the as-yet-unused DMA fields.
	 */
	for_each_sg(sg, s, nents, i) {
		size_t s_iova_off = iova_offset(iovad, s->offset);
		size_t s_length = s->length;
		size_t pad_len = (mask - iova_len + 1) & mask;

		sg_dma_address(s) = s_iova_off;
		sg_dma_len(s) = s_length;
		s->offset -= s_iova_off;
		s_length = iova_align(iovad, s_length + s_iova_off);
		s->length = s_length;

		/*
		 * Due to the alignment of our single IOVA allocation, we can
		 * depend on these assumptions about the segment boundary mask:
		 * - If mask size >= IOVA size, then the IOVA range cannot
		 *   possibly fall across a boundary, so we don't care.
		 * - If mask size < IOVA size, then the IOVA range must start
		 *   exactly on a boundary, therefore we can lay things out
		 *   based purely on segment lengths without needing to know
		 *   the actual addresses beforehand.
		 * - The mask must be a power of 2, so pad_len == 0 if
		 *   iova_len == 0, thus we cannot dereference prev the first
		 *   time through here (i.e. before it has a meaningful value).
		 */
		if (pad_len && pad_len < s_length - 1) {
			prev->length += pad_len;
			iova_len += pad_len;
		}

		iova_len += s_length;
		prev = s;
	}

	iova = __alloc_iova(domain, iova_len, dma_get_mask(dev));
	if (!iova)
		goto out_restore_sg;

	/*
	 * We'll leave any physical concatenation to the IOMMU driver's
	 * implementation - it knows better than we do.
	 */
	dma_addr = iova_dma_addr(iovad, iova);
	if (iommu_map_sg(domain, dma_addr, sg, nents, prot) < iova_len)
		goto out_free_iova;

	return __finalise_sg(dev, sg, nents, dma_addr);

out_free_iova:
	__free_iova(iovad, iova);
out_restore_sg:
	__invalidate_sg(sg, nents);
	return 0;
}
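
/*
 * Example (illustration only, not compiled): a minimal sketch of an arch
 * dma_map_ops .map_sg built on this helper; the matching .unmap_sg would
 * simply call iommu_dma_unmap_sg() below. The coherency query and the
 * "my_map_sg" name are assumptions for illustration.
 */
#if 0
static int my_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		     enum dma_data_direction dir, unsigned long attrs)
{
	bool coherent = is_device_dma_coherent(dev);	/* arch-specific helper */

	return iommu_dma_map_sg(dev, sgl, nents,
				dma_direction_to_prot(dir, coherent));
}
#endif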

void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	/*
	 * The scatterlist segments are mapped into a single
	 * contiguous IOVA allocation, so this is incredibly easy.
	 */
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), sg_dma_address(sg));
}

int iommu_dma_supported(struct device *dev, u64 mask)
{
	/*
	 * 'Special' IOMMUs which don't have the same addressing capability
	 * as the CPU will have to wait until we have some way to query that
	 * before they'll be able to use this framework.
	 */
	return 1;
}

int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == DMA_ERROR_CODE;
}