xref: /openbmc/linux/drivers/iommu/dma-iommu.c (revision 95b384f9)
/*
 * A fairly generic DMA-API to IOMMU-API glue layer.
 *
 * Copyright (C) 2014-2015 ARM Ltd.
 *
 * based in part on arch/arm/mm/dma-mapping.c:
 * Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>

int iommu_dma_init(void)
{
	return iova_cache_get();
}

/**
 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
 * @domain: IOMMU domain to prepare for DMA-API usage
 *
 * IOMMU drivers should normally call this from their domain_alloc
 * callback when domain->type == IOMMU_DOMAIN_DMA.
 */
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
	struct iova_domain *iovad;

	if (domain->iova_cookie)
		return -EEXIST;

	iovad = kzalloc(sizeof(*iovad), GFP_KERNEL);
	domain->iova_cookie = iovad;

	return iovad ? 0 : -ENOMEM;
}
EXPORT_SYMBOL(iommu_get_dma_cookie);

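/*
 * Hypothetical usage sketch (not part of this driver): an IOMMU driver's
 * domain_alloc callback might acquire the cookie as below. "example_domain"
 * and its layout are illustrative assumptions, not a real driver's types.
 */
struct example_domain {
	struct iommu_domain domain;
};

static struct iommu_domain *example_domain_alloc(unsigned type)
{
	struct example_domain *dom;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;

	dom = kzalloc(sizeof(*dom), GFP_KERNEL);
	if (!dom)
		return NULL;

	/* DMA domains need their DMA-API resources set up before first use */
	if (type == IOMMU_DOMAIN_DMA && iommu_get_dma_cookie(&dom->domain)) {
		kfree(dom);
		return NULL;
	}
	return &dom->domain;
}
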
/**
 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 *
 * IOMMU drivers should normally call this from their domain_free callback.
 */
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
	struct iova_domain *iovad = domain->iova_cookie;

	if (!iovad)
		return;

	put_iova_domain(iovad);
	kfree(iovad);
	domain->iova_cookie = NULL;
}
EXPORT_SYMBOL(iommu_put_dma_cookie);

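/*
 * Matching sketch for the teardown path (again hypothetical): the driver's
 * domain_free callback releases the cookie before its own state.
 * container_of() recovers the example_domain from the sketch above.
 */
static void example_domain_free(struct iommu_domain *domain)
{
	iommu_put_dma_cookie(domain);
	kfree(container_of(domain, struct example_domain, domain));
}
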
/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 * @base: IOVA at which the mappable address space starts
 * @size: Size of IOVA space
 *
 * @base and @size should be exact multiples of IOMMU page granularity to
 * avoid rounding surprises. If necessary, we reserve the page at address 0
 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 * any change which could make prior IOVAs invalid will fail.
 */
int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, u64 size)
{
	struct iova_domain *iovad = domain->iova_cookie;
	unsigned long order, base_pfn, end_pfn;

	if (!iovad)
		return -ENODEV;

	/* Use the smallest supported page size for IOVA granularity */
	order = __ffs(domain->pgsize_bitmap);
	base_pfn = max_t(unsigned long, 1, base >> order);
	end_pfn = (base + size - 1) >> order;

	/* Check the domain allows at least some access to the device... */
	if (domain->geometry.force_aperture) {
		if (base > domain->geometry.aperture_end ||
		    base + size <= domain->geometry.aperture_start) {
			pr_warn("specified DMA range outside IOMMU capability\n");
			return -EFAULT;
		}
		/* ...then finally give it a kicking to make sure it fits */
		base_pfn = max_t(unsigned long, base_pfn,
				domain->geometry.aperture_start >> order);
		end_pfn = min_t(unsigned long, end_pfn,
				domain->geometry.aperture_end >> order);
	}

	/* All we can safely do with an existing domain is enlarge it */
	if (iovad->start_pfn) {
		if (1UL << order != iovad->granule ||
		    base_pfn != iovad->start_pfn ||
		    end_pfn < iovad->dma_32bit_pfn) {
			pr_warn("Incompatible range for DMA domain\n");
			return -EFAULT;
		}
		iovad->dma_32bit_pfn = end_pfn;
	} else {
		init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn);
	}
	return 0;
}
EXPORT_SYMBOL(iommu_dma_init_domain);

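/*
 * Sketch of how arch glue might bring a device's DMA domain up
 * (hypothetical; the real callers live in the arch dma-mapping code).
 * dma_base and size would typically come from the device's dma-ranges.
 */
static int example_setup_dma_domain(struct device *dev, u64 dma_base, u64 size)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

	if (!domain)
		return -ENODEV;

	/* Restrict IOVA allocation to the window the device can address */
	return iommu_dma_init_domain(domain, dma_base, size);
}
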
/**
 * dma_direction_to_prot - Translate DMA API directions to IOMMU API page flags
 * @dir: Direction of DMA transfer
 * @coherent: Is the DMA master cache-coherent?
 *
 * Return: corresponding IOMMU API page protection flags
 */
int dma_direction_to_prot(enum dma_data_direction dir, bool coherent)
{
	int prot = coherent ? IOMMU_CACHE : 0;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;
	default:
		return 0;
	}
}

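/*
 * Illustrative sketch: a streaming map_page wrapper in arch code would
 * typically combine this translation with iommu_dma_map_page() below.
 * The "coherent" flag is an assumption about how the arch tracks device
 * coherency; e.g. DMA_TO_DEVICE on a coherent master yields
 * IOMMU_READ | IOMMU_CACHE.
 */
static dma_addr_t example_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
				   struct dma_attrs *attrs)
{
	bool coherent = false;	/* or the arch's per-device coherency state */

	return iommu_dma_map_page(dev, page, offset, size,
				  dma_direction_to_prot(dir, coherent));
}
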
static struct iova *__alloc_iova(struct iova_domain *iovad, size_t size,
		dma_addr_t dma_limit)
{
	unsigned long shift = iova_shift(iovad);
	unsigned long length = iova_align(iovad, size) >> shift;

	/*
	 * Enforce size-alignment to be safe - there could perhaps be an
	 * attribute to control this per-device, or at least per-domain...
	 */
	return alloc_iova(iovad, length, dma_limit >> shift, true);
}

/* The IOVA allocator knows what we mapped, so just unmap whatever that was */
static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr)
{
	struct iova_domain *iovad = domain->iova_cookie;
	unsigned long shift = iova_shift(iovad);
	unsigned long pfn = dma_addr >> shift;
	struct iova *iova = find_iova(iovad, pfn);
	size_t size;

	if (WARN_ON(!iova))
		return;

	size = iova_size(iova) << shift;
	size -= iommu_unmap(domain, pfn << shift, size);
	/* ...and if we can't, then something is horribly, horribly wrong */
	WARN_ON(size > 0);
	__free_iova(iovad, iova);
}

static void __iommu_dma_free_pages(struct page **pages, int count)
{
	while (count--)
		__free_page(pages[count]);
	kvfree(pages);
}

static struct page **__iommu_dma_alloc_pages(unsigned int count,
		unsigned long order_mask, gfp_t gfp)
{
	struct page **pages;
	unsigned int i = 0, array_size = count * sizeof(*pages);

	order_mask &= (2U << MAX_ORDER) - 1;
	if (!order_mask)
		return NULL;

	if (array_size <= PAGE_SIZE)
		pages = kzalloc(array_size, GFP_KERNEL);
	else
		pages = vzalloc(array_size);
	if (!pages)
		return NULL;

	/* IOMMU can map any pages, so highmem can also be used here */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	while (count) {
		struct page *page = NULL;
		unsigned int order_size;

		/*
		 * Higher-order allocations are a convenience rather
		 * than a necessity, hence using __GFP_NORETRY until
		 * falling back to minimum-order allocations.
		 */
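		/*
		 * Worked example (illustrative): with 4K pages, an IOMMU
		 * that also supports 2M mappings gives order_mask bits for
		 * orders 0 and 9. A 520-page request would then take one
		 * order-9 allocation (512 pages) followed by eight order-0
		 * pages, assuming the large allocation succeeds.
		 */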
		for (order_mask &= (2U << __fls(count)) - 1;
		     order_mask; order_mask &= ~order_size) {
			unsigned int order = __fls(order_mask);

			order_size = 1U << order;
			page = alloc_pages((order_mask - order_size) ?
					   gfp | __GFP_NORETRY : gfp, order);
			if (!page)
				continue;
			if (!order)
				break;
			if (!PageCompound(page)) {
				split_page(page, order);
				break;
			} else if (!split_huge_page(page)) {
				break;
			}
			__free_pages(page, order);
		}
		if (!page) {
			__iommu_dma_free_pages(pages, i);
			return NULL;
		}
		count -= order_size;
		while (order_size--)
			pages[i++] = page++;
	}
	return pages;
}


/**
 * iommu_dma_free - Free a buffer allocated by iommu_dma_alloc()
 * @dev: Device which owns this buffer
 * @pages: Array of buffer pages as returned by iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @handle: DMA address of buffer
 *
 * Frees both the pages associated with the buffer and the array
 * describing them.
 */
void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
		dma_addr_t *handle)
{
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), *handle);
	__iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
	*handle = DMA_ERROR_CODE;
}

/**
 * iommu_dma_alloc - Allocate and map a buffer contiguous in IOVA space
 * @dev: Device to allocate memory for. Must be a real device
 *	 attached to an iommu_dma_domain
 * @size: Size of buffer in bytes
 * @gfp: Allocation flags
 * @attrs: DMA attributes for this allocation
 * @prot: IOMMU mapping flags
 * @handle: Out argument for allocated DMA handle
 * @flush_page: Arch callback which must ensure PAGE_SIZE bytes from the
 *		given VA/PA are visible to the given non-coherent device.
 *
 * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
 * but an IOMMU which supports smaller pages might not map the whole thing.
 *
 * Return: Array of struct page pointers describing the buffer,
 *	   or NULL on failure.
 */
struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
		struct dma_attrs *attrs, int prot, dma_addr_t *handle,
		void (*flush_page)(struct device *, const void *, phys_addr_t))
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iova_domain *iovad = domain->iova_cookie;
	struct iova *iova;
	struct page **pages;
	struct sg_table sgt;
	dma_addr_t dma_addr;
	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;

	*handle = DMA_ERROR_CODE;

	min_size = alloc_sizes & -alloc_sizes;
	if (min_size < PAGE_SIZE) {
		min_size = PAGE_SIZE;
		alloc_sizes |= PAGE_SIZE;
	} else {
		size = ALIGN(size, min_size);
	}
	if (dma_get_attr(DMA_ATTR_ALLOC_SINGLE_PAGES, attrs))
		alloc_sizes = min_size;

	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	pages = __iommu_dma_alloc_pages(count, alloc_sizes >> PAGE_SHIFT, gfp);
	if (!pages)
		return NULL;

	iova = __alloc_iova(iovad, size, dev->coherent_dma_mask);
	if (!iova)
		goto out_free_pages;

	size = iova_align(iovad, size);
	if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
		goto out_free_iova;

	if (!(prot & IOMMU_CACHE)) {
		struct sg_mapping_iter miter;
		/*
		 * The CPU-centric flushing implied by SG_MITER_TO_SG isn't
		 * sufficient here, so skip it by using the "wrong" direction.
		 */
		sg_miter_start(&miter, sgt.sgl, sgt.orig_nents, SG_MITER_FROM_SG);
		while (sg_miter_next(&miter))
			flush_page(dev, miter.addr, page_to_phys(miter.page));
		sg_miter_stop(&miter);
	}

	dma_addr = iova_dma_addr(iovad, iova);
	if (iommu_map_sg(domain, dma_addr, sgt.sgl, sgt.orig_nents, prot)
			< size)
		goto out_free_sg;

	*handle = dma_addr;
	sg_free_table(&sgt);
	return pages;

out_free_sg:
	sg_free_table(&sgt);
out_free_iova:
	__free_iova(iovad, iova);
out_free_pages:
	__iommu_dma_free_pages(pages, count);
	return NULL;
}

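/*
 * Minimal usage sketch, not part of this file: an arch dma_map_ops .alloc
 * implementation might drive iommu_dma_alloc() roughly as below.
 * example_flush_page() stands in for whatever cache maintenance a
 * non-coherent master needs, and dma_common_pages_remap() is the common
 * helper arch code typically uses to give the CPU a contiguous view of
 * the page array (and to let the mmap path find it again).
 */
static void example_flush_page(struct device *dev, const void *virt,
			       phys_addr_t phys)
{
	/* arch-specific clean/invalidate of one page would go here */
}

static void *example_alloc_coherent(struct device *dev, size_t size,
				    dma_addr_t *handle, gfp_t gfp,
				    struct dma_attrs *attrs)
{
	int prot = dma_direction_to_prot(DMA_BIDIRECTIONAL, false);
	struct page **pages;
	void *vaddr;

	pages = iommu_dma_alloc(dev, size, gfp, attrs, prot, handle,
				example_flush_page);
	if (!pages)
		return NULL;

	/* Stitch the pages into a contiguous kernel mapping for the CPU */
	vaddr = dma_common_pages_remap(pages, size, VM_USERMAP,
				       pgprot_writecombine(PAGE_KERNEL),
				       __builtin_return_address(0));
	if (!vaddr)
		iommu_dma_free(dev, pages, size, handle);
	return vaddr;
}
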
/**
 * iommu_dma_mmap - Map a buffer into provided user VMA
 * @pages: Array representing buffer from iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @vma: VMA describing requested userspace mapping
 *
 * Maps the pages of the buffer in @pages into @vma. The caller is responsible
 * for verifying the correct size and protection of @vma beforehand.
 */
int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma)
{
	unsigned long uaddr = vma->vm_start;
	unsigned int i, count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	int ret = -ENXIO;

	for (i = vma->vm_pgoff; i < count && uaddr < vma->vm_end; i++) {
		ret = vm_insert_page(vma, uaddr, pages[i]);
		if (ret)
			break;
		uaddr += PAGE_SIZE;
	}
	return ret;
}

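/*
 * Sketch (hypothetical): a dma_map_ops .mmap callback only needs to recover
 * the page array behind the remapped CPU address and pass it on. This
 * assumes the buffer was remapped as in the allocation sketch above, so
 * find_vm_area() can hand back the same pages.
 */
static int example_mmap(struct device *dev, struct vm_area_struct *vma,
			void *cpu_addr, dma_addr_t dma_addr, size_t size,
			struct dma_attrs *attrs)
{
	struct vm_struct *area = find_vm_area(cpu_addr);

	if (!area || !area->pages)
		return -ENXIO;

	return iommu_dma_mmap(area->pages, size, vma);
}
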
dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, int prot)
{
	dma_addr_t dma_addr;
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iova_domain *iovad = domain->iova_cookie;
	phys_addr_t phys = page_to_phys(page) + offset;
	size_t iova_off = iova_offset(iovad, phys);
	size_t len = iova_align(iovad, size + iova_off);
	struct iova *iova = __alloc_iova(iovad, len, dma_get_mask(dev));

	if (!iova)
		return DMA_ERROR_CODE;

	dma_addr = iova_dma_addr(iovad, iova);
	if (iommu_map(domain, dma_addr, phys - iova_off, len, prot)) {
		__free_iova(iovad, iova);
		return DMA_ERROR_CODE;
	}
	return dma_addr + iova_off;
}

void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
		enum dma_data_direction dir, struct dma_attrs *attrs)
{
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle);
}

/*
 * Prepare a successfully-mapped scatterlist to give back to the caller.
 *
 * At this point the segments are already laid out by iommu_dma_map_sg() to
 * avoid individually crossing any boundaries, so we merely need to check a
 * segment's start address to avoid concatenating across one.
 */
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
		dma_addr_t dma_addr)
{
	struct scatterlist *s, *cur = sg;
	unsigned long seg_mask = dma_get_seg_boundary(dev);
	unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
	int i, count = 0;

	for_each_sg(sg, s, nents, i) {
		/* Restore this segment's original unaligned fields first */
		unsigned int s_iova_off = sg_dma_address(s);
		unsigned int s_length = sg_dma_len(s);
		unsigned int s_iova_len = s->length;

		s->offset += s_iova_off;
		s->length = s_length;
		sg_dma_address(s) = DMA_ERROR_CODE;
		sg_dma_len(s) = 0;

		/*
		 * Now fill in the real DMA data. If...
		 * - there is a valid output segment to append to
		 * - and this segment starts on an IOVA page boundary
		 * - but doesn't fall at a segment boundary
		 * - and wouldn't make the resulting output segment too long
		 */
		if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
		    (cur_len + s_length <= max_len)) {
			/* ...then concatenate it with the previous one */
			cur_len += s_length;
		} else {
			/* Otherwise start the next output segment */
			if (i > 0)
				cur = sg_next(cur);
			cur_len = s_length;
			count++;

			sg_dma_address(cur) = dma_addr + s_iova_off;
		}

		sg_dma_len(cur) = cur_len;
		dma_addr += s_iova_len;

		if (s_length + s_iova_off < s_iova_len)
			cur_len = 0;
	}
	return count;
}

463 
464 /*
465  * If mapping failed, then just restore the original list,
466  * but making sure the DMA fields are invalidated.
467  */
468 static void __invalidate_sg(struct scatterlist *sg, int nents)
469 {
470 	struct scatterlist *s;
471 	int i;
472 
473 	for_each_sg(sg, s, nents, i) {
474 		if (sg_dma_address(s) != DMA_ERROR_CODE)
475 			s->offset += sg_dma_address(s);
476 		if (sg_dma_len(s))
477 			s->length = sg_dma_len(s);
478 		sg_dma_address(s) = DMA_ERROR_CODE;
479 		sg_dma_len(s) = 0;
480 	}
481 }
482 
/*
 * The DMA API client is passing in a scatterlist which could describe
 * any old buffer layout, but the IOMMU API requires everything to be
 * aligned to IOMMU pages. Hence the need for this complicated bit of
 * impedance-matching, to be able to hand off a suitably-aligned list,
 * but still preserve the original offsets and sizes for the caller.
 */
int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, int prot)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iova_domain *iovad = domain->iova_cookie;
	struct iova *iova;
	struct scatterlist *s, *prev = NULL;
	dma_addr_t dma_addr;
	size_t iova_len = 0;
	unsigned long mask = dma_get_seg_boundary(dev);
	int i;

	/*
	 * Work out how much IOVA space we need, and align the segments to
	 * IOVA granules for the IOMMU driver to handle. With some clever
	 * trickery we can modify the list in-place, but reversibly, by
	 * stashing the unaligned parts in the as-yet-unused DMA fields.
	 */
	for_each_sg(sg, s, nents, i) {
		size_t s_iova_off = iova_offset(iovad, s->offset);
		size_t s_length = s->length;
		size_t pad_len = (mask - iova_len + 1) & mask;

		sg_dma_address(s) = s_iova_off;
		sg_dma_len(s) = s_length;
		s->offset -= s_iova_off;
		s_length = iova_align(iovad, s_length + s_iova_off);
		s->length = s_length;

		/*
		 * Due to the alignment of our single IOVA allocation, we can
		 * depend on these assumptions about the segment boundary mask:
		 * - If mask size >= IOVA size, then the IOVA range cannot
		 *   possibly fall across a boundary, so we don't care.
		 * - If mask size < IOVA size, then the IOVA range must start
		 *   exactly on a boundary, therefore we can lay things out
		 *   based purely on segment lengths without needing to know
		 *   the actual addresses beforehand.
		 * - The mask must be a power of 2, so pad_len == 0 if
		 *   iova_len == 0, thus we cannot dereference prev the first
		 *   time through here (i.e. before it has a meaningful value).
		 */
		if (pad_len && pad_len < s_length - 1) {
			prev->length += pad_len;
			iova_len += pad_len;
		}

		iova_len += s_length;
		prev = s;
	}

	iova = __alloc_iova(iovad, iova_len, dma_get_mask(dev));
	if (!iova)
		goto out_restore_sg;

	/*
	 * We'll leave any physical concatenation to the IOMMU driver's
	 * implementation - it knows better than we do.
	 */
	dma_addr = iova_dma_addr(iovad, iova);
	if (iommu_map_sg(domain, dma_addr, sg, nents, prot) < iova_len)
		goto out_free_iova;

	return __finalise_sg(dev, sg, nents, dma_addr);

out_free_iova:
	__free_iova(iovad, iova);
out_restore_sg:
	__invalidate_sg(sg, nents);
	return 0;
}

void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, struct dma_attrs *attrs)
{
	/*
	 * The scatterlist segments are mapped into a single
	 * contiguous IOVA allocation, so this is incredibly easy.
	 */
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), sg_dma_address(sg));
}

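/*
 * Sketch of how arch dma_map_ops glue would typically call the pair above
 * (hypothetical wrapper; the coherency flag is again an assumption about
 * how the arch tracks it per device).
 */
static int example_map_sg(struct device *dev, struct scatterlist *sgl,
			  int nents, enum dma_data_direction dir,
			  struct dma_attrs *attrs)
{
	bool coherent = false;	/* or the arch's per-device coherency state */

	return iommu_dma_map_sg(dev, sgl, nents,
				dma_direction_to_prot(dir, coherent));
}
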
int iommu_dma_supported(struct device *dev, u64 mask)
{
	/*
	 * 'Special' IOMMUs which don't have the same addressing capability
	 * as the CPU will have to wait until we have some way to query that
	 * before they'll be able to use this framework.
	 */
	return 1;
}

int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == DMA_ERROR_CODE;
}
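
/*
 * Finally, a sketch of how the pieces plug into a struct dma_map_ops
 * (illustrative only; just the callbacks whose signatures already match are
 * wired directly, the rest need thin wrappers like the examples above).
 */
static struct dma_map_ops example_iommu_dma_ops = {
	.unmap_page	= iommu_dma_unmap_page,
	.unmap_sg	= iommu_dma_unmap_sg,
	.mapping_error	= iommu_dma_mapping_error,
	.dma_supported	= iommu_dma_supported,
};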