xref: /openbmc/linux/kernel/dma/swiotlb.c (revision b971612c)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Dynamic DMA mapping support.
4  *
5  * This implementation is a fallback for platforms that do not support
6  * I/O TLBs (aka DMA address translation hardware).
7  * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
8  * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
9  * Copyright (C) 2000, 2003 Hewlett-Packard Co
10  *	David Mosberger-Tang <davidm@hpl.hp.com>
11  *
12  * 03/05/07 davidm	Switch from PCI-DMA to generic device DMA API.
13  * 00/12/13 davidm	Rename to swiotlb.c and add mark_clean() to avoid
14  *			unnecessary i-cache flushing.
15  * 04/07/.. ak		Better overflow handling. Assorted fixes.
16  * 05/09/10 linville	Add support for syncing ranges, support syncing for
17  *			DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
18  * 08/12/11 beckyb	Add highmem support
19  */
20 
21 #define pr_fmt(fmt) "software IO TLB: " fmt
22 
23 #include <linux/cache.h>
24 #include <linux/cc_platform.h>
25 #include <linux/ctype.h>
26 #include <linux/debugfs.h>
27 #include <linux/dma-direct.h>
28 #include <linux/dma-map-ops.h>
29 #include <linux/export.h>
30 #include <linux/gfp.h>
31 #include <linux/highmem.h>
32 #include <linux/io.h>
33 #include <linux/iommu-helper.h>
34 #include <linux/init.h>
35 #include <linux/memblock.h>
36 #include <linux/mm.h>
37 #include <linux/pfn.h>
38 #include <linux/scatterlist.h>
39 #include <linux/set_memory.h>
40 #include <linux/spinlock.h>
41 #include <linux/string.h>
42 #include <linux/swiotlb.h>
43 #include <linux/types.h>
44 #ifdef CONFIG_DMA_RESTRICTED_POOL
45 #include <linux/of.h>
46 #include <linux/of_fdt.h>
47 #include <linux/of_reserved_mem.h>
48 #include <linux/slab.h>
49 #endif
50 
51 #define CREATE_TRACE_POINTS
52 #include <trace/events/swiotlb.h>
53 
54 #define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
55 
56 /*
57  * Minimum IO TLB size to bother booting with.  Systems with mainly
58  * 64-bit capable cards will only lightly use the swiotlb.  If we can't
59  * allocate a contiguous 1MB, we're probably in trouble anyway.
60  */
61 #define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
62 
63 #define INVALID_PHYS_ADDR (~(phys_addr_t)0)
64 
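/*
 * struct io_tlb_slot - bookkeeping for one IO_TLB_SIZE bounce slot
 * @orig_addr:	physical address of the original buffer mapped through this
 *		slot, or INVALID_PHYS_ADDR when the slot is free.
 * @alloc_size:	bytes of the mapping that remain from this slot onwards.
 * @list:	number of contiguous free slots from this slot to the end of
 *		its IO_TLB_SEGSIZE segment; 0 while the slot is in use.
 */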
65 struct io_tlb_slot {
66 	phys_addr_t orig_addr;
67 	size_t alloc_size;
68 	unsigned int list;
69 };
70 
71 static bool swiotlb_force_bounce;
72 static bool swiotlb_force_disable;
73 
74 struct io_tlb_mem io_tlb_default_mem;
75 
76 phys_addr_t swiotlb_unencrypted_base;
77 
78 static unsigned long default_nslabs = IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT;
79 static unsigned long default_nareas;
80 
81 /**
82  * struct io_tlb_area - IO TLB memory area descriptor
83  *
84  * This is a single area with a single lock.
85  *
86  * @used:	The number of used IO TLB blocks.
87  * @index:	The slot index to start searching in this area for next round.
88  * @lock:	The lock to protect the above data structures in the map and
89  *		unmap calls.
90  */
91 struct io_tlb_area {
92 	unsigned long used;
93 	unsigned int index;
94 	spinlock_t lock;
95 };
96 
97 /*
98  * Round up the number of slabs to the next power of 2. The last area is
99  * going to be smaller than the rest if default_nslabs is not a power of two.
100  * The number of slots in an area should be a multiple of IO_TLB_SEGSIZE,
101  * otherwise a segment may span two or more areas. That would conflict with
102  * free contiguous slot tracking: free slots are treated as contiguous no
103  * matter whether they cross an area boundary.
104  *
105  * Return true if default_nslabs is rounded up.
106  */
107 static bool round_up_default_nslabs(void)
108 {
109 	if (!default_nareas)
110 		return false;
111 
112 	if (default_nslabs < IO_TLB_SEGSIZE * default_nareas)
113 		default_nslabs = IO_TLB_SEGSIZE * default_nareas;
114 	else if (is_power_of_2(default_nslabs))
115 		return false;
116 	default_nslabs = roundup_pow_of_two(default_nslabs);
117 	return true;
118 }
119 
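/*
 * Pick the number of areas to use: zero means a single area, any other value
 * is rounded up to a power of two, and default_nslabs is grown if needed so
 * that every area covers at least one full IO_TLB_SEGSIZE segment.
 */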
120 static void swiotlb_adjust_nareas(unsigned int nareas)
121 {
122 	/* use a single area when none is specified */
123 	if (!nareas)
124 		nareas = 1;
125 	else if (!is_power_of_2(nareas))
126 		nareas = roundup_pow_of_two(nareas);
127 
128 	default_nareas = nareas;
129 
130 	pr_info("area num %d.\n", nareas);
131 	if (round_up_default_nslabs())
132 		pr_info("SWIOTLB bounce buffer size roundup to %luMB",
133 			(default_nslabs << IO_TLB_SHIFT) >> 20);
134 }
135 
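/*
 * Parse the "swiotlb=" kernel parameter: an optional slab count (rounded up
 * to a multiple of IO_TLB_SEGSIZE), an optional area count, and an optional
 * "force"/"noforce" flag, e.g. "swiotlb=65536,4,force" (values illustrative).
 */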
136 static int __init
137 setup_io_tlb_npages(char *str)
138 {
139 	if (isdigit(*str)) {
140 		/* avoid tail segment of size < IO_TLB_SEGSIZE */
141 		default_nslabs =
142 			ALIGN(simple_strtoul(str, &str, 0), IO_TLB_SEGSIZE);
143 	}
144 	if (*str == ',')
145 		++str;
146 	if (isdigit(*str))
147 		swiotlb_adjust_nareas(simple_strtoul(str, &str, 0));
148 	if (*str == ',')
149 		++str;
150 	if (!strcmp(str, "force"))
151 		swiotlb_force_bounce = true;
152 	else if (!strcmp(str, "noforce"))
153 		swiotlb_force_disable = true;
154 
155 	return 0;
156 }
157 early_param("swiotlb", setup_io_tlb_npages);
158 
159 unsigned int swiotlb_max_segment(void)
160 {
161 	if (!io_tlb_default_mem.nslabs)
162 		return 0;
163 	return rounddown(io_tlb_default_mem.nslabs << IO_TLB_SHIFT, PAGE_SIZE);
164 }
165 EXPORT_SYMBOL_GPL(swiotlb_max_segment);
166 
167 unsigned long swiotlb_size_or_default(void)
168 {
169 	return default_nslabs << IO_TLB_SHIFT;
170 }
171 
172 void __init swiotlb_adjust_size(unsigned long size)
173 {
174 	/*
175 	 * If the swiotlb parameter has not been specified, give architectures
176 	 * such as those supporting memory encryption a chance to adjust/expand
177 	 * the SWIOTLB size for their use.
178 	 */
179 	if (default_nslabs != IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT)
180 		return;
181 
182 	size = ALIGN(size, IO_TLB_SIZE);
183 	default_nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
184 	if (round_up_default_nslabs())
185 		size = default_nslabs << IO_TLB_SHIFT;
186 	pr_info("SWIOTLB bounce buffer size adjusted to %luMB", size >> 20);
187 }
188 
189 void swiotlb_print_info(void)
190 {
191 	struct io_tlb_mem *mem = &io_tlb_default_mem;
192 
193 	if (!mem->nslabs) {
194 		pr_warn("No low mem\n");
195 		return;
196 	}
197 
198 	pr_info("mapped [mem %pa-%pa] (%luMB)\n", &mem->start, &mem->end,
199 	       (mem->nslabs << IO_TLB_SHIFT) >> 20);
200 }
201 
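/* Offset of slot @val within its IO_TLB_SEGSIZE-slot segment. */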
202 static inline unsigned long io_tlb_offset(unsigned long val)
203 {
204 	return val & (IO_TLB_SEGSIZE - 1);
205 }
206 
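/* Number of IO_TLB_SIZE slots needed to cover @val bytes. */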
207 static inline unsigned long nr_slots(u64 val)
208 {
209 	return DIV_ROUND_UP(val, IO_TLB_SIZE);
210 }
211 
212 /*
213  * Remap swiotlb memory in the unencrypted physical address space
214  * when swiotlb_unencrypted_base is set. (e.g. for Hyper-V AMD SEV-SNP
215  * Isolation VMs).
216  */
217 #ifdef CONFIG_HAS_IOMEM
218 static void *swiotlb_mem_remap(struct io_tlb_mem *mem, unsigned long bytes)
219 {
220 	void *vaddr = NULL;
221 
222 	if (swiotlb_unencrypted_base) {
223 		phys_addr_t paddr = mem->start + swiotlb_unencrypted_base;
224 
225 		vaddr = memremap(paddr, bytes, MEMREMAP_WB);
226 		if (!vaddr)
227 			pr_err("Failed to map the unencrypted memory %pa size %lx.\n",
228 			       &paddr, bytes);
229 	}
230 
231 	return vaddr;
232 }
233 #else
234 static void *swiotlb_mem_remap(struct io_tlb_mem *mem, unsigned long bytes)
235 {
236 	return NULL;
237 }
238 #endif
239 
240 /*
241  * Early SWIOTLB allocation may be too early to allow an architecture to
242  * perform the desired operations.  This function allows the architecture to
243  * call SWIOTLB when the operations are possible.  It needs to be called
244  * before the SWIOTLB memory is used.
245  */
246 void __init swiotlb_update_mem_attributes(void)
247 {
248 	struct io_tlb_mem *mem = &io_tlb_default_mem;
249 	void *vaddr;
250 	unsigned long bytes;
251 
252 	if (!mem->nslabs || mem->late_alloc)
253 		return;
254 	vaddr = phys_to_virt(mem->start);
255 	bytes = PAGE_ALIGN(mem->nslabs << IO_TLB_SHIFT);
256 	set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);
257 
258 	mem->vaddr = swiotlb_mem_remap(mem, bytes);
259 	if (!mem->vaddr)
260 		mem->vaddr = vaddr;
261 }
262 
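/*
 * Fill in the io_tlb_mem descriptor for a bounce buffer at @start: set up the
 * per-area locks and search indices, and seed each slot's free list so that
 * slot i advertises the IO_TLB_SEGSIZE - io_tlb_offset(i) free slots that
 * remain in its segment.
 */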
263 static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
264 		unsigned long nslabs, unsigned int flags,
265 		bool late_alloc, unsigned int nareas)
266 {
267 	void *vaddr = phys_to_virt(start);
268 	unsigned long bytes = nslabs << IO_TLB_SHIFT, i;
269 
270 	mem->nslabs = nslabs;
271 	mem->start = start;
272 	mem->end = mem->start + bytes;
273 	mem->late_alloc = late_alloc;
274 	mem->nareas = nareas;
275 	mem->area_nslabs = nslabs / mem->nareas;
276 
277 	mem->force_bounce = swiotlb_force_bounce || (flags & SWIOTLB_FORCE);
278 
279 	for (i = 0; i < mem->nareas; i++) {
280 		spin_lock_init(&mem->areas[i].lock);
281 		mem->areas[i].index = 0;
282 		mem->areas[i].used = 0;
283 	}
284 
285 	for (i = 0; i < mem->nslabs; i++) {
286 		mem->slots[i].list = IO_TLB_SEGSIZE - io_tlb_offset(i);
287 		mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
288 		mem->slots[i].alloc_size = 0;
289 	}
290 
291 	/*
292 	 * If swiotlb_unencrypted_base is set, the bounce buffer memory will
293 	 * be remapped and cleared in swiotlb_update_mem_attributes.
294 	 */
295 	if (swiotlb_unencrypted_base)
296 		return;
297 
298 	memset(vaddr, 0, bytes);
299 	mem->vaddr = vaddr;
301 }
302 
303 /*
304  * Statically reserve bounce buffer space and initialize bounce buffer data
305  * structures for the software IO TLB used to implement the DMA API.
306  */
307 void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
308 		int (*remap)(void *tlb, unsigned long nslabs))
309 {
310 	struct io_tlb_mem *mem = &io_tlb_default_mem;
311 	unsigned long nslabs;
312 	size_t alloc_size;
313 	size_t bytes;
314 	void *tlb;
315 
316 	if (!addressing_limit && !swiotlb_force_bounce)
317 		return;
318 	if (swiotlb_force_disable)
319 		return;
320 
321 	/*
322 	 * default_nslabs may be changed when the area number is adjusted,
323 	 * so allocate the bounce buffer after adjusting the area number.
324 	 */
325 	if (!default_nareas)
326 		swiotlb_adjust_nareas(num_possible_cpus());
327 
328 	nslabs = default_nslabs;
329 	/*
330 	 * By default allocate the bounce buffer memory from low memory, but
331 	 * allow it to be placed anywhere for hypervisors with guest
332 	 * memory encryption.
333 	 */
334 retry:
335 	bytes = PAGE_ALIGN(nslabs << IO_TLB_SHIFT);
336 	if (flags & SWIOTLB_ANY)
337 		tlb = memblock_alloc(bytes, PAGE_SIZE);
338 	else
339 		tlb = memblock_alloc_low(bytes, PAGE_SIZE);
340 	if (!tlb) {
341 		pr_warn("%s: failed to allocate tlb structure\n", __func__);
342 		return;
343 	}
344 
345 	if (remap && remap(tlb, nslabs) < 0) {
346 		memblock_free(tlb, PAGE_ALIGN(bytes));
347 
348 		nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
349 		if (nslabs < IO_TLB_MIN_SLABS)
350 			panic("%s: Failed to remap %zu bytes\n",
351 			      __func__, bytes);
352 		goto retry;
353 	}
354 
355 	alloc_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), nslabs));
356 	mem->slots = memblock_alloc(alloc_size, PAGE_SIZE);
357 	if (!mem->slots)
358 		panic("%s: Failed to allocate %zu bytes align=0x%lx\n",
359 		      __func__, alloc_size, PAGE_SIZE);
360 
361 	mem->areas = memblock_alloc(array_size(sizeof(struct io_tlb_area),
362 		default_nareas), SMP_CACHE_BYTES);
363 	if (!mem->areas)
364 		panic("%s: Failed to allocate mem->areas.\n", __func__);
365 
366 	swiotlb_init_io_tlb_mem(mem, __pa(tlb), nslabs, flags, false,
367 				default_nareas);
368 
369 	if (flags & SWIOTLB_VERBOSE)
370 		swiotlb_print_info();
371 }
372 
373 void __init swiotlb_init(bool addressing_limit, unsigned int flags)
374 {
375 	swiotlb_init_remap(addressing_limit, flags, NULL);
376 }
377 
378 /*
379  * Systems with larger DMA zones (those that don't support ISA) can
380  * initialize the swiotlb later using the page allocator if needed.
381  * This should be just like above, but with some error catching.
382  */
383 int swiotlb_init_late(size_t size, gfp_t gfp_mask,
384 		int (*remap)(void *tlb, unsigned long nslabs))
385 {
386 	struct io_tlb_mem *mem = &io_tlb_default_mem;
387 	unsigned long nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
388 	unsigned char *vstart = NULL;
389 	unsigned int order, area_order;
390 	bool retried = false;
391 	int rc = 0;
392 
393 	if (swiotlb_force_disable)
394 		return 0;
395 
396 retry:
397 	order = get_order(nslabs << IO_TLB_SHIFT);
398 	nslabs = SLABS_PER_PAGE << order;
399 
400 	while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
401 		vstart = (void *)__get_free_pages(gfp_mask | __GFP_NOWARN,
402 						  order);
403 		if (vstart)
404 			break;
405 		order--;
406 		nslabs = SLABS_PER_PAGE << order;
407 		retried = true;
408 	}
409 
410 	if (!vstart)
411 		return -ENOMEM;
412 
413 	if (remap)
414 		rc = remap(vstart, nslabs);
415 	if (rc) {
416 		free_pages((unsigned long)vstart, order);
417 
418 		nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
419 		if (nslabs < IO_TLB_MIN_SLABS)
420 			return rc;
421 		retried = true;
422 		goto retry;
423 	}
424 
425 	if (retried) {
426 		pr_warn("only able to allocate %ld MB\n",
427 			(PAGE_SIZE << order) >> 20);
428 	}
429 
430 	if (!default_nareas)
431 		swiotlb_adjust_nareas(num_possible_cpus());
432 
433 	area_order = get_order(array_size(sizeof(*mem->areas),
434 		default_nareas));
435 	mem->areas = (struct io_tlb_area *)
436 		__get_free_pages(GFP_KERNEL | __GFP_ZERO, area_order);
437 	if (!mem->areas)
438 		goto error_area;
439 
440 	mem->slots = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
441 		get_order(array_size(sizeof(*mem->slots), nslabs)));
442 	if (!mem->slots)
443 		goto error_slots;
444 
445 	set_memory_decrypted((unsigned long)vstart,
446 			     (nslabs << IO_TLB_SHIFT) >> PAGE_SHIFT);
447 	swiotlb_init_io_tlb_mem(mem, virt_to_phys(vstart), nslabs, 0, true,
448 				default_nareas);
449 
450 	swiotlb_print_info();
451 	return 0;
452 
453 error_slots:
454 	free_pages((unsigned long)mem->areas, area_order);
455 error_area:
456 	free_pages((unsigned long)vstart, order);
457 	return -ENOMEM;
458 }
459 
460 void __init swiotlb_exit(void)
461 {
462 	struct io_tlb_mem *mem = &io_tlb_default_mem;
463 	unsigned long tbl_vaddr;
464 	size_t tbl_size, slots_size;
465 	unsigned int area_order;
466 
467 	if (swiotlb_force_bounce)
468 		return;
469 
470 	if (!mem->nslabs)
471 		return;
472 
473 	pr_info("tearing down default memory pool\n");
474 	tbl_vaddr = (unsigned long)phys_to_virt(mem->start);
475 	tbl_size = PAGE_ALIGN(mem->end - mem->start);
476 	slots_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), mem->nslabs));
477 
478 	set_memory_encrypted(tbl_vaddr, tbl_size >> PAGE_SHIFT);
479 	if (mem->late_alloc) {
480 		area_order = get_order(array_size(sizeof(*mem->areas),
481 			mem->nareas));
482 		free_pages((unsigned long)mem->areas, area_order);
483 		free_pages(tbl_vaddr, get_order(tbl_size));
484 		free_pages((unsigned long)mem->slots, get_order(slots_size));
485 	} else {
486 		memblock_free_late(__pa(mem->areas),
487 			array_size(sizeof(*mem->areas), mem->nareas));
488 		memblock_free_late(mem->start, tbl_size);
489 		memblock_free_late(__pa(mem->slots), slots_size);
490 	}
491 
492 	memset(mem, 0, sizeof(*mem));
493 }
494 
495 /*
496  * Return the offset into a iotlb slot required to keep the device happy.
497  */
498 static unsigned int swiotlb_align_offset(struct device *dev, u64 addr)
499 {
500 	return addr & dma_get_min_align_mask(dev) & (IO_TLB_SIZE - 1);
501 }
502 
503 /*
504  * Bounce: copy the swiotlb buffer from or back to the original dma location
505  */
506 static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size,
507 			   enum dma_data_direction dir)
508 {
509 	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
510 	int index = (tlb_addr - mem->start) >> IO_TLB_SHIFT;
511 	phys_addr_t orig_addr = mem->slots[index].orig_addr;
512 	size_t alloc_size = mem->slots[index].alloc_size;
513 	unsigned long pfn = PFN_DOWN(orig_addr);
514 	unsigned char *vaddr = mem->vaddr + tlb_addr - mem->start;
515 	unsigned int tlb_offset, orig_addr_offset;
516 
517 	if (orig_addr == INVALID_PHYS_ADDR)
518 		return;
519 
520 	tlb_offset = tlb_addr & (IO_TLB_SIZE - 1);
521 	orig_addr_offset = swiotlb_align_offset(dev, orig_addr);
522 	if (tlb_offset < orig_addr_offset) {
523 		dev_WARN_ONCE(dev, 1,
524 			"Access before mapping start detected. orig offset %u, requested offset %u.\n",
525 			orig_addr_offset, tlb_offset);
526 		return;
527 	}
528 
529 	tlb_offset -= orig_addr_offset;
530 	if (tlb_offset > alloc_size) {
531 		dev_WARN_ONCE(dev, 1,
532 			"Buffer overflow detected. Allocation size: %zu. Mapping size: %zu+%u.\n",
533 			alloc_size, size, tlb_offset);
534 		return;
535 	}
536 
537 	orig_addr += tlb_offset;
538 	alloc_size -= tlb_offset;
539 
540 	if (size > alloc_size) {
541 		dev_WARN_ONCE(dev, 1,
542 			"Buffer overflow detected. Allocation size: %zu. Mapping size: %zu.\n",
543 			alloc_size, size);
544 		size = alloc_size;
545 	}
546 
547 	if (PageHighMem(pfn_to_page(pfn))) {
548 		/* The buffer does not have a mapping.  Map it in and copy */
549 		unsigned int offset = orig_addr & ~PAGE_MASK;
550 		char *buffer;
551 		unsigned int sz = 0;
552 		unsigned long flags;
553 
554 		while (size) {
555 			sz = min_t(size_t, PAGE_SIZE - offset, size);
556 
557 			local_irq_save(flags);
558 			buffer = kmap_atomic(pfn_to_page(pfn));
559 			if (dir == DMA_TO_DEVICE)
560 				memcpy(vaddr, buffer + offset, sz);
561 			else
562 				memcpy(buffer + offset, vaddr, sz);
563 			kunmap_atomic(buffer);
564 			local_irq_restore(flags);
565 
566 			size -= sz;
567 			pfn++;
568 			vaddr += sz;
569 			offset = 0;
570 		}
571 	} else if (dir == DMA_TO_DEVICE) {
572 		memcpy(vaddr, phys_to_virt(orig_addr), size);
573 	} else {
574 		memcpy(phys_to_virt(orig_addr), vaddr, size);
575 	}
576 }
577 
578 static inline phys_addr_t slot_addr(phys_addr_t start, phys_addr_t idx)
579 {
580 	return start + (idx << IO_TLB_SHIFT);
581 }
582 
583 /*
584  * Carefully handle integer overflow which can occur when boundary_mask == ~0UL.
585  */
586 static inline unsigned long get_max_slots(unsigned long boundary_mask)
587 {
588 	if (boundary_mask == ~0UL)
589 		return 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);
590 	return nr_slots(boundary_mask + 1);
591 }
592 
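/* Wrap an area-relative slot index back to the start of the area. */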
593 static unsigned int wrap_area_index(struct io_tlb_mem *mem, unsigned int index)
594 {
595 	if (index >= mem->area_nslabs)
596 		return 0;
597 	return index;
598 }
599 
600 /*
601  * Find a suitable number of contiguous IO TLB entries that will fit this
602  * request and allocate a buffer from the given IO TLB memory area.
603  */
604 static int swiotlb_do_find_slots(struct device *dev, int area_index,
605 		phys_addr_t orig_addr, size_t alloc_size,
606 		unsigned int alloc_align_mask)
607 {
608 	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
609 	struct io_tlb_area *area = mem->areas + area_index;
610 	unsigned long boundary_mask = dma_get_seg_boundary(dev);
611 	dma_addr_t tbl_dma_addr =
612 		phys_to_dma_unencrypted(dev, mem->start) & boundary_mask;
613 	unsigned long max_slots = get_max_slots(boundary_mask);
614 	unsigned int iotlb_align_mask =
615 		dma_get_min_align_mask(dev) & ~(IO_TLB_SIZE - 1);
616 	unsigned int nslots = nr_slots(alloc_size), stride;
617 	unsigned int index, wrap, count = 0, i;
618 	unsigned int offset = swiotlb_align_offset(dev, orig_addr);
619 	unsigned long flags;
620 	unsigned int slot_base;
621 	unsigned int slot_index;
622 
623 	BUG_ON(!nslots);
624 	BUG_ON(area_index >= mem->nareas);
625 
626 	/*
627 	 * For mappings with an alignment requirement don't bother looping to
628 	 * unaligned slots once we have found an aligned one.  For allocations
629 	 * of PAGE_SIZE or larger only look for page-aligned slots.
630 	 */
631 	stride = (iotlb_align_mask >> IO_TLB_SHIFT) + 1;
632 	if (alloc_size >= PAGE_SIZE)
633 		stride = max(stride, stride << (PAGE_SHIFT - IO_TLB_SHIFT));
634 	stride = max(stride, (alloc_align_mask >> IO_TLB_SHIFT) + 1);
635 
636 	spin_lock_irqsave(&area->lock, flags);
637 	if (unlikely(nslots > mem->area_nslabs - area->used))
638 		goto not_found;
639 
640 	slot_base = area_index * mem->area_nslabs;
641 	index = wrap = wrap_area_index(mem, ALIGN(area->index, stride));
642 
643 	do {
644 		slot_index = slot_base + index;
645 
646 		if (orig_addr &&
647 		    (slot_addr(tbl_dma_addr, slot_index) &
648 		     iotlb_align_mask) != (orig_addr & iotlb_align_mask)) {
649 			index = wrap_area_index(mem, index + 1);
650 			continue;
651 		}
652 
653 		/*
654 		 * If we find a slot that indicates we have 'nslots' number of
655 		 * contiguous buffers, we allocate the buffers from that slot
656 		 * and mark the entries as '0', indicating that they are unavailable.
657 		 */
658 		if (!iommu_is_span_boundary(slot_index, nslots,
659 					    nr_slots(tbl_dma_addr),
660 					    max_slots)) {
661 			if (mem->slots[slot_index].list >= nslots)
662 				goto found;
663 		}
664 		index = wrap_area_index(mem, index + stride);
665 	} while (index != wrap);
666 
667 not_found:
668 	spin_unlock_irqrestore(&area->lock, flags);
669 	return -1;
670 
671 found:
672 	for (i = slot_index; i < slot_index + nslots; i++) {
673 		mem->slots[i].list = 0;
674 		mem->slots[i].alloc_size = alloc_size - (offset +
675 				((i - slot_index) << IO_TLB_SHIFT));
676 	}
677 	for (i = slot_index - 1;
678 	     io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 &&
679 	     mem->slots[i].list; i--)
680 		mem->slots[i].list = ++count;
681 
682 	/*
683 	 * Update the area's search index so that the next allocation resumes
684 	 * after the slots just used instead of searching from the start.
684 	 */
685 	if (index + nslots < mem->area_nslabs)
686 		area->index = index + nslots;
687 	else
688 		area->index = 0;
689 	area->used += nslots;
690 	spin_unlock_irqrestore(&area->lock, flags);
691 	return slot_index;
692 }
693 
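/*
 * Search the areas round-robin for free slots, starting with the area picked
 * from the calling CPU's id so that concurrent mappings tend to contend on
 * different locks.  Returns a global slot index, or -1 if the pool is full.
 */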
694 static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
695 		size_t alloc_size, unsigned int alloc_align_mask)
696 {
697 	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
698 	int start = raw_smp_processor_id() & (mem->nareas - 1);
699 	int i = start, index;
700 
701 	do {
702 		index = swiotlb_do_find_slots(dev, i, orig_addr, alloc_size,
703 					      alloc_align_mask);
704 		if (index >= 0)
705 			return index;
706 		if (++i >= mem->nareas)
707 			i = 0;
708 	} while (i != start);
709 
710 	return -1;
711 }
712 
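/* Total number of used slots across all areas (read without the area locks). */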
713 static unsigned long mem_used(struct io_tlb_mem *mem)
714 {
715 	int i;
716 	unsigned long used = 0;
717 
718 	for (i = 0; i < mem->nareas; i++)
719 		used += mem->areas[i].used;
720 	return used;
721 }
722 
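/*
 * Allocate bounce buffer slots covering @alloc_size plus the low-bits offset
 * of @orig_addr, record the original address in each slot for later syncs,
 * bounce the current data into the buffer, and return the physical address
 * within the bounce buffer that corresponds to @orig_addr, or
 * (phys_addr_t)DMA_MAPPING_ERROR on failure.
 */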
723 phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
724 		size_t mapping_size, size_t alloc_size,
725 		unsigned int alloc_align_mask, enum dma_data_direction dir,
726 		unsigned long attrs)
727 {
728 	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
729 	unsigned int offset = swiotlb_align_offset(dev, orig_addr);
730 	unsigned int i;
731 	int index;
732 	phys_addr_t tlb_addr;
733 
734 	if (!mem || !mem->nslabs)
735 		panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");
736 
737 	if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
738 		pr_warn_once("Memory encryption is active and system is using DMA bounce buffers\n");
739 
740 	if (mapping_size > alloc_size) {
741 		dev_warn_once(dev, "Invalid sizes (mapping: %zd bytes, alloc: %zd bytes)",
742 			      mapping_size, alloc_size);
743 		return (phys_addr_t)DMA_MAPPING_ERROR;
744 	}
745 
746 	index = swiotlb_find_slots(dev, orig_addr,
747 				   alloc_size + offset, alloc_align_mask);
748 	if (index == -1) {
749 		if (!(attrs & DMA_ATTR_NO_WARN))
750 			dev_warn_ratelimited(dev,
751 	"swiotlb buffer is full (sz: %zd bytes), total %lu (slots), used %lu (slots)\n",
752 				 alloc_size, mem->nslabs, mem_used(mem));
753 		return (phys_addr_t)DMA_MAPPING_ERROR;
754 	}
755 
756 	/*
757 	 * Save away the mapping from the original address to the DMA address.
758 	 * This is needed when we sync the memory.  Then we sync the buffer if
759 	 * needed.
760 	 */
761 	for (i = 0; i < nr_slots(alloc_size + offset); i++)
762 		mem->slots[index + i].orig_addr = slot_addr(orig_addr, i);
763 	tlb_addr = slot_addr(mem->start, index) + offset;
764 	/*
765 	 * When dir == DMA_FROM_DEVICE we could omit the copy from the original
766 	 * buffer to the tlb buffer, if we knew for sure the device would
767 	 * overwrite the entire current content. But we don't, so an
768 	 * unconditional bounce prevents leaking stale swiotlb content (i.e.
769 	 * kernel memory) to user space.
770 	 */
771 	swiotlb_bounce(dev, tlb_addr, mapping_size, DMA_TO_DEVICE);
772 	return tlb_addr;
773 }
774 
775 static void swiotlb_release_slots(struct device *dev, phys_addr_t tlb_addr)
776 {
777 	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
778 	unsigned long flags;
779 	unsigned int offset = swiotlb_align_offset(dev, tlb_addr);
780 	int index = (tlb_addr - offset - mem->start) >> IO_TLB_SHIFT;
781 	int nslots = nr_slots(mem->slots[index].alloc_size + offset);
782 	int aindex = index / mem->area_nslabs;
783 	struct io_tlb_area *area = &mem->areas[aindex];
784 	int count, i;
785 
786 	/*
787 	 * Return the buffer to the free list by setting the corresponding
788 	 * entries to indicate the number of contiguous entries available.
789 	 * While returning the entries to the free list, we merge the entries
790 	 * with slots below and above the pool being returned.
791 	 */
792 	BUG_ON(aindex >= mem->nareas);
793 
794 	spin_lock_irqsave(&area->lock, flags);
795 	if (index + nslots < ALIGN(index + 1, IO_TLB_SEGSIZE))
796 		count = mem->slots[index + nslots].list;
797 	else
798 		count = 0;
799 
800 	/*
801 	 * Step 1: return the slots to the free list, merging the slots with
802 	 * superceeding slots
803 	 */
804 	for (i = index + nslots - 1; i >= index; i--) {
805 		mem->slots[i].list = ++count;
806 		mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
807 		mem->slots[i].alloc_size = 0;
808 	}
809 
810 	/*
811 	 * Step 2: merge the returned slots with the preceding slots, if
812 	 * available (non zero)
813 	 */
814 	for (i = index - 1;
815 	     io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 && mem->slots[i].list;
816 	     i--)
817 		mem->slots[i].list = ++count;
818 	area->used -= nslots;
819 	spin_unlock_irqrestore(&area->lock, flags);
820 }
821 
822 /*
823  * tlb_addr is the physical address of the bounce buffer to unmap.
824  */
825 void swiotlb_tbl_unmap_single(struct device *dev, phys_addr_t tlb_addr,
826 			      size_t mapping_size, enum dma_data_direction dir,
827 			      unsigned long attrs)
828 {
829 	/*
830 	 * First, sync the memory before unmapping the entry
831 	 */
832 	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
833 	    (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
834 		swiotlb_bounce(dev, tlb_addr, mapping_size, DMA_FROM_DEVICE);
835 
836 	swiotlb_release_slots(dev, tlb_addr);
837 }
838 
839 void swiotlb_sync_single_for_device(struct device *dev, phys_addr_t tlb_addr,
840 		size_t size, enum dma_data_direction dir)
841 {
842 	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
843 		swiotlb_bounce(dev, tlb_addr, size, DMA_TO_DEVICE);
844 	else
845 		BUG_ON(dir != DMA_FROM_DEVICE);
846 }
847 
848 void swiotlb_sync_single_for_cpu(struct device *dev, phys_addr_t tlb_addr,
849 		size_t size, enum dma_data_direction dir)
850 {
851 	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
852 		swiotlb_bounce(dev, tlb_addr, size, DMA_FROM_DEVICE);
853 	else
854 		BUG_ON(dir != DMA_TO_DEVICE);
855 }
856 
857 /*
858  * Create a swiotlb mapping for the buffer at @paddr, and in case of DMAing
859  * to the device copy the data into it as well.
860  */
861 dma_addr_t swiotlb_map(struct device *dev, phys_addr_t paddr, size_t size,
862 		enum dma_data_direction dir, unsigned long attrs)
863 {
864 	phys_addr_t swiotlb_addr;
865 	dma_addr_t dma_addr;
866 
867 	trace_swiotlb_bounced(dev, phys_to_dma(dev, paddr), size);
868 
869 	swiotlb_addr = swiotlb_tbl_map_single(dev, paddr, size, size, 0, dir,
870 			attrs);
871 	if (swiotlb_addr == (phys_addr_t)DMA_MAPPING_ERROR)
872 		return DMA_MAPPING_ERROR;
873 
874 	/* Ensure that the address returned is DMA'ble */
875 	dma_addr = phys_to_dma_unencrypted(dev, swiotlb_addr);
876 	if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
877 		swiotlb_tbl_unmap_single(dev, swiotlb_addr, size, dir,
878 			attrs | DMA_ATTR_SKIP_CPU_SYNC);
879 		dev_WARN_ONCE(dev, 1,
880 			"swiotlb addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
881 			&dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
882 		return DMA_MAPPING_ERROR;
883 	}
884 
885 	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
886 		arch_sync_dma_for_device(swiotlb_addr, size, dir);
887 	return dma_addr;
888 }
889 
890 size_t swiotlb_max_mapping_size(struct device *dev)
891 {
892 	int min_align_mask = dma_get_min_align_mask(dev);
893 	int min_align = 0;
894 
895 	/*
896 	 * swiotlb_find_slots() skips slots according to the
897 	 * min align mask. This affects the maximum mapping size.
898 	 * Take it into account here.
899 	 */
900 	if (min_align_mask)
901 		min_align = roundup(min_align_mask, IO_TLB_SIZE);
902 
903 	return ((size_t)IO_TLB_SIZE) * IO_TLB_SEGSIZE - min_align;
904 }
905 
906 bool is_swiotlb_active(struct device *dev)
907 {
908 	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
909 
910 	return mem && mem->nslabs;
911 }
912 EXPORT_SYMBOL_GPL(is_swiotlb_active);
913 
914 static int io_tlb_used_get(void *data, u64 *val)
915 {
916 	*val = mem_used(&io_tlb_default_mem);
917 	return 0;
918 }
919 DEFINE_DEBUGFS_ATTRIBUTE(fops_io_tlb_used, io_tlb_used_get, NULL, "%llu\n");
920 
921 static void swiotlb_create_debugfs_files(struct io_tlb_mem *mem,
922 					 const char *dirname)
923 {
924 	mem->debugfs = debugfs_create_dir(dirname, io_tlb_default_mem.debugfs);
925 	if (!mem->nslabs)
926 		return;
927 
928 	debugfs_create_ulong("io_tlb_nslabs", 0400, mem->debugfs, &mem->nslabs);
929 	debugfs_create_file("io_tlb_used", 0400, mem->debugfs, NULL,
930 			&fops_io_tlb_used);
931 }
932 
933 static int __init __maybe_unused swiotlb_create_default_debugfs(void)
934 {
935 	swiotlb_create_debugfs_files(&io_tlb_default_mem, "swiotlb");
936 	return 0;
937 }
938 
939 #ifdef CONFIG_DEBUG_FS
940 late_initcall(swiotlb_create_default_debugfs);
941 #endif
942 
943 #ifdef CONFIG_DMA_RESTRICTED_POOL
944 
945 struct page *swiotlb_alloc(struct device *dev, size_t size)
946 {
947 	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
948 	phys_addr_t tlb_addr;
949 	int index;
950 
951 	if (!mem)
952 		return NULL;
953 
954 	index = swiotlb_find_slots(dev, 0, size, 0);
955 	if (index == -1)
956 		return NULL;
957 
958 	tlb_addr = slot_addr(mem->start, index);
959 
960 	return pfn_to_page(PFN_DOWN(tlb_addr));
961 }
962 
963 bool swiotlb_free(struct device *dev, struct page *page, size_t size)
964 {
965 	phys_addr_t tlb_addr = page_to_phys(page);
966 
967 	if (!is_swiotlb_buffer(dev, tlb_addr))
968 		return false;
969 
970 	swiotlb_release_slots(dev, tlb_addr);
971 
972 	return true;
973 }
974 
975 static int rmem_swiotlb_device_init(struct reserved_mem *rmem,
976 				    struct device *dev)
977 {
978 	struct io_tlb_mem *mem = rmem->priv;
979 	unsigned long nslabs = rmem->size >> IO_TLB_SHIFT;
980 
981 	/* Restricted per-device pools use a single IO TLB area. */
982 	unsigned int nareas = 1;
983 
984 	/*
985 	 * Since multiple devices can share the same pool, the private data
986 	 * (the io_tlb_mem struct) is initialized by the first device attached
987 	 * to it.
988 	 */
989 	if (!mem) {
990 		mem = kzalloc(sizeof(*mem), GFP_KERNEL);
991 		if (!mem)
992 			return -ENOMEM;
993 
994 		mem->slots = kcalloc(nslabs, sizeof(*mem->slots), GFP_KERNEL);
995 		if (!mem->slots) {
996 			kfree(mem);
997 			return -ENOMEM;
998 		}
999 
1000 		mem->areas = kcalloc(nareas, sizeof(*mem->areas),
1001 				GFP_KERNEL);
1002 		if (!mem->areas) {
1003 			kfree(mem->slots);
1004 			kfree(mem);
1005 			return -ENOMEM;
1006 		}
1007 
1008 		set_memory_decrypted((unsigned long)phys_to_virt(rmem->base),
1009 				     rmem->size >> PAGE_SHIFT);
1010 		swiotlb_init_io_tlb_mem(mem, rmem->base, nslabs, SWIOTLB_FORCE,
1011 					false, nareas);
1012 		mem->for_alloc = true;
1013 
1014 		rmem->priv = mem;
1015 
1016 		swiotlb_create_debugfs_files(mem, rmem->name);
1017 	}
1018 
1019 	dev->dma_io_tlb_mem = mem;
1020 
1021 	return 0;
1022 }
1023 
1024 static void rmem_swiotlb_device_release(struct reserved_mem *rmem,
1025 					struct device *dev)
1026 {
1027 	dev->dma_io_tlb_mem = &io_tlb_default_mem;
1028 }
1029 
1030 static const struct reserved_mem_ops rmem_swiotlb_ops = {
1031 	.device_init = rmem_swiotlb_device_init,
1032 	.device_release = rmem_swiotlb_device_release,
1033 };
1034 
1035 static int __init rmem_swiotlb_setup(struct reserved_mem *rmem)
1036 {
1037 	unsigned long node = rmem->fdt_node;
1038 
1039 	if (of_get_flat_dt_prop(node, "reusable", NULL) ||
1040 	    of_get_flat_dt_prop(node, "linux,cma-default", NULL) ||
1041 	    of_get_flat_dt_prop(node, "linux,dma-default", NULL) ||
1042 	    of_get_flat_dt_prop(node, "no-map", NULL))
1043 		return -EINVAL;
1044 
1045 	if (PageHighMem(pfn_to_page(PHYS_PFN(rmem->base)))) {
1046 		pr_err("Restricted DMA pool must be accessible within the linear mapping.");
1047 		return -EINVAL;
1048 	}
1049 
1050 	rmem->ops = &rmem_swiotlb_ops;
1051 	pr_info("Reserved memory: created restricted DMA pool at %pa, size %ld MiB\n",
1052 		&rmem->base, (unsigned long)rmem->size / SZ_1M);
1053 	return 0;
1054 }
1055 
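/*
 * A minimal, illustrative device tree sketch for such a pool (node names,
 * addresses and sizes are made up, not taken from this file):
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		restricted_dma: restricted-dma-pool@50000000 {
 *			compatible = "restricted-dma-pool";
 *			reg = <0x50000000 0x400000>;
 *		};
 *	};
 *
 * A consuming device then points at the pool with
 * "memory-region = <&restricted_dma>;", which causes rmem_swiotlb_device_init()
 * to attach the restricted pool to that device.
 */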
1056 RESERVEDMEM_OF_DECLARE(dma, "restricted-dma-pool", rmem_swiotlb_setup);
1057 #endif /* CONFIG_DMA_RESTRICTED_POOL */
1058