xref: /openbmc/linux/kernel/dma/swiotlb.c (revision 17bfcd6a)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Dynamic DMA mapping support.
4  *
5  * This implementation is a fallback for platforms that do not support
6  * I/O TLBs (aka DMA address translation hardware).
7  * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
8  * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
9  * Copyright (C) 2000, 2003 Hewlett-Packard Co
10  *	David Mosberger-Tang <davidm@hpl.hp.com>
11  *
12  * 03/05/07 davidm	Switch from PCI-DMA to generic device DMA API.
13  * 00/12/13 davidm	Rename to swiotlb.c and add mark_clean() to avoid
14  *			unnecessary i-cache flushing.
15  * 04/07/.. ak		Better overflow handling. Assorted fixes.
16  * 05/09/10 linville	Add support for syncing ranges, support syncing for
17  *			DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
18  * 08/12/11 beckyb	Add highmem support
19  */
20 
21 #define pr_fmt(fmt) "software IO TLB: " fmt
22 
23 #include <linux/cache.h>
24 #include <linux/cc_platform.h>
25 #include <linux/ctype.h>
26 #include <linux/debugfs.h>
27 #include <linux/dma-direct.h>
28 #include <linux/dma-map-ops.h>
29 #include <linux/export.h>
30 #include <linux/gfp.h>
31 #include <linux/highmem.h>
32 #include <linux/io.h>
33 #include <linux/iommu-helper.h>
34 #include <linux/init.h>
35 #include <linux/memblock.h>
36 #include <linux/mm.h>
37 #include <linux/pfn.h>
38 #include <linux/rculist.h>
39 #include <linux/scatterlist.h>
40 #include <linux/set_memory.h>
41 #include <linux/spinlock.h>
42 #include <linux/string.h>
43 #include <linux/swiotlb.h>
44 #include <linux/types.h>
45 #ifdef CONFIG_DMA_RESTRICTED_POOL
46 #include <linux/of.h>
47 #include <linux/of_fdt.h>
48 #include <linux/of_reserved_mem.h>
49 #include <linux/slab.h>
50 #endif
51 
52 #define CREATE_TRACE_POINTS
53 #include <trace/events/swiotlb.h>
54 
55 #define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
56 
57 /*
58  * Minimum IO TLB size to bother booting with.  Systems with mainly
59  * 64bit capable cards will only lightly use the swiotlb.  If we can't
60  * allocate a contiguous 1MB, we're probably in trouble anyway.
61  */
62 #define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
63 
64 #define INVALID_PHYS_ADDR (~(phys_addr_t)0)
65 
66 /**
67  * struct io_tlb_slot - IO TLB slot descriptor
68  * @orig_addr:	The original address corresponding to a mapped entry.
69  * @alloc_size:	Size of the allocated buffer.
70  * @list:	The free list describing the number of free entries available
71  *		from each index.
72  */
73 struct io_tlb_slot {
74 	phys_addr_t orig_addr;
75 	size_t alloc_size;
76 	unsigned int list;
77 };
78 
79 static bool swiotlb_force_bounce;
80 static bool swiotlb_force_disable;
81 
82 #ifdef CONFIG_SWIOTLB_DYNAMIC
83 
84 static void swiotlb_dyn_alloc(struct work_struct *work);
85 
86 static struct io_tlb_mem io_tlb_default_mem = {
87 	.lock = __SPIN_LOCK_UNLOCKED(io_tlb_default_mem.lock),
88 	.pools = LIST_HEAD_INIT(io_tlb_default_mem.pools),
89 	.dyn_alloc = __WORK_INITIALIZER(io_tlb_default_mem.dyn_alloc,
90 					swiotlb_dyn_alloc),
91 };
92 
93 #else  /* !CONFIG_SWIOTLB_DYNAMIC */
94 
95 static struct io_tlb_mem io_tlb_default_mem;
96 
97 #endif	/* CONFIG_SWIOTLB_DYNAMIC */
98 
99 static unsigned long default_nslabs = IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT;
100 static unsigned long default_nareas;
101 
102 /**
103  * struct io_tlb_area - IO TLB memory area descriptor
104  *
105  * This is a single area with a single lock.
106  *
107  * @used:	The number of used IO TLB block.
108  * @index:	The slot index to start searching in this area for next round.
109  * @lock:	The lock to protect the above data structures in the map and
110  *		unmap calls.
111  */
112 struct io_tlb_area {
113 	unsigned long used;
114 	unsigned int index;
115 	spinlock_t lock;
116 };
117 
118 /*
119  * Round up number of slabs to the next power of 2. The last area is going
120  * be smaller than the rest if default_nslabs is not power of two.
121  * The number of slot in an area should be a multiple of IO_TLB_SEGSIZE,
122  * otherwise a segment may span two or more areas. It conflicts with free
123  * contiguous slots tracking: free slots are treated contiguous no matter
124  * whether they cross an area boundary.
125  *
126  * Return true if default_nslabs is rounded up.
127  */
128 static bool round_up_default_nslabs(void)
129 {
130 	if (!default_nareas)
131 		return false;
132 
133 	if (default_nslabs < IO_TLB_SEGSIZE * default_nareas)
134 		default_nslabs = IO_TLB_SEGSIZE * default_nareas;
135 	else if (is_power_of_2(default_nslabs))
136 		return false;
137 	default_nslabs = roundup_pow_of_two(default_nslabs);
138 	return true;
139 }
140 
141 /**
142  * swiotlb_adjust_nareas() - adjust the number of areas and slots
143  * @nareas:	Desired number of areas. Zero is treated as 1.
144  *
145  * Adjust the default number of areas in a memory pool.
146  * The default size of the memory pool may also change to meet minimum area
147  * size requirements.
148  */
149 static void swiotlb_adjust_nareas(unsigned int nareas)
150 {
151 	if (!nareas)
152 		nareas = 1;
153 	else if (!is_power_of_2(nareas))
154 		nareas = roundup_pow_of_two(nareas);
155 
156 	default_nareas = nareas;
157 
158 	pr_info("area num %d.\n", nareas);
159 	if (round_up_default_nslabs())
160 		pr_info("SWIOTLB bounce buffer size roundup to %luMB",
161 			(default_nslabs << IO_TLB_SHIFT) >> 20);
162 }
163 
164 /**
165  * limit_nareas() - get the maximum number of areas for a given memory pool size
166  * @nareas:	Desired number of areas.
167  * @nslots:	Total number of slots in the memory pool.
168  *
169  * Limit the number of areas to the maximum possible number of areas in
170  * a memory pool of the given size.
171  *
172  * Return: Maximum possible number of areas.
173  */
174 static unsigned int limit_nareas(unsigned int nareas, unsigned long nslots)
175 {
176 	if (nslots < nareas * IO_TLB_SEGSIZE)
177 		return nslots / IO_TLB_SEGSIZE;
178 	return nareas;
179 }
180 
181 static int __init
182 setup_io_tlb_npages(char *str)
183 {
184 	if (isdigit(*str)) {
185 		/* avoid tail segment of size < IO_TLB_SEGSIZE */
186 		default_nslabs =
187 			ALIGN(simple_strtoul(str, &str, 0), IO_TLB_SEGSIZE);
188 	}
189 	if (*str == ',')
190 		++str;
191 	if (isdigit(*str))
192 		swiotlb_adjust_nareas(simple_strtoul(str, &str, 0));
193 	if (*str == ',')
194 		++str;
195 	if (!strcmp(str, "force"))
196 		swiotlb_force_bounce = true;
197 	else if (!strcmp(str, "noforce"))
198 		swiotlb_force_disable = true;
199 
200 	return 0;
201 }
202 early_param("swiotlb", setup_io_tlb_npages);
203 
204 unsigned long swiotlb_size_or_default(void)
205 {
206 	return default_nslabs << IO_TLB_SHIFT;
207 }
208 
209 void __init swiotlb_adjust_size(unsigned long size)
210 {
211 	/*
212 	 * If swiotlb parameter has not been specified, give a chance to
213 	 * architectures such as those supporting memory encryption to
214 	 * adjust/expand SWIOTLB size for their use.
215 	 */
216 	if (default_nslabs != IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT)
217 		return;
218 
219 	size = ALIGN(size, IO_TLB_SIZE);
220 	default_nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
221 	if (round_up_default_nslabs())
222 		size = default_nslabs << IO_TLB_SHIFT;
223 	pr_info("SWIOTLB bounce buffer size adjusted to %luMB", size >> 20);
224 }
225 
226 void swiotlb_print_info(void)
227 {
228 	struct io_tlb_pool *mem = &io_tlb_default_mem.defpool;
229 
230 	if (!mem->nslabs) {
231 		pr_warn("No low mem\n");
232 		return;
233 	}
234 
235 	pr_info("mapped [mem %pa-%pa] (%luMB)\n", &mem->start, &mem->end,
236 	       (mem->nslabs << IO_TLB_SHIFT) >> 20);
237 }
238 
239 static inline unsigned long io_tlb_offset(unsigned long val)
240 {
241 	return val & (IO_TLB_SEGSIZE - 1);
242 }
243 
244 static inline unsigned long nr_slots(u64 val)
245 {
246 	return DIV_ROUND_UP(val, IO_TLB_SIZE);
247 }
248 
249 /*
250  * Early SWIOTLB allocation may be too early to allow an architecture to
251  * perform the desired operations.  This function allows the architecture to
252  * call SWIOTLB when the operations are possible.  It needs to be called
253  * before the SWIOTLB memory is used.
254  */
255 void __init swiotlb_update_mem_attributes(void)
256 {
257 	struct io_tlb_pool *mem = &io_tlb_default_mem.defpool;
258 	unsigned long bytes;
259 
260 	if (!mem->nslabs || mem->late_alloc)
261 		return;
262 	bytes = PAGE_ALIGN(mem->nslabs << IO_TLB_SHIFT);
263 	set_memory_decrypted((unsigned long)mem->vaddr, bytes >> PAGE_SHIFT);
264 }
265 
266 static void swiotlb_init_io_tlb_pool(struct io_tlb_pool *mem, phys_addr_t start,
267 		unsigned long nslabs, bool late_alloc, unsigned int nareas)
268 {
269 	void *vaddr = phys_to_virt(start);
270 	unsigned long bytes = nslabs << IO_TLB_SHIFT, i;
271 
272 	mem->nslabs = nslabs;
273 	mem->start = start;
274 	mem->end = mem->start + bytes;
275 	mem->late_alloc = late_alloc;
276 	mem->nareas = nareas;
277 	mem->area_nslabs = nslabs / mem->nareas;
278 
279 	for (i = 0; i < mem->nareas; i++) {
280 		spin_lock_init(&mem->areas[i].lock);
281 		mem->areas[i].index = 0;
282 		mem->areas[i].used = 0;
283 	}
284 
285 	for (i = 0; i < mem->nslabs; i++) {
286 		mem->slots[i].list = IO_TLB_SEGSIZE - io_tlb_offset(i);
287 		mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
288 		mem->slots[i].alloc_size = 0;
289 	}
290 
291 	memset(vaddr, 0, bytes);
292 	mem->vaddr = vaddr;
293 	return;
294 }
295 
296 /**
297  * add_mem_pool() - add a memory pool to the allocator
298  * @mem:	Software IO TLB allocator.
299  * @pool:	Memory pool to be added.
300  */
301 static void add_mem_pool(struct io_tlb_mem *mem, struct io_tlb_pool *pool)
302 {
303 #ifdef CONFIG_SWIOTLB_DYNAMIC
304 	spin_lock(&mem->lock);
305 	list_add_rcu(&pool->node, &mem->pools);
306 	mem->nslabs += pool->nslabs;
307 	spin_unlock(&mem->lock);
308 #else
309 	mem->nslabs = pool->nslabs;
310 #endif
311 }
312 
313 static void __init *swiotlb_memblock_alloc(unsigned long nslabs,
314 		unsigned int flags,
315 		int (*remap)(void *tlb, unsigned long nslabs))
316 {
317 	size_t bytes = PAGE_ALIGN(nslabs << IO_TLB_SHIFT);
318 	void *tlb;
319 
320 	/*
321 	 * By default allocate the bounce buffer memory from low memory, but
322 	 * allow to pick a location everywhere for hypervisors with guest
323 	 * memory encryption.
324 	 */
325 	if (flags & SWIOTLB_ANY)
326 		tlb = memblock_alloc(bytes, PAGE_SIZE);
327 	else
328 		tlb = memblock_alloc_low(bytes, PAGE_SIZE);
329 
330 	if (!tlb) {
331 		pr_warn("%s: Failed to allocate %zu bytes tlb structure\n",
332 			__func__, bytes);
333 		return NULL;
334 	}
335 
336 	if (remap && remap(tlb, nslabs) < 0) {
337 		memblock_free(tlb, PAGE_ALIGN(bytes));
338 		pr_warn("%s: Failed to remap %zu bytes\n", __func__, bytes);
339 		return NULL;
340 	}
341 
342 	return tlb;
343 }
344 
345 /*
346  * Statically reserve bounce buffer space and initialize bounce buffer data
347  * structures for the software IO TLB used to implement the DMA API.
348  */
349 void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
350 		int (*remap)(void *tlb, unsigned long nslabs))
351 {
352 	struct io_tlb_pool *mem = &io_tlb_default_mem.defpool;
353 	unsigned long nslabs;
354 	unsigned int nareas;
355 	size_t alloc_size;
356 	void *tlb;
357 
358 	if (!addressing_limit && !swiotlb_force_bounce)
359 		return;
360 	if (swiotlb_force_disable)
361 		return;
362 
363 	io_tlb_default_mem.force_bounce =
364 		swiotlb_force_bounce || (flags & SWIOTLB_FORCE);
365 
366 #ifdef CONFIG_SWIOTLB_DYNAMIC
367 	if (!remap)
368 		io_tlb_default_mem.can_grow = true;
369 	if (flags & SWIOTLB_ANY)
370 		io_tlb_default_mem.phys_limit = virt_to_phys(high_memory - 1);
371 	else
372 		io_tlb_default_mem.phys_limit = ARCH_LOW_ADDRESS_LIMIT;
373 #endif
374 
375 	if (!default_nareas)
376 		swiotlb_adjust_nareas(num_possible_cpus());
377 
378 	nslabs = default_nslabs;
379 	nareas = limit_nareas(default_nareas, nslabs);
380 	while ((tlb = swiotlb_memblock_alloc(nslabs, flags, remap)) == NULL) {
381 		if (nslabs <= IO_TLB_MIN_SLABS)
382 			return;
383 		nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
384 		nareas = limit_nareas(nareas, nslabs);
385 	}
386 
387 	if (default_nslabs != nslabs) {
388 		pr_info("SWIOTLB bounce buffer size adjusted %lu -> %lu slabs",
389 			default_nslabs, nslabs);
390 		default_nslabs = nslabs;
391 	}
392 
393 	alloc_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), nslabs));
394 	mem->slots = memblock_alloc(alloc_size, PAGE_SIZE);
395 	if (!mem->slots) {
396 		pr_warn("%s: Failed to allocate %zu bytes align=0x%lx\n",
397 			__func__, alloc_size, PAGE_SIZE);
398 		return;
399 	}
400 
401 	mem->areas = memblock_alloc(array_size(sizeof(struct io_tlb_area),
402 		nareas), SMP_CACHE_BYTES);
403 	if (!mem->areas) {
404 		pr_warn("%s: Failed to allocate mem->areas.\n", __func__);
405 		return;
406 	}
407 
408 	swiotlb_init_io_tlb_pool(mem, __pa(tlb), nslabs, false, nareas);
409 	add_mem_pool(&io_tlb_default_mem, mem);
410 
411 	if (flags & SWIOTLB_VERBOSE)
412 		swiotlb_print_info();
413 }
414 
415 void __init swiotlb_init(bool addressing_limit, unsigned int flags)
416 {
417 	swiotlb_init_remap(addressing_limit, flags, NULL);
418 }
419 
420 /*
421  * Systems with larger DMA zones (those that don't support ISA) can
422  * initialize the swiotlb later using the slab allocator if needed.
423  * This should be just like above, but with some error catching.
424  */
425 int swiotlb_init_late(size_t size, gfp_t gfp_mask,
426 		int (*remap)(void *tlb, unsigned long nslabs))
427 {
428 	struct io_tlb_pool *mem = &io_tlb_default_mem.defpool;
429 	unsigned long nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
430 	unsigned int nareas;
431 	unsigned char *vstart = NULL;
432 	unsigned int order, area_order;
433 	bool retried = false;
434 	int rc = 0;
435 
436 	if (io_tlb_default_mem.nslabs)
437 		return 0;
438 
439 	if (swiotlb_force_disable)
440 		return 0;
441 
442 	io_tlb_default_mem.force_bounce = swiotlb_force_bounce;
443 
444 #ifdef CONFIG_SWIOTLB_DYNAMIC
445 	if (!remap)
446 		io_tlb_default_mem.can_grow = true;
447 	if (IS_ENABLED(CONFIG_ZONE_DMA) && (gfp_mask & __GFP_DMA))
448 		io_tlb_default_mem.phys_limit = DMA_BIT_MASK(zone_dma_bits);
449 	else if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp_mask & __GFP_DMA32))
450 		io_tlb_default_mem.phys_limit = DMA_BIT_MASK(32);
451 	else
452 		io_tlb_default_mem.phys_limit = virt_to_phys(high_memory - 1);
453 #endif
454 
455 	if (!default_nareas)
456 		swiotlb_adjust_nareas(num_possible_cpus());
457 
458 retry:
459 	order = get_order(nslabs << IO_TLB_SHIFT);
460 	nslabs = SLABS_PER_PAGE << order;
461 
462 	while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
463 		vstart = (void *)__get_free_pages(gfp_mask | __GFP_NOWARN,
464 						  order);
465 		if (vstart)
466 			break;
467 		order--;
468 		nslabs = SLABS_PER_PAGE << order;
469 		retried = true;
470 	}
471 
472 	if (!vstart)
473 		return -ENOMEM;
474 
475 	if (remap)
476 		rc = remap(vstart, nslabs);
477 	if (rc) {
478 		free_pages((unsigned long)vstart, order);
479 
480 		nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
481 		if (nslabs < IO_TLB_MIN_SLABS)
482 			return rc;
483 		retried = true;
484 		goto retry;
485 	}
486 
487 	if (retried) {
488 		pr_warn("only able to allocate %ld MB\n",
489 			(PAGE_SIZE << order) >> 20);
490 	}
491 
492 	nareas = limit_nareas(default_nareas, nslabs);
493 	area_order = get_order(array_size(sizeof(*mem->areas), nareas));
494 	mem->areas = (struct io_tlb_area *)
495 		__get_free_pages(GFP_KERNEL | __GFP_ZERO, area_order);
496 	if (!mem->areas)
497 		goto error_area;
498 
499 	mem->slots = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
500 		get_order(array_size(sizeof(*mem->slots), nslabs)));
501 	if (!mem->slots)
502 		goto error_slots;
503 
504 	set_memory_decrypted((unsigned long)vstart,
505 			     (nslabs << IO_TLB_SHIFT) >> PAGE_SHIFT);
506 	swiotlb_init_io_tlb_pool(mem, virt_to_phys(vstart), nslabs, true,
507 				 nareas);
508 	add_mem_pool(&io_tlb_default_mem, mem);
509 
510 	swiotlb_print_info();
511 	return 0;
512 
513 error_slots:
514 	free_pages((unsigned long)mem->areas, area_order);
515 error_area:
516 	free_pages((unsigned long)vstart, order);
517 	return -ENOMEM;
518 }
519 
520 void __init swiotlb_exit(void)
521 {
522 	struct io_tlb_pool *mem = &io_tlb_default_mem.defpool;
523 	unsigned long tbl_vaddr;
524 	size_t tbl_size, slots_size;
525 	unsigned int area_order;
526 
527 	if (swiotlb_force_bounce)
528 		return;
529 
530 	if (!mem->nslabs)
531 		return;
532 
533 	pr_info("tearing down default memory pool\n");
534 	tbl_vaddr = (unsigned long)phys_to_virt(mem->start);
535 	tbl_size = PAGE_ALIGN(mem->end - mem->start);
536 	slots_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), mem->nslabs));
537 
538 	set_memory_encrypted(tbl_vaddr, tbl_size >> PAGE_SHIFT);
539 	if (mem->late_alloc) {
540 		area_order = get_order(array_size(sizeof(*mem->areas),
541 			mem->nareas));
542 		free_pages((unsigned long)mem->areas, area_order);
543 		free_pages(tbl_vaddr, get_order(tbl_size));
544 		free_pages((unsigned long)mem->slots, get_order(slots_size));
545 	} else {
546 		memblock_free_late(__pa(mem->areas),
547 			array_size(sizeof(*mem->areas), mem->nareas));
548 		memblock_free_late(mem->start, tbl_size);
549 		memblock_free_late(__pa(mem->slots), slots_size);
550 	}
551 
552 	memset(mem, 0, sizeof(*mem));
553 }
554 
555 #ifdef CONFIG_SWIOTLB_DYNAMIC
556 
557 /**
558  * alloc_dma_pages() - allocate pages to be used for DMA
559  * @gfp:	GFP flags for the allocation.
560  * @bytes:	Size of the buffer.
561  *
562  * Allocate pages from the buddy allocator. If successful, make the allocated
563  * pages decrypted that they can be used for DMA.
564  *
565  * Return: Decrypted pages, or %NULL on failure.
566  */
567 static struct page *alloc_dma_pages(gfp_t gfp, size_t bytes)
568 {
569 	unsigned int order = get_order(bytes);
570 	struct page *page;
571 	void *vaddr;
572 
573 	page = alloc_pages(gfp, order);
574 	if (!page)
575 		return NULL;
576 
577 	vaddr = page_address(page);
578 	if (set_memory_decrypted((unsigned long)vaddr, PFN_UP(bytes)))
579 		goto error;
580 	return page;
581 
582 error:
583 	__free_pages(page, order);
584 	return NULL;
585 }
586 
587 /**
588  * swiotlb_alloc_tlb() - allocate a dynamic IO TLB buffer
589  * @dev:	Device for which a memory pool is allocated.
590  * @bytes:	Size of the buffer.
591  * @phys_limit:	Maximum allowed physical address of the buffer.
592  * @gfp:	GFP flags for the allocation.
593  *
594  * Return: Allocated pages, or %NULL on allocation failure.
595  */
596 static struct page *swiotlb_alloc_tlb(struct device *dev, size_t bytes,
597 		u64 phys_limit, gfp_t gfp)
598 {
599 	struct page *page;
600 
601 	/*
602 	 * Allocate from the atomic pools if memory is encrypted and
603 	 * the allocation is atomic, because decrypting may block.
604 	 */
605 	if (!gfpflags_allow_blocking(gfp) && dev && force_dma_unencrypted(dev)) {
606 		void *vaddr;
607 
608 		if (!IS_ENABLED(CONFIG_DMA_COHERENT_POOL))
609 			return NULL;
610 
611 		return dma_alloc_from_pool(dev, bytes, &vaddr, gfp,
612 					   dma_coherent_ok);
613 	}
614 
615 	gfp &= ~GFP_ZONEMASK;
616 	if (phys_limit <= DMA_BIT_MASK(zone_dma_bits))
617 		gfp |= __GFP_DMA;
618 	else if (phys_limit <= DMA_BIT_MASK(32))
619 		gfp |= __GFP_DMA32;
620 
621 	while ((page = alloc_dma_pages(gfp, bytes)) &&
622 	       page_to_phys(page) + bytes - 1 > phys_limit) {
623 		/* allocated, but too high */
624 		__free_pages(page, get_order(bytes));
625 
626 		if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
627 		    phys_limit < DMA_BIT_MASK(64) &&
628 		    !(gfp & (__GFP_DMA32 | __GFP_DMA)))
629 			gfp |= __GFP_DMA32;
630 		else if (IS_ENABLED(CONFIG_ZONE_DMA) &&
631 			 !(gfp & __GFP_DMA))
632 			gfp = (gfp & ~__GFP_DMA32) | __GFP_DMA;
633 		else
634 			return NULL;
635 	}
636 
637 	return page;
638 }
639 
640 /**
641  * swiotlb_free_tlb() - free a dynamically allocated IO TLB buffer
642  * @vaddr:	Virtual address of the buffer.
643  * @bytes:	Size of the buffer.
644  */
645 static void swiotlb_free_tlb(void *vaddr, size_t bytes)
646 {
647 	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
648 	    dma_free_from_pool(NULL, vaddr, bytes))
649 		return;
650 
651 	/* Intentional leak if pages cannot be encrypted again. */
652 	if (!set_memory_encrypted((unsigned long)vaddr, PFN_UP(bytes)))
653 		__free_pages(virt_to_page(vaddr), get_order(bytes));
654 }
655 
656 /**
657  * swiotlb_alloc_pool() - allocate a new IO TLB memory pool
658  * @dev:	Device for which a memory pool is allocated.
659  * @minslabs:	Minimum number of slabs.
660  * @nslabs:	Desired (maximum) number of slabs.
661  * @nareas:	Number of areas.
662  * @phys_limit:	Maximum DMA buffer physical address.
663  * @gfp:	GFP flags for the allocations.
664  *
665  * Allocate and initialize a new IO TLB memory pool. The actual number of
666  * slabs may be reduced if allocation of @nslabs fails. If even
667  * @minslabs cannot be allocated, this function fails.
668  *
669  * Return: New memory pool, or %NULL on allocation failure.
670  */
671 static struct io_tlb_pool *swiotlb_alloc_pool(struct device *dev,
672 		unsigned long minslabs, unsigned long nslabs,
673 		unsigned int nareas, u64 phys_limit, gfp_t gfp)
674 {
675 	struct io_tlb_pool *pool;
676 	unsigned int slot_order;
677 	struct page *tlb;
678 	size_t pool_size;
679 	size_t tlb_size;
680 
681 	pool_size = sizeof(*pool) + array_size(sizeof(*pool->areas), nareas);
682 	pool = kzalloc(pool_size, gfp);
683 	if (!pool)
684 		goto error;
685 	pool->areas = (void *)pool + sizeof(*pool);
686 
687 	tlb_size = nslabs << IO_TLB_SHIFT;
688 	while (!(tlb = swiotlb_alloc_tlb(dev, tlb_size, phys_limit, gfp))) {
689 		if (nslabs <= minslabs)
690 			goto error_tlb;
691 		nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
692 		nareas = limit_nareas(nareas, nslabs);
693 		tlb_size = nslabs << IO_TLB_SHIFT;
694 	}
695 
696 	slot_order = get_order(array_size(sizeof(*pool->slots), nslabs));
697 	pool->slots = (struct io_tlb_slot *)
698 		__get_free_pages(gfp, slot_order);
699 	if (!pool->slots)
700 		goto error_slots;
701 
702 	swiotlb_init_io_tlb_pool(pool, page_to_phys(tlb), nslabs, true, nareas);
703 	return pool;
704 
705 error_slots:
706 	swiotlb_free_tlb(page_address(tlb), tlb_size);
707 error_tlb:
708 	kfree(pool);
709 error:
710 	return NULL;
711 }
712 
713 /**
714  * swiotlb_dyn_alloc() - dynamic memory pool allocation worker
715  * @work:	Pointer to dyn_alloc in struct io_tlb_mem.
716  */
717 static void swiotlb_dyn_alloc(struct work_struct *work)
718 {
719 	struct io_tlb_mem *mem =
720 		container_of(work, struct io_tlb_mem, dyn_alloc);
721 	struct io_tlb_pool *pool;
722 
723 	pool = swiotlb_alloc_pool(NULL, IO_TLB_MIN_SLABS, default_nslabs,
724 				  default_nareas, mem->phys_limit, GFP_KERNEL);
725 	if (!pool) {
726 		pr_warn_ratelimited("Failed to allocate new pool");
727 		return;
728 	}
729 
730 	add_mem_pool(mem, pool);
731 }
732 
733 /**
734  * swiotlb_dyn_free() - RCU callback to free a memory pool
735  * @rcu:	RCU head in the corresponding struct io_tlb_pool.
736  */
737 static void swiotlb_dyn_free(struct rcu_head *rcu)
738 {
739 	struct io_tlb_pool *pool = container_of(rcu, struct io_tlb_pool, rcu);
740 	size_t slots_size = array_size(sizeof(*pool->slots), pool->nslabs);
741 	size_t tlb_size = pool->end - pool->start;
742 
743 	free_pages((unsigned long)pool->slots, get_order(slots_size));
744 	swiotlb_free_tlb(pool->vaddr, tlb_size);
745 	kfree(pool);
746 }
747 
748 /**
749  * swiotlb_find_pool() - find the IO TLB pool for a physical address
750  * @dev:        Device which has mapped the DMA buffer.
751  * @paddr:      Physical address within the DMA buffer.
752  *
753  * Find the IO TLB memory pool descriptor which contains the given physical
754  * address, if any.
755  *
756  * Return: Memory pool which contains @paddr, or %NULL if none.
757  */
758 struct io_tlb_pool *swiotlb_find_pool(struct device *dev, phys_addr_t paddr)
759 {
760 	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
761 	struct io_tlb_pool *pool;
762 
763 	rcu_read_lock();
764 	list_for_each_entry_rcu(pool, &mem->pools, node) {
765 		if (paddr >= pool->start && paddr < pool->end)
766 			goto out;
767 	}
768 
769 	list_for_each_entry_rcu(pool, &dev->dma_io_tlb_pools, node) {
770 		if (paddr >= pool->start && paddr < pool->end)
771 			goto out;
772 	}
773 	pool = NULL;
774 out:
775 	rcu_read_unlock();
776 	return pool;
777 }
778 
779 /**
780  * swiotlb_del_pool() - remove an IO TLB pool from a device
781  * @dev:	Owning device.
782  * @pool:	Memory pool to be removed.
783  */
784 static void swiotlb_del_pool(struct device *dev, struct io_tlb_pool *pool)
785 {
786 	unsigned long flags;
787 
788 	spin_lock_irqsave(&dev->dma_io_tlb_lock, flags);
789 	list_del_rcu(&pool->node);
790 	spin_unlock_irqrestore(&dev->dma_io_tlb_lock, flags);
791 
792 	call_rcu(&pool->rcu, swiotlb_dyn_free);
793 }
794 
795 #endif	/* CONFIG_SWIOTLB_DYNAMIC */
796 
797 /**
798  * swiotlb_dev_init() - initialize swiotlb fields in &struct device
799  * @dev:	Device to be initialized.
800  */
801 void swiotlb_dev_init(struct device *dev)
802 {
803 	dev->dma_io_tlb_mem = &io_tlb_default_mem;
804 #ifdef CONFIG_SWIOTLB_DYNAMIC
805 	INIT_LIST_HEAD(&dev->dma_io_tlb_pools);
806 	spin_lock_init(&dev->dma_io_tlb_lock);
807 	dev->dma_uses_io_tlb = false;
808 #endif
809 }
810 
811 /*
812  * Return the offset into a iotlb slot required to keep the device happy.
813  */
814 static unsigned int swiotlb_align_offset(struct device *dev, u64 addr)
815 {
816 	return addr & dma_get_min_align_mask(dev) & (IO_TLB_SIZE - 1);
817 }
818 
819 /*
820  * Bounce: copy the swiotlb buffer from or back to the original dma location
821  */
822 static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size,
823 			   enum dma_data_direction dir)
824 {
825 	struct io_tlb_pool *mem = swiotlb_find_pool(dev, tlb_addr);
826 	int index = (tlb_addr - mem->start) >> IO_TLB_SHIFT;
827 	phys_addr_t orig_addr = mem->slots[index].orig_addr;
828 	size_t alloc_size = mem->slots[index].alloc_size;
829 	unsigned long pfn = PFN_DOWN(orig_addr);
830 	unsigned char *vaddr = mem->vaddr + tlb_addr - mem->start;
831 	unsigned int tlb_offset, orig_addr_offset;
832 
833 	if (orig_addr == INVALID_PHYS_ADDR)
834 		return;
835 
836 	tlb_offset = tlb_addr & (IO_TLB_SIZE - 1);
837 	orig_addr_offset = swiotlb_align_offset(dev, orig_addr);
838 	if (tlb_offset < orig_addr_offset) {
839 		dev_WARN_ONCE(dev, 1,
840 			"Access before mapping start detected. orig offset %u, requested offset %u.\n",
841 			orig_addr_offset, tlb_offset);
842 		return;
843 	}
844 
845 	tlb_offset -= orig_addr_offset;
846 	if (tlb_offset > alloc_size) {
847 		dev_WARN_ONCE(dev, 1,
848 			"Buffer overflow detected. Allocation size: %zu. Mapping size: %zu+%u.\n",
849 			alloc_size, size, tlb_offset);
850 		return;
851 	}
852 
853 	orig_addr += tlb_offset;
854 	alloc_size -= tlb_offset;
855 
856 	if (size > alloc_size) {
857 		dev_WARN_ONCE(dev, 1,
858 			"Buffer overflow detected. Allocation size: %zu. Mapping size: %zu.\n",
859 			alloc_size, size);
860 		size = alloc_size;
861 	}
862 
863 	if (PageHighMem(pfn_to_page(pfn))) {
864 		unsigned int offset = orig_addr & ~PAGE_MASK;
865 		struct page *page;
866 		unsigned int sz = 0;
867 		unsigned long flags;
868 
869 		while (size) {
870 			sz = min_t(size_t, PAGE_SIZE - offset, size);
871 
872 			local_irq_save(flags);
873 			page = pfn_to_page(pfn);
874 			if (dir == DMA_TO_DEVICE)
875 				memcpy_from_page(vaddr, page, offset, sz);
876 			else
877 				memcpy_to_page(page, offset, vaddr, sz);
878 			local_irq_restore(flags);
879 
880 			size -= sz;
881 			pfn++;
882 			vaddr += sz;
883 			offset = 0;
884 		}
885 	} else if (dir == DMA_TO_DEVICE) {
886 		memcpy(vaddr, phys_to_virt(orig_addr), size);
887 	} else {
888 		memcpy(phys_to_virt(orig_addr), vaddr, size);
889 	}
890 }
891 
892 static inline phys_addr_t slot_addr(phys_addr_t start, phys_addr_t idx)
893 {
894 	return start + (idx << IO_TLB_SHIFT);
895 }
896 
897 /*
898  * Carefully handle integer overflow which can occur when boundary_mask == ~0UL.
899  */
900 static inline unsigned long get_max_slots(unsigned long boundary_mask)
901 {
902 	return (boundary_mask >> IO_TLB_SHIFT) + 1;
903 }
904 
905 static unsigned int wrap_area_index(struct io_tlb_pool *mem, unsigned int index)
906 {
907 	if (index >= mem->area_nslabs)
908 		return 0;
909 	return index;
910 }
911 
912 /*
913  * Track the total used slots with a global atomic value in order to have
914  * correct information to determine the high water mark. The mem_used()
915  * function gives imprecise results because there's no locking across
916  * multiple areas.
917  */
918 #ifdef CONFIG_DEBUG_FS
919 static void inc_used_and_hiwater(struct io_tlb_mem *mem, unsigned int nslots)
920 {
921 	unsigned long old_hiwater, new_used;
922 
923 	new_used = atomic_long_add_return(nslots, &mem->total_used);
924 	old_hiwater = atomic_long_read(&mem->used_hiwater);
925 	do {
926 		if (new_used <= old_hiwater)
927 			break;
928 	} while (!atomic_long_try_cmpxchg(&mem->used_hiwater,
929 					  &old_hiwater, new_used));
930 }
931 
932 static void dec_used(struct io_tlb_mem *mem, unsigned int nslots)
933 {
934 	atomic_long_sub(nslots, &mem->total_used);
935 }
936 
937 #else /* !CONFIG_DEBUG_FS */
938 static void inc_used_and_hiwater(struct io_tlb_mem *mem, unsigned int nslots)
939 {
940 }
941 static void dec_used(struct io_tlb_mem *mem, unsigned int nslots)
942 {
943 }
944 #endif /* CONFIG_DEBUG_FS */
945 
946 /**
947  * swiotlb_area_find_slots() - search for slots in one IO TLB memory area
948  * @dev:	Device which maps the buffer.
949  * @pool:	Memory pool to be searched.
950  * @area_index:	Index of the IO TLB memory area to be searched.
951  * @orig_addr:	Original (non-bounced) IO buffer address.
952  * @alloc_size: Total requested size of the bounce buffer,
953  *		including initial alignment padding.
954  * @alloc_align_mask:	Required alignment of the allocated buffer.
955  *
956  * Find a suitable sequence of IO TLB entries for the request and allocate
957  * a buffer from the given IO TLB memory area.
958  * This function takes care of locking.
959  *
960  * Return: Index of the first allocated slot, or -1 on error.
961  */
962 static int swiotlb_area_find_slots(struct device *dev, struct io_tlb_pool *pool,
963 		int area_index, phys_addr_t orig_addr, size_t alloc_size,
964 		unsigned int alloc_align_mask)
965 {
966 	struct io_tlb_area *area = pool->areas + area_index;
967 	unsigned long boundary_mask = dma_get_seg_boundary(dev);
968 	dma_addr_t tbl_dma_addr =
969 		phys_to_dma_unencrypted(dev, pool->start) & boundary_mask;
970 	unsigned long max_slots = get_max_slots(boundary_mask);
971 	unsigned int iotlb_align_mask =
972 		dma_get_min_align_mask(dev) | alloc_align_mask;
973 	unsigned int nslots = nr_slots(alloc_size), stride;
974 	unsigned int offset = swiotlb_align_offset(dev, orig_addr);
975 	unsigned int index, slots_checked, count = 0, i;
976 	unsigned long flags;
977 	unsigned int slot_base;
978 	unsigned int slot_index;
979 
980 	BUG_ON(!nslots);
981 	BUG_ON(area_index >= pool->nareas);
982 
983 	/*
984 	 * For allocations of PAGE_SIZE or larger only look for page aligned
985 	 * allocations.
986 	 */
987 	if (alloc_size >= PAGE_SIZE)
988 		iotlb_align_mask |= ~PAGE_MASK;
989 	iotlb_align_mask &= ~(IO_TLB_SIZE - 1);
990 
991 	/*
992 	 * For mappings with an alignment requirement don't bother looping to
993 	 * unaligned slots once we found an aligned one.
994 	 */
995 	stride = (iotlb_align_mask >> IO_TLB_SHIFT) + 1;
996 
997 	spin_lock_irqsave(&area->lock, flags);
998 	if (unlikely(nslots > pool->area_nslabs - area->used))
999 		goto not_found;
1000 
1001 	slot_base = area_index * pool->area_nslabs;
1002 	index = area->index;
1003 
1004 	for (slots_checked = 0; slots_checked < pool->area_nslabs; ) {
1005 		slot_index = slot_base + index;
1006 
1007 		if (orig_addr &&
1008 		    (slot_addr(tbl_dma_addr, slot_index) &
1009 		     iotlb_align_mask) != (orig_addr & iotlb_align_mask)) {
1010 			index = wrap_area_index(pool, index + 1);
1011 			slots_checked++;
1012 			continue;
1013 		}
1014 
1015 		if (!iommu_is_span_boundary(slot_index, nslots,
1016 					    nr_slots(tbl_dma_addr),
1017 					    max_slots)) {
1018 			if (pool->slots[slot_index].list >= nslots)
1019 				goto found;
1020 		}
1021 		index = wrap_area_index(pool, index + stride);
1022 		slots_checked += stride;
1023 	}
1024 
1025 not_found:
1026 	spin_unlock_irqrestore(&area->lock, flags);
1027 	return -1;
1028 
1029 found:
1030 	/*
1031 	 * If we find a slot that indicates we have 'nslots' number of
1032 	 * contiguous buffers, we allocate the buffers from that slot onwards
1033 	 * and set the list of free entries to '0' indicating unavailable.
1034 	 */
1035 	for (i = slot_index; i < slot_index + nslots; i++) {
1036 		pool->slots[i].list = 0;
1037 		pool->slots[i].alloc_size = alloc_size - (offset +
1038 				((i - slot_index) << IO_TLB_SHIFT));
1039 	}
1040 	for (i = slot_index - 1;
1041 	     io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 &&
1042 	     pool->slots[i].list; i--)
1043 		pool->slots[i].list = ++count;
1044 
1045 	/*
1046 	 * Update the indices to avoid searching in the next round.
1047 	 */
1048 	area->index = wrap_area_index(pool, index + nslots);
1049 	area->used += nslots;
1050 	spin_unlock_irqrestore(&area->lock, flags);
1051 
1052 	inc_used_and_hiwater(dev->dma_io_tlb_mem, nslots);
1053 	return slot_index;
1054 }
1055 
1056 /**
1057  * swiotlb_pool_find_slots() - search for slots in one memory pool
1058  * @dev:	Device which maps the buffer.
1059  * @pool:	Memory pool to be searched.
1060  * @orig_addr:	Original (non-bounced) IO buffer address.
1061  * @alloc_size: Total requested size of the bounce buffer,
1062  *		including initial alignment padding.
1063  * @alloc_align_mask:	Required alignment of the allocated buffer.
1064  *
1065  * Search through one memory pool to find a sequence of slots that match the
1066  * allocation constraints.
1067  *
1068  * Return: Index of the first allocated slot, or -1 on error.
1069  */
1070 static int swiotlb_pool_find_slots(struct device *dev, struct io_tlb_pool *pool,
1071 		phys_addr_t orig_addr, size_t alloc_size,
1072 		unsigned int alloc_align_mask)
1073 {
1074 	int start = raw_smp_processor_id() & (pool->nareas - 1);
1075 	int i = start, index;
1076 
1077 	do {
1078 		index = swiotlb_area_find_slots(dev, pool, i, orig_addr,
1079 						alloc_size, alloc_align_mask);
1080 		if (index >= 0)
1081 			return index;
1082 		if (++i >= pool->nareas)
1083 			i = 0;
1084 	} while (i != start);
1085 
1086 	return -1;
1087 }
1088 
1089 #ifdef CONFIG_SWIOTLB_DYNAMIC
1090 
1091 /**
1092  * swiotlb_find_slots() - search for slots in the whole swiotlb
1093  * @dev:	Device which maps the buffer.
1094  * @orig_addr:	Original (non-bounced) IO buffer address.
1095  * @alloc_size: Total requested size of the bounce buffer,
1096  *		including initial alignment padding.
1097  * @alloc_align_mask:	Required alignment of the allocated buffer.
1098  * @retpool:	Used memory pool, updated on return.
1099  *
1100  * Search through the whole software IO TLB to find a sequence of slots that
1101  * match the allocation constraints.
1102  *
1103  * Return: Index of the first allocated slot, or -1 on error.
1104  */
1105 static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
1106 		size_t alloc_size, unsigned int alloc_align_mask,
1107 		struct io_tlb_pool **retpool)
1108 {
1109 	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
1110 	struct io_tlb_pool *pool;
1111 	unsigned long nslabs;
1112 	unsigned long flags;
1113 	u64 phys_limit;
1114 	int index;
1115 
1116 	rcu_read_lock();
1117 	list_for_each_entry_rcu(pool, &mem->pools, node) {
1118 		index = swiotlb_pool_find_slots(dev, pool, orig_addr,
1119 						alloc_size, alloc_align_mask);
1120 		if (index >= 0) {
1121 			rcu_read_unlock();
1122 			goto found;
1123 		}
1124 	}
1125 	rcu_read_unlock();
1126 	if (!mem->can_grow)
1127 		return -1;
1128 
1129 	schedule_work(&mem->dyn_alloc);
1130 
1131 	nslabs = nr_slots(alloc_size);
1132 	phys_limit = min_not_zero(*dev->dma_mask, dev->bus_dma_limit);
1133 	pool = swiotlb_alloc_pool(dev, nslabs, nslabs, 1, phys_limit,
1134 				  GFP_NOWAIT | __GFP_NOWARN);
1135 	if (!pool)
1136 		return -1;
1137 
1138 	index = swiotlb_pool_find_slots(dev, pool, orig_addr,
1139 					alloc_size, alloc_align_mask);
1140 	if (index < 0) {
1141 		swiotlb_dyn_free(&pool->rcu);
1142 		return -1;
1143 	}
1144 
1145 	pool->transient = true;
1146 	spin_lock_irqsave(&dev->dma_io_tlb_lock, flags);
1147 	list_add_rcu(&pool->node, &dev->dma_io_tlb_pools);
1148 	spin_unlock_irqrestore(&dev->dma_io_tlb_lock, flags);
1149 
1150 found:
1151 	WRITE_ONCE(dev->dma_uses_io_tlb, true);
1152 
1153 	/*
1154 	 * The general barrier orders reads and writes against a presumed store
1155 	 * of the SWIOTLB buffer address by a device driver (to a driver private
1156 	 * data structure). It serves two purposes.
1157 	 *
1158 	 * First, the store to dev->dma_uses_io_tlb must be ordered before the
1159 	 * presumed store. This guarantees that the returned buffer address
1160 	 * cannot be passed to another CPU before updating dev->dma_uses_io_tlb.
1161 	 *
1162 	 * Second, the load from mem->pools must be ordered before the same
1163 	 * presumed store. This guarantees that the returned buffer address
1164 	 * cannot be observed by another CPU before an update of the RCU list
1165 	 * that was made by swiotlb_dyn_alloc() on a third CPU (cf. multicopy
1166 	 * atomicity).
1167 	 *
1168 	 * See also the comment in is_swiotlb_buffer().
1169 	 */
1170 	smp_mb();
1171 
1172 	*retpool = pool;
1173 	return index;
1174 }
1175 
1176 #else  /* !CONFIG_SWIOTLB_DYNAMIC */
1177 
1178 static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
1179 		size_t alloc_size, unsigned int alloc_align_mask,
1180 		struct io_tlb_pool **retpool)
1181 {
1182 	*retpool = &dev->dma_io_tlb_mem->defpool;
1183 	return swiotlb_pool_find_slots(dev, *retpool,
1184 				       orig_addr, alloc_size, alloc_align_mask);
1185 }
1186 
1187 #endif /* CONFIG_SWIOTLB_DYNAMIC */
1188 
1189 #ifdef CONFIG_DEBUG_FS
1190 
1191 /**
1192  * mem_used() - get number of used slots in an allocator
1193  * @mem:	Software IO TLB allocator.
1194  *
1195  * The result is accurate in this version of the function, because an atomic
1196  * counter is available if CONFIG_DEBUG_FS is set.
1197  *
1198  * Return: Number of used slots.
1199  */
1200 static unsigned long mem_used(struct io_tlb_mem *mem)
1201 {
1202 	return atomic_long_read(&mem->total_used);
1203 }
1204 
1205 #else /* !CONFIG_DEBUG_FS */
1206 
1207 /**
1208  * mem_pool_used() - get number of used slots in a memory pool
1209  * @pool:	Software IO TLB memory pool.
1210  *
1211  * The result is not accurate, see mem_used().
1212  *
1213  * Return: Approximate number of used slots.
1214  */
1215 static unsigned long mem_pool_used(struct io_tlb_pool *pool)
1216 {
1217 	int i;
1218 	unsigned long used = 0;
1219 
1220 	for (i = 0; i < pool->nareas; i++)
1221 		used += pool->areas[i].used;
1222 	return used;
1223 }
1224 
1225 /**
1226  * mem_used() - get number of used slots in an allocator
1227  * @mem:	Software IO TLB allocator.
1228  *
1229  * The result is not accurate, because there is no locking of individual
1230  * areas.
1231  *
1232  * Return: Approximate number of used slots.
1233  */
1234 static unsigned long mem_used(struct io_tlb_mem *mem)
1235 {
1236 #ifdef CONFIG_SWIOTLB_DYNAMIC
1237 	struct io_tlb_pool *pool;
1238 	unsigned long used = 0;
1239 
1240 	rcu_read_lock();
1241 	list_for_each_entry_rcu(pool, &mem->pools, node)
1242 		used += mem_pool_used(pool);
1243 	rcu_read_unlock();
1244 
1245 	return used;
1246 #else
1247 	return mem_pool_used(&mem->defpool);
1248 #endif
1249 }
1250 
1251 #endif /* CONFIG_DEBUG_FS */
1252 
1253 phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
1254 		size_t mapping_size, size_t alloc_size,
1255 		unsigned int alloc_align_mask, enum dma_data_direction dir,
1256 		unsigned long attrs)
1257 {
1258 	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
1259 	unsigned int offset = swiotlb_align_offset(dev, orig_addr);
1260 	struct io_tlb_pool *pool;
1261 	unsigned int i;
1262 	int index;
1263 	phys_addr_t tlb_addr;
1264 
1265 	if (!mem || !mem->nslabs) {
1266 		dev_warn_ratelimited(dev,
1267 			"Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");
1268 		return (phys_addr_t)DMA_MAPPING_ERROR;
1269 	}
1270 
1271 	if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
1272 		pr_warn_once("Memory encryption is active and system is using DMA bounce buffers\n");
1273 
1274 	if (mapping_size > alloc_size) {
1275 		dev_warn_once(dev, "Invalid sizes (mapping: %zd bytes, alloc: %zd bytes)",
1276 			      mapping_size, alloc_size);
1277 		return (phys_addr_t)DMA_MAPPING_ERROR;
1278 	}
1279 
1280 	index = swiotlb_find_slots(dev, orig_addr,
1281 				   alloc_size + offset, alloc_align_mask, &pool);
1282 	if (index == -1) {
1283 		if (!(attrs & DMA_ATTR_NO_WARN))
1284 			dev_warn_ratelimited(dev,
1285 	"swiotlb buffer is full (sz: %zd bytes), total %lu (slots), used %lu (slots)\n",
1286 				 alloc_size, mem->nslabs, mem_used(mem));
1287 		return (phys_addr_t)DMA_MAPPING_ERROR;
1288 	}
1289 
1290 	/*
1291 	 * Save away the mapping from the original address to the DMA address.
1292 	 * This is needed when we sync the memory.  Then we sync the buffer if
1293 	 * needed.
1294 	 */
1295 	for (i = 0; i < nr_slots(alloc_size + offset); i++)
1296 		pool->slots[index + i].orig_addr = slot_addr(orig_addr, i);
1297 	tlb_addr = slot_addr(pool->start, index) + offset;
1298 	/*
1299 	 * When dir == DMA_FROM_DEVICE we could omit the copy from the orig
1300 	 * to the tlb buffer, if we knew for sure the device will
1301 	 * overwrite the entire current content. But we don't. Thus
1302 	 * unconditional bounce may prevent leaking swiotlb content (i.e.
1303 	 * kernel memory) to user-space.
1304 	 */
1305 	swiotlb_bounce(dev, tlb_addr, mapping_size, DMA_TO_DEVICE);
1306 	return tlb_addr;
1307 }
1308 
1309 static void swiotlb_release_slots(struct device *dev, phys_addr_t tlb_addr)
1310 {
1311 	struct io_tlb_pool *mem = swiotlb_find_pool(dev, tlb_addr);
1312 	unsigned long flags;
1313 	unsigned int offset = swiotlb_align_offset(dev, tlb_addr);
1314 	int index = (tlb_addr - offset - mem->start) >> IO_TLB_SHIFT;
1315 	int nslots = nr_slots(mem->slots[index].alloc_size + offset);
1316 	int aindex = index / mem->area_nslabs;
1317 	struct io_tlb_area *area = &mem->areas[aindex];
1318 	int count, i;
1319 
1320 	/*
1321 	 * Return the buffer to the free list by setting the corresponding
1322 	 * entries to indicate the number of contiguous entries available.
1323 	 * While returning the entries to the free list, we merge the entries
1324 	 * with slots below and above the pool being returned.
1325 	 */
1326 	BUG_ON(aindex >= mem->nareas);
1327 
1328 	spin_lock_irqsave(&area->lock, flags);
1329 	if (index + nslots < ALIGN(index + 1, IO_TLB_SEGSIZE))
1330 		count = mem->slots[index + nslots].list;
1331 	else
1332 		count = 0;
1333 
1334 	/*
1335 	 * Step 1: return the slots to the free list, merging the slots with
1336 	 * superceeding slots
1337 	 */
1338 	for (i = index + nslots - 1; i >= index; i--) {
1339 		mem->slots[i].list = ++count;
1340 		mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
1341 		mem->slots[i].alloc_size = 0;
1342 	}
1343 
1344 	/*
1345 	 * Step 2: merge the returned slots with the preceding slots, if
1346 	 * available (non zero)
1347 	 */
1348 	for (i = index - 1;
1349 	     io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 && mem->slots[i].list;
1350 	     i--)
1351 		mem->slots[i].list = ++count;
1352 	area->used -= nslots;
1353 	spin_unlock_irqrestore(&area->lock, flags);
1354 
1355 	dec_used(dev->dma_io_tlb_mem, nslots);
1356 }
1357 
1358 #ifdef CONFIG_SWIOTLB_DYNAMIC
1359 
1360 /**
1361  * swiotlb_del_transient() - delete a transient memory pool
1362  * @dev:	Device which mapped the buffer.
1363  * @tlb_addr:	Physical address within a bounce buffer.
1364  *
1365  * Check whether the address belongs to a transient SWIOTLB memory pool.
1366  * If yes, then delete the pool.
1367  *
1368  * Return: %true if @tlb_addr belonged to a transient pool that was released.
1369  */
1370 static bool swiotlb_del_transient(struct device *dev, phys_addr_t tlb_addr)
1371 {
1372 	struct io_tlb_pool *pool;
1373 
1374 	pool = swiotlb_find_pool(dev, tlb_addr);
1375 	if (!pool->transient)
1376 		return false;
1377 
1378 	dec_used(dev->dma_io_tlb_mem, pool->nslabs);
1379 	swiotlb_del_pool(dev, pool);
1380 	return true;
1381 }
1382 
1383 #else  /* !CONFIG_SWIOTLB_DYNAMIC */
1384 
1385 static inline bool swiotlb_del_transient(struct device *dev,
1386 					 phys_addr_t tlb_addr)
1387 {
1388 	return false;
1389 }
1390 
1391 #endif	/* CONFIG_SWIOTLB_DYNAMIC */
1392 
1393 /*
1394  * tlb_addr is the physical address of the bounce buffer to unmap.
1395  */
1396 void swiotlb_tbl_unmap_single(struct device *dev, phys_addr_t tlb_addr,
1397 			      size_t mapping_size, enum dma_data_direction dir,
1398 			      unsigned long attrs)
1399 {
1400 	/*
1401 	 * First, sync the memory before unmapping the entry
1402 	 */
1403 	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
1404 	    (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
1405 		swiotlb_bounce(dev, tlb_addr, mapping_size, DMA_FROM_DEVICE);
1406 
1407 	if (swiotlb_del_transient(dev, tlb_addr))
1408 		return;
1409 	swiotlb_release_slots(dev, tlb_addr);
1410 }
1411 
1412 void swiotlb_sync_single_for_device(struct device *dev, phys_addr_t tlb_addr,
1413 		size_t size, enum dma_data_direction dir)
1414 {
1415 	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
1416 		swiotlb_bounce(dev, tlb_addr, size, DMA_TO_DEVICE);
1417 	else
1418 		BUG_ON(dir != DMA_FROM_DEVICE);
1419 }
1420 
1421 void swiotlb_sync_single_for_cpu(struct device *dev, phys_addr_t tlb_addr,
1422 		size_t size, enum dma_data_direction dir)
1423 {
1424 	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
1425 		swiotlb_bounce(dev, tlb_addr, size, DMA_FROM_DEVICE);
1426 	else
1427 		BUG_ON(dir != DMA_TO_DEVICE);
1428 }
1429 
1430 /*
1431  * Create a swiotlb mapping for the buffer at @paddr, and in case of DMAing
1432  * to the device copy the data into it as well.
1433  */
1434 dma_addr_t swiotlb_map(struct device *dev, phys_addr_t paddr, size_t size,
1435 		enum dma_data_direction dir, unsigned long attrs)
1436 {
1437 	phys_addr_t swiotlb_addr;
1438 	dma_addr_t dma_addr;
1439 
1440 	trace_swiotlb_bounced(dev, phys_to_dma(dev, paddr), size);
1441 
1442 	swiotlb_addr = swiotlb_tbl_map_single(dev, paddr, size, size, 0, dir,
1443 			attrs);
1444 	if (swiotlb_addr == (phys_addr_t)DMA_MAPPING_ERROR)
1445 		return DMA_MAPPING_ERROR;
1446 
1447 	/* Ensure that the address returned is DMA'ble */
1448 	dma_addr = phys_to_dma_unencrypted(dev, swiotlb_addr);
1449 	if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
1450 		swiotlb_tbl_unmap_single(dev, swiotlb_addr, size, dir,
1451 			attrs | DMA_ATTR_SKIP_CPU_SYNC);
1452 		dev_WARN_ONCE(dev, 1,
1453 			"swiotlb addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
1454 			&dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
1455 		return DMA_MAPPING_ERROR;
1456 	}
1457 
1458 	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
1459 		arch_sync_dma_for_device(swiotlb_addr, size, dir);
1460 	return dma_addr;
1461 }
1462 
1463 size_t swiotlb_max_mapping_size(struct device *dev)
1464 {
1465 	int min_align_mask = dma_get_min_align_mask(dev);
1466 	int min_align = 0;
1467 
1468 	/*
1469 	 * swiotlb_find_slots() skips slots according to
1470 	 * min align mask. This affects max mapping size.
1471 	 * Take it into acount here.
1472 	 */
1473 	if (min_align_mask)
1474 		min_align = roundup(min_align_mask, IO_TLB_SIZE);
1475 
1476 	return ((size_t)IO_TLB_SIZE) * IO_TLB_SEGSIZE - min_align;
1477 }
1478 
1479 /**
1480  * is_swiotlb_allocated() - check if the default software IO TLB is initialized
1481  */
1482 bool is_swiotlb_allocated(void)
1483 {
1484 	return io_tlb_default_mem.nslabs;
1485 }
1486 
1487 bool is_swiotlb_active(struct device *dev)
1488 {
1489 	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
1490 
1491 	return mem && mem->nslabs;
1492 }
1493 
1494 /**
1495  * default_swiotlb_base() - get the base address of the default SWIOTLB
1496  *
1497  * Get the lowest physical address used by the default software IO TLB pool.
1498  */
1499 phys_addr_t default_swiotlb_base(void)
1500 {
1501 #ifdef CONFIG_SWIOTLB_DYNAMIC
1502 	io_tlb_default_mem.can_grow = false;
1503 #endif
1504 	return io_tlb_default_mem.defpool.start;
1505 }
1506 
1507 /**
1508  * default_swiotlb_limit() - get the address limit of the default SWIOTLB
1509  *
1510  * Get the highest physical address used by the default software IO TLB pool.
1511  */
1512 phys_addr_t default_swiotlb_limit(void)
1513 {
1514 #ifdef CONFIG_SWIOTLB_DYNAMIC
1515 	return io_tlb_default_mem.phys_limit;
1516 #else
1517 	return io_tlb_default_mem.defpool.end - 1;
1518 #endif
1519 }
1520 
1521 #ifdef CONFIG_DEBUG_FS
1522 
1523 static int io_tlb_used_get(void *data, u64 *val)
1524 {
1525 	struct io_tlb_mem *mem = data;
1526 
1527 	*val = mem_used(mem);
1528 	return 0;
1529 }
1530 
1531 static int io_tlb_hiwater_get(void *data, u64 *val)
1532 {
1533 	struct io_tlb_mem *mem = data;
1534 
1535 	*val = atomic_long_read(&mem->used_hiwater);
1536 	return 0;
1537 }
1538 
1539 static int io_tlb_hiwater_set(void *data, u64 val)
1540 {
1541 	struct io_tlb_mem *mem = data;
1542 
1543 	/* Only allow setting to zero */
1544 	if (val != 0)
1545 		return -EINVAL;
1546 
1547 	atomic_long_set(&mem->used_hiwater, val);
1548 	return 0;
1549 }
1550 
1551 DEFINE_DEBUGFS_ATTRIBUTE(fops_io_tlb_used, io_tlb_used_get, NULL, "%llu\n");
1552 DEFINE_DEBUGFS_ATTRIBUTE(fops_io_tlb_hiwater, io_tlb_hiwater_get,
1553 				io_tlb_hiwater_set, "%llu\n");
1554 
1555 static void swiotlb_create_debugfs_files(struct io_tlb_mem *mem,
1556 					 const char *dirname)
1557 {
1558 	atomic_long_set(&mem->total_used, 0);
1559 	atomic_long_set(&mem->used_hiwater, 0);
1560 
1561 	mem->debugfs = debugfs_create_dir(dirname, io_tlb_default_mem.debugfs);
1562 	if (!mem->nslabs)
1563 		return;
1564 
1565 	debugfs_create_ulong("io_tlb_nslabs", 0400, mem->debugfs, &mem->nslabs);
1566 	debugfs_create_file("io_tlb_used", 0400, mem->debugfs, mem,
1567 			&fops_io_tlb_used);
1568 	debugfs_create_file("io_tlb_used_hiwater", 0600, mem->debugfs, mem,
1569 			&fops_io_tlb_hiwater);
1570 }
1571 
1572 static int __init swiotlb_create_default_debugfs(void)
1573 {
1574 	swiotlb_create_debugfs_files(&io_tlb_default_mem, "swiotlb");
1575 	return 0;
1576 }
1577 
1578 late_initcall(swiotlb_create_default_debugfs);
1579 
1580 #else  /* !CONFIG_DEBUG_FS */
1581 
1582 static inline void swiotlb_create_debugfs_files(struct io_tlb_mem *mem,
1583 						const char *dirname)
1584 {
1585 }
1586 
1587 #endif	/* CONFIG_DEBUG_FS */
1588 
1589 #ifdef CONFIG_DMA_RESTRICTED_POOL
1590 
1591 struct page *swiotlb_alloc(struct device *dev, size_t size)
1592 {
1593 	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
1594 	struct io_tlb_pool *pool;
1595 	phys_addr_t tlb_addr;
1596 	int index;
1597 
1598 	if (!mem)
1599 		return NULL;
1600 
1601 	index = swiotlb_find_slots(dev, 0, size, 0, &pool);
1602 	if (index == -1)
1603 		return NULL;
1604 
1605 	tlb_addr = slot_addr(pool->start, index);
1606 
1607 	return pfn_to_page(PFN_DOWN(tlb_addr));
1608 }
1609 
1610 bool swiotlb_free(struct device *dev, struct page *page, size_t size)
1611 {
1612 	phys_addr_t tlb_addr = page_to_phys(page);
1613 
1614 	if (!is_swiotlb_buffer(dev, tlb_addr))
1615 		return false;
1616 
1617 	swiotlb_release_slots(dev, tlb_addr);
1618 
1619 	return true;
1620 }
1621 
1622 static int rmem_swiotlb_device_init(struct reserved_mem *rmem,
1623 				    struct device *dev)
1624 {
1625 	struct io_tlb_mem *mem = rmem->priv;
1626 	unsigned long nslabs = rmem->size >> IO_TLB_SHIFT;
1627 
1628 	/* Set Per-device io tlb area to one */
1629 	unsigned int nareas = 1;
1630 
1631 	if (PageHighMem(pfn_to_page(PHYS_PFN(rmem->base)))) {
1632 		dev_err(dev, "Restricted DMA pool must be accessible within the linear mapping.");
1633 		return -EINVAL;
1634 	}
1635 
1636 	/*
1637 	 * Since multiple devices can share the same pool, the private data,
1638 	 * io_tlb_mem struct, will be initialized by the first device attached
1639 	 * to it.
1640 	 */
1641 	if (!mem) {
1642 		struct io_tlb_pool *pool;
1643 
1644 		mem = kzalloc(sizeof(*mem), GFP_KERNEL);
1645 		if (!mem)
1646 			return -ENOMEM;
1647 		pool = &mem->defpool;
1648 
1649 		pool->slots = kcalloc(nslabs, sizeof(*pool->slots), GFP_KERNEL);
1650 		if (!pool->slots) {
1651 			kfree(mem);
1652 			return -ENOMEM;
1653 		}
1654 
1655 		pool->areas = kcalloc(nareas, sizeof(*pool->areas),
1656 				GFP_KERNEL);
1657 		if (!pool->areas) {
1658 			kfree(pool->slots);
1659 			kfree(mem);
1660 			return -ENOMEM;
1661 		}
1662 
1663 		set_memory_decrypted((unsigned long)phys_to_virt(rmem->base),
1664 				     rmem->size >> PAGE_SHIFT);
1665 		swiotlb_init_io_tlb_pool(pool, rmem->base, nslabs,
1666 					 false, nareas);
1667 		mem->force_bounce = true;
1668 		mem->for_alloc = true;
1669 #ifdef CONFIG_SWIOTLB_DYNAMIC
1670 		spin_lock_init(&mem->lock);
1671 #endif
1672 		add_mem_pool(mem, pool);
1673 
1674 		rmem->priv = mem;
1675 
1676 		swiotlb_create_debugfs_files(mem, rmem->name);
1677 	}
1678 
1679 	dev->dma_io_tlb_mem = mem;
1680 
1681 	return 0;
1682 }
1683 
1684 static void rmem_swiotlb_device_release(struct reserved_mem *rmem,
1685 					struct device *dev)
1686 {
1687 	dev->dma_io_tlb_mem = &io_tlb_default_mem;
1688 }
1689 
1690 static const struct reserved_mem_ops rmem_swiotlb_ops = {
1691 	.device_init = rmem_swiotlb_device_init,
1692 	.device_release = rmem_swiotlb_device_release,
1693 };
1694 
1695 static int __init rmem_swiotlb_setup(struct reserved_mem *rmem)
1696 {
1697 	unsigned long node = rmem->fdt_node;
1698 
1699 	if (of_get_flat_dt_prop(node, "reusable", NULL) ||
1700 	    of_get_flat_dt_prop(node, "linux,cma-default", NULL) ||
1701 	    of_get_flat_dt_prop(node, "linux,dma-default", NULL) ||
1702 	    of_get_flat_dt_prop(node, "no-map", NULL))
1703 		return -EINVAL;
1704 
1705 	rmem->ops = &rmem_swiotlb_ops;
1706 	pr_info("Reserved memory: created restricted DMA pool at %pa, size %ld MiB\n",
1707 		&rmem->base, (unsigned long)rmem->size / SZ_1M);
1708 	return 0;
1709 }
1710 
1711 RESERVEDMEM_OF_DECLARE(dma, "restricted-dma-pool", rmem_swiotlb_setup);
1712 #endif /* CONFIG_DMA_RESTRICTED_POOL */
1713