1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Dynamic DMA mapping support.
4  *
5  * This implementation is a fallback for platforms that do not support
6  * I/O TLBs (aka DMA address translation hardware).
7  * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
8  * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
9  * Copyright (C) 2000, 2003 Hewlett-Packard Co
10  *	David Mosberger-Tang <davidm@hpl.hp.com>
11  *
12  * 03/05/07 davidm	Switch from PCI-DMA to generic device DMA API.
13  * 00/12/13 davidm	Rename to swiotlb.c and add mark_clean() to avoid
14  *			unnecessary i-cache flushing.
15  * 04/07/.. ak		Better overflow handling. Assorted fixes.
16  * 05/09/10 linville	Add support for syncing ranges, support syncing for
17  *			DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
18  * 08/12/11 beckyb	Add highmem support
19  */
20 
21 #define pr_fmt(fmt) "software IO TLB: " fmt
22 
23 #include <linux/cache.h>
24 #include <linux/cc_platform.h>
25 #include <linux/ctype.h>
26 #include <linux/debugfs.h>
27 #include <linux/dma-direct.h>
28 #include <linux/dma-map-ops.h>
29 #include <linux/export.h>
30 #include <linux/gfp.h>
31 #include <linux/highmem.h>
32 #include <linux/io.h>
33 #include <linux/iommu-helper.h>
34 #include <linux/init.h>
35 #include <linux/memblock.h>
36 #include <linux/mm.h>
37 #include <linux/pfn.h>
38 #include <linux/rculist.h>
39 #include <linux/scatterlist.h>
40 #include <linux/set_memory.h>
41 #include <linux/spinlock.h>
42 #include <linux/string.h>
43 #include <linux/swiotlb.h>
44 #include <linux/types.h>
45 #ifdef CONFIG_DMA_RESTRICTED_POOL
46 #include <linux/of.h>
47 #include <linux/of_fdt.h>
48 #include <linux/of_reserved_mem.h>
49 #include <linux/slab.h>
50 #endif
51 
52 #define CREATE_TRACE_POINTS
53 #include <trace/events/swiotlb.h>
54 
55 #define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
56 
57 /*
58  * Minimum IO TLB size to bother booting with.  Systems with mainly
59  * 64-bit capable cards will only lightly use the swiotlb.  If we can't
60  * allocate a contiguous 1MB, we're probably in trouble anyway.
61  */
62 #define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
63 
64 #define INVALID_PHYS_ADDR (~(phys_addr_t)0)
65 
66 /**
67  * struct io_tlb_slot - IO TLB slot descriptor
68  * @orig_addr:	The original address corresponding to a mapped entry.
69  * @alloc_size:	Size of the allocated buffer.
70  * @list:	The free list describing the number of free entries available
71  *		from each index.
72  * @pad_slots:	Number of preceding padding slots. Valid only in the first
73  *		allocated non-padding slot.
74  */
75 struct io_tlb_slot {
76 	phys_addr_t orig_addr;
77 	size_t alloc_size;
78 	unsigned short list;
79 	unsigned short pad_slots;
80 };
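
/*
 * Illustrative sketch (not upstream text): how the @list free-list
 * encoding looks for a freshly initialized pool. Each slot records how
 * many contiguous free slots remain up to the end of its
 * IO_TLB_SEGSIZE-slot segment, so a full 128-slot segment reads:
 *
 *	index: 0    1    2   ... 126  127
 *	list:  128  127  126 ...   2    1
 *
 * An allocation of N slots can therefore start at index i whenever
 * slots[i].list >= N.
 */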
81 
82 static bool swiotlb_force_bounce;
83 static bool swiotlb_force_disable;
84 
85 #ifdef CONFIG_SWIOTLB_DYNAMIC
86 
87 static void swiotlb_dyn_alloc(struct work_struct *work);
88 
89 static struct io_tlb_mem io_tlb_default_mem = {
90 	.lock = __SPIN_LOCK_UNLOCKED(io_tlb_default_mem.lock),
91 	.pools = LIST_HEAD_INIT(io_tlb_default_mem.pools),
92 	.dyn_alloc = __WORK_INITIALIZER(io_tlb_default_mem.dyn_alloc,
93 					swiotlb_dyn_alloc),
94 };
95 
96 #else  /* !CONFIG_SWIOTLB_DYNAMIC */
97 
98 static struct io_tlb_mem io_tlb_default_mem;
99 
100 #endif	/* CONFIG_SWIOTLB_DYNAMIC */
101 
102 static unsigned long default_nslabs = IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT;
103 static unsigned long default_nareas;
104 
105 /**
106  * struct io_tlb_area - IO TLB memory area descriptor
107  *
108  * This is a single area with a single lock.
109  *
110  * @used:	The number of used IO TLB slots in this area.
111  * @index:	The slot index to start searching in this area for next round.
112  * @lock:	The lock to protect the above data structures in the map and
113  *		unmap calls.
114  */
115 struct io_tlb_area {
116 	unsigned long used;
117 	unsigned int index;
118 	spinlock_t lock;
119 };
120 
121 /*
122  * Round up the number of slabs to the next power of 2. The last area is going
123  * to be smaller than the rest if default_nslabs is not a power of two.
124  * The number of slots in an area should be a multiple of IO_TLB_SEGSIZE,
125  * otherwise a segment may span two or more areas. That conflicts with the
126  * tracking of free contiguous slots: free slots are treated as contiguous no
127  * matter whether they cross an area boundary.
128  *
129  * Return true if default_nslabs is rounded up.
130  */
131 static bool round_up_default_nslabs(void)
132 {
133 	if (!default_nareas)
134 		return false;
135 
136 	if (default_nslabs < IO_TLB_SEGSIZE * default_nareas)
137 		default_nslabs = IO_TLB_SEGSIZE * default_nareas;
138 	else if (is_power_of_2(default_nslabs))
139 		return false;
140 	default_nslabs = roundup_pow_of_two(default_nslabs);
141 	return true;
142 }
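
/*
 * Worked example (illustrative numbers): with default_nareas == 8 and
 * default_nslabs == 1000, the first branch raises default_nslabs to
 * IO_TLB_SEGSIZE * 8 == 1024, roundup_pow_of_two(1024) keeps it at
 * 1024, and the function returns true; the pool thus grows from 1000
 * to 1024 slabs (2 MiB with the usual 2 KiB IO_TLB_SIZE).
 */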
143 
144 /**
145  * swiotlb_adjust_nareas() - adjust the number of areas and slots
146  * @nareas:	Desired number of areas. Zero is treated as 1.
147  *
148  * Adjust the default number of areas in a memory pool.
149  * The default size of the memory pool may also change to meet minimum area
150  * size requirements.
151  */
152 static void swiotlb_adjust_nareas(unsigned int nareas)
153 {
154 	if (!nareas)
155 		nareas = 1;
156 	else if (!is_power_of_2(nareas))
157 		nareas = roundup_pow_of_two(nareas);
158 
159 	default_nareas = nareas;
160 
161 	pr_info("area num %d.\n", nareas);
162 	if (round_up_default_nslabs())
163 		pr_info("SWIOTLB bounce buffer size rounded up to %luMB",
164 			(default_nslabs << IO_TLB_SHIFT) >> 20);
165 }
166 
167 /**
168  * limit_nareas() - get the maximum number of areas for a given memory pool size
169  * @nareas:	Desired number of areas.
170  * @nslots:	Total number of slots in the memory pool.
171  *
172  * Limit the number of areas to the maximum possible number of areas in
173  * a memory pool of the given size.
174  *
175  * Return: Maximum possible number of areas.
176  */
177 static unsigned int limit_nareas(unsigned int nareas, unsigned long nslots)
178 {
179 	if (nslots < nareas * IO_TLB_SEGSIZE)
180 		return nslots / IO_TLB_SEGSIZE;
181 	return nareas;
182 }
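
/*
 * Worked example (illustrative numbers): a pool of nslots == 512 cannot
 * host nareas == 8, since 8 * IO_TLB_SEGSIZE == 1024 > 512. The limit
 * is 512 / 128 == 4 areas, each covering exactly one 128-slot segment.
 */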
183 
184 static int __init
185 setup_io_tlb_npages(char *str)
186 {
187 	if (isdigit(*str)) {
188 		/* avoid tail segment of size < IO_TLB_SEGSIZE */
189 		default_nslabs =
190 			ALIGN(simple_strtoul(str, &str, 0), IO_TLB_SEGSIZE);
191 	}
192 	if (*str == ',')
193 		++str;
194 	if (isdigit(*str))
195 		swiotlb_adjust_nareas(simple_strtoul(str, &str, 0));
196 	if (*str == ',')
197 		++str;
198 	if (!strcmp(str, "force"))
199 		swiotlb_force_bounce = true;
200 	else if (!strcmp(str, "noforce"))
201 		swiotlb_force_disable = true;
202 
203 	return 0;
204 }
205 early_param("swiotlb", setup_io_tlb_npages);
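
/*
 * The parameter parsed above is <slabs>[,<areas>[,force|noforce]].
 * Example boot command lines (illustrative values):
 *
 *	swiotlb=65536,4		65536 slabs (128 MiB at 2 KiB per slab)
 *				split into 4 areas
 *	swiotlb=force		bounce all DMA through the swiotlb
 */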
206 
207 unsigned long swiotlb_size_or_default(void)
208 {
209 	return default_nslabs << IO_TLB_SHIFT;
210 }
211 
212 void __init swiotlb_adjust_size(unsigned long size)
213 {
214 	/*
215 	 * If the swiotlb parameter has not been specified, give a chance to
216 	 * architectures such as those supporting memory encryption to
217 	 * adjust/expand SWIOTLB size for their use.
218 	 */
219 	if (default_nslabs != IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT)
220 		return;
221 
222 	size = ALIGN(size, IO_TLB_SIZE);
223 	default_nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
224 	if (round_up_default_nslabs())
225 		size = default_nslabs << IO_TLB_SHIFT;
226 	pr_info("SWIOTLB bounce buffer size adjusted to %luMB", size >> 20);
227 }
228 
229 void swiotlb_print_info(void)
230 {
231 	struct io_tlb_pool *mem = &io_tlb_default_mem.defpool;
232 
233 	if (!mem->nslabs) {
234 		pr_warn("No low mem\n");
235 		return;
236 	}
237 
238 	pr_info("mapped [mem %pa-%pa] (%luMB)\n", &mem->start, &mem->end,
239 	       (mem->nslabs << IO_TLB_SHIFT) >> 20);
240 }
241 
242 static inline unsigned long io_tlb_offset(unsigned long val)
243 {
244 	return val & (IO_TLB_SEGSIZE - 1);
245 }
246 
247 static inline unsigned long nr_slots(u64 val)
248 {
249 	return DIV_ROUND_UP(val, IO_TLB_SIZE);
250 }
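
/*
 * Worked example (illustrative, assuming the usual IO_TLB_SHIFT of 11,
 * i.e. 2 KiB slots, and IO_TLB_SEGSIZE of 128): nr_slots(5000) ==
 * DIV_ROUND_UP(5000, 2048) == 3, and io_tlb_offset(130) == 130 & 127
 * == 2, i.e. slot 130 is the third slot within its segment.
 */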
251 
252 /*
253  * Early SWIOTLB allocation may be too early to allow an architecture to
254  * perform the desired operations.  This function allows the architecture to
255  * call SWIOTLB when the operations are possible.  It needs to be called
256  * before the SWIOTLB memory is used.
257  */
258 void __init swiotlb_update_mem_attributes(void)
259 {
260 	struct io_tlb_pool *mem = &io_tlb_default_mem.defpool;
261 	unsigned long bytes;
262 
263 	if (!mem->nslabs || mem->late_alloc)
264 		return;
265 	bytes = PAGE_ALIGN(mem->nslabs << IO_TLB_SHIFT);
266 	set_memory_decrypted((unsigned long)mem->vaddr, bytes >> PAGE_SHIFT);
267 }
268 
269 static void swiotlb_init_io_tlb_pool(struct io_tlb_pool *mem, phys_addr_t start,
270 		unsigned long nslabs, bool late_alloc, unsigned int nareas)
271 {
272 	void *vaddr = phys_to_virt(start);
273 	unsigned long bytes = nslabs << IO_TLB_SHIFT, i;
274 
275 	mem->nslabs = nslabs;
276 	mem->start = start;
277 	mem->end = mem->start + bytes;
278 	mem->late_alloc = late_alloc;
279 	mem->nareas = nareas;
280 	mem->area_nslabs = nslabs / mem->nareas;
281 
282 	for (i = 0; i < mem->nareas; i++) {
283 		spin_lock_init(&mem->areas[i].lock);
284 		mem->areas[i].index = 0;
285 		mem->areas[i].used = 0;
286 	}
287 
288 	for (i = 0; i < mem->nslabs; i++) {
289 		mem->slots[i].list = min(IO_TLB_SEGSIZE - io_tlb_offset(i),
290 					 mem->nslabs - i);
291 		mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
292 		mem->slots[i].alloc_size = 0;
293 		mem->slots[i].pad_slots = 0;
294 	}
295 
296 	memset(vaddr, 0, bytes);
297 	mem->vaddr = vaddr;
298 	return;
299 }
300 
301 /**
302  * add_mem_pool() - add a memory pool to the allocator
303  * @mem:	Software IO TLB allocator.
304  * @pool:	Memory pool to be added.
305  */
306 static void add_mem_pool(struct io_tlb_mem *mem, struct io_tlb_pool *pool)
307 {
308 #ifdef CONFIG_SWIOTLB_DYNAMIC
309 	spin_lock(&mem->lock);
310 	list_add_rcu(&pool->node, &mem->pools);
311 	mem->nslabs += pool->nslabs;
312 	spin_unlock(&mem->lock);
313 #else
314 	mem->nslabs = pool->nslabs;
315 #endif
316 }
317 
318 static void __init *swiotlb_memblock_alloc(unsigned long nslabs,
319 		unsigned int flags,
320 		int (*remap)(void *tlb, unsigned long nslabs))
321 {
322 	size_t bytes = PAGE_ALIGN(nslabs << IO_TLB_SHIFT);
323 	void *tlb;
324 
325 	/*
326 	 * By default allocate the bounce buffer memory from low memory, but
327 	 * allow it to be allocated from anywhere for hypervisors with guest
328 	 * memory encryption.
329 	 */
330 	if (flags & SWIOTLB_ANY)
331 		tlb = memblock_alloc(bytes, PAGE_SIZE);
332 	else
333 		tlb = memblock_alloc_low(bytes, PAGE_SIZE);
334 
335 	if (!tlb) {
336 		pr_warn("%s: Failed to allocate %zu bytes tlb structure\n",
337 			__func__, bytes);
338 		return NULL;
339 	}
340 
341 	if (remap && remap(tlb, nslabs) < 0) {
342 		memblock_free(tlb, PAGE_ALIGN(bytes));
343 		pr_warn("%s: Failed to remap %zu bytes\n", __func__, bytes);
344 		return NULL;
345 	}
346 
347 	return tlb;
348 }
349 
350 /*
351  * Statically reserve bounce buffer space and initialize bounce buffer data
352  * structures for the software IO TLB used to implement the DMA API.
353  */
354 void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
355 		int (*remap)(void *tlb, unsigned long nslabs))
356 {
357 	struct io_tlb_pool *mem = &io_tlb_default_mem.defpool;
358 	unsigned long nslabs;
359 	unsigned int nareas;
360 	size_t alloc_size;
361 	void *tlb;
362 
363 	if (!addressing_limit && !swiotlb_force_bounce)
364 		return;
365 	if (swiotlb_force_disable)
366 		return;
367 
368 	io_tlb_default_mem.force_bounce =
369 		swiotlb_force_bounce || (flags & SWIOTLB_FORCE);
370 
371 #ifdef CONFIG_SWIOTLB_DYNAMIC
372 	if (!remap)
373 		io_tlb_default_mem.can_grow = true;
374 	if (flags & SWIOTLB_ANY)
375 		io_tlb_default_mem.phys_limit = virt_to_phys(high_memory - 1);
376 	else
377 		io_tlb_default_mem.phys_limit = ARCH_LOW_ADDRESS_LIMIT;
378 #endif
379 
380 	if (!default_nareas)
381 		swiotlb_adjust_nareas(num_possible_cpus());
382 
383 	nslabs = default_nslabs;
384 	nareas = limit_nareas(default_nareas, nslabs);
385 	while ((tlb = swiotlb_memblock_alloc(nslabs, flags, remap)) == NULL) {
386 		if (nslabs <= IO_TLB_MIN_SLABS)
387 			return;
388 		nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
389 		nareas = limit_nareas(nareas, nslabs);
390 	}
391 
392 	if (default_nslabs != nslabs) {
393 		pr_info("SWIOTLB bounce buffer size adjusted %lu -> %lu slabs",
394 			default_nslabs, nslabs);
395 		default_nslabs = nslabs;
396 	}
397 
398 	alloc_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), nslabs));
399 	mem->slots = memblock_alloc(alloc_size, PAGE_SIZE);
400 	if (!mem->slots) {
401 		pr_warn("%s: Failed to allocate %zu bytes align=0x%lx\n",
402 			__func__, alloc_size, PAGE_SIZE);
403 		return;
404 	}
405 
406 	mem->areas = memblock_alloc(array_size(sizeof(struct io_tlb_area),
407 		nareas), SMP_CACHE_BYTES);
408 	if (!mem->areas) {
409 		pr_warn("%s: Failed to allocate mem->areas.\n", __func__);
410 		return;
411 	}
412 
413 	swiotlb_init_io_tlb_pool(mem, __pa(tlb), nslabs, false, nareas);
414 	add_mem_pool(&io_tlb_default_mem, mem);
415 
416 	if (flags & SWIOTLB_VERBOSE)
417 		swiotlb_print_info();
418 }
419 
420 void __init swiotlb_init(bool addressing_limit, unsigned int flags)
421 {
422 	swiotlb_init_remap(addressing_limit, flags, NULL);
423 }
424 
425 /*
426  * Systems with larger DMA zones (those that don't support ISA) can
427  * initialize the swiotlb later using the slab allocator if needed.
428  * This should be just like above, but with some error catching.
429  */
430 int swiotlb_init_late(size_t size, gfp_t gfp_mask,
431 		int (*remap)(void *tlb, unsigned long nslabs))
432 {
433 	struct io_tlb_pool *mem = &io_tlb_default_mem.defpool;
434 	unsigned long nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
435 	unsigned int nareas;
436 	unsigned char *vstart = NULL;
437 	unsigned int order, area_order;
438 	bool retried = false;
439 	int rc = 0;
440 
441 	if (io_tlb_default_mem.nslabs)
442 		return 0;
443 
444 	if (swiotlb_force_disable)
445 		return 0;
446 
447 	io_tlb_default_mem.force_bounce = swiotlb_force_bounce;
448 
449 #ifdef CONFIG_SWIOTLB_DYNAMIC
450 	if (!remap)
451 		io_tlb_default_mem.can_grow = true;
452 	if (IS_ENABLED(CONFIG_ZONE_DMA) && (gfp_mask & __GFP_DMA))
453 		io_tlb_default_mem.phys_limit = DMA_BIT_MASK(zone_dma_bits);
454 	else if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp_mask & __GFP_DMA32))
455 		io_tlb_default_mem.phys_limit = DMA_BIT_MASK(32);
456 	else
457 		io_tlb_default_mem.phys_limit = virt_to_phys(high_memory - 1);
458 #endif
459 
460 	if (!default_nareas)
461 		swiotlb_adjust_nareas(num_possible_cpus());
462 
463 retry:
464 	order = get_order(nslabs << IO_TLB_SHIFT);
465 	nslabs = SLABS_PER_PAGE << order;
466 
467 	while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
468 		vstart = (void *)__get_free_pages(gfp_mask | __GFP_NOWARN,
469 						  order);
470 		if (vstart)
471 			break;
472 		order--;
473 		nslabs = SLABS_PER_PAGE << order;
474 		retried = true;
475 	}
476 
477 	if (!vstart)
478 		return -ENOMEM;
479 
480 	if (remap)
481 		rc = remap(vstart, nslabs);
482 	if (rc) {
483 		free_pages((unsigned long)vstart, order);
484 
485 		nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
486 		if (nslabs < IO_TLB_MIN_SLABS)
487 			return rc;
488 		retried = true;
489 		goto retry;
490 	}
491 
492 	if (retried) {
493 		pr_warn("only able to allocate %ld MB\n",
494 			(PAGE_SIZE << order) >> 20);
495 	}
496 
497 	nareas = limit_nareas(default_nareas, nslabs);
498 	area_order = get_order(array_size(sizeof(*mem->areas), nareas));
499 	mem->areas = (struct io_tlb_area *)
500 		__get_free_pages(GFP_KERNEL | __GFP_ZERO, area_order);
501 	if (!mem->areas)
502 		goto error_area;
503 
504 	mem->slots = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
505 		get_order(array_size(sizeof(*mem->slots), nslabs)));
506 	if (!mem->slots)
507 		goto error_slots;
508 
509 	set_memory_decrypted((unsigned long)vstart,
510 			     (nslabs << IO_TLB_SHIFT) >> PAGE_SHIFT);
511 	swiotlb_init_io_tlb_pool(mem, virt_to_phys(vstart), nslabs, true,
512 				 nareas);
513 	add_mem_pool(&io_tlb_default_mem, mem);
514 
515 	swiotlb_print_info();
516 	return 0;
517 
518 error_slots:
519 	free_pages((unsigned long)mem->areas, area_order);
520 error_area:
521 	free_pages((unsigned long)vstart, order);
522 	return -ENOMEM;
523 }
524 
525 void __init swiotlb_exit(void)
526 {
527 	struct io_tlb_pool *mem = &io_tlb_default_mem.defpool;
528 	unsigned long tbl_vaddr;
529 	size_t tbl_size, slots_size;
530 	unsigned int area_order;
531 
532 	if (swiotlb_force_bounce)
533 		return;
534 
535 	if (!mem->nslabs)
536 		return;
537 
538 	pr_info("tearing down default memory pool\n");
539 	tbl_vaddr = (unsigned long)phys_to_virt(mem->start);
540 	tbl_size = PAGE_ALIGN(mem->end - mem->start);
541 	slots_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), mem->nslabs));
542 
543 	set_memory_encrypted(tbl_vaddr, tbl_size >> PAGE_SHIFT);
544 	if (mem->late_alloc) {
545 		area_order = get_order(array_size(sizeof(*mem->areas),
546 			mem->nareas));
547 		free_pages((unsigned long)mem->areas, area_order);
548 		free_pages(tbl_vaddr, get_order(tbl_size));
549 		free_pages((unsigned long)mem->slots, get_order(slots_size));
550 	} else {
551 		memblock_free_late(__pa(mem->areas),
552 			array_size(sizeof(*mem->areas), mem->nareas));
553 		memblock_free_late(mem->start, tbl_size);
554 		memblock_free_late(__pa(mem->slots), slots_size);
555 	}
556 
557 	memset(mem, 0, sizeof(*mem));
558 }
559 
560 #ifdef CONFIG_SWIOTLB_DYNAMIC
561 
562 /**
563  * alloc_dma_pages() - allocate pages to be used for DMA
564  * @gfp:	GFP flags for the allocation.
565  * @bytes:	Size of the buffer.
566  * @phys_limit:	Maximum allowed physical address of the buffer.
567  *
568  * Allocate pages from the buddy allocator. If successful, make the allocated
569  * pages decrypted that they can be used for DMA.
570  *
571  * Return: Decrypted pages, %NULL on allocation failure, or ERR_PTR(-EAGAIN)
572  * if the allocated physical address was above @phys_limit.
573  */
574 static struct page *alloc_dma_pages(gfp_t gfp, size_t bytes, u64 phys_limit)
575 {
576 	unsigned int order = get_order(bytes);
577 	struct page *page;
578 	phys_addr_t paddr;
579 	void *vaddr;
580 
581 	page = alloc_pages(gfp, order);
582 	if (!page)
583 		return NULL;
584 
585 	paddr = page_to_phys(page);
586 	if (paddr + bytes - 1 > phys_limit) {
587 		__free_pages(page, order);
588 		return ERR_PTR(-EAGAIN);
589 	}
590 
591 	vaddr = phys_to_virt(paddr);
592 	if (set_memory_decrypted((unsigned long)vaddr, PFN_UP(bytes)))
593 		goto error;
594 	return page;
595 
596 error:
597 	/* Intentional leak if pages cannot be encrypted again. */
598 	if (!set_memory_encrypted((unsigned long)vaddr, PFN_UP(bytes)))
599 		__free_pages(page, order);
600 	return NULL;
601 }
602 
603 /**
604  * swiotlb_alloc_tlb() - allocate a dynamic IO TLB buffer
605  * @dev:	Device for which a memory pool is allocated.
606  * @bytes:	Size of the buffer.
607  * @phys_limit:	Maximum allowed physical address of the buffer.
608  * @gfp:	GFP flags for the allocation.
609  *
610  * Return: Allocated pages, or %NULL on allocation failure.
611  */
612 static struct page *swiotlb_alloc_tlb(struct device *dev, size_t bytes,
613 		u64 phys_limit, gfp_t gfp)
614 {
615 	struct page *page;
616 
617 	/*
618 	 * Allocate from the atomic pools if memory is encrypted and
619 	 * the allocation is atomic, because decrypting may block.
620 	 */
621 	if (!gfpflags_allow_blocking(gfp) && dev && force_dma_unencrypted(dev)) {
622 		void *vaddr;
623 
624 		if (!IS_ENABLED(CONFIG_DMA_COHERENT_POOL))
625 			return NULL;
626 
627 		return dma_alloc_from_pool(dev, bytes, &vaddr, gfp,
628 					   dma_coherent_ok);
629 	}
630 
631 	gfp &= ~GFP_ZONEMASK;
632 	if (phys_limit <= DMA_BIT_MASK(zone_dma_bits))
633 		gfp |= __GFP_DMA;
634 	else if (phys_limit <= DMA_BIT_MASK(32))
635 		gfp |= __GFP_DMA32;
636 
637 	while (IS_ERR(page = alloc_dma_pages(gfp, bytes, phys_limit))) {
638 		if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
639 		    phys_limit < DMA_BIT_MASK(64) &&
640 		    !(gfp & (__GFP_DMA32 | __GFP_DMA)))
641 			gfp |= __GFP_DMA32;
642 		else if (IS_ENABLED(CONFIG_ZONE_DMA) &&
643 			 !(gfp & __GFP_DMA))
644 			gfp = (gfp & ~__GFP_DMA32) | __GFP_DMA;
645 		else
646 			return NULL;
647 	}
648 
649 	return page;
650 }
651 
652 /**
653  * swiotlb_free_tlb() - free a dynamically allocated IO TLB buffer
654  * @vaddr:	Virtual address of the buffer.
655  * @bytes:	Size of the buffer.
656  */
657 static void swiotlb_free_tlb(void *vaddr, size_t bytes)
658 {
659 	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
660 	    dma_free_from_pool(NULL, vaddr, bytes))
661 		return;
662 
663 	/* Intentional leak if pages cannot be encrypted again. */
664 	if (!set_memory_encrypted((unsigned long)vaddr, PFN_UP(bytes)))
665 		__free_pages(virt_to_page(vaddr), get_order(bytes));
666 }
667 
668 /**
669  * swiotlb_alloc_pool() - allocate a new IO TLB memory pool
670  * @dev:	Device for which a memory pool is allocated.
671  * @minslabs:	Minimum number of slabs.
672  * @nslabs:	Desired (maximum) number of slabs.
673  * @nareas:	Number of areas.
674  * @phys_limit:	Maximum DMA buffer physical address.
675  * @gfp:	GFP flags for the allocations.
676  *
677  * Allocate and initialize a new IO TLB memory pool. The actual number of
678  * slabs may be reduced if allocation of @nslabs fails. If even
679  * @minslabs cannot be allocated, this function fails.
680  *
681  * Return: New memory pool, or %NULL on allocation failure.
682  */
683 static struct io_tlb_pool *swiotlb_alloc_pool(struct device *dev,
684 		unsigned long minslabs, unsigned long nslabs,
685 		unsigned int nareas, u64 phys_limit, gfp_t gfp)
686 {
687 	struct io_tlb_pool *pool;
688 	unsigned int slot_order;
689 	struct page *tlb;
690 	size_t pool_size;
691 	size_t tlb_size;
692 
693 	if (nslabs > SLABS_PER_PAGE << MAX_ORDER) {
694 		nslabs = SLABS_PER_PAGE << MAX_ORDER;
695 		nareas = limit_nareas(nareas, nslabs);
696 	}
697 
698 	pool_size = sizeof(*pool) + array_size(sizeof(*pool->areas), nareas);
699 	pool = kzalloc(pool_size, gfp);
700 	if (!pool)
701 		goto error;
702 	pool->areas = (void *)pool + sizeof(*pool);
703 
704 	tlb_size = nslabs << IO_TLB_SHIFT;
705 	while (!(tlb = swiotlb_alloc_tlb(dev, tlb_size, phys_limit, gfp))) {
706 		if (nslabs <= minslabs)
707 			goto error_tlb;
708 		nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
709 		nareas = limit_nareas(nareas, nslabs);
710 		tlb_size = nslabs << IO_TLB_SHIFT;
711 	}
712 
713 	slot_order = get_order(array_size(sizeof(*pool->slots), nslabs));
714 	pool->slots = (struct io_tlb_slot *)
715 		__get_free_pages(gfp, slot_order);
716 	if (!pool->slots)
717 		goto error_slots;
718 
719 	swiotlb_init_io_tlb_pool(pool, page_to_phys(tlb), nslabs, true, nareas);
720 	return pool;
721 
722 error_slots:
723 	swiotlb_free_tlb(page_address(tlb), tlb_size);
724 error_tlb:
725 	kfree(pool);
726 error:
727 	return NULL;
728 }
729 
730 /**
731  * swiotlb_dyn_alloc() - dynamic memory pool allocation worker
732  * @work:	Pointer to dyn_alloc in struct io_tlb_mem.
733  */
734 static void swiotlb_dyn_alloc(struct work_struct *work)
735 {
736 	struct io_tlb_mem *mem =
737 		container_of(work, struct io_tlb_mem, dyn_alloc);
738 	struct io_tlb_pool *pool;
739 
740 	pool = swiotlb_alloc_pool(NULL, IO_TLB_MIN_SLABS, default_nslabs,
741 				  default_nareas, mem->phys_limit, GFP_KERNEL);
742 	if (!pool) {
743 		pr_warn_ratelimited("Failed to allocate new pool");
744 		return;
745 	}
746 
747 	add_mem_pool(mem, pool);
748 }
749 
750 /**
751  * swiotlb_dyn_free() - RCU callback to free a memory pool
752  * @rcu:	RCU head in the corresponding struct io_tlb_pool.
753  */
754 static void swiotlb_dyn_free(struct rcu_head *rcu)
755 {
756 	struct io_tlb_pool *pool = container_of(rcu, struct io_tlb_pool, rcu);
757 	size_t slots_size = array_size(sizeof(*pool->slots), pool->nslabs);
758 	size_t tlb_size = pool->end - pool->start;
759 
760 	free_pages((unsigned long)pool->slots, get_order(slots_size));
761 	swiotlb_free_tlb(pool->vaddr, tlb_size);
762 	kfree(pool);
763 }
764 
765 /**
766  * swiotlb_find_pool() - find the IO TLB pool for a physical address
767  * @dev:        Device which has mapped the DMA buffer.
768  * @paddr:      Physical address within the DMA buffer.
769  *
770  * Find the IO TLB memory pool descriptor which contains the given physical
771  * address, if any.
772  *
773  * Return: Memory pool which contains @paddr, or %NULL if none.
774  */
775 struct io_tlb_pool *swiotlb_find_pool(struct device *dev, phys_addr_t paddr)
776 {
777 	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
778 	struct io_tlb_pool *pool;
779 
780 	rcu_read_lock();
781 	list_for_each_entry_rcu(pool, &mem->pools, node) {
782 		if (paddr >= pool->start && paddr < pool->end)
783 			goto out;
784 	}
785 
786 	list_for_each_entry_rcu(pool, &dev->dma_io_tlb_pools, node) {
787 		if (paddr >= pool->start && paddr < pool->end)
788 			goto out;
789 	}
790 	pool = NULL;
791 out:
792 	rcu_read_unlock();
793 	return pool;
794 }
795 
796 /**
797  * swiotlb_del_pool() - remove an IO TLB pool from a device
798  * @dev:	Owning device.
799  * @pool:	Memory pool to be removed.
800  */
801 static void swiotlb_del_pool(struct device *dev, struct io_tlb_pool *pool)
802 {
803 	unsigned long flags;
804 
805 	spin_lock_irqsave(&dev->dma_io_tlb_lock, flags);
806 	list_del_rcu(&pool->node);
807 	spin_unlock_irqrestore(&dev->dma_io_tlb_lock, flags);
808 
809 	call_rcu(&pool->rcu, swiotlb_dyn_free);
810 }
811 
812 #endif	/* CONFIG_SWIOTLB_DYNAMIC */
813 
814 /**
815  * swiotlb_dev_init() - initialize swiotlb fields in &struct device
816  * @dev:	Device to be initialized.
817  */
818 void swiotlb_dev_init(struct device *dev)
819 {
820 	dev->dma_io_tlb_mem = &io_tlb_default_mem;
821 #ifdef CONFIG_SWIOTLB_DYNAMIC
822 	INIT_LIST_HEAD(&dev->dma_io_tlb_pools);
823 	spin_lock_init(&dev->dma_io_tlb_lock);
824 	dev->dma_uses_io_tlb = false;
825 #endif
826 }
827 
828 /**
829  * swiotlb_align_offset() - Get required offset into an IO TLB allocation.
830  * @dev:         Owning device.
831  * @align_mask:  Allocation alignment mask.
832  * @addr:        DMA address.
833  *
834  * Return the minimum offset from the start of an IO TLB allocation which is
835  * required for a given buffer address and allocation alignment to keep the
836  * device happy.
837  *
838  * First, the address bits covered by min_align_mask must be identical in the
839  * original address and the bounce buffer address. High bits are preserved by
840  * choosing a suitable IO TLB slot, but bits below IO_TLB_SHIFT require extra
841  * padding bytes before the bounce buffer.
842  *
843  * Second, @align_mask specifies which bits of the first allocated slot must
844  * be zero. This may require allocating additional padding slots, and then the
845  * offset (in bytes) from the first such padding slot is returned.
846  */
847 static unsigned int swiotlb_align_offset(struct device *dev,
848 					 unsigned int align_mask, u64 addr)
849 {
850 	return addr & dma_get_min_align_mask(dev) &
851 		(align_mask | (IO_TLB_SIZE - 1));
852 }
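
/*
 * Worked example (illustrative values): a device with
 * min_align_mask == 0xfff (it needs the low 12 address bits preserved,
 * e.g. an NVMe-style controller) mapping orig_addr == 0x12345678 with
 * align_mask == 0 yields 0x12345678 & 0xfff & 0x7ff == 0x678, so the
 * bounce buffer must start 0x678 bytes into its first 2 KiB slot to
 * replicate the low bits of the original address.
 */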
853 
854 /*
855  * Bounce: copy the swiotlb buffer from or back to the original dma location
856  */
857 static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size,
858 			   enum dma_data_direction dir)
859 {
860 	struct io_tlb_pool *mem = swiotlb_find_pool(dev, tlb_addr);
861 	int index = (tlb_addr - mem->start) >> IO_TLB_SHIFT;
862 	phys_addr_t orig_addr = mem->slots[index].orig_addr;
863 	size_t alloc_size = mem->slots[index].alloc_size;
864 	unsigned long pfn = PFN_DOWN(orig_addr);
865 	unsigned char *vaddr = mem->vaddr + tlb_addr - mem->start;
866 	unsigned int tlb_offset, orig_addr_offset;
867 
868 	if (orig_addr == INVALID_PHYS_ADDR)
869 		return;
870 
871 	tlb_offset = tlb_addr & (IO_TLB_SIZE - 1);
872 	orig_addr_offset = swiotlb_align_offset(dev, 0, orig_addr);
873 	if (tlb_offset < orig_addr_offset) {
874 		dev_WARN_ONCE(dev, 1,
875 			"Access before mapping start detected. orig offset %u, requested offset %u.\n",
876 			orig_addr_offset, tlb_offset);
877 		return;
878 	}
879 
880 	tlb_offset -= orig_addr_offset;
881 	if (tlb_offset > alloc_size) {
882 		dev_WARN_ONCE(dev, 1,
883 			"Buffer overflow detected. Allocation size: %zu. Mapping size: %zu+%u.\n",
884 			alloc_size, size, tlb_offset);
885 		return;
886 	}
887 
888 	orig_addr += tlb_offset;
889 	alloc_size -= tlb_offset;
890 
891 	if (size > alloc_size) {
892 		dev_WARN_ONCE(dev, 1,
893 			"Buffer overflow detected. Allocation size: %zu. Mapping size: %zu.\n",
894 			alloc_size, size);
895 		size = alloc_size;
896 	}
897 
898 	if (PageHighMem(pfn_to_page(pfn))) {
899 		unsigned int offset = orig_addr & ~PAGE_MASK;
900 		struct page *page;
901 		unsigned int sz = 0;
902 		unsigned long flags;
903 
904 		while (size) {
905 			sz = min_t(size_t, PAGE_SIZE - offset, size);
906 
907 			local_irq_save(flags);
908 			page = pfn_to_page(pfn);
909 			if (dir == DMA_TO_DEVICE)
910 				memcpy_from_page(vaddr, page, offset, sz);
911 			else
912 				memcpy_to_page(page, offset, vaddr, sz);
913 			local_irq_restore(flags);
914 
915 			size -= sz;
916 			pfn++;
917 			vaddr += sz;
918 			offset = 0;
919 		}
920 	} else if (dir == DMA_TO_DEVICE) {
921 		memcpy(vaddr, phys_to_virt(orig_addr), size);
922 	} else {
923 		memcpy(phys_to_virt(orig_addr), vaddr, size);
924 	}
925 }
926 
927 static inline phys_addr_t slot_addr(phys_addr_t start, phys_addr_t idx)
928 {
929 	return start + (idx << IO_TLB_SHIFT);
930 }
931 
932 /*
933  * Carefully handle integer overflow which can occur when boundary_mask == ~0UL.
934  */
935 static inline unsigned long get_max_slots(unsigned long boundary_mask)
936 {
937 	return (boundary_mask >> IO_TLB_SHIFT) + 1;
938 }
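
/*
 * Illustrative: with a 4 GiB segment boundary (boundary_mask ==
 * 0xffffffff) an allocation may span (0xffffffff >> 11) + 1 == 0x200000
 * slots of 2 KiB each. Shifting before adding 1 is what keeps
 * boundary_mask == ~0UL from overflowing to 0.
 */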
939 
940 static unsigned int wrap_area_index(struct io_tlb_pool *mem, unsigned int index)
941 {
942 	if (index >= mem->area_nslabs)
943 		return 0;
944 	return index;
945 }
946 
947 /*
948  * Track the total used slots with a global atomic value in order to have
949  * correct information to determine the high water mark. The mem_used()
950  * function gives imprecise results because there's no locking across
951  * multiple areas.
952  */
953 #ifdef CONFIG_DEBUG_FS
954 static void inc_used_and_hiwater(struct io_tlb_mem *mem, unsigned int nslots)
955 {
956 	unsigned long old_hiwater, new_used;
957 
958 	new_used = atomic_long_add_return(nslots, &mem->total_used);
959 	old_hiwater = atomic_long_read(&mem->used_hiwater);
960 	do {
961 		if (new_used <= old_hiwater)
962 			break;
963 	} while (!atomic_long_try_cmpxchg(&mem->used_hiwater,
964 					  &old_hiwater, new_used));
965 }
966 
967 static void dec_used(struct io_tlb_mem *mem, unsigned int nslots)
968 {
969 	atomic_long_sub(nslots, &mem->total_used);
970 }
971 
972 #else /* !CONFIG_DEBUG_FS */
973 static void inc_used_and_hiwater(struct io_tlb_mem *mem, unsigned int nslots)
974 {
975 }
976 static void dec_used(struct io_tlb_mem *mem, unsigned int nslots)
977 {
978 }
979 #endif /* CONFIG_DEBUG_FS */
980 
981 /**
982  * swiotlb_area_find_slots() - search for slots in one IO TLB memory area
983  * @dev:	Device which maps the buffer.
984  * @pool:	Memory pool to be searched.
985  * @area_index:	Index of the IO TLB memory area to be searched.
986  * @orig_addr:	Original (non-bounced) IO buffer address.
987  * @alloc_size: Total requested size of the bounce buffer,
988  *		including initial alignment padding.
989  * @alloc_align_mask:	Required alignment of the allocated buffer.
990  *
991  * Find a suitable sequence of IO TLB entries for the request and allocate
992  * a buffer from the given IO TLB memory area.
993  * This function takes care of locking.
994  *
995  * Return: Index of the first allocated slot, or -1 on error.
996  */
997 static int swiotlb_area_find_slots(struct device *dev, struct io_tlb_pool *pool,
998 		int area_index, phys_addr_t orig_addr, size_t alloc_size,
999 		unsigned int alloc_align_mask)
1000 {
1001 	struct io_tlb_area *area = pool->areas + area_index;
1002 	unsigned long boundary_mask = dma_get_seg_boundary(dev);
1003 	dma_addr_t tbl_dma_addr =
1004 		phys_to_dma_unencrypted(dev, pool->start) & boundary_mask;
1005 	unsigned long max_slots = get_max_slots(boundary_mask);
1006 	unsigned int iotlb_align_mask = dma_get_min_align_mask(dev);
1007 	unsigned int nslots = nr_slots(alloc_size), stride;
1008 	unsigned int offset = swiotlb_align_offset(dev, 0, orig_addr);
1009 	unsigned int index, slots_checked, count = 0, i;
1010 	unsigned long flags;
1011 	unsigned int slot_base;
1012 	unsigned int slot_index;
1013 
1014 	BUG_ON(!nslots);
1015 	BUG_ON(area_index >= pool->nareas);
1016 
1017 	/*
1018 	 * Historically, swiotlb allocations >= PAGE_SIZE were guaranteed to be
1019 	 * page-aligned in the absence of any other alignment requirements.
1020 	 * 'alloc_align_mask' was later introduced to specify the alignment
1021 	 * explicitly, however this is passed as zero for streaming mappings
1022 	 * and so we preserve the old behaviour there in case any drivers are
1023 	 * relying on it.
1024 	 */
1025 	if (!alloc_align_mask && !iotlb_align_mask && alloc_size >= PAGE_SIZE)
1026 		alloc_align_mask = PAGE_SIZE - 1;
1027 
1028 	/*
1029 	 * Ensure that the allocation is at least slot-aligned and update
1030 	 * 'iotlb_align_mask' to ignore bits that will be preserved when
1031 	 * offsetting into the allocation.
1032 	 */
1033 	alloc_align_mask |= (IO_TLB_SIZE - 1);
1034 	iotlb_align_mask &= ~alloc_align_mask;
1035 
1036 	/*
1037 	 * For mappings with an alignment requirement don't bother looping to
1038 	 * unaligned slots once we found an aligned one.
1039 	 */
1040 	stride = get_max_slots(max(alloc_align_mask, iotlb_align_mask));
1041 
1042 	spin_lock_irqsave(&area->lock, flags);
1043 	if (unlikely(nslots > pool->area_nslabs - area->used))
1044 		goto not_found;
1045 
1046 	slot_base = area_index * pool->area_nslabs;
1047 	index = area->index;
1048 
1049 	for (slots_checked = 0; slots_checked < pool->area_nslabs; ) {
1050 		phys_addr_t tlb_addr;
1051 
1052 		slot_index = slot_base + index;
1053 		tlb_addr = slot_addr(tbl_dma_addr, slot_index);
1054 
1055 		if ((tlb_addr & alloc_align_mask) ||
1056 		    (orig_addr && (tlb_addr & iotlb_align_mask) !=
1057 				  (orig_addr & iotlb_align_mask))) {
1058 			index = wrap_area_index(pool, index + 1);
1059 			slots_checked++;
1060 			continue;
1061 		}
1062 
1063 		if (!iommu_is_span_boundary(slot_index, nslots,
1064 					    nr_slots(tbl_dma_addr),
1065 					    max_slots)) {
1066 			if (pool->slots[slot_index].list >= nslots)
1067 				goto found;
1068 		}
1069 		index = wrap_area_index(pool, index + stride);
1070 		slots_checked += stride;
1071 	}
1072 
1073 not_found:
1074 	spin_unlock_irqrestore(&area->lock, flags);
1075 	return -1;
1076 
1077 found:
1078 	/*
1079 	 * If we find a slot that indicates we have 'nslots' number of
1080 	 * contiguous buffers, we allocate the buffers from that slot onwards
1081 	 * and set the list of free entries to '0' indicating unavailable.
1082 	 */
1083 	for (i = slot_index; i < slot_index + nslots; i++) {
1084 		pool->slots[i].list = 0;
1085 		pool->slots[i].alloc_size = alloc_size - (offset +
1086 				((i - slot_index) << IO_TLB_SHIFT));
1087 	}
1088 	for (i = slot_index - 1;
1089 	     io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 &&
1090 	     pool->slots[i].list; i--)
1091 		pool->slots[i].list = ++count;
1092 
1093 	/*
1094 	 * Update the indices to avoid searching in the next round.
1095 	 */
1096 	area->index = wrap_area_index(pool, index + nslots);
1097 	area->used += nslots;
1098 	spin_unlock_irqrestore(&area->lock, flags);
1099 
1100 	inc_used_and_hiwater(dev->dma_io_tlb_mem, nslots);
1101 	return slot_index;
1102 }
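
/*
 * Illustrative stride example: for a 4 KiB alloc_align_mask (0xfff) and
 * 2 KiB slots, only every second slot can start a correctly aligned
 * allocation, so stride == get_max_slots(0xfff) == 2 and the search
 * above advances two slots at a time once an aligned candidate fails.
 */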
1103 
1104 /**
1105  * swiotlb_pool_find_slots() - search for slots in one memory pool
1106  * @dev:	Device which maps the buffer.
1107  * @pool:	Memory pool to be searched.
1108  * @orig_addr:	Original (non-bounced) IO buffer address.
1109  * @alloc_size: Total requested size of the bounce buffer,
1110  *		including initial alignment padding.
1111  * @alloc_align_mask:	Required alignment of the allocated buffer.
1112  *
1113  * Search through one memory pool to find a sequence of slots that match the
1114  * allocation constraints.
1115  *
1116  * Return: Index of the first allocated slot, or -1 on error.
1117  */
1118 static int swiotlb_pool_find_slots(struct device *dev, struct io_tlb_pool *pool,
1119 		phys_addr_t orig_addr, size_t alloc_size,
1120 		unsigned int alloc_align_mask)
1121 {
1122 	int start = raw_smp_processor_id() & (pool->nareas - 1);
1123 	int i = start, index;
1124 
1125 	do {
1126 		index = swiotlb_area_find_slots(dev, pool, i, orig_addr,
1127 						alloc_size, alloc_align_mask);
1128 		if (index >= 0)
1129 			return index;
1130 		if (++i >= pool->nareas)
1131 			i = 0;
1132 	} while (i != start);
1133 
1134 	return -1;
1135 }
1136 
1137 #ifdef CONFIG_SWIOTLB_DYNAMIC
1138 
1139 /**
1140  * swiotlb_find_slots() - search for slots in the whole swiotlb
1141  * @dev:	Device which maps the buffer.
1142  * @orig_addr:	Original (non-bounced) IO buffer address.
1143  * @alloc_size: Total requested size of the bounce buffer,
1144  *		including initial alignment padding.
1145  * @alloc_align_mask:	Required alignment of the allocated buffer.
1146  * @retpool:	Used memory pool, updated on return.
1147  *
1148  * Search through the whole software IO TLB to find a sequence of slots that
1149  * match the allocation constraints.
1150  *
1151  * Return: Index of the first allocated slot, or -1 on error.
1152  */
1153 static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
1154 		size_t alloc_size, unsigned int alloc_align_mask,
1155 		struct io_tlb_pool **retpool)
1156 {
1157 	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
1158 	struct io_tlb_pool *pool;
1159 	unsigned long nslabs;
1160 	unsigned long flags;
1161 	u64 phys_limit;
1162 	int index;
1163 
1164 	rcu_read_lock();
1165 	list_for_each_entry_rcu(pool, &mem->pools, node) {
1166 		index = swiotlb_pool_find_slots(dev, pool, orig_addr,
1167 						alloc_size, alloc_align_mask);
1168 		if (index >= 0) {
1169 			rcu_read_unlock();
1170 			goto found;
1171 		}
1172 	}
1173 	rcu_read_unlock();
1174 	if (!mem->can_grow)
1175 		return -1;
1176 
1177 	schedule_work(&mem->dyn_alloc);
1178 
1179 	nslabs = nr_slots(alloc_size);
1180 	phys_limit = min_not_zero(*dev->dma_mask, dev->bus_dma_limit);
1181 	pool = swiotlb_alloc_pool(dev, nslabs, nslabs, 1, phys_limit,
1182 				  GFP_NOWAIT | __GFP_NOWARN);
1183 	if (!pool)
1184 		return -1;
1185 
1186 	index = swiotlb_pool_find_slots(dev, pool, orig_addr,
1187 					alloc_size, alloc_align_mask);
1188 	if (index < 0) {
1189 		swiotlb_dyn_free(&pool->rcu);
1190 		return -1;
1191 	}
1192 
1193 	pool->transient = true;
1194 	spin_lock_irqsave(&dev->dma_io_tlb_lock, flags);
1195 	list_add_rcu(&pool->node, &dev->dma_io_tlb_pools);
1196 	spin_unlock_irqrestore(&dev->dma_io_tlb_lock, flags);
1197 
1198 found:
1199 	WRITE_ONCE(dev->dma_uses_io_tlb, true);
1200 
1201 	/*
1202 	 * The general barrier orders reads and writes against a presumed store
1203 	 * of the SWIOTLB buffer address by a device driver (to a driver private
1204 	 * data structure). It serves two purposes.
1205 	 *
1206 	 * First, the store to dev->dma_uses_io_tlb must be ordered before the
1207 	 * presumed store. This guarantees that the returned buffer address
1208 	 * cannot be passed to another CPU before updating dev->dma_uses_io_tlb.
1209 	 *
1210 	 * Second, the load from mem->pools must be ordered before the same
1211 	 * presumed store. This guarantees that the returned buffer address
1212 	 * cannot be observed by another CPU before an update of the RCU list
1213 	 * that was made by swiotlb_dyn_alloc() on a third CPU (cf. multicopy
1214 	 * atomicity).
1215 	 *
1216 	 * See also the comment in is_swiotlb_buffer().
1217 	 */
1218 	smp_mb();
1219 
1220 	*retpool = pool;
1221 	return index;
1222 }
1223 
1224 #else  /* !CONFIG_SWIOTLB_DYNAMIC */
1225 
1226 static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
1227 		size_t alloc_size, unsigned int alloc_align_mask,
1228 		struct io_tlb_pool **retpool)
1229 {
1230 	*retpool = &dev->dma_io_tlb_mem->defpool;
1231 	return swiotlb_pool_find_slots(dev, *retpool,
1232 				       orig_addr, alloc_size, alloc_align_mask);
1233 }
1234 
1235 #endif /* CONFIG_SWIOTLB_DYNAMIC */
1236 
1237 #ifdef CONFIG_DEBUG_FS
1238 
1239 /**
1240  * mem_used() - get number of used slots in an allocator
1241  * @mem:	Software IO TLB allocator.
1242  *
1243  * The result is accurate in this version of the function, because an atomic
1244  * counter is available if CONFIG_DEBUG_FS is set.
1245  *
1246  * Return: Number of used slots.
1247  */
1248 static unsigned long mem_used(struct io_tlb_mem *mem)
1249 {
1250 	return atomic_long_read(&mem->total_used);
1251 }
1252 
1253 #else /* !CONFIG_DEBUG_FS */
1254 
1255 /**
1256  * mem_pool_used() - get number of used slots in a memory pool
1257  * @pool:	Software IO TLB memory pool.
1258  *
1259  * The result is not accurate, see mem_used().
1260  *
1261  * Return: Approximate number of used slots.
1262  */
1263 static unsigned long mem_pool_used(struct io_tlb_pool *pool)
1264 {
1265 	int i;
1266 	unsigned long used = 0;
1267 
1268 	for (i = 0; i < pool->nareas; i++)
1269 		used += pool->areas[i].used;
1270 	return used;
1271 }
1272 
1273 /**
1274  * mem_used() - get number of used slots in an allocator
1275  * @mem:	Software IO TLB allocator.
1276  *
1277  * The result is not accurate, because there is no locking of individual
1278  * areas.
1279  *
1280  * Return: Approximate number of used slots.
1281  */
1282 static unsigned long mem_used(struct io_tlb_mem *mem)
1283 {
1284 #ifdef CONFIG_SWIOTLB_DYNAMIC
1285 	struct io_tlb_pool *pool;
1286 	unsigned long used = 0;
1287 
1288 	rcu_read_lock();
1289 	list_for_each_entry_rcu(pool, &mem->pools, node)
1290 		used += mem_pool_used(pool);
1291 	rcu_read_unlock();
1292 
1293 	return used;
1294 #else
1295 	return mem_pool_used(&mem->defpool);
1296 #endif
1297 }
1298 
1299 #endif /* CONFIG_DEBUG_FS */
1300 
1301 phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
1302 		size_t mapping_size, size_t alloc_size,
1303 		unsigned int alloc_align_mask, enum dma_data_direction dir,
1304 		unsigned long attrs)
1305 {
1306 	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
1307 	unsigned int offset;
1308 	struct io_tlb_pool *pool;
1309 	unsigned int i;
1310 	int index;
1311 	phys_addr_t tlb_addr;
1312 	unsigned short pad_slots;
1313 
1314 	if (!mem || !mem->nslabs) {
1315 		dev_warn_ratelimited(dev,
1316 			"No SWIOTLB buffer was allocated earlier, so a DMA bounce buffer cannot be provided now");
1317 		return (phys_addr_t)DMA_MAPPING_ERROR;
1318 	}
1319 
1320 	if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
1321 		pr_warn_once("Memory encryption is active and system is using DMA bounce buffers\n");
1322 
1323 	if (mapping_size > alloc_size) {
1324 		dev_warn_once(dev, "Invalid sizes (mapping: %zd bytes, alloc: %zd bytes)",
1325 			      mapping_size, alloc_size);
1326 		return (phys_addr_t)DMA_MAPPING_ERROR;
1327 	}
1328 
1329 	offset = swiotlb_align_offset(dev, alloc_align_mask, orig_addr);
1330 	index = swiotlb_find_slots(dev, orig_addr,
1331 				   alloc_size + offset, alloc_align_mask, &pool);
1332 	if (index == -1) {
1333 		if (!(attrs & DMA_ATTR_NO_WARN))
1334 			dev_warn_ratelimited(dev,
1335 	"swiotlb buffer is full (sz: %zd bytes), total %lu (slots), used %lu (slots)\n",
1336 				 alloc_size, mem->nslabs, mem_used(mem));
1337 		return (phys_addr_t)DMA_MAPPING_ERROR;
1338 	}
1339 
1340 	/*
1341 	 * Save away the mapping from the original address to the DMA address.
1342 	 * This is needed when we sync the memory.  Then we sync the buffer if
1343 	 * needed.
1344 	 */
1345 	pad_slots = offset >> IO_TLB_SHIFT;
1346 	offset &= (IO_TLB_SIZE - 1);
1347 	index += pad_slots;
1348 	pool->slots[index].pad_slots = pad_slots;
1349 	for (i = 0; i < nr_slots(alloc_size + offset); i++)
1350 		pool->slots[index + i].orig_addr = slot_addr(orig_addr, i);
1351 	tlb_addr = slot_addr(pool->start, index) + offset;
1352 	/*
1353 	 * When dir == DMA_FROM_DEVICE we could omit the copy from the orig
1354 	 * to the tlb buffer, if we knew for sure the device would
1355 	 * overwrite the entire current content. But we don't, so the
1356 	 * unconditional bounce prevents leaking stale swiotlb content (i.e.
1357 	 * kernel memory) to user space.
1358 	 */
1359 	swiotlb_bounce(dev, tlb_addr, mapping_size, DMA_TO_DEVICE);
1360 	return tlb_addr;
1361 }
1362 
1363 static void swiotlb_release_slots(struct device *dev, phys_addr_t tlb_addr)
1364 {
1365 	struct io_tlb_pool *mem = swiotlb_find_pool(dev, tlb_addr);
1366 	unsigned long flags;
1367 	unsigned int offset = swiotlb_align_offset(dev, 0, tlb_addr);
1368 	int index, nslots, aindex;
1369 	struct io_tlb_area *area;
1370 	int count, i;
1371 
1372 	index = (tlb_addr - offset - mem->start) >> IO_TLB_SHIFT;
1373 	index -= mem->slots[index].pad_slots;
1374 	nslots = nr_slots(mem->slots[index].alloc_size + offset);
1375 	aindex = index / mem->area_nslabs;
1376 	area = &mem->areas[aindex];
1377 
1378 	/*
1379 	 * Return the buffer to the free list by setting the corresponding
1380 	 * entries to indicate the number of contiguous entries available.
1381 	 * While returning the entries to the free list, we merge the entries
1382 	 * with slots below and above the pool being returned.
1383 	 */
1384 	BUG_ON(aindex >= mem->nareas);
1385 
1386 	spin_lock_irqsave(&area->lock, flags);
1387 	if (index + nslots < ALIGN(index + 1, IO_TLB_SEGSIZE))
1388 		count = mem->slots[index + nslots].list;
1389 	else
1390 		count = 0;
1391 
1392 	/*
1393 	 * Step 1: return the slots to the free list, merging the slots with
1394 	 * succeeding slots.
1395 	 */
1396 	for (i = index + nslots - 1; i >= index; i--) {
1397 		mem->slots[i].list = ++count;
1398 		mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
1399 		mem->slots[i].alloc_size = 0;
1400 		mem->slots[i].pad_slots = 0;
1401 	}
1402 
1403 	/*
1404 	 * Step 2: merge the returned slots with the preceding slots, if
1405 	 * available (non-zero).
1406 	 */
1407 	for (i = index - 1;
1408 	     io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 && mem->slots[i].list;
1409 	     i--)
1410 		mem->slots[i].list = ++count;
1411 	area->used -= nslots;
1412 	spin_unlock_irqrestore(&area->lock, flags);
1413 
1414 	dec_used(dev->dma_io_tlb_mem, nslots);
1415 }
1416 
1417 #ifdef CONFIG_SWIOTLB_DYNAMIC
1418 
1419 /**
1420  * swiotlb_del_transient() - delete a transient memory pool
1421  * @dev:	Device which mapped the buffer.
1422  * @tlb_addr:	Physical address within a bounce buffer.
1423  *
1424  * Check whether the address belongs to a transient SWIOTLB memory pool.
1425  * If yes, then delete the pool.
1426  *
1427  * Return: %true if @tlb_addr belonged to a transient pool that was released.
1428  */
1429 static bool swiotlb_del_transient(struct device *dev, phys_addr_t tlb_addr)
1430 {
1431 	struct io_tlb_pool *pool;
1432 
1433 	pool = swiotlb_find_pool(dev, tlb_addr);
1434 	if (!pool->transient)
1435 		return false;
1436 
1437 	dec_used(dev->dma_io_tlb_mem, pool->nslabs);
1438 	swiotlb_del_pool(dev, pool);
1439 	return true;
1440 }
1441 
1442 #else  /* !CONFIG_SWIOTLB_DYNAMIC */
1443 
1444 static inline bool swiotlb_del_transient(struct device *dev,
1445 					 phys_addr_t tlb_addr)
1446 {
1447 	return false;
1448 }
1449 
1450 #endif	/* CONFIG_SWIOTLB_DYNAMIC */
1451 
1452 /*
1453  * tlb_addr is the physical address of the bounce buffer to unmap.
1454  */
1455 void swiotlb_tbl_unmap_single(struct device *dev, phys_addr_t tlb_addr,
1456 			      size_t mapping_size, enum dma_data_direction dir,
1457 			      unsigned long attrs)
1458 {
1459 	/*
1460 	 * First, sync the memory before unmapping the entry
1461 	 */
1462 	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
1463 	    (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
1464 		swiotlb_bounce(dev, tlb_addr, mapping_size, DMA_FROM_DEVICE);
1465 
1466 	if (swiotlb_del_transient(dev, tlb_addr))
1467 		return;
1468 	swiotlb_release_slots(dev, tlb_addr);
1469 }
1470 
1471 void swiotlb_sync_single_for_device(struct device *dev, phys_addr_t tlb_addr,
1472 		size_t size, enum dma_data_direction dir)
1473 {
1474 	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
1475 		swiotlb_bounce(dev, tlb_addr, size, DMA_TO_DEVICE);
1476 	else
1477 		BUG_ON(dir != DMA_FROM_DEVICE);
1478 }
1479 
1480 void swiotlb_sync_single_for_cpu(struct device *dev, phys_addr_t tlb_addr,
1481 		size_t size, enum dma_data_direction dir)
1482 {
1483 	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
1484 		swiotlb_bounce(dev, tlb_addr, size, DMA_FROM_DEVICE);
1485 	else
1486 		BUG_ON(dir != DMA_TO_DEVICE);
1487 }
1488 
1489 /*
1490  * Create a swiotlb mapping for the buffer at @paddr, and in case of DMAing
1491  * to the device copy the data into it as well.
1492  */
1493 dma_addr_t swiotlb_map(struct device *dev, phys_addr_t paddr, size_t size,
1494 		enum dma_data_direction dir, unsigned long attrs)
1495 {
1496 	phys_addr_t swiotlb_addr;
1497 	dma_addr_t dma_addr;
1498 
1499 	trace_swiotlb_bounced(dev, phys_to_dma(dev, paddr), size);
1500 
1501 	swiotlb_addr = swiotlb_tbl_map_single(dev, paddr, size, size, 0, dir,
1502 			attrs);
1503 	if (swiotlb_addr == (phys_addr_t)DMA_MAPPING_ERROR)
1504 		return DMA_MAPPING_ERROR;
1505 
1506 	/* Ensure that the address returned is DMA'ble */
1507 	dma_addr = phys_to_dma_unencrypted(dev, swiotlb_addr);
1508 	if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
1509 		swiotlb_tbl_unmap_single(dev, swiotlb_addr, size, dir,
1510 			attrs | DMA_ATTR_SKIP_CPU_SYNC);
1511 		dev_WARN_ONCE(dev, 1,
1512 			"swiotlb addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
1513 			&dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
1514 		return DMA_MAPPING_ERROR;
1515 	}
1516 
1517 	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
1518 		arch_sync_dma_for_device(swiotlb_addr, size, dir);
1519 	return dma_addr;
1520 }
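
/*
 * Simplified sketch (assumed caller, loosely modelled on dma-direct) of
 * where swiotlb_map() sits in a map_page implementation; the function
 * name and control flow here are illustrative, not a verbatim copy:
 *
 *	static dma_addr_t my_map_page(struct device *dev, struct page *page,
 *			unsigned long offset, size_t size,
 *			enum dma_data_direction dir, unsigned long attrs)
 *	{
 *		phys_addr_t phys = page_to_phys(page) + offset;
 *		dma_addr_t dma_addr = phys_to_dma(dev, phys);
 *
 *		if (is_swiotlb_force_bounce(dev) ||
 *		    !dma_capable(dev, dma_addr, size, true))
 *			return swiotlb_map(dev, phys, size, dir, attrs);
 *		return dma_addr;
 *	}
 */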
1521 
1522 size_t swiotlb_max_mapping_size(struct device *dev)
1523 {
1524 	int min_align_mask = dma_get_min_align_mask(dev);
1525 	int min_align = 0;
1526 
1527 	/*
1528 	 * swiotlb_find_slots() skips slots according to
1529 	 * min align mask. This affects max mapping size.
1530 	 * Take it into account here.
1531 	 */
1532 	if (min_align_mask)
1533 		min_align = roundup(min_align_mask, IO_TLB_SIZE);
1534 
1535 	return ((size_t)IO_TLB_SIZE) * IO_TLB_SEGSIZE - min_align;
1536 }
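
/*
 * Worked example (illustrative): with 2 KiB slots and 128-slot segments
 * the unconstrained maximum is 256 KiB. A device with
 * min_align_mask == 0xfff loses roundup(0xfff, 2048) == 4096 bytes to
 * alignment padding, leaving a 252 KiB maximum mapping size.
 */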
1537 
1538 /**
1539  * is_swiotlb_allocated() - check if the default software IO TLB is initialized
1540  */
1541 bool is_swiotlb_allocated(void)
1542 {
1543 	return io_tlb_default_mem.nslabs;
1544 }
1545 
1546 bool is_swiotlb_active(struct device *dev)
1547 {
1548 	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
1549 
1550 	return mem && mem->nslabs;
1551 }
1552 
1553 /**
1554  * default_swiotlb_base() - get the base address of the default SWIOTLB
1555  *
1556  * Get the lowest physical address used by the default software IO TLB pool.
1557  */
1558 phys_addr_t default_swiotlb_base(void)
1559 {
1560 #ifdef CONFIG_SWIOTLB_DYNAMIC
1561 	io_tlb_default_mem.can_grow = false;
1562 #endif
1563 	return io_tlb_default_mem.defpool.start;
1564 }
1565 
1566 /**
1567  * default_swiotlb_limit() - get the address limit of the default SWIOTLB
1568  *
1569  * Get the highest physical address used by the default software IO TLB pool.
1570  */
1571 phys_addr_t default_swiotlb_limit(void)
1572 {
1573 #ifdef CONFIG_SWIOTLB_DYNAMIC
1574 	return io_tlb_default_mem.phys_limit;
1575 #else
1576 	return io_tlb_default_mem.defpool.end - 1;
1577 #endif
1578 }
1579 
1580 #ifdef CONFIG_DEBUG_FS
1581 
1582 static int io_tlb_used_get(void *data, u64 *val)
1583 {
1584 	struct io_tlb_mem *mem = data;
1585 
1586 	*val = mem_used(mem);
1587 	return 0;
1588 }
1589 
1590 static int io_tlb_hiwater_get(void *data, u64 *val)
1591 {
1592 	struct io_tlb_mem *mem = data;
1593 
1594 	*val = atomic_long_read(&mem->used_hiwater);
1595 	return 0;
1596 }
1597 
1598 static int io_tlb_hiwater_set(void *data, u64 val)
1599 {
1600 	struct io_tlb_mem *mem = data;
1601 
1602 	/* Only allow setting to zero */
1603 	if (val != 0)
1604 		return -EINVAL;
1605 
1606 	atomic_long_set(&mem->used_hiwater, val);
1607 	return 0;
1608 }
1609 
1610 DEFINE_DEBUGFS_ATTRIBUTE(fops_io_tlb_used, io_tlb_used_get, NULL, "%llu\n");
1611 DEFINE_DEBUGFS_ATTRIBUTE(fops_io_tlb_hiwater, io_tlb_hiwater_get,
1612 				io_tlb_hiwater_set, "%llu\n");
1613 
1614 static void swiotlb_create_debugfs_files(struct io_tlb_mem *mem,
1615 					 const char *dirname)
1616 {
1617 	atomic_long_set(&mem->total_used, 0);
1618 	atomic_long_set(&mem->used_hiwater, 0);
1619 
1620 	mem->debugfs = debugfs_create_dir(dirname, io_tlb_default_mem.debugfs);
1621 	if (!mem->nslabs)
1622 		return;
1623 
1624 	debugfs_create_ulong("io_tlb_nslabs", 0400, mem->debugfs, &mem->nslabs);
1625 	debugfs_create_file("io_tlb_used", 0400, mem->debugfs, mem,
1626 			&fops_io_tlb_used);
1627 	debugfs_create_file("io_tlb_used_hiwater", 0600, mem->debugfs, mem,
1628 			&fops_io_tlb_hiwater);
1629 }
1630 
1631 static int __init swiotlb_create_default_debugfs(void)
1632 {
1633 	swiotlb_create_debugfs_files(&io_tlb_default_mem, "swiotlb");
1634 	return 0;
1635 }
1636 
1637 late_initcall(swiotlb_create_default_debugfs);
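
/*
 * With CONFIG_DEBUG_FS the default pool is exposed under
 * /sys/kernel/debug/swiotlb/. Example shell session (values are
 * illustrative):
 *
 *	# cat /sys/kernel/debug/swiotlb/io_tlb_nslabs
 *	32768
 *	# cat /sys/kernel/debug/swiotlb/io_tlb_used_hiwater
 *	4096
 *	# echo 0 > /sys/kernel/debug/swiotlb/io_tlb_used_hiwater
 */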
1638 
1639 #else  /* !CONFIG_DEBUG_FS */
1640 
1641 static inline void swiotlb_create_debugfs_files(struct io_tlb_mem *mem,
1642 						const char *dirname)
1643 {
1644 }
1645 
1646 #endif	/* CONFIG_DEBUG_FS */
1647 
1648 #ifdef CONFIG_DMA_RESTRICTED_POOL
1649 
1650 struct page *swiotlb_alloc(struct device *dev, size_t size)
1651 {
1652 	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
1653 	struct io_tlb_pool *pool;
1654 	phys_addr_t tlb_addr;
1655 	unsigned int align;
1656 	int index;
1657 
1658 	if (!mem)
1659 		return NULL;
1660 
1661 	align = (1 << (get_order(size) + PAGE_SHIFT)) - 1;
1662 	index = swiotlb_find_slots(dev, 0, size, align, &pool);
1663 	if (index == -1)
1664 		return NULL;
1665 
1666 	tlb_addr = slot_addr(pool->start, index);
1667 	if (unlikely(!PAGE_ALIGNED(tlb_addr))) {
1668 		dev_WARN_ONCE(dev, 1, "Cannot allocate pages from non page-aligned swiotlb addr 0x%pa.\n",
1669 			      &tlb_addr);
1670 		swiotlb_release_slots(dev, tlb_addr);
1671 		return NULL;
1672 	}
1673 
1674 	return pfn_to_page(PFN_DOWN(tlb_addr));
1675 }
1676 
1677 bool swiotlb_free(struct device *dev, struct page *page, size_t size)
1678 {
1679 	phys_addr_t tlb_addr = page_to_phys(page);
1680 
1681 	if (!is_swiotlb_buffer(dev, tlb_addr))
1682 		return false;
1683 
1684 	swiotlb_release_slots(dev, tlb_addr);
1685 
1686 	return true;
1687 }
1688 
1689 static int rmem_swiotlb_device_init(struct reserved_mem *rmem,
1690 				    struct device *dev)
1691 {
1692 	struct io_tlb_mem *mem = rmem->priv;
1693 	unsigned long nslabs = rmem->size >> IO_TLB_SHIFT;
1694 
1695 	/* Use a single IO TLB area for each per-device restricted pool */
1696 	unsigned int nareas = 1;
1697 
1698 	if (PageHighMem(pfn_to_page(PHYS_PFN(rmem->base)))) {
1699 		dev_err(dev, "Restricted DMA pool must be accessible within the linear mapping.");
1700 		return -EINVAL;
1701 	}
1702 
1703 	/*
1704 	 * Since multiple devices can share the same pool, the private data,
1705 	 * io_tlb_mem struct, will be initialized by the first device attached
1706 	 * to it.
1707 	 */
1708 	if (!mem) {
1709 		struct io_tlb_pool *pool;
1710 
1711 		mem = kzalloc(sizeof(*mem), GFP_KERNEL);
1712 		if (!mem)
1713 			return -ENOMEM;
1714 		pool = &mem->defpool;
1715 
1716 		pool->slots = kcalloc(nslabs, sizeof(*pool->slots), GFP_KERNEL);
1717 		if (!pool->slots) {
1718 			kfree(mem);
1719 			return -ENOMEM;
1720 		}
1721 
1722 		pool->areas = kcalloc(nareas, sizeof(*pool->areas),
1723 				GFP_KERNEL);
1724 		if (!pool->areas) {
1725 			kfree(pool->slots);
1726 			kfree(mem);
1727 			return -ENOMEM;
1728 		}
1729 
1730 		set_memory_decrypted((unsigned long)phys_to_virt(rmem->base),
1731 				     rmem->size >> PAGE_SHIFT);
1732 		swiotlb_init_io_tlb_pool(pool, rmem->base, nslabs,
1733 					 false, nareas);
1734 		mem->force_bounce = true;
1735 		mem->for_alloc = true;
1736 #ifdef CONFIG_SWIOTLB_DYNAMIC
1737 		spin_lock_init(&mem->lock);
1738 		INIT_LIST_HEAD_RCU(&mem->pools);
1739 #endif
1740 		add_mem_pool(mem, pool);
1741 
1742 		rmem->priv = mem;
1743 
1744 		swiotlb_create_debugfs_files(mem, rmem->name);
1745 	}
1746 
1747 	dev->dma_io_tlb_mem = mem;
1748 
1749 	return 0;
1750 }
1751 
1752 static void rmem_swiotlb_device_release(struct reserved_mem *rmem,
1753 					struct device *dev)
1754 {
1755 	dev->dma_io_tlb_mem = &io_tlb_default_mem;
1756 }
1757 
1758 static const struct reserved_mem_ops rmem_swiotlb_ops = {
1759 	.device_init = rmem_swiotlb_device_init,
1760 	.device_release = rmem_swiotlb_device_release,
1761 };
1762 
1763 static int __init rmem_swiotlb_setup(struct reserved_mem *rmem)
1764 {
1765 	unsigned long node = rmem->fdt_node;
1766 
1767 	if (of_get_flat_dt_prop(node, "reusable", NULL) ||
1768 	    of_get_flat_dt_prop(node, "linux,cma-default", NULL) ||
1769 	    of_get_flat_dt_prop(node, "linux,dma-default", NULL) ||
1770 	    of_get_flat_dt_prop(node, "no-map", NULL))
1771 		return -EINVAL;
1772 
1773 	rmem->ops = &rmem_swiotlb_ops;
1774 	pr_info("Reserved memory: created restricted DMA pool at %pa, size %lu MiB\n",
1775 		&rmem->base, (unsigned long)rmem->size / SZ_1M);
1776 	return 0;
1777 }
1778 
1779 RESERVEDMEM_OF_DECLARE(dma, "restricted-dma-pool", rmem_swiotlb_setup);
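
/*
 * Illustrative device-tree usage (addresses and sizes are made up): a
 * "restricted-dma-pool" node under /reserved-memory, referenced from
 * the device node via "memory-region":
 *
 *	reserved-memory {
 *		restricted_dma: restricted-dma-pool@50000000 {
 *			compatible = "restricted-dma-pool";
 *			reg = <0x50000000 0x400000>;
 *		};
 *	};
 *
 *	pcie_dev: pcie@0 {
 *		memory-region = <&restricted_dma>;
 *	};
 */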
1780 #endif /* CONFIG_DMA_RESTRICTED_POOL */
1781