// SPDX-License-Identifier: GPL-2.0-only
/*
 * Dynamic DMA mapping support.
 *
 * This implementation is a fallback for platforms that do not support
 * I/O TLBs (aka DMA address translation hardware).
 * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
 * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
 * Copyright (C) 2000, 2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 03/05/07 davidm	Switch from PCI-DMA to generic device DMA API.
 * 00/12/13 davidm	Rename to swiotlb.c and add mark_clean() to avoid
 *			unnecessary i-cache flushing.
 * 04/07/.. ak		Better overflow handling. Assorted fixes.
 * 05/09/10 linville	Add support for syncing ranges, support syncing for
 *			DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
 * 08/12/11 beckyb	Add highmem support
 */

#define pr_fmt(fmt) "software IO TLB: " fmt

#include <linux/cache.h>
#include <linux/cc_platform.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/iommu-helper.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/pfn.h>
#include <linux/scatterlist.h>
#include <linux/set_memory.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/swiotlb.h>
#include <linux/types.h>
#ifdef CONFIG_DMA_RESTRICTED_POOL
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>
#include <linux/slab.h>
#endif

#define CREATE_TRACE_POINTS
#include <trace/events/swiotlb.h>

#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))

/*
 * Minimum IO TLB size to bother booting with.  Systems with mainly
 * 64bit capable cards will only lightly use the swiotlb.  If we can't
 * allocate a contiguous 1MB, we're probably in trouble anyway.
 */
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)

#define INVALID_PHYS_ADDR (~(phys_addr_t)0)
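/*
 * Size arithmetic, worked out (illustrative; assumes the usual IO_TLB_SHIFT
 * of 11, i.e. 2 KiB slots, and 4 KiB pages): SLABS_PER_PAGE is then 2, and
 * IO_TLB_MIN_SLABS is (1 MiB / 2 KiB) = 512 slots, so the retry loops below
 * never shrink the pool to less than roughly 1 MiB when early allocations
 * fail.
 */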
struct io_tlb_slot {
	phys_addr_t orig_addr;
	size_t alloc_size;
	unsigned int list;
};

static bool swiotlb_force_bounce;
static bool swiotlb_force_disable;

struct io_tlb_mem io_tlb_default_mem;

static unsigned long default_nslabs = IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT;
static unsigned long default_nareas;

/**
 * struct io_tlb_area - IO TLB memory area descriptor
 *
 * This is a single area with a single lock.
 *
 * @used: The number of used IO TLB blocks.
 * @index: The slot index to start searching in this area for next round.
 * @lock: The lock to protect the above data structures in the map and
 *	unmap calls.
 */
struct io_tlb_area {
	unsigned long used;
	unsigned int index;
	spinlock_t lock;
};

/*
 * Round up the number of slabs to the next power of 2. The last area is
 * going to be smaller than the rest if default_nslabs is not a power of two.
 * The number of slots in an area should be a multiple of IO_TLB_SEGSIZE,
 * otherwise a segment may span two or more areas. That conflicts with free
 * contiguous slot tracking: free slots are treated as contiguous no matter
 * whether they cross an area boundary.
 *
 * Return true if default_nslabs is rounded up.
 */
static bool round_up_default_nslabs(void)
{
	if (!default_nareas)
		return false;

	if (default_nslabs < IO_TLB_SEGSIZE * default_nareas)
		default_nslabs = IO_TLB_SEGSIZE * default_nareas;
	else if (is_power_of_2(default_nslabs))
		return false;
	default_nslabs = roundup_pow_of_two(default_nslabs);
	return true;
}

static void swiotlb_adjust_nareas(unsigned int nareas)
{
	/* use a single area when none is specified */
	if (!nareas)
		nareas = 1;
	else if (!is_power_of_2(nareas))
		nareas = roundup_pow_of_two(nareas);

	default_nareas = nareas;

	pr_info("area num %d.\n", nareas);
	if (round_up_default_nslabs())
		pr_info("SWIOTLB bounce buffer size roundup to %luMB",
			(default_nslabs << IO_TLB_SHIFT) >> 20);
}

static int __init
setup_io_tlb_npages(char *str)
{
	if (isdigit(*str)) {
		/* avoid tail segment of size < IO_TLB_SEGSIZE */
		default_nslabs =
			ALIGN(simple_strtoul(str, &str, 0), IO_TLB_SEGSIZE);
	}
	if (*str == ',')
		++str;
	if (isdigit(*str))
		swiotlb_adjust_nareas(simple_strtoul(str, &str, 0));
	if (*str == ',')
		++str;
	if (!strcmp(str, "force"))
		swiotlb_force_bounce = true;
	else if (!strcmp(str, "noforce"))
		swiotlb_force_disable = true;

	return 0;
}
early_param("swiotlb", setup_io_tlb_npages);
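/*
 * Example (illustrative only, assuming 2 KiB slots): booting with
 * "swiotlb=65536,4,force" requests 65536 slabs (128 MiB), four areas and
 * forced bouncing for all DMA, while "swiotlb=,,noforce" leaves the sizing
 * alone and disables the bounce buffer entirely.
 */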
unsigned long swiotlb_size_or_default(void)
{
	return default_nslabs << IO_TLB_SHIFT;
}

void __init swiotlb_adjust_size(unsigned long size)
{
	/*
	 * If swiotlb parameter has not been specified, give a chance to
	 * architectures such as those supporting memory encryption to
	 * adjust/expand SWIOTLB size for their use.
	 */
	if (default_nslabs != IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT)
		return;

	size = ALIGN(size, IO_TLB_SIZE);
	default_nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
	if (round_up_default_nslabs())
		size = default_nslabs << IO_TLB_SHIFT;
	pr_info("SWIOTLB bounce buffer size adjusted to %luMB", size >> 20);
}

void swiotlb_print_info(void)
{
	struct io_tlb_mem *mem = &io_tlb_default_mem;

	if (!mem->nslabs) {
		pr_warn("No low mem\n");
		return;
	}

	pr_info("mapped [mem %pa-%pa] (%luMB)\n", &mem->start, &mem->end,
		(mem->nslabs << IO_TLB_SHIFT) >> 20);
}

static inline unsigned long io_tlb_offset(unsigned long val)
{
	return val & (IO_TLB_SEGSIZE - 1);
}

static inline unsigned long nr_slots(u64 val)
{
	return DIV_ROUND_UP(val, IO_TLB_SIZE);
}

/*
 * Early SWIOTLB allocation may be too early to allow an architecture to
 * perform the desired operations.  This function allows the architecture to
 * call SWIOTLB when the operations are possible.  It needs to be called
 * before the SWIOTLB memory is used.
 */
void __init swiotlb_update_mem_attributes(void)
{
	struct io_tlb_mem *mem = &io_tlb_default_mem;
	unsigned long bytes;

	if (!mem->nslabs || mem->late_alloc)
		return;
	bytes = PAGE_ALIGN(mem->nslabs << IO_TLB_SHIFT);
	set_memory_decrypted((unsigned long)mem->vaddr, bytes >> PAGE_SHIFT);
}

static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
		unsigned long nslabs, unsigned int flags,
		bool late_alloc, unsigned int nareas)
{
	void *vaddr = phys_to_virt(start);
	unsigned long bytes = nslabs << IO_TLB_SHIFT, i;

	mem->nslabs = nslabs;
	mem->start = start;
	mem->end = mem->start + bytes;
	mem->late_alloc = late_alloc;
	mem->nareas = nareas;
	mem->area_nslabs = nslabs / mem->nareas;

	mem->force_bounce = swiotlb_force_bounce || (flags & SWIOTLB_FORCE);

	for (i = 0; i < mem->nareas; i++) {
		spin_lock_init(&mem->areas[i].lock);
		mem->areas[i].index = 0;
		mem->areas[i].used = 0;
	}

	for (i = 0; i < mem->nslabs; i++) {
		mem->slots[i].list = IO_TLB_SEGSIZE - io_tlb_offset(i);
		mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
		mem->slots[i].alloc_size = 0;
	}

	memset(vaddr, 0, bytes);
	mem->vaddr = vaddr;
	return;
}
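/*
 * Illustration of the free-list encoding set up above (assuming the usual
 * IO_TLB_SEGSIZE of 128): within each 128-slot segment, slots[i].list holds
 * the number of contiguous free slots from i to the end of the segment, so
 * slot 0 starts at 128, slot 1 at 127, ..., slot 127 at 1, and the pattern
 * repeats in the next segment.  The allocator only needs to compare this
 * value against the number of slots it wants.
 */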
static void __init *swiotlb_memblock_alloc(unsigned long nslabs,
		unsigned int flags,
		int (*remap)(void *tlb, unsigned long nslabs))
{
	size_t bytes = PAGE_ALIGN(nslabs << IO_TLB_SHIFT);
	void *tlb;

	/*
	 * By default allocate the bounce buffer memory from low memory, but
	 * allow picking a location anywhere for hypervisors with guest
	 * memory encryption.
	 */
	if (flags & SWIOTLB_ANY)
		tlb = memblock_alloc(bytes, PAGE_SIZE);
	else
		tlb = memblock_alloc_low(bytes, PAGE_SIZE);

	if (!tlb) {
		pr_warn("%s: Failed to allocate %zu bytes tlb structure\n",
			__func__, bytes);
		return NULL;
	}

	if (remap && remap(tlb, nslabs) < 0) {
		memblock_free(tlb, PAGE_ALIGN(bytes));
		pr_warn("%s: Failed to remap %zu bytes\n", __func__, bytes);
		return NULL;
	}

	return tlb;
}

/*
 * Statically reserve bounce buffer space and initialize bounce buffer data
 * structures for the software IO TLB used to implement the DMA API.
 */
void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
		int (*remap)(void *tlb, unsigned long nslabs))
{
	struct io_tlb_mem *mem = &io_tlb_default_mem;
	unsigned long nslabs;
	size_t alloc_size;
	void *tlb;

	if (!addressing_limit && !swiotlb_force_bounce)
		return;
	if (swiotlb_force_disable)
		return;

	/*
	 * default_nslabs may be changed when adjusting the number of areas,
	 * so allocate the bounce buffer after that adjustment.
	 */
	if (!default_nareas)
		swiotlb_adjust_nareas(num_possible_cpus());

	nslabs = default_nslabs;
	while ((tlb = swiotlb_memblock_alloc(nslabs, flags, remap)) == NULL) {
		if (nslabs <= IO_TLB_MIN_SLABS)
			return;
		nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
	}

	if (default_nslabs != nslabs) {
		pr_info("SWIOTLB bounce buffer size adjusted %lu -> %lu slabs",
			default_nslabs, nslabs);
		default_nslabs = nslabs;
	}

	alloc_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), nslabs));
	mem->slots = memblock_alloc(alloc_size, PAGE_SIZE);
	if (!mem->slots) {
		pr_warn("%s: Failed to allocate %zu bytes align=0x%lx\n",
			__func__, alloc_size, PAGE_SIZE);
		return;
	}

	mem->areas = memblock_alloc(array_size(sizeof(struct io_tlb_area),
		default_nareas), SMP_CACHE_BYTES);
	if (!mem->areas) {
		pr_warn("%s: Failed to allocate mem->areas.\n", __func__);
		return;
	}

	swiotlb_init_io_tlb_mem(mem, __pa(tlb), nslabs, flags, false,
				default_nareas);

	if (flags & SWIOTLB_VERBOSE)
		swiotlb_print_info();
}

void __init swiotlb_init(bool addressing_limit, unsigned int flags)
{
	swiotlb_init_remap(addressing_limit, flags, NULL);
}

/*
 * Systems with larger DMA zones (those that don't support ISA) can
 * initialize the swiotlb later using the page allocator if needed.
 * This should be just like above, but with some error catching.
 */
int swiotlb_init_late(size_t size, gfp_t gfp_mask,
		int (*remap)(void *tlb, unsigned long nslabs))
{
	struct io_tlb_mem *mem = &io_tlb_default_mem;
	unsigned long nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
	unsigned char *vstart = NULL;
	unsigned int order, area_order;
	bool retried = false;
	int rc = 0;

	if (swiotlb_force_disable)
		return 0;

retry:
	order = get_order(nslabs << IO_TLB_SHIFT);
	nslabs = SLABS_PER_PAGE << order;

	while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
		vstart = (void *)__get_free_pages(gfp_mask | __GFP_NOWARN,
						  order);
		if (vstart)
			break;
		order--;
		nslabs = SLABS_PER_PAGE << order;
		retried = true;
	}

	if (!vstart)
		return -ENOMEM;

	if (remap)
		rc = remap(vstart, nslabs);
	if (rc) {
		free_pages((unsigned long)vstart, order);

		nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
		if (nslabs < IO_TLB_MIN_SLABS)
			return rc;
		retried = true;
		goto retry;
	}

	if (retried) {
		pr_warn("only able to allocate %ld MB\n",
			(PAGE_SIZE << order) >> 20);
	}

	if (!default_nareas)
		swiotlb_adjust_nareas(num_possible_cpus());

	area_order = get_order(array_size(sizeof(*mem->areas),
		default_nareas));
	mem->areas = (struct io_tlb_area *)
		__get_free_pages(GFP_KERNEL | __GFP_ZERO, area_order);
	if (!mem->areas)
		goto error_area;

	mem->slots = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
		get_order(array_size(sizeof(*mem->slots), nslabs)));
	if (!mem->slots)
		goto error_slots;

	set_memory_decrypted((unsigned long)vstart,
			     (nslabs << IO_TLB_SHIFT) >> PAGE_SHIFT);
	swiotlb_init_io_tlb_mem(mem, virt_to_phys(vstart), nslabs, 0, true,
				default_nareas);

	swiotlb_print_info();
	return 0;

error_slots:
	free_pages((unsigned long)mem->areas, area_order);
error_area:
	free_pages((unsigned long)vstart, order);
	return -ENOMEM;
}
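/*
 * Late-init usage sketch (illustrative only, not a call site in this file):
 * an architecture or hypervisor backend that only discovers its DMA
 * restrictions after boot could do something like
 *
 *	rc = swiotlb_init_late(swiotlb_size_or_default(), GFP_KERNEL, NULL);
 *
 * optionally passing a remap() callback when the buffer has to be remapped
 * or registered with a hypervisor before it can be used.
 */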
void __init swiotlb_exit(void)
{
	struct io_tlb_mem *mem = &io_tlb_default_mem;
	unsigned long tbl_vaddr;
	size_t tbl_size, slots_size;
	unsigned int area_order;

	if (swiotlb_force_bounce)
		return;

	if (!mem->nslabs)
		return;

	pr_info("tearing down default memory pool\n");
	tbl_vaddr = (unsigned long)phys_to_virt(mem->start);
	tbl_size = PAGE_ALIGN(mem->end - mem->start);
	slots_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), mem->nslabs));

	set_memory_encrypted(tbl_vaddr, tbl_size >> PAGE_SHIFT);
	if (mem->late_alloc) {
		area_order = get_order(array_size(sizeof(*mem->areas),
			mem->nareas));
		free_pages((unsigned long)mem->areas, area_order);
		free_pages(tbl_vaddr, get_order(tbl_size));
		free_pages((unsigned long)mem->slots, get_order(slots_size));
	} else {
		memblock_free_late(__pa(mem->areas),
				   array_size(sizeof(*mem->areas), mem->nareas));
		memblock_free_late(mem->start, tbl_size);
		memblock_free_late(__pa(mem->slots), slots_size);
	}

	memset(mem, 0, sizeof(*mem));
}

/*
 * Return the offset into an IO TLB slot required to keep the device happy.
 */
static unsigned int swiotlb_align_offset(struct device *dev, u64 addr)
{
	return addr & dma_get_min_align_mask(dev) & (IO_TLB_SIZE - 1);
}
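/*
 * Worked example (illustrative): a device that declares a 4 KiB - 1
 * min_align_mask (e.g. via dma_set_min_align_mask(dev, 4095)) needs the
 * bounce buffer to preserve the low 12 bits of the original address.  With
 * 2 KiB slots only bits 0-10 live inside a slot, so swiotlb_align_offset()
 * returns orig_addr & 0x7ff and the slot search further down additionally
 * matches bit 11 when picking a slot.
 */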
/*
 * Bounce: copy the swiotlb buffer from or back to the original dma location
 */
static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size,
			   enum dma_data_direction dir)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
	int index = (tlb_addr - mem->start) >> IO_TLB_SHIFT;
	phys_addr_t orig_addr = mem->slots[index].orig_addr;
	size_t alloc_size = mem->slots[index].alloc_size;
	unsigned long pfn = PFN_DOWN(orig_addr);
	unsigned char *vaddr = mem->vaddr + tlb_addr - mem->start;
	unsigned int tlb_offset, orig_addr_offset;

	if (orig_addr == INVALID_PHYS_ADDR)
		return;

	tlb_offset = tlb_addr & (IO_TLB_SIZE - 1);
	orig_addr_offset = swiotlb_align_offset(dev, orig_addr);
	if (tlb_offset < orig_addr_offset) {
		dev_WARN_ONCE(dev, 1,
			"Access before mapping start detected. orig offset %u, requested offset %u.\n",
			orig_addr_offset, tlb_offset);
		return;
	}

	tlb_offset -= orig_addr_offset;
	if (tlb_offset > alloc_size) {
		dev_WARN_ONCE(dev, 1,
			"Buffer overflow detected. Allocation size: %zu. Mapping size: %zu+%u.\n",
			alloc_size, size, tlb_offset);
		return;
	}

	orig_addr += tlb_offset;
	alloc_size -= tlb_offset;

	if (size > alloc_size) {
		dev_WARN_ONCE(dev, 1,
			"Buffer overflow detected. Allocation size: %zu. Mapping size: %zu.\n",
			alloc_size, size);
		size = alloc_size;
	}

	if (PageHighMem(pfn_to_page(pfn))) {
		unsigned int offset = orig_addr & ~PAGE_MASK;
		struct page *page;
		unsigned int sz = 0;
		unsigned long flags;

		while (size) {
			sz = min_t(size_t, PAGE_SIZE - offset, size);

			local_irq_save(flags);
			page = pfn_to_page(pfn);
			if (dir == DMA_TO_DEVICE)
				memcpy_from_page(vaddr, page, offset, sz);
			else
				memcpy_to_page(page, offset, vaddr, sz);
			local_irq_restore(flags);

			size -= sz;
			pfn++;
			vaddr += sz;
			offset = 0;
		}
	} else if (dir == DMA_TO_DEVICE) {
		memcpy(vaddr, phys_to_virt(orig_addr), size);
	} else {
		memcpy(phys_to_virt(orig_addr), vaddr, size);
	}
}

static inline phys_addr_t slot_addr(phys_addr_t start, phys_addr_t idx)
{
	return start + (idx << IO_TLB_SHIFT);
}

/*
 * Carefully handle integer overflow which can occur when boundary_mask == ~0UL.
 */
static inline unsigned long get_max_slots(unsigned long boundary_mask)
{
	if (boundary_mask == ~0UL)
		return 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);
	return nr_slots(boundary_mask + 1);
}

static unsigned int wrap_area_index(struct io_tlb_mem *mem, unsigned int index)
{
	if (index >= mem->area_nslabs)
		return 0;
	return index;
}
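/*
 * Boundary example (illustrative, assuming 2 KiB slots): a device whose
 * dma_get_seg_boundary() is 0xffffffff must not have a mapping cross a
 * 4 GiB boundary, so get_max_slots() yields 0x100000000 / 0x800 = 0x200000
 * slots and iommu_is_span_boundary() below rejects any candidate run that
 * would straddle such a boundary.
 */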
/*
 * Track the total used slots with a global atomic value in order to have
 * correct information to determine the high water mark. The mem_used()
 * function gives imprecise results because there's no locking across
 * multiple areas.
 */
#ifdef CONFIG_DEBUG_FS
static void inc_used_and_hiwater(struct io_tlb_mem *mem, unsigned int nslots)
{
	unsigned long old_hiwater, new_used;

	new_used = atomic_long_add_return(nslots, &mem->total_used);
	old_hiwater = atomic_long_read(&mem->used_hiwater);
	do {
		if (new_used <= old_hiwater)
			break;
	} while (!atomic_long_try_cmpxchg(&mem->used_hiwater,
					  &old_hiwater, new_used));
}

static void dec_used(struct io_tlb_mem *mem, unsigned int nslots)
{
	atomic_long_sub(nslots, &mem->total_used);
}

#else /* !CONFIG_DEBUG_FS */
static void inc_used_and_hiwater(struct io_tlb_mem *mem, unsigned int nslots)
{
}
static void dec_used(struct io_tlb_mem *mem, unsigned int nslots)
{
}
#endif /* CONFIG_DEBUG_FS */

/*
 * Find a suitable number of contiguous IO TLB slots for this request and
 * allocate a buffer from the given IO TLB memory area.
 */
static int swiotlb_do_find_slots(struct device *dev, int area_index,
		phys_addr_t orig_addr, size_t alloc_size,
		unsigned int alloc_align_mask)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
	struct io_tlb_area *area = mem->areas + area_index;
	unsigned long boundary_mask = dma_get_seg_boundary(dev);
	dma_addr_t tbl_dma_addr =
		phys_to_dma_unencrypted(dev, mem->start) & boundary_mask;
	unsigned long max_slots = get_max_slots(boundary_mask);
	unsigned int iotlb_align_mask =
		dma_get_min_align_mask(dev) | alloc_align_mask;
	unsigned int nslots = nr_slots(alloc_size), stride;
	unsigned int offset = swiotlb_align_offset(dev, orig_addr);
	unsigned int index, slots_checked, count = 0, i;
	unsigned long flags;
	unsigned int slot_base;
	unsigned int slot_index;

	BUG_ON(!nslots);
	BUG_ON(area_index >= mem->nareas);

	/*
	 * For allocations of PAGE_SIZE or larger only look for page aligned
	 * allocations.
	 */
	if (alloc_size >= PAGE_SIZE)
		iotlb_align_mask |= ~PAGE_MASK;
	iotlb_align_mask &= ~(IO_TLB_SIZE - 1);

	/*
	 * For mappings with an alignment requirement don't bother looping to
	 * unaligned slots once we found an aligned one.
	 */
	stride = (iotlb_align_mask >> IO_TLB_SHIFT) + 1;

	spin_lock_irqsave(&area->lock, flags);
	if (unlikely(nslots > mem->area_nslabs - area->used))
		goto not_found;

	slot_base = area_index * mem->area_nslabs;
	index = area->index;

	for (slots_checked = 0; slots_checked < mem->area_nslabs; ) {
		slot_index = slot_base + index;

		if (orig_addr &&
		    (slot_addr(tbl_dma_addr, slot_index) &
		     iotlb_align_mask) != (orig_addr & iotlb_align_mask)) {
			index = wrap_area_index(mem, index + 1);
			slots_checked++;
			continue;
		}

		/*
		 * If we find a slot that indicates we have 'nslots' number of
		 * contiguous buffers, we allocate the buffers from that slot
		 * and mark the entries as '0' indicating unavailable.
		 */
		if (!iommu_is_span_boundary(slot_index, nslots,
					    nr_slots(tbl_dma_addr),
					    max_slots)) {
			if (mem->slots[slot_index].list >= nslots)
				goto found;
		}
		index = wrap_area_index(mem, index + stride);
		slots_checked += stride;
	}

not_found:
	spin_unlock_irqrestore(&area->lock, flags);
	return -1;

found:
	for (i = slot_index; i < slot_index + nslots; i++) {
		mem->slots[i].list = 0;
		mem->slots[i].alloc_size = alloc_size - (offset +
				((i - slot_index) << IO_TLB_SHIFT));
	}
	for (i = slot_index - 1;
	     io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 &&
	     mem->slots[i].list; i--)
		mem->slots[i].list = ++count;

	/*
	 * Update the indices to avoid searching in the next round.
	 */
	area->index = wrap_area_index(mem, index + nslots);
	area->used += nslots;
	spin_unlock_irqrestore(&area->lock, flags);

	inc_used_and_hiwater(mem, nslots);
	return slot_index;
}
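/*
 * Alignment example for the search above (illustrative, assuming 2 KiB
 * slots): with a device min_align_mask of 0xfff and no extra
 * alloc_align_mask, iotlb_align_mask ends up as 0x800, so only slots whose
 * address agrees with orig_addr in bit 11 are acceptable and the stride
 * becomes 2, i.e. every other slot is probed.
 */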
static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
		size_t alloc_size, unsigned int alloc_align_mask)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
	int start = raw_smp_processor_id() & (mem->nareas - 1);
	int i = start, index;

	do {
		index = swiotlb_do_find_slots(dev, i, orig_addr, alloc_size,
					      alloc_align_mask);
		if (index >= 0)
			return index;
		if (++i >= mem->nareas)
			i = 0;
	} while (i != start);

	return -1;
}

#ifdef CONFIG_DEBUG_FS

static unsigned long mem_used(struct io_tlb_mem *mem)
{
	return atomic_long_read(&mem->total_used);
}

#else /* !CONFIG_DEBUG_FS */

static unsigned long mem_used(struct io_tlb_mem *mem)
{
	int i;
	unsigned long used = 0;

	for (i = 0; i < mem->nareas; i++)
		used += mem->areas[i].used;
	return used;
}

#endif /* CONFIG_DEBUG_FS */

phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
		size_t mapping_size, size_t alloc_size,
		unsigned int alloc_align_mask, enum dma_data_direction dir,
		unsigned long attrs)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
	unsigned int offset = swiotlb_align_offset(dev, orig_addr);
	unsigned int i;
	int index;
	phys_addr_t tlb_addr;

	if (!mem || !mem->nslabs) {
		dev_warn_ratelimited(dev,
			"Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");
		return (phys_addr_t)DMA_MAPPING_ERROR;
	}

	if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
		pr_warn_once("Memory encryption is active and system is using DMA bounce buffers\n");

	if (mapping_size > alloc_size) {
		dev_warn_once(dev, "Invalid sizes (mapping: %zd bytes, alloc: %zd bytes)",
			      mapping_size, alloc_size);
		return (phys_addr_t)DMA_MAPPING_ERROR;
	}

	index = swiotlb_find_slots(dev, orig_addr,
				   alloc_size + offset, alloc_align_mask);
	if (index == -1) {
		if (!(attrs & DMA_ATTR_NO_WARN))
			dev_warn_ratelimited(dev,
	"swiotlb buffer is full (sz: %zd bytes), total %lu (slots), used %lu (slots)\n",
				 alloc_size, mem->nslabs, mem_used(mem));
		return (phys_addr_t)DMA_MAPPING_ERROR;
	}

	/*
	 * Save away the mapping from the original address to the DMA address.
	 * This is needed when we sync the memory.  Then we sync the buffer if
	 * needed.
	 */
	for (i = 0; i < nr_slots(alloc_size + offset); i++)
		mem->slots[index + i].orig_addr = slot_addr(orig_addr, i);
	tlb_addr = slot_addr(mem->start, index) + offset;
	/*
	 * When dir == DMA_FROM_DEVICE we could omit the copy from the orig
	 * to the tlb buffer, if we knew for sure the device will
	 * overwrite the entire current content. But we don't. Thus
	 * unconditional bounce may prevent leaking swiotlb content (i.e.
	 * kernel memory) to user-space.
	 */
	swiotlb_bounce(dev, tlb_addr, mapping_size, DMA_TO_DEVICE);
	return tlb_addr;
}
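/*
 * Sizing example for the mapping above (illustrative, assuming 2 KiB
 * slots): bouncing a 5000-byte buffer whose alignment offset is 0 needs
 * nr_slots(5000) = 3 contiguous slots; they record orig_addr,
 * orig_addr + 2 KiB and orig_addr + 4 KiB, and the last slot's alloc_size
 * covers the remaining 904 bytes.
 */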
static void swiotlb_release_slots(struct device *dev, phys_addr_t tlb_addr)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
	unsigned long flags;
	unsigned int offset = swiotlb_align_offset(dev, tlb_addr);
	int index = (tlb_addr - offset - mem->start) >> IO_TLB_SHIFT;
	int nslots = nr_slots(mem->slots[index].alloc_size + offset);
	int aindex = index / mem->area_nslabs;
	struct io_tlb_area *area = &mem->areas[aindex];
	int count, i;

	/*
	 * Return the buffer to the free list by setting the corresponding
	 * entries to indicate the number of contiguous entries available.
	 * While returning the entries to the free list, we merge the entries
	 * with slots below and above the pool being returned.
	 */
	BUG_ON(aindex >= mem->nareas);

	spin_lock_irqsave(&area->lock, flags);
	if (index + nslots < ALIGN(index + 1, IO_TLB_SEGSIZE))
		count = mem->slots[index + nslots].list;
	else
		count = 0;

	/*
	 * Step 1: return the slots to the free list, merging the slots with
	 * succeeding slots
	 */
	for (i = index + nslots - 1; i >= index; i--) {
		mem->slots[i].list = ++count;
		mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
		mem->slots[i].alloc_size = 0;
	}

	/*
	 * Step 2: merge the returned slots with the preceding slots, if
	 * available (non zero)
	 */
	for (i = index - 1;
	     io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 && mem->slots[i].list;
	     i--)
		mem->slots[i].list = ++count;
	area->used -= nslots;
	spin_unlock_irqrestore(&area->lock, flags);

	dec_used(mem, nslots);
}

/*
 * tlb_addr is the physical address of the bounce buffer to unmap.
 */
void swiotlb_tbl_unmap_single(struct device *dev, phys_addr_t tlb_addr,
			      size_t mapping_size, enum dma_data_direction dir,
			      unsigned long attrs)
{
	/*
	 * First, sync the memory before unmapping the entry
	 */
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
	    (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
		swiotlb_bounce(dev, tlb_addr, mapping_size, DMA_FROM_DEVICE);

	swiotlb_release_slots(dev, tlb_addr);
}

void swiotlb_sync_single_for_device(struct device *dev, phys_addr_t tlb_addr,
		size_t size, enum dma_data_direction dir)
{
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
		swiotlb_bounce(dev, tlb_addr, size, DMA_TO_DEVICE);
	else
		BUG_ON(dir != DMA_FROM_DEVICE);
}

void swiotlb_sync_single_for_cpu(struct device *dev, phys_addr_t tlb_addr,
		size_t size, enum dma_data_direction dir)
{
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		swiotlb_bounce(dev, tlb_addr, size, DMA_FROM_DEVICE);
	else
		BUG_ON(dir != DMA_TO_DEVICE);
}
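/*
 * Typical life cycle of a bounce buffer as driven by dma-direct (sketch,
 * not a call site in this file):
 *
 *	tlb = swiotlb_tbl_map_single(dev, paddr, len, len, 0, dir, attrs);
 *	... device performs DMA against the bounce buffer ...
 *	swiotlb_sync_single_for_cpu(dev, tlb, len, dir);	(optional)
 *	swiotlb_tbl_unmap_single(dev, tlb, len, dir, attrs);
 */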
/*
 * Create a swiotlb mapping for the buffer at @paddr, and in case of DMAing
 * to the device copy the data into it as well.
 */
dma_addr_t swiotlb_map(struct device *dev, phys_addr_t paddr, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t swiotlb_addr;
	dma_addr_t dma_addr;

	trace_swiotlb_bounced(dev, phys_to_dma(dev, paddr), size);

	swiotlb_addr = swiotlb_tbl_map_single(dev, paddr, size, size, 0, dir,
			attrs);
	if (swiotlb_addr == (phys_addr_t)DMA_MAPPING_ERROR)
		return DMA_MAPPING_ERROR;

	/* Ensure that the address returned is DMA'ble */
	dma_addr = phys_to_dma_unencrypted(dev, swiotlb_addr);
	if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
		swiotlb_tbl_unmap_single(dev, swiotlb_addr, size, dir,
			attrs | DMA_ATTR_SKIP_CPU_SYNC);
		dev_WARN_ONCE(dev, 1,
			"swiotlb addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
			&dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
		return DMA_MAPPING_ERROR;
	}

	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		arch_sync_dma_for_device(swiotlb_addr, size, dir);
	return dma_addr;
}

size_t swiotlb_max_mapping_size(struct device *dev)
{
	int min_align_mask = dma_get_min_align_mask(dev);
	int min_align = 0;

	/*
	 * swiotlb_find_slots() skips slots according to
	 * min align mask. This affects max mapping size.
	 * Take it into account here.
	 */
	if (min_align_mask)
		min_align = roundup(min_align_mask, IO_TLB_SIZE);

	return ((size_t)IO_TLB_SIZE) * IO_TLB_SEGSIZE - min_align;
}

bool is_swiotlb_active(struct device *dev)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;

	return mem && mem->nslabs;
}
EXPORT_SYMBOL_GPL(is_swiotlb_active);

#ifdef CONFIG_DEBUG_FS

static int io_tlb_used_get(void *data, u64 *val)
{
	struct io_tlb_mem *mem = data;

	*val = mem_used(mem);
	return 0;
}

static int io_tlb_hiwater_get(void *data, u64 *val)
{
	struct io_tlb_mem *mem = data;

	*val = atomic_long_read(&mem->used_hiwater);
	return 0;
}

static int io_tlb_hiwater_set(void *data, u64 val)
{
	struct io_tlb_mem *mem = data;

	/* Only allow setting to zero */
	if (val != 0)
		return -EINVAL;

	atomic_long_set(&mem->used_hiwater, val);
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(fops_io_tlb_used, io_tlb_used_get, NULL, "%llu\n");
DEFINE_DEBUGFS_ATTRIBUTE(fops_io_tlb_hiwater, io_tlb_hiwater_get,
				io_tlb_hiwater_set, "%llu\n");

static void swiotlb_create_debugfs_files(struct io_tlb_mem *mem,
					 const char *dirname)
{
	atomic_long_set(&mem->total_used, 0);
	atomic_long_set(&mem->used_hiwater, 0);

	mem->debugfs = debugfs_create_dir(dirname, io_tlb_default_mem.debugfs);
	if (!mem->nslabs)
		return;

	debugfs_create_ulong("io_tlb_nslabs", 0400, mem->debugfs, &mem->nslabs);
	debugfs_create_file("io_tlb_used", 0400, mem->debugfs, mem,
			&fops_io_tlb_used);
	debugfs_create_file("io_tlb_used_hiwater", 0600, mem->debugfs, mem,
			&fops_io_tlb_hiwater);
}

static int __init swiotlb_create_default_debugfs(void)
{
	swiotlb_create_debugfs_files(&io_tlb_default_mem, "swiotlb");
	return 0;
}

late_initcall(swiotlb_create_default_debugfs);

#else /* !CONFIG_DEBUG_FS */

static inline void swiotlb_create_debugfs_files(struct io_tlb_mem *mem,
						const char *dirname)
{
}

#endif /* CONFIG_DEBUG_FS */
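/*
 * The restricted DMA pool support below is wired up through a
 * "restricted-dma-pool" reserved-memory node.  A minimal device tree sketch
 * (illustrative only; the node name, addresses and sizes are made up):
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		restricted_dma: restricted-dma-pool@50000000 {
 *			compatible = "restricted-dma-pool";
 *			reg = <0x50000000 0x400000>;
 *		};
 *	};
 *
 * A device then references the pool with memory-region = <&restricted_dma>;
 * which ends up calling rmem_swiotlb_device_init() for that device.
 */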
#ifdef CONFIG_DMA_RESTRICTED_POOL

struct page *swiotlb_alloc(struct device *dev, size_t size)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
	phys_addr_t tlb_addr;
	int index;

	if (!mem)
		return NULL;

	index = swiotlb_find_slots(dev, 0, size, 0);
	if (index == -1)
		return NULL;

	tlb_addr = slot_addr(mem->start, index);

	return pfn_to_page(PFN_DOWN(tlb_addr));
}

bool swiotlb_free(struct device *dev, struct page *page, size_t size)
{
	phys_addr_t tlb_addr = page_to_phys(page);

	if (!is_swiotlb_buffer(dev, tlb_addr))
		return false;

	swiotlb_release_slots(dev, tlb_addr);

	return true;
}

static int rmem_swiotlb_device_init(struct reserved_mem *rmem,
				    struct device *dev)
{
	struct io_tlb_mem *mem = rmem->priv;
	unsigned long nslabs = rmem->size >> IO_TLB_SHIFT;

	/* Set the per-device IO TLB area count to one */
	unsigned int nareas = 1;

	if (PageHighMem(pfn_to_page(PHYS_PFN(rmem->base)))) {
		dev_err(dev, "Restricted DMA pool must be accessible within the linear mapping.");
		return -EINVAL;
	}

	/*
	 * Since multiple devices can share the same pool, the private data,
	 * io_tlb_mem struct, will be initialized by the first device attached
	 * to it.
	 */
	if (!mem) {
		mem = kzalloc(sizeof(*mem), GFP_KERNEL);
		if (!mem)
			return -ENOMEM;

		mem->slots = kcalloc(nslabs, sizeof(*mem->slots), GFP_KERNEL);
		if (!mem->slots) {
			kfree(mem);
			return -ENOMEM;
		}

		mem->areas = kcalloc(nareas, sizeof(*mem->areas),
				GFP_KERNEL);
		if (!mem->areas) {
			kfree(mem->slots);
			kfree(mem);
			return -ENOMEM;
		}

		set_memory_decrypted((unsigned long)phys_to_virt(rmem->base),
				     rmem->size >> PAGE_SHIFT);
		swiotlb_init_io_tlb_mem(mem, rmem->base, nslabs, SWIOTLB_FORCE,
					false, nareas);
		mem->for_alloc = true;

		rmem->priv = mem;

		swiotlb_create_debugfs_files(mem, rmem->name);
	}

	dev->dma_io_tlb_mem = mem;

	return 0;
}

static void rmem_swiotlb_device_release(struct reserved_mem *rmem,
					struct device *dev)
{
	dev->dma_io_tlb_mem = &io_tlb_default_mem;
}

static const struct reserved_mem_ops rmem_swiotlb_ops = {
	.device_init = rmem_swiotlb_device_init,
	.device_release = rmem_swiotlb_device_release,
};

static int __init rmem_swiotlb_setup(struct reserved_mem *rmem)
{
	unsigned long node = rmem->fdt_node;

	if (of_get_flat_dt_prop(node, "reusable", NULL) ||
	    of_get_flat_dt_prop(node, "linux,cma-default", NULL) ||
	    of_get_flat_dt_prop(node, "linux,dma-default", NULL) ||
	    of_get_flat_dt_prop(node, "no-map", NULL))
		return -EINVAL;

	rmem->ops = &rmem_swiotlb_ops;
	pr_info("Reserved memory: created restricted DMA pool at %pa, size %ld MiB\n",
		&rmem->base, (unsigned long)rmem->size / SZ_1M);
	return 0;
}

RESERVEDMEM_OF_DECLARE(dma, "restricted-dma-pool", rmem_swiotlb_setup);
#endif /* CONFIG_DMA_RESTRICTED_POOL */