// SPDX-License-Identifier: GPL-2.0-only
/*
 * Dynamic DMA mapping support.
 *
 * This implementation is a fallback for platforms that do not support
 * I/O TLBs (aka DMA address translation hardware).
 * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
 * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
 * Copyright (C) 2000, 2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 03/05/07 davidm	Switch from PCI-DMA to generic device DMA API.
 * 00/12/13 davidm	Rename to swiotlb.c and add mark_clean() to avoid
 *			unnecessary i-cache flushing.
 * 04/07/.. ak		Better overflow handling. Assorted fixes.
 * 05/09/10 linville	Add support for syncing ranges, support syncing for
 *			DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
 * 08/12/11 beckyb	Add highmem support
 */

#define pr_fmt(fmt) "software IO TLB: " fmt

#include <linux/cache.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/swiotlb.h>
#include <linux/pfn.h>
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/scatterlist.h>
#include <linux/mem_encrypt.h>
#include <linux/set_memory.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#endif

#include <asm/io.h>
#include <asm/dma.h>

#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/iommu-helper.h>

#define CREATE_TRACE_POINTS
#include <trace/events/swiotlb.h>

#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))

/*
 * Minimum IO TLB size to bother booting with.  Systems with mainly
 * 64bit capable cards will only lightly use the swiotlb.  If we can't
 * allocate a contiguous 1MB, we're probably in trouble anyway.
 */
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)

#define INVALID_PHYS_ADDR (~(phys_addr_t)0)

enum swiotlb_force swiotlb_force;

struct io_tlb_mem *io_tlb_default_mem;

/*
 * Max segment that we can provide which (if the pages are contiguous) will
 * not be bounced (unless SWIOTLB_FORCE is set).
 */
static unsigned int max_segment;

static unsigned long default_nslabs = IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT;

static int __init
setup_io_tlb_npages(char *str)
{
	if (isdigit(*str)) {
		/* avoid tail segment of size < IO_TLB_SEGSIZE */
		default_nslabs =
			ALIGN(simple_strtoul(str, &str, 0), IO_TLB_SEGSIZE);
	}
	if (*str == ',')
		++str;
	if (!strcmp(str, "force"))
		swiotlb_force = SWIOTLB_FORCE;
	else if (!strcmp(str, "noforce"))
		swiotlb_force = SWIOTLB_NO_FORCE;

	return 0;
}
early_param("swiotlb", setup_io_tlb_npages);

unsigned int swiotlb_max_segment(void)
{
	return io_tlb_default_mem ? max_segment : 0;
}
EXPORT_SYMBOL_GPL(swiotlb_max_segment);

void swiotlb_set_max_segment(unsigned int val)
{
	if (swiotlb_force == SWIOTLB_FORCE)
		max_segment = 1;
	else
		max_segment = rounddown(val, PAGE_SIZE);
}

unsigned long swiotlb_size_or_default(void)
{
	return default_nslabs << IO_TLB_SHIFT;
}

void __init swiotlb_adjust_size(unsigned long size)
{
	/*
	 * If the swiotlb parameter has not been specified, give a chance to
	 * architectures such as those supporting memory encryption to
	 * adjust/expand the SWIOTLB size for their use.
	 */
	if (default_nslabs != IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT)
		return;
	size = ALIGN(size, IO_TLB_SIZE);
	default_nslabs = ALIGN(size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
	pr_info("SWIOTLB bounce buffer size adjusted to %luMB", size >> 20);
}
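/*
 * Illustrative sketch (not taken from any particular architecture): code
 * that must bounce all DMA through decrypted memory could scale the pool
 * early in boot, before swiotlb_init() runs, e.g.:
 *
 *	if (mem_encrypt_active())
 *		swiotlb_adjust_size(memblock_phys_mem_size() / 16);
 *
 * The 1/16 sizing heuristic is an example only; each architecture picks
 * its own, and the call is a no-op once "swiotlb=" has been given.
 */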
void swiotlb_print_info(void)
{
	struct io_tlb_mem *mem = io_tlb_default_mem;

	if (!mem) {
		pr_warn("No low mem\n");
		return;
	}

	pr_info("mapped [mem %pa-%pa] (%luMB)\n", &mem->start, &mem->end,
		(mem->nslabs << IO_TLB_SHIFT) >> 20);
}

static inline unsigned long io_tlb_offset(unsigned long val)
{
	return val & (IO_TLB_SEGSIZE - 1);
}

static inline unsigned long nr_slots(u64 val)
{
	return DIV_ROUND_UP(val, IO_TLB_SIZE);
}
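/*
 * Worked example (assuming the usual IO_TLB_SHIFT of 11, i.e. 2 KiB slots,
 * and IO_TLB_SEGSIZE of 128): io_tlb_offset(130) == 2, the slot's position
 * within its 128-slot segment, and nr_slots(4097) == 3, because a
 * 4097-byte request spans three 2 KiB slots.
 */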
/*
 * Early SWIOTLB allocation may be too early to allow an architecture to
 * perform the desired operations.  This function allows the architecture to
 * call SWIOTLB when the operations are possible.  It needs to be called
 * before the SWIOTLB memory is used.
 */
void __init swiotlb_update_mem_attributes(void)
{
	struct io_tlb_mem *mem = io_tlb_default_mem;
	void *vaddr;
	unsigned long bytes;

	if (!mem || mem->late_alloc)
		return;
	vaddr = phys_to_virt(mem->start);
	bytes = PAGE_ALIGN(mem->nslabs << IO_TLB_SHIFT);
	set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);
	memset(vaddr, 0, bytes);
}

int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
{
	unsigned long bytes = nslabs << IO_TLB_SHIFT, i;
	struct io_tlb_mem *mem;
	size_t alloc_size;

	if (swiotlb_force == SWIOTLB_NO_FORCE)
		return 0;

	/* protect against double initialization */
	if (WARN_ON_ONCE(io_tlb_default_mem))
		return -ENOMEM;

	alloc_size = PAGE_ALIGN(struct_size(mem, slots, nslabs));
	mem = memblock_alloc(alloc_size, PAGE_SIZE);
	if (!mem)
		panic("%s: Failed to allocate %zu bytes align=0x%lx\n",
		      __func__, alloc_size, PAGE_SIZE);
	mem->nslabs = nslabs;
	mem->start = __pa(tlb);
	mem->end = mem->start + bytes;
	mem->index = 0;
	spin_lock_init(&mem->lock);
	for (i = 0; i < mem->nslabs; i++) {
		mem->slots[i].list = IO_TLB_SEGSIZE - io_tlb_offset(i);
		mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
		mem->slots[i].alloc_size = 0;
	}

	io_tlb_default_mem = mem;
	if (verbose)
		swiotlb_print_info();
	swiotlb_set_max_segment(mem->nslabs << IO_TLB_SHIFT);
	return 0;
}

/*
 * Statically reserve bounce buffer space and initialize bounce buffer data
 * structures for the software IO TLB used to implement the DMA API.
 */
void __init
swiotlb_init(int verbose)
{
	size_t bytes = PAGE_ALIGN(default_nslabs << IO_TLB_SHIFT);
	void *tlb;

	if (swiotlb_force == SWIOTLB_NO_FORCE)
		return;

	/* Get IO TLB memory from the low pages */
	tlb = memblock_alloc_low(bytes, PAGE_SIZE);
	if (!tlb)
		goto fail;
	if (swiotlb_init_with_tbl(tlb, default_nslabs, verbose))
		goto fail_free_mem;
	return;

fail_free_mem:
	memblock_free_early(__pa(tlb), bytes);
fail:
	pr_warn("Cannot allocate buffer");
}

/*
 * Systems with larger DMA zones (those that don't support ISA) can
 * initialize the swiotlb later using the page allocator if needed.
 * This should be just like above, but with some error catching.
 */
int
swiotlb_late_init_with_default_size(size_t default_size)
{
	unsigned long nslabs =
		ALIGN(default_size >> IO_TLB_SHIFT, IO_TLB_SEGSIZE);
	unsigned long bytes;
	unsigned char *vstart = NULL;
	unsigned int order;
	int rc = 0;

	if (swiotlb_force == SWIOTLB_NO_FORCE)
		return 0;

	/*
	 * Get IO TLB memory from the low pages
	 */
	order = get_order(nslabs << IO_TLB_SHIFT);
	nslabs = SLABS_PER_PAGE << order;
	bytes = nslabs << IO_TLB_SHIFT;

	while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
		vstart = (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
						  order);
		if (vstart)
			break;
		order--;
	}

	if (!vstart)
		return -ENOMEM;

	if (order != get_order(bytes)) {
		pr_warn("only able to allocate %ld MB\n",
			(PAGE_SIZE << order) >> 20);
		nslabs = SLABS_PER_PAGE << order;
	}
	rc = swiotlb_late_init_with_tbl(vstart, nslabs);
	if (rc)
		free_pages((unsigned long)vstart, order);

	return rc;
}

int
swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
{
	unsigned long bytes = nslabs << IO_TLB_SHIFT, i;
	struct io_tlb_mem *mem;

	if (swiotlb_force == SWIOTLB_NO_FORCE)
		return 0;

	/* protect against double initialization */
	if (WARN_ON_ONCE(io_tlb_default_mem))
		return -ENOMEM;

	mem = (void *)__get_free_pages(GFP_KERNEL,
		get_order(struct_size(mem, slots, nslabs)));
	if (!mem)
		return -ENOMEM;

	mem->nslabs = nslabs;
	mem->start = virt_to_phys(tlb);
	mem->end = mem->start + bytes;
	mem->index = 0;
	mem->late_alloc = 1;
	spin_lock_init(&mem->lock);
	for (i = 0; i < mem->nslabs; i++) {
		mem->slots[i].list = IO_TLB_SEGSIZE - io_tlb_offset(i);
		mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
		mem->slots[i].alloc_size = 0;
	}

	set_memory_decrypted((unsigned long)tlb, bytes >> PAGE_SHIFT);
	memset(tlb, 0, bytes);

	io_tlb_default_mem = mem;
	swiotlb_print_info();
	swiotlb_set_max_segment(mem->nslabs << IO_TLB_SHIFT);
	return 0;
}
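/*
 * The ->list initialization above encodes a per-segment free list: with the
 * usual IO_TLB_SEGSIZE of 128, slot 0 of each segment starts with
 * ->list == 128, slot 1 with 127, ..., slot 127 with 1.  Each value is the
 * number of free slots from that slot to the end of its segment, which is
 * what find_slots() later compares against the requested slot count.
 */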
void __init swiotlb_exit(void)
{
	struct io_tlb_mem *mem = io_tlb_default_mem;
	size_t size;

	if (!mem)
		return;

	size = struct_size(mem, slots, mem->nslabs);
	if (mem->late_alloc)
		free_pages((unsigned long)mem, get_order(size));
	else
		memblock_free_late(__pa(mem), PAGE_ALIGN(size));
	io_tlb_default_mem = NULL;
}

/*
 * Return the offset into an IO TLB slot required to keep the device happy.
 */
static unsigned int swiotlb_align_offset(struct device *dev, u64 addr)
{
	return addr & dma_get_min_align_mask(dev) & (IO_TLB_SIZE - 1);
}
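/*
 * Example (illustrative): a driver that calls
 * dma_set_min_align_mask(dev, 4096 - 1) asks that the bounced copy keep the
 * same offset within a 4 KiB page as the original buffer.  For such a device
 * an orig_addr of 0x12345abc gives swiotlb_align_offset() == 0x2bc with the
 * usual 2 KiB slots, and the mapping code below places the bounce buffer at
 * that offset into its first slot.
 */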
/*
 * Bounce: copy the swiotlb buffer from or back to the original DMA location.
 */
static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size,
			   enum dma_data_direction dir)
{
	struct io_tlb_mem *mem = io_tlb_default_mem;
	int index = (tlb_addr - mem->start) >> IO_TLB_SHIFT;
	phys_addr_t orig_addr = mem->slots[index].orig_addr;
	size_t alloc_size = mem->slots[index].alloc_size;
	unsigned long pfn = PFN_DOWN(orig_addr);
	unsigned char *vaddr = phys_to_virt(tlb_addr);
	unsigned int tlb_offset;

	if (orig_addr == INVALID_PHYS_ADDR)
		return;

	tlb_offset = (tlb_addr & (IO_TLB_SIZE - 1)) -
		     swiotlb_align_offset(dev, orig_addr);

	orig_addr += tlb_offset;
	alloc_size -= tlb_offset;

	if (size > alloc_size) {
		dev_WARN_ONCE(dev, 1,
			"Buffer overflow detected. Allocation size: %zu. Mapping size: %zu.\n",
			alloc_size, size);
		size = alloc_size;
	}

	if (PageHighMem(pfn_to_page(pfn))) {
		/* The buffer does not have a mapping.  Map it in and copy */
		unsigned int offset = orig_addr & ~PAGE_MASK;
		char *buffer;
		unsigned int sz = 0;
		unsigned long flags;

		while (size) {
			sz = min_t(size_t, PAGE_SIZE - offset, size);

			local_irq_save(flags);
			buffer = kmap_atomic(pfn_to_page(pfn));
			if (dir == DMA_TO_DEVICE)
				memcpy(vaddr, buffer + offset, sz);
			else
				memcpy(buffer + offset, vaddr, sz);
			kunmap_atomic(buffer);
			local_irq_restore(flags);

			size -= sz;
			pfn++;
			vaddr += sz;
			offset = 0;
		}
	} else if (dir == DMA_TO_DEVICE) {
		memcpy(vaddr, phys_to_virt(orig_addr), size);
	} else {
		memcpy(phys_to_virt(orig_addr), vaddr, size);
	}
}

#define slot_addr(start, idx)	((start) + ((idx) << IO_TLB_SHIFT))

/*
 * Carefully handle integer overflow which can occur when boundary_mask == ~0UL.
 */
static inline unsigned long get_max_slots(unsigned long boundary_mask)
{
	if (boundary_mask == ~0UL)
		return 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);
	return nr_slots(boundary_mask + 1);
}

static unsigned int wrap_index(struct io_tlb_mem *mem, unsigned int index)
{
	if (index >= mem->nslabs)
		return 0;
	return index;
}

/*
 * Find a suitable number of contiguous IO TLB slots that will fit this
 * request and allocate a buffer from that IO TLB pool.
 */
static int find_slots(struct device *dev, phys_addr_t orig_addr,
		      size_t alloc_size)
{
	struct io_tlb_mem *mem = io_tlb_default_mem;
	unsigned long boundary_mask = dma_get_seg_boundary(dev);
	dma_addr_t tbl_dma_addr =
		phys_to_dma_unencrypted(dev, mem->start) & boundary_mask;
	unsigned long max_slots = get_max_slots(boundary_mask);
	unsigned int iotlb_align_mask =
		dma_get_min_align_mask(dev) & ~(IO_TLB_SIZE - 1);
	unsigned int nslots = nr_slots(alloc_size), stride;
	unsigned int index, wrap, count = 0, i;
	unsigned long flags;

	BUG_ON(!nslots);

	/*
	 * For mappings with an alignment requirement don't bother looping to
	 * unaligned slots once we have found an aligned one.  For allocations
	 * of PAGE_SIZE or larger only look for page aligned allocations.
	 */
	stride = (iotlb_align_mask >> IO_TLB_SHIFT) + 1;
	if (alloc_size >= PAGE_SIZE)
		stride = max(stride, stride << (PAGE_SHIFT - IO_TLB_SHIFT));

	spin_lock_irqsave(&mem->lock, flags);
	if (unlikely(nslots > mem->nslabs - mem->used))
		goto not_found;

	index = wrap = wrap_index(mem, ALIGN(mem->index, stride));
	do {
		if ((slot_addr(tbl_dma_addr, index) & iotlb_align_mask) !=
		    (orig_addr & iotlb_align_mask)) {
			index = wrap_index(mem, index + 1);
			continue;
		}

		/*
		 * If we find a slot that indicates we have 'nslots' number of
		 * contiguous buffers, we allocate the buffers from that slot
		 * and mark the entries as '0' indicating unavailable.
		 */
		if (!iommu_is_span_boundary(index, nslots,
					    nr_slots(tbl_dma_addr),
					    max_slots)) {
			if (mem->slots[index].list >= nslots)
				goto found;
		}
		index = wrap_index(mem, index + stride);
	} while (index != wrap);

not_found:
	spin_unlock_irqrestore(&mem->lock, flags);
	return -1;

found:
	for (i = index; i < index + nslots; i++)
		mem->slots[i].list = 0;
	for (i = index - 1;
	     io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 &&
	     mem->slots[i].list; i--)
		mem->slots[i].list = ++count;

	/*
	 * Update the indices to avoid searching in the next round.
	 */
	if (index + nslots < mem->nslabs)
		mem->index = index + nslots;
	else
		mem->index = 0;
	mem->used += nslots;

	spin_unlock_irqrestore(&mem->lock, flags);
	return index;
}

phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
		size_t mapping_size, size_t alloc_size,
		enum dma_data_direction dir, unsigned long attrs)
{
	struct io_tlb_mem *mem = io_tlb_default_mem;
	unsigned int offset = swiotlb_align_offset(dev, orig_addr);
	unsigned int i;
	int index;
	phys_addr_t tlb_addr;

	if (!mem)
		panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");

	if (mem_encrypt_active())
		pr_warn_once("Memory encryption is active and system is using DMA bounce buffers\n");

	if (mapping_size > alloc_size) {
		dev_warn_once(dev, "Invalid sizes (mapping: %zd bytes, alloc: %zd bytes)",
			      mapping_size, alloc_size);
		return (phys_addr_t)DMA_MAPPING_ERROR;
	}

	index = find_slots(dev, orig_addr, alloc_size + offset);
	if (index == -1) {
		if (!(attrs & DMA_ATTR_NO_WARN))
			dev_warn_ratelimited(dev,
				"swiotlb buffer is full (sz: %zd bytes), total %lu (slots), used %lu (slots)\n",
				alloc_size, mem->nslabs, mem->used);
		return (phys_addr_t)DMA_MAPPING_ERROR;
	}

	/*
	 * Save away the mapping from the original address to the DMA address.
	 * This is needed when we sync the memory.  Then we sync the buffer if
	 * needed.
	 */
	for (i = 0; i < nr_slots(alloc_size + offset); i++) {
		mem->slots[index + i].orig_addr = slot_addr(orig_addr, i);
		mem->slots[index + i].alloc_size =
			alloc_size - (i << IO_TLB_SHIFT);
	}
	tlb_addr = slot_addr(mem->start, index) + offset;
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
	    (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
		swiotlb_bounce(dev, tlb_addr, mapping_size, DMA_TO_DEVICE);
	return tlb_addr;
}
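/*
 * Typical use (a sketch, simplified from the dma-direct callers): map, let
 * the device DMA into the bounce slot, then copy back and release on unmap:
 *
 *	phys_addr_t tlb = swiotlb_tbl_map_single(dev, paddr, len, len,
 *						 DMA_FROM_DEVICE, 0);
 *	if (tlb != (phys_addr_t)DMA_MAPPING_ERROR) {
 *		... device writes into the bounce buffer ...
 *		swiotlb_tbl_unmap_single(dev, tlb, len, DMA_FROM_DEVICE, 0);
 *	}
 *
 * Real callers also derive the dma_addr_t with phys_to_dma_unencrypted()
 * and check dma_capable(), as swiotlb_map() below does.
 */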
/*
 * tlb_addr is the physical address of the bounce buffer to unmap.
 */
void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
			      size_t mapping_size, enum dma_data_direction dir,
			      unsigned long attrs)
{
	struct io_tlb_mem *mem = io_tlb_default_mem;
	unsigned long flags;
	unsigned int offset = swiotlb_align_offset(hwdev, tlb_addr);
	int index = (tlb_addr - offset - mem->start) >> IO_TLB_SHIFT;
	int nslots = nr_slots(mem->slots[index].alloc_size + offset);
	int count, i;

	/*
	 * First, sync the memory before unmapping the entry
	 */
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
	    (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
		swiotlb_bounce(hwdev, tlb_addr, mapping_size, DMA_FROM_DEVICE);

	/*
	 * Return the buffer to the free list by setting the corresponding
	 * entries to indicate the number of contiguous entries available.
	 * While returning the entries to the free list, we merge the entries
	 * with slots below and above the pool being returned.
	 */
	spin_lock_irqsave(&mem->lock, flags);
	if (index + nslots < ALIGN(index + 1, IO_TLB_SEGSIZE))
		count = mem->slots[index + nslots].list;
	else
		count = 0;

	/*
	 * Step 1: return the slots to the free list, merging the slots with
	 * succeeding slots
	 */
	for (i = index + nslots - 1; i >= index; i--) {
		mem->slots[i].list = ++count;
		mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
		mem->slots[i].alloc_size = 0;
	}

	/*
	 * Step 2: merge the returned slots with the preceding slots, if
	 * available (non-zero)
	 */
	for (i = index - 1;
	     io_tlb_offset(i) != IO_TLB_SEGSIZE - 1 && mem->slots[i].list;
	     i--)
		mem->slots[i].list = ++count;
	mem->used -= nslots;
	spin_unlock_irqrestore(&mem->lock, flags);
}

void swiotlb_sync_single_for_device(struct device *dev, phys_addr_t tlb_addr,
		size_t size, enum dma_data_direction dir)
{
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
		swiotlb_bounce(dev, tlb_addr, size, DMA_TO_DEVICE);
	else
		BUG_ON(dir != DMA_FROM_DEVICE);
}

void swiotlb_sync_single_for_cpu(struct device *dev, phys_addr_t tlb_addr,
		size_t size, enum dma_data_direction dir)
{
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		swiotlb_bounce(dev, tlb_addr, size, DMA_FROM_DEVICE);
	else
		BUG_ON(dir != DMA_TO_DEVICE);
}
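/*
 * The two sync helpers above are reached from dma_sync_single_for_device()
 * and dma_sync_single_for_cpu() when the dma-direct code sees that the
 * address falls inside the bounce pool; a driver that reuses one mapping
 * for several transfers relies on them to re-bounce the data each time.
 */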
/*
 * Create a swiotlb mapping for the buffer at @paddr, and in case of DMAing
 * to the device copy the data into it as well.
 */
dma_addr_t swiotlb_map(struct device *dev, phys_addr_t paddr, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t swiotlb_addr;
	dma_addr_t dma_addr;

	trace_swiotlb_bounced(dev, phys_to_dma(dev, paddr), size,
			      swiotlb_force);

	swiotlb_addr = swiotlb_tbl_map_single(dev, paddr, size, size, dir,
			attrs);
	if (swiotlb_addr == (phys_addr_t)DMA_MAPPING_ERROR)
		return DMA_MAPPING_ERROR;

	/* Ensure that the address returned is DMA'ble */
	dma_addr = phys_to_dma_unencrypted(dev, swiotlb_addr);
	if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
		swiotlb_tbl_unmap_single(dev, swiotlb_addr, size, dir,
			attrs | DMA_ATTR_SKIP_CPU_SYNC);
		dev_WARN_ONCE(dev, 1,
			"swiotlb addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
			&dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
		return DMA_MAPPING_ERROR;
	}

	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		arch_sync_dma_for_device(swiotlb_addr, size, dir);
	return dma_addr;
}

size_t swiotlb_max_mapping_size(struct device *dev)
{
	return ((size_t)IO_TLB_SIZE) * IO_TLB_SEGSIZE;
}

bool is_swiotlb_active(void)
{
	return io_tlb_default_mem != NULL;
}
EXPORT_SYMBOL_GPL(is_swiotlb_active);

#ifdef CONFIG_DEBUG_FS

static int __init swiotlb_create_debugfs(void)
{
	struct io_tlb_mem *mem = io_tlb_default_mem;

	if (!mem)
		return 0;
	mem->debugfs = debugfs_create_dir("swiotlb", NULL);
	debugfs_create_ulong("io_tlb_nslabs", 0400, mem->debugfs, &mem->nslabs);
	debugfs_create_ulong("io_tlb_used", 0400, mem->debugfs, &mem->used);
	return 0;
}

late_initcall(swiotlb_create_debugfs);

#endif
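/*
 * With CONFIG_DEBUG_FS the two counters above appear under
 * /sys/kernel/debug/swiotlb/, which makes it easy to spot a pool that is
 * running out of slots.  Note also that swiotlb_max_mapping_size() caps a
 * single mapping at IO_TLB_SIZE * IO_TLB_SEGSIZE bytes, i.e. 256 KiB with
 * the usual 2 KiB slots and 128-slot segments.
 */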