// SPDX-License-Identifier: GPL-2.0-only
/*
 * A fairly generic DMA-API to IOMMU-API glue layer.
 *
 * Copyright (C) 2014-2015 ARM Ltd.
 *
 * based in part on arch/arm/mm/dma-mapping.c:
 * Copyright (C) 2000-2004 Russell King
 */

#include <linux/acpi_iort.h>
#include <linux/device.h>
#include <linux/dma-map-ops.h>
#include <linux/dma-iommu.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/swiotlb.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
#include <linux/crash_dump.h>
#include <linux/dma-direct.h>

struct iommu_dma_msi_page {
	struct list_head	list;
	dma_addr_t		iova;
	phys_addr_t		phys;
};

enum iommu_dma_cookie_type {
	IOMMU_DMA_IOVA_COOKIE,
	IOMMU_DMA_MSI_COOKIE,
};

struct iommu_dma_cookie {
	enum iommu_dma_cookie_type	type;
	union {
		/* Full allocator for IOMMU_DMA_IOVA_COOKIE */
		struct iova_domain	iovad;
		/* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
		dma_addr_t		msi_iova;
	};
	struct list_head		msi_page_list;

	/* Domain for flush queue callback; NULL if flush queue not in use */
	struct iommu_domain		*fq_domain;
};

static DEFINE_STATIC_KEY_FALSE(iommu_deferred_attach_enabled);

void iommu_dma_free_cpu_cached_iovas(unsigned int cpu,
		struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;

	free_cpu_cached_iovas(cpu, iovad);
}

static void iommu_dma_entry_dtor(unsigned long data)
{
	struct page *freelist = (struct page *)data;

	while (freelist) {
		unsigned long p = (unsigned long)page_address(freelist);

		freelist = freelist->freelist;
		free_page(p);
	}
}

static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
{
	if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
		return cookie->iovad.granule;
	return PAGE_SIZE;
}

static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
{
	struct iommu_dma_cookie *cookie;

	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
	if (cookie) {
		INIT_LIST_HEAD(&cookie->msi_page_list);
		cookie->type = type;
	}
	return cookie;
}

/**
 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
 * @domain: IOMMU domain to prepare for DMA-API usage
 *
 * IOMMU drivers should normally call this from their domain_alloc
 * callback when domain->type == IOMMU_DOMAIN_DMA.
 */
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
	if (domain->iova_cookie)
		return -EEXIST;

	domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
	if (!domain->iova_cookie)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(iommu_get_dma_cookie);

/**
 * iommu_get_msi_cookie - Acquire just MSI remapping resources
 * @domain: IOMMU domain to prepare
 * @base: Start address of IOVA region for MSI mappings
 *
 * Users who manage their own IOVA allocation and do not want DMA API support,
 * but would still like to take advantage of automatic MSI remapping, can use
 * this to initialise their own domain appropriately. Users should reserve a
 * contiguous IOVA region, starting at @base, large enough to accommodate the
 * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
 * used by the devices attached to @domain.
 */
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
	struct iommu_dma_cookie *cookie;

	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;

	if (domain->iova_cookie)
		return -EEXIST;

	cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
	if (!cookie)
		return -ENOMEM;

	cookie->msi_iova = base;
	domain->iova_cookie = cookie;
	return 0;
}
EXPORT_SYMBOL(iommu_get_msi_cookie);

/**
 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
 *          iommu_get_msi_cookie()
 *
 * IOMMU drivers should normally call this from their domain_free callback.
 */
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi, *tmp;

	if (!cookie)
		return;

	if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule)
		put_iova_domain(&cookie->iovad);

	list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
		list_del(&msi->list);
		kfree(msi);
	}
	kfree(cookie);
	domain->iova_cookie = NULL;
}
EXPORT_SYMBOL(iommu_put_dma_cookie);

/**
 * iommu_dma_get_resv_regions - Reserved region driver helper
 * @dev: Device from iommu_get_resv_regions()
 * @list: Reserved region list from iommu_get_resv_regions()
 *
 * IOMMU drivers can use this to implement their .get_resv_regions callback
 * for general non-IOMMU-specific reservations. Currently, this covers GICv3
 * ITS region reservation on ACPI based ARM platforms that may require HW MSI
 * reservation.
 */
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{
	if (!is_of_node(dev_iommu_fwspec_get(dev)->iommu_fwnode))
		iort_iommu_msi_get_resv_regions(dev, list);
}
EXPORT_SYMBOL(iommu_dma_get_resv_regions);

static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
		phys_addr_t start, phys_addr_t end)
{
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_dma_msi_page *msi_page;
	int i, num_pages;

	start -= iova_offset(iovad, start);
	num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);

	for (i = 0; i < num_pages; i++) {
		msi_page = kmalloc(sizeof(*msi_page), GFP_KERNEL);
		if (!msi_page)
			return -ENOMEM;

		msi_page->phys = start;
		msi_page->iova = start;
		INIT_LIST_HEAD(&msi_page->list);
		list_add(&msi_page->list, &cookie->msi_page_list);
		start += iovad->granule;
	}

	return 0;
}

static int iova_reserve_pci_windows(struct pci_dev *dev,
		struct iova_domain *iovad)
{
	struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
	struct resource_entry *window;
	unsigned long lo, hi;
	phys_addr_t start = 0, end;

	resource_list_for_each_entry(window, &bridge->windows) {
		if (resource_type(window->res) != IORESOURCE_MEM)
			continue;

		lo = iova_pfn(iovad, window->res->start - window->offset);
		hi = iova_pfn(iovad, window->res->end - window->offset);
		reserve_iova(iovad, lo, hi);
	}

	/* Get reserved DMA windows from host bridge */
	resource_list_for_each_entry(window, &bridge->dma_ranges) {
		end = window->res->start - window->offset;
resv_iova:
		if (end > start) {
			lo = iova_pfn(iovad, start);
			hi = iova_pfn(iovad, end);
			reserve_iova(iovad, lo, hi);
		} else {
			/* dma_ranges list should be sorted */
			dev_err(&dev->dev, "Failed to reserve IOVA\n");
			return -EINVAL;
		}

		start = window->res->end - window->offset + 1;
		/* If window is last entry */
		if (window->node.next == &bridge->dma_ranges &&
		    end != ~(phys_addr_t)0) {
			end = ~(phys_addr_t)0;
			goto resv_iova;
		}
	}

	return 0;
}

static int iova_reserve_iommu_regions(struct device *dev,
		struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_resv_region *region;
	LIST_HEAD(resv_regions);
	int ret = 0;

	if (dev_is_pci(dev)) {
		ret = iova_reserve_pci_windows(to_pci_dev(dev), iovad);
		if (ret)
			return ret;
	}

	iommu_get_resv_regions(dev, &resv_regions);
	list_for_each_entry(region, &resv_regions, list) {
		unsigned long lo, hi;

		/* We ARE the software that manages these! */
		if (region->type == IOMMU_RESV_SW_MSI)
			continue;

		lo = iova_pfn(iovad, region->start);
		hi = iova_pfn(iovad, region->start + region->length - 1);
		reserve_iova(iovad, lo, hi);

		if (region->type == IOMMU_RESV_MSI)
			ret = cookie_init_hw_msi_region(cookie, region->start,
					region->start + region->length);
		if (ret)
			break;
	}
	iommu_put_resv_regions(dev, &resv_regions);

	return ret;
}

static void iommu_dma_flush_iotlb_all(struct iova_domain *iovad)
{
	struct iommu_dma_cookie *cookie;
	struct iommu_domain *domain;

	cookie = container_of(iovad, struct iommu_dma_cookie, iovad);
	domain = cookie->fq_domain;
	/*
	 * The IOMMU driver supporting DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE
	 * implies that ops->flush_iotlb_all must be non-NULL.
	 */
	domain->ops->flush_iotlb_all(domain);
}

/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 * @base: IOVA at which the mappable address space starts
 * @size: Size of IOVA space
 * @dev: Device the domain is being initialised for
 *
 * @base and @size should be exact multiples of IOMMU page granularity to
 * avoid rounding surprises. If necessary, we reserve the page at address 0
 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 * any change which could make prior IOVAs invalid will fail.
 */
static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
		u64 size, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	unsigned long order, base_pfn;
	struct iova_domain *iovad;
	int attr;

	if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
		return -EINVAL;

	iovad = &cookie->iovad;

	/* Use the smallest supported page size for IOVA granularity */
	order = __ffs(domain->pgsize_bitmap);
	base_pfn = max_t(unsigned long, 1, base >> order);

	/* Check the domain allows at least some access to the device... */
	if (domain->geometry.force_aperture) {
		if (base > domain->geometry.aperture_end ||
		    base + size <= domain->geometry.aperture_start) {
			pr_warn("specified DMA range outside IOMMU capability\n");
			return -EFAULT;
		}
		/* ...then finally give it a kicking to make sure it fits */
		base_pfn = max_t(unsigned long, base_pfn,
				domain->geometry.aperture_start >> order);
	}

	/* start_pfn is always nonzero for an already-initialised domain */
	if (iovad->start_pfn) {
		if (1UL << order != iovad->granule ||
		    base_pfn != iovad->start_pfn) {
			pr_warn("Incompatible range for DMA domain\n");
			return -EFAULT;
		}

		return 0;
	}

	init_iova_domain(iovad, 1UL << order, base_pfn);

	if (!cookie->fq_domain && !iommu_domain_get_attr(domain,
			DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr) && attr) {
		if (init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all,
					  iommu_dma_entry_dtor))
			pr_warn("iova flush queue initialization failed\n");
		else
			cookie->fq_domain = domain;
	}

	if (!dev)
		return 0;

	return iova_reserve_iommu_regions(dev, domain);
}

/**
 * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
 *                    page flags.
 * @dir: Direction of DMA transfer
 * @coherent: Is the DMA master cache-coherent?
 * @attrs: DMA attributes for the mapping
 *
 * Return: corresponding IOMMU API page protection flags
 */
static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
		unsigned long attrs)
{
	int prot = coherent ? IOMMU_CACHE : 0;

	if (attrs & DMA_ATTR_PRIVILEGED)
		prot |= IOMMU_PRIV;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;
	default:
		return 0;
	}
}

static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
		size_t size, u64 dma_limit, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long shift, iova_len, iova = 0;

	if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
		cookie->msi_iova += size;
		return cookie->msi_iova - size;
	}

	shift = iova_shift(iovad);
	iova_len = size >> shift;
	/*
	 * Freeing non-power-of-two-sized allocations back into the IOVA caches
	 * will come back to bite us badly, so we have to waste a bit of space
	 * rounding up anything cacheable to make sure that can't happen. The
	 * order of the unadjusted size will still match upon freeing.
	 */
	if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
		iova_len = roundup_pow_of_two(iova_len);

	dma_limit = min_not_zero(dma_limit, dev->bus_dma_limit);

	if (domain->geometry.force_aperture)
		dma_limit = min(dma_limit, (u64)domain->geometry.aperture_end);

	/* Try to get PCI devices a SAC address */
	if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
		iova = alloc_iova_fast(iovad, iova_len,
				       DMA_BIT_MASK(32) >> shift, false);

	if (!iova)
		iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
				       true);

	return (dma_addr_t)iova << shift;
}

static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
		dma_addr_t iova, size_t size, struct page *freelist)
{
	struct iova_domain *iovad = &cookie->iovad;

	/* The MSI case is only ever cleaning up its most recent allocation */
	if (cookie->type == IOMMU_DMA_MSI_COOKIE)
		cookie->msi_iova -= size;
	else if (cookie->fq_domain)	/* non-strict mode */
		queue_iova(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad),
				(unsigned long)freelist);
	else
		free_iova_fast(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad));
}

static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
		size_t size)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, dma_addr);
	struct iommu_iotlb_gather iotlb_gather;
	size_t unmapped;

	dma_addr -= iova_off;
	size = iova_align(iovad, size + iova_off);
	iommu_iotlb_gather_init(&iotlb_gather);

	unmapped = iommu_unmap_fast(domain, dma_addr, size, &iotlb_gather);
	WARN_ON(unmapped != size);

	if (!cookie->fq_domain)
		iommu_iotlb_sync(domain, &iotlb_gather);
	iommu_dma_free_iova(cookie, dma_addr, size, iotlb_gather.freelist);
}

static void __iommu_dma_unmap_swiotlb(struct device *dev, dma_addr_t dma_addr,
		size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	phys_addr_t phys;

	phys = iommu_iova_to_phys(domain, dma_addr);
	if (WARN_ON(!phys))
		return;

	__iommu_dma_unmap(dev, dma_addr, size);

	if (unlikely(is_swiotlb_buffer(phys)))
		swiotlb_tbl_unmap_single(dev, phys, size,
				iova_align(iovad, size), dir, attrs);
}

static bool dev_is_untrusted(struct device *dev)
{
	return dev_is_pci(dev) && to_pci_dev(dev)->untrusted;
}

static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
		size_t size, int prot, u64 dma_mask)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, phys);
	dma_addr_t iova;

	if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
	    iommu_deferred_attach(dev, domain))
		return DMA_MAPPING_ERROR;

	size = iova_align(iovad, size + iova_off);

	iova = iommu_dma_alloc_iova(domain, size, dma_mask, dev);
	if (!iova)
		return DMA_MAPPING_ERROR;

	if (iommu_map_atomic(domain, iova, phys - iova_off, size, prot)) {
		iommu_dma_free_iova(cookie, iova, size, NULL);
		return DMA_MAPPING_ERROR;
	}
	return iova + iova_off;
}

static dma_addr_t __iommu_dma_map_swiotlb(struct device *dev, phys_addr_t phys,
		size_t org_size, dma_addr_t dma_mask, bool coherent,
		enum dma_data_direction dir, unsigned long attrs)
{
	int prot = dma_info_to_prot(dir, coherent, attrs);
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t aligned_size = org_size;
	void *padding_start;
	size_t padding_size;
	dma_addr_t iova;

	/*
	 * If both the physical buffer start address and size are
	 * page aligned, we don't need to use a bounce page.
	 */
	if (IS_ENABLED(CONFIG_SWIOTLB) && dev_is_untrusted(dev) &&
	    iova_offset(iovad, phys | org_size)) {
		aligned_size = iova_align(iovad, org_size);
		phys = swiotlb_tbl_map_single(dev, phys, org_size,
					      aligned_size, dir, attrs);

		if (phys == DMA_MAPPING_ERROR)
			return DMA_MAPPING_ERROR;

		/* Cleanup the padding area. */
		padding_start = phys_to_virt(phys);
		padding_size = aligned_size;

		if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
		    (dir == DMA_TO_DEVICE ||
		     dir == DMA_BIDIRECTIONAL)) {
			padding_start += org_size;
			padding_size -= org_size;
		}

		memset(padding_start, 0, padding_size);
	}

	iova = __iommu_dma_map(dev, phys, aligned_size, prot, dma_mask);
	if ((iova == DMA_MAPPING_ERROR) && is_swiotlb_buffer(phys))
		swiotlb_tbl_unmap_single(dev, phys, org_size,
				aligned_size, dir, attrs);

	return iova;
}

static void __iommu_dma_free_pages(struct page **pages, int count)
{
	while (count--)
		__free_page(pages[count]);
	kvfree(pages);
}

static struct page **__iommu_dma_alloc_pages(struct device *dev,
		unsigned int count, unsigned long order_mask, gfp_t gfp)
{
	struct page **pages;
	unsigned int i = 0, nid = dev_to_node(dev);

	order_mask &= (2U << MAX_ORDER) - 1;
	if (!order_mask)
		return NULL;

	pages = kvzalloc(count * sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;

	/* IOMMU can map any pages, so himem can also be used here */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	/* It makes no sense to muck about with huge pages */
	gfp &= ~__GFP_COMP;

	while (count) {
		struct page *page = NULL;
		unsigned int order_size;

		/*
		 * Higher-order allocations are a convenience rather
		 * than a necessity, hence using __GFP_NORETRY until
		 * falling back to minimum-order allocations.
		 */
		for (order_mask &= (2U << __fls(count)) - 1;
		     order_mask; order_mask &= ~order_size) {
			unsigned int order = __fls(order_mask);
			gfp_t alloc_flags = gfp;

			order_size = 1U << order;
			if (order_mask > order_size)
				alloc_flags |= __GFP_NORETRY;
			page = alloc_pages_node(nid, alloc_flags, order);
			if (!page)
				continue;
			if (order)
				split_page(page, order);
			break;
		}
		if (!page) {
			__iommu_dma_free_pages(pages, i);
			return NULL;
		}
		count -= order_size;
		while (order_size--)
			pages[i++] = page++;
	}
	return pages;
}

/**
 * iommu_dma_alloc_remap - Allocate and map a buffer contiguous in IOVA space
 * @dev: Device to allocate memory for. Must be a real device
 *	 attached to an iommu_dma_domain
 * @size: Size of buffer in bytes
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 * @prot: pgprot_t to use for the remapped mapping
 * @attrs: DMA attributes for this allocation
 *
 * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
 * but an IOMMU which supports smaller pages might not map the whole thing.
 *
 * Return: Mapped virtual address, or NULL on failure.
 */
static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, pgprot_t prot,
		unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	bool coherent = dev_is_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
	struct page **pages;
	struct sg_table sgt;
	dma_addr_t iova;
	void *vaddr;

	*dma_handle = DMA_MAPPING_ERROR;

	if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
	    iommu_deferred_attach(dev, domain))
		return NULL;

	min_size = alloc_sizes & -alloc_sizes;
	if (min_size < PAGE_SIZE) {
		min_size = PAGE_SIZE;
		alloc_sizes |= PAGE_SIZE;
	} else {
		size = ALIGN(size, min_size);
	}
	if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
		alloc_sizes = min_size;

	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	pages = __iommu_dma_alloc_pages(dev, count, alloc_sizes >> PAGE_SHIFT,
					gfp);
	if (!pages)
		return NULL;

	size = iova_align(iovad, size);
	iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
	if (!iova)
		goto out_free_pages;

	if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
		goto out_free_iova;

	if (!(ioprot & IOMMU_CACHE)) {
		struct scatterlist *sg;
		int i;

		for_each_sg(sgt.sgl, sg, sgt.orig_nents, i)
			arch_dma_prep_coherent(sg_page(sg), sg->length);
	}

	if (iommu_map_sg_atomic(domain, iova, sgt.sgl, sgt.orig_nents, ioprot)
			< size)
		goto out_free_sg;

	vaddr = dma_common_pages_remap(pages, size, prot,
			__builtin_return_address(0));
	if (!vaddr)
		goto out_unmap;

	*dma_handle = iova;
	sg_free_table(&sgt);
	return vaddr;

out_unmap:
	__iommu_dma_unmap(dev, iova, size);
out_free_sg:
	sg_free_table(&sgt);
out_free_iova:
	iommu_dma_free_iova(cookie, iova, size, NULL);
out_free_pages:
	__iommu_dma_free_pages(pages, count);
	return NULL;
}

static void iommu_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (dev_is_dma_coherent(dev) && !dev_is_untrusted(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_cpu(phys, size, dir);

	if (is_swiotlb_buffer(phys))
		swiotlb_tbl_sync_single(dev, phys, size, dir, SYNC_FOR_CPU);
}

static void iommu_dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (dev_is_dma_coherent(dev) && !dev_is_untrusted(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
	if (is_swiotlb_buffer(phys))
		swiotlb_tbl_sync_single(dev, phys, size, dir, SYNC_FOR_DEVICE);

	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_device(phys, size, dir);
}

static void iommu_dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (dev_is_dma_coherent(dev) && !dev_is_untrusted(dev))
		return;

	for_each_sg(sgl, sg, nelems, i) {
		if (!dev_is_dma_coherent(dev))
			arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);

		if (is_swiotlb_buffer(sg_phys(sg)))
			swiotlb_tbl_sync_single(dev, sg_phys(sg), sg->length,
						dir, SYNC_FOR_CPU);
	}
}

static void iommu_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (dev_is_dma_coherent(dev) && !dev_is_untrusted(dev))
		return;

	for_each_sg(sgl, sg, nelems, i) {
		if (is_swiotlb_buffer(sg_phys(sg)))
			swiotlb_tbl_sync_single(dev, sg_phys(sg), sg->length,
						dir, SYNC_FOR_DEVICE);

		if (!dev_is_dma_coherent(dev))
			arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
	}
}

static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	bool coherent = dev_is_dma_coherent(dev);
	dma_addr_t dma_handle;

	dma_handle = __iommu_dma_map_swiotlb(dev, phys, size, dma_get_mask(dev),
			coherent, dir, attrs);
	if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
	    dma_handle != DMA_MAPPING_ERROR)
		arch_sync_dma_for_device(phys, size, dir);
	return dma_handle;
}

static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_single_for_cpu(dev, dma_handle, size, dir);
	__iommu_dma_unmap_swiotlb(dev, dma_handle, size, dir, attrs);
}

/*
 * Prepare a successfully-mapped scatterlist to give back to the caller.
 *
 * At this point the segments are already laid out by iommu_dma_map_sg() to
 * avoid individually crossing any boundaries, so we merely need to check a
 * segment's start address to avoid concatenating across one.
 */
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
		dma_addr_t dma_addr)
{
	struct scatterlist *s, *cur = sg;
	unsigned long seg_mask = dma_get_seg_boundary(dev);
	unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
	int i, count = 0;

	for_each_sg(sg, s, nents, i) {
		/* Restore this segment's original unaligned fields first */
		unsigned int s_iova_off = sg_dma_address(s);
		unsigned int s_length = sg_dma_len(s);
		unsigned int s_iova_len = s->length;

		s->offset += s_iova_off;
		s->length = s_length;
		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;

		/*
		 * Now fill in the real DMA data. If...
		 * - there is a valid output segment to append to
		 * - and this segment starts on an IOVA page boundary
		 * - but doesn't fall at a segment boundary
		 * - and wouldn't make the resulting output segment too long
		 */
		if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
		    (max_len - cur_len >= s_length)) {
			/* ...then concatenate it with the previous one */
			cur_len += s_length;
		} else {
			/* Otherwise start the next output segment */
			if (i > 0)
				cur = sg_next(cur);
			cur_len = s_length;
			count++;

			sg_dma_address(cur) = dma_addr + s_iova_off;
		}

		sg_dma_len(cur) = cur_len;
		dma_addr += s_iova_len;

		if (s_length + s_iova_off < s_iova_len)
			cur_len = 0;
	}
	return count;
}

/*
 * If mapping failed, then just restore the original list,
 * but making sure the DMA fields are invalidated.
 */
static void __invalidate_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_address(s) != DMA_MAPPING_ERROR)
			s->offset += sg_dma_address(s);
		if (sg_dma_len(s))
			s->length = sg_dma_len(s);
		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;
	}
}

static void iommu_dma_unmap_sg_swiotlb(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		__iommu_dma_unmap_swiotlb(dev, sg_dma_address(s),
				sg_dma_len(s), dir, attrs);
}

static int iommu_dma_map_sg_swiotlb(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		sg_dma_address(s) = __iommu_dma_map_swiotlb(dev, sg_phys(s),
				s->length, dma_get_mask(dev),
				dev_is_dma_coherent(dev), dir, attrs);
		if (sg_dma_address(s) == DMA_MAPPING_ERROR)
			goto out_unmap;
		sg_dma_len(s) = s->length;
	}

	return nents;

out_unmap:
	iommu_dma_unmap_sg_swiotlb(dev, sg, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
	return 0;
}

/*
 * The DMA API client is passing in a scatterlist which could describe
 * any old buffer layout, but the IOMMU API requires everything to be
 * aligned to IOMMU pages. Hence the need for this complicated bit of
 * impedance-matching, to be able to hand off a suitably-aligned list,
 * but still preserve the original offsets and sizes for the caller.
 */
static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct scatterlist *s, *prev = NULL;
	int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);
	dma_addr_t iova;
	size_t iova_len = 0;
	unsigned long mask = dma_get_seg_boundary(dev);
	int i;

	if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
	    iommu_deferred_attach(dev, domain))
		return 0;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_sg_for_device(dev, sg, nents, dir);

	if (dev_is_untrusted(dev))
		return iommu_dma_map_sg_swiotlb(dev, sg, nents, dir, attrs);

	/*
	 * Work out how much IOVA space we need, and align the segments to
	 * IOVA granules for the IOMMU driver to handle. With some clever
	 * trickery we can modify the list in-place, but reversibly, by
	 * stashing the unaligned parts in the as-yet-unused DMA fields.
	 */
	for_each_sg(sg, s, nents, i) {
		size_t s_iova_off = iova_offset(iovad, s->offset);
		size_t s_length = s->length;
		size_t pad_len = (mask - iova_len + 1) & mask;

		sg_dma_address(s) = s_iova_off;
		sg_dma_len(s) = s_length;
		s->offset -= s_iova_off;
		s_length = iova_align(iovad, s_length + s_iova_off);
		s->length = s_length;

		/*
		 * Due to the alignment of our single IOVA allocation, we can
		 * depend on these assumptions about the segment boundary mask:
		 * - If mask size >= IOVA size, then the IOVA range cannot
		 *   possibly fall across a boundary, so we don't care.
		 * - If mask size < IOVA size, then the IOVA range must start
		 *   exactly on a boundary, therefore we can lay things out
		 *   based purely on segment lengths without needing to know
		 *   the actual addresses beforehand.
		 * - The mask must be a power of 2, so pad_len == 0 if
		 *   iova_len == 0, thus we cannot dereference prev the first
		 *   time through here (i.e. before it has a meaningful value).
		 */
		if (pad_len && pad_len < s_length - 1) {
			prev->length += pad_len;
			iova_len += pad_len;
		}

		iova_len += s_length;
		prev = s;
	}

	iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
	if (!iova)
		goto out_restore_sg;

	/*
	 * We'll leave any physical concatenation to the IOMMU driver's
	 * implementation - it knows better than we do.
	 */
	if (iommu_map_sg_atomic(domain, iova, sg, nents, prot) < iova_len)
		goto out_free_iova;

	return __finalise_sg(dev, sg, nents, iova);

out_free_iova:
	iommu_dma_free_iova(cookie, iova, iova_len, NULL);
out_restore_sg:
	__invalidate_sg(sg, nents);
	return 0;
}

static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t start, end;
	struct scatterlist *tmp;
	int i;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir);

	if (dev_is_untrusted(dev)) {
		iommu_dma_unmap_sg_swiotlb(dev, sg, nents, dir, attrs);
		return;
	}

	/*
	 * The scatterlist segments are mapped into a single
	 * contiguous IOVA allocation, so this is incredibly easy.
	 */
	start = sg_dma_address(sg);
	for_each_sg(sg_next(sg), tmp, nents - 1, i) {
		if (sg_dma_len(tmp) == 0)
			break;
		sg = tmp;
	}
	end = sg_dma_address(sg) + sg_dma_len(sg);
	__iommu_dma_unmap(dev, start, end - start);
}

static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return __iommu_dma_map(dev, phys, size,
			dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO,
			dma_get_mask(dev));
}

static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	__iommu_dma_unmap(dev, handle, size);
}

static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
{
	size_t alloc_size = PAGE_ALIGN(size);
	int count = alloc_size >> PAGE_SHIFT;
	struct page *page = NULL, **pages = NULL;

	/* Non-coherent atomic allocation? Easy */
	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    dma_free_from_pool(dev, cpu_addr, alloc_size))
		return;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
		/*
		 * If the address is remapped, then it's either non-coherent
		 * or highmem CMA, or an iommu_dma_alloc_remap() construction.
		 */
		pages = dma_common_find_pages(cpu_addr);
		if (!pages)
			page = vmalloc_to_page(cpu_addr);
		dma_common_free_remap(cpu_addr, alloc_size);
	} else {
		/* Lowmem means a coherent atomic or CMA allocation */
		page = virt_to_page(cpu_addr);
	}

	if (pages)
		__iommu_dma_free_pages(pages, count);
	if (page)
		dma_free_contiguous(dev, page, alloc_size);
}

static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t handle, unsigned long attrs)
{
	__iommu_dma_unmap(dev, handle, size);
	__iommu_dma_free(dev, size, cpu_addr);
}

static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
		struct page **pagep, gfp_t gfp, unsigned long attrs)
{
	bool coherent = dev_is_dma_coherent(dev);
	size_t alloc_size = PAGE_ALIGN(size);
	int node = dev_to_node(dev);
	struct page *page = NULL;
	void *cpu_addr;

	page = dma_alloc_contiguous(dev, alloc_size, gfp);
	if (!page)
		page = alloc_pages_node(node, gfp, get_order(alloc_size));
	if (!page)
		return NULL;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) {
		pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);

		cpu_addr = dma_common_contiguous_remap(page, alloc_size,
				prot, __builtin_return_address(0));
		if (!cpu_addr)
			goto out_free_pages;

		if (!coherent)
			arch_dma_prep_coherent(page, size);
	} else {
		cpu_addr = page_address(page);
	}

	*pagep = page;
	memset(cpu_addr, 0, alloc_size);
	return cpu_addr;
out_free_pages:
	dma_free_contiguous(dev, page, alloc_size);
	return NULL;
}

static void *iommu_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
	bool coherent = dev_is_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	struct page *page = NULL;
	void *cpu_addr;

	gfp |= __GFP_ZERO;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && gfpflags_allow_blocking(gfp) &&
	    !(attrs & DMA_ATTR_FORCE_CONTIGUOUS)) {
		return iommu_dma_alloc_remap(dev, size, handle, gfp,
				dma_pgprot(dev, PAGE_KERNEL, attrs), attrs);
	}

	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    !gfpflags_allow_blocking(gfp) && !coherent)
		page = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &cpu_addr,
					   gfp, NULL);
	else
		cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs);
	if (!cpu_addr)
		return NULL;

	*handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot,
			dev->coherent_dma_mask);
	if (*handle == DMA_MAPPING_ERROR) {
		__iommu_dma_free(dev, size, cpu_addr);
		return NULL;
	}

	return cpu_addr;
}

#ifdef CONFIG_DMA_REMAP
static void *iommu_dma_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *handle, enum dma_data_direction dir, gfp_t gfp)
{
	if (!gfpflags_allow_blocking(gfp)) {
		struct page *page;

		page = dma_common_alloc_pages(dev, size, handle, dir, gfp);
		if (!page)
			return NULL;
		return page_address(page);
	}

	return iommu_dma_alloc_remap(dev, size, handle, gfp | __GFP_ZERO,
				     PAGE_KERNEL, 0);
}

static void iommu_dma_free_noncoherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t handle, enum dma_data_direction dir)
{
	__iommu_dma_unmap(dev, handle, size);
	__iommu_dma_free(dev, size, cpu_addr);
}
#else
#define iommu_dma_alloc_noncoherent	NULL
#define iommu_dma_free_noncoherent	NULL
#endif /* CONFIG_DMA_REMAP */

static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn, off = vma->vm_pgoff;
	int ret;

	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off >= nr_pages || vma_pages(vma) > nr_pages - off)
		return -ENXIO;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
		struct page **pages = dma_common_find_pages(cpu_addr);

		if (pages)
			return vm_map_pages(vma, pages, nr_pages);
		pfn = vmalloc_to_pfn(cpu_addr);
	} else {
		pfn = page_to_pfn(virt_to_page(cpu_addr));
	}

	return remap_pfn_range(vma, vma->vm_start, pfn + off,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}

static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	struct page *page;
	int ret;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
		struct page **pages = dma_common_find_pages(cpu_addr);

		if (pages) {
			return sg_alloc_table_from_pages(sgt, pages,
					PAGE_ALIGN(size) >> PAGE_SHIFT,
					0, size, GFP_KERNEL);
		}

		page = vmalloc_to_page(cpu_addr);
	} else {
		page = virt_to_page(cpu_addr);
	}

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return ret;
}

static unsigned long iommu_dma_get_merge_boundary(struct device *dev)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);

	return (1UL << __ffs(domain->pgsize_bitmap)) - 1;
}

static const struct dma_map_ops iommu_dma_ops = {
	.alloc			= iommu_dma_alloc,
	.free			= iommu_dma_free,
	.alloc_pages		= dma_common_alloc_pages,
	.free_pages		= dma_common_free_pages,
	.alloc_noncoherent	= iommu_dma_alloc_noncoherent,
	.free_noncoherent	= iommu_dma_free_noncoherent,
	.mmap			= iommu_dma_mmap,
	.get_sgtable		= iommu_dma_get_sgtable,
	.map_page		= iommu_dma_map_page,
	.unmap_page		= iommu_dma_unmap_page,
	.map_sg			= iommu_dma_map_sg,
	.unmap_sg		= iommu_dma_unmap_sg,
	.sync_single_for_cpu	= iommu_dma_sync_single_for_cpu,
	.sync_single_for_device	= iommu_dma_sync_single_for_device,
	.sync_sg_for_cpu	= iommu_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= iommu_dma_sync_sg_for_device,
	.map_resource		= iommu_dma_map_resource,
	.unmap_resource		= iommu_dma_unmap_resource,
	.get_merge_boundary	= iommu_dma_get_merge_boundary,
};

/*
 * The IOMMU core code allocates the default DMA domain, which the underlying
 * IOMMU driver needs to support via the dma-iommu layer.
 */
void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

	if (!domain)
		goto out_err;

	/*
	 * The IOMMU core code allocates the default DMA domain, which the
	 * underlying IOMMU driver needs to support via the dma-iommu layer.
	 */
	if (domain->type == IOMMU_DOMAIN_DMA) {
		if (iommu_dma_init_domain(domain, dma_base, size, dev))
			goto out_err;
		dev->dma_ops = &iommu_dma_ops;
	}

	return;
out_err:
	pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
		dev_name(dev));
}

static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
		phys_addr_t msi_addr, struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi_page;
	dma_addr_t iova;
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
	size_t size = cookie_msi_granule(cookie);

	msi_addr &= ~(phys_addr_t)(size - 1);
	list_for_each_entry(msi_page, &cookie->msi_page_list, list)
		if (msi_page->phys == msi_addr)
			return msi_page;

	msi_page = kzalloc(sizeof(*msi_page), GFP_KERNEL);
	if (!msi_page)
		return NULL;

	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
	if (!iova)
		goto out_free_page;

	if (iommu_map(domain, iova, msi_addr, size, prot))
		goto out_free_iova;

	INIT_LIST_HEAD(&msi_page->list);
	msi_page->phys = msi_addr;
	msi_page->iova = iova;
	list_add(&msi_page->list, &cookie->msi_page_list);
	return msi_page;

out_free_iova:
	iommu_dma_free_iova(cookie, iova, size, NULL);
out_free_page:
	kfree(msi_page);
	return NULL;
}

int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
{
	struct device *dev = msi_desc_to_dev(desc);
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_msi_page *msi_page;
	static DEFINE_MUTEX(msi_prepare_lock); /* see below */

	if (!domain || !domain->iova_cookie) {
		desc->iommu_cookie = NULL;
		return 0;
	}

	/*
	 * In fact the whole prepare operation should already be serialised by
	 * irq_domain_mutex further up the callchain, but that's pretty subtle
	 * on its own, so consider this locking as failsafe documentation...
	 */
	mutex_lock(&msi_prepare_lock);
	msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
	mutex_unlock(&msi_prepare_lock);

	msi_desc_set_iommu_cookie(desc, msi_page);

	if (!msi_page)
		return -ENOMEM;
	return 0;
}

void iommu_dma_compose_msi_msg(struct msi_desc *desc,
			       struct msi_msg *msg)
{
	struct device *dev = msi_desc_to_dev(desc);
	const struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	const struct iommu_dma_msi_page *msi_page;

	msi_page = msi_desc_get_iommu_cookie(desc);

	if (!domain || !domain->iova_cookie || WARN_ON(!msi_page))
		return;

	msg->address_hi = upper_32_bits(msi_page->iova);
	msg->address_lo &= cookie_msi_granule(domain->iova_cookie) - 1;
	msg->address_lo += lower_32_bits(msi_page->iova);
}

static int iommu_dma_init(void)
{
	if (is_kdump_kernel())
		static_branch_enable(&iommu_deferred_attach_enabled);

	return iova_cache_get();
}
arch_initcall(iommu_dma_init);