// SPDX-License-Identifier: GPL-2.0-only
/*
 * A fairly generic DMA-API to IOMMU-API glue layer.
 *
 * Copyright (C) 2014-2015 ARM Ltd.
 *
 * based in part on arch/arm/mm/dma-mapping.c:
 * Copyright (C) 2000-2004 Russell King
 */

#include <linux/acpi_iort.h>
#include <linux/device.h>
#include <linux/dma-contiguous.h>
#include <linux/dma-iommu.h>
#include <linux/dma-noncoherent.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>

struct iommu_dma_msi_page {
	struct list_head	list;
	dma_addr_t		iova;
	phys_addr_t		phys;
};

enum iommu_dma_cookie_type {
	IOMMU_DMA_IOVA_COOKIE,
	IOMMU_DMA_MSI_COOKIE,
};

struct iommu_dma_cookie {
	enum iommu_dma_cookie_type	type;
	union {
		/* Full allocator for IOMMU_DMA_IOVA_COOKIE */
		struct iova_domain	iovad;
		/* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
		dma_addr_t		msi_iova;
	};
	struct list_head		msi_page_list;
	spinlock_t			msi_lock;

	/* Domain for flush queue callback; NULL if flush queue not in use */
	struct iommu_domain		*fq_domain;
};

static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
{
	if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
		return cookie->iovad.granule;
	return PAGE_SIZE;
}

static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
{
	struct iommu_dma_cookie *cookie;

	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
	if (cookie) {
		spin_lock_init(&cookie->msi_lock);
		INIT_LIST_HEAD(&cookie->msi_page_list);
		cookie->type = type;
	}
	return cookie;
}

/**
 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
 * @domain: IOMMU domain to prepare for DMA-API usage
 *
 * IOMMU drivers should normally call this from their domain_alloc
 * callback when domain->type == IOMMU_DOMAIN_DMA.
 */
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
	if (domain->iova_cookie)
		return -EEXIST;

	domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
	if (!domain->iova_cookie)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(iommu_get_dma_cookie);
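/*
 * Example (illustrative sketch only, not part of this file): an IOMMU
 * driver's domain_alloc callback would typically acquire the cookie for
 * DMA domains roughly like this. The function name "my_domain_alloc" is
 * hypothetical, and real drivers embed the iommu_domain in their own
 * domain structure rather than allocating it bare:
 *
 *	static struct iommu_domain *my_domain_alloc(unsigned type)
 *	{
 *		struct iommu_domain *domain;
 *
 *		domain = kzalloc(sizeof(*domain), GFP_KERNEL);
 *		if (!domain)
 *			return NULL;
 *
 *		if (type == IOMMU_DOMAIN_DMA && iommu_get_dma_cookie(domain)) {
 *			kfree(domain);
 *			return NULL;
 *		}
 *		return domain;
 *	}
 */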
/**
 * iommu_get_msi_cookie - Acquire just MSI remapping resources
 * @domain: IOMMU domain to prepare
 * @base: Start address of IOVA region for MSI mappings
 *
 * Users who manage their own IOVA allocation and do not want DMA API support,
 * but would still like to take advantage of automatic MSI remapping, can use
 * this to initialise their own domain appropriately. Users should reserve a
 * contiguous IOVA region, starting at @base, large enough to accommodate the
 * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
 * used by the devices attached to @domain.
 */
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
	struct iommu_dma_cookie *cookie;

	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;

	if (domain->iova_cookie)
		return -EEXIST;

	cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
	if (!cookie)
		return -ENOMEM;

	cookie->msi_iova = base;
	domain->iova_cookie = cookie;
	return 0;
}
EXPORT_SYMBOL(iommu_get_msi_cookie);

/**
 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
 *          iommu_get_msi_cookie()
 *
 * IOMMU drivers should normally call this from their domain_free callback.
 */
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi, *tmp;

	if (!cookie)
		return;

	if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule)
		put_iova_domain(&cookie->iovad);

	list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
		list_del(&msi->list);
		kfree(msi);
	}
	kfree(cookie);
	domain->iova_cookie = NULL;
}
EXPORT_SYMBOL(iommu_put_dma_cookie);

/**
 * iommu_dma_get_resv_regions - Reserved region driver helper
 * @dev: Device from iommu_get_resv_regions()
 * @list: Reserved region list from iommu_get_resv_regions()
 *
 * IOMMU drivers can use this to implement their .get_resv_regions callback
 * for general non-IOMMU-specific reservations. Currently, this covers GICv3
 * ITS region reservation on ACPI based ARM platforms that may require HW MSI
 * reservation.
 */
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{
	if (!is_of_node(dev_iommu_fwspec_get(dev)->iommu_fwnode))
		iort_iommu_msi_get_resv_regions(dev, list);
}
EXPORT_SYMBOL(iommu_dma_get_resv_regions);

static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
		phys_addr_t start, phys_addr_t end)
{
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_dma_msi_page *msi_page;
	int i, num_pages;

	start -= iova_offset(iovad, start);
	num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);

	msi_page = kcalloc(num_pages, sizeof(*msi_page), GFP_KERNEL);
	if (!msi_page)
		return -ENOMEM;

	for (i = 0; i < num_pages; i++) {
		msi_page[i].phys = start;
		msi_page[i].iova = start;
		INIT_LIST_HEAD(&msi_page[i].list);
		list_add(&msi_page[i].list, &cookie->msi_page_list);
		start += iovad->granule;
	}

	return 0;
}
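/*
 * Example (illustrative sketch only): a user such as a VFIO-style driver
 * that owns an IOMMU_DOMAIN_UNMANAGED domain and allocates its own IOVAs
 * might set aside a region for MSI doorbells and then call
 * iommu_get_msi_cookie() above. The base and length below are arbitrary
 * values chosen for the example:
 *
 *	#define MY_MSI_IOVA_BASE	0x8000000
 *	#define MY_MSI_IOVA_LENGTH	0x100000
 *
 *	// keep [MY_MSI_IOVA_BASE, +MY_MSI_IOVA_LENGTH) out of the caller's
 *	// own IOVA allocator, then:
 *	ret = iommu_get_msi_cookie(domain, MY_MSI_IOVA_BASE);
 */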
static int iova_reserve_pci_windows(struct pci_dev *dev,
		struct iova_domain *iovad)
{
	struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
	struct resource_entry *window;
	unsigned long lo, hi;
	phys_addr_t start = 0, end;

	resource_list_for_each_entry(window, &bridge->windows) {
		if (resource_type(window->res) != IORESOURCE_MEM)
			continue;

		lo = iova_pfn(iovad, window->res->start - window->offset);
		hi = iova_pfn(iovad, window->res->end - window->offset);
		reserve_iova(iovad, lo, hi);
	}

	/* Get reserved DMA windows from host bridge */
	resource_list_for_each_entry(window, &bridge->dma_ranges) {
		end = window->res->start - window->offset;
resv_iova:
		if (end > start) {
			lo = iova_pfn(iovad, start);
			hi = iova_pfn(iovad, end);
			reserve_iova(iovad, lo, hi);
		} else {
			/* dma_ranges list should be sorted */
			dev_err(&dev->dev, "Failed to reserve IOVA\n");
			return -EINVAL;
		}

		start = window->res->end - window->offset + 1;
		/* If window is last entry */
		if (window->node.next == &bridge->dma_ranges &&
		    end != ~(phys_addr_t)0) {
			end = ~(phys_addr_t)0;
			goto resv_iova;
		}
	}

	return 0;
}

static int iova_reserve_iommu_regions(struct device *dev,
		struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_resv_region *region;
	LIST_HEAD(resv_regions);
	int ret = 0;

	if (dev_is_pci(dev)) {
		ret = iova_reserve_pci_windows(to_pci_dev(dev), iovad);
		if (ret)
			return ret;
	}

	iommu_get_resv_regions(dev, &resv_regions);
	list_for_each_entry(region, &resv_regions, list) {
		unsigned long lo, hi;

		/* We ARE the software that manages these! */
		if (region->type == IOMMU_RESV_SW_MSI)
			continue;

		lo = iova_pfn(iovad, region->start);
		hi = iova_pfn(iovad, region->start + region->length - 1);
		reserve_iova(iovad, lo, hi);

		if (region->type == IOMMU_RESV_MSI)
			ret = cookie_init_hw_msi_region(cookie, region->start,
					region->start + region->length);
		if (ret)
			break;
	}
	iommu_put_resv_regions(dev, &resv_regions);

	return ret;
}

static void iommu_dma_flush_iotlb_all(struct iova_domain *iovad)
{
	struct iommu_dma_cookie *cookie;
	struct iommu_domain *domain;

	cookie = container_of(iovad, struct iommu_dma_cookie, iovad);
	domain = cookie->fq_domain;
	/*
	 * An IOMMU driver that supports DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE
	 * must also provide a non-NULL ops->flush_iotlb_all callback.
	 */
	domain->ops->flush_iotlb_all(domain);
}
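/*
 * Example (illustrative sketch only): the flush-queue callback above is
 * only installed by iommu_dma_init_domain() below when the IOMMU driver
 * opts in via DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, typically when it runs in
 * lazy/non-strict invalidation mode. A hypothetical driver callback
 * ("my_domain_get_attr") might report it like this:
 *
 *	static int my_domain_get_attr(struct iommu_domain *domain,
 *				      enum iommu_attr attr, void *data)
 *	{
 *		if (attr == DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE) {
 *			*(int *)data = 1;	// batch TLB invalidations
 *			return 0;
 *		}
 *		return -ENODEV;
 *	}
 */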
/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 * @base: IOVA at which the mappable address space starts
 * @size: Size of IOVA space
 * @dev: Device the domain is being initialised for
 *
 * @base and @size should be exact multiples of IOMMU page granularity to
 * avoid rounding surprises. If necessary, we reserve the page at address 0
 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 * any change which could make prior IOVAs invalid will fail.
 */
static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
		u64 size, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	unsigned long order, base_pfn;
	struct iova_domain *iovad;
	int attr;

	if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
		return -EINVAL;

	iovad = &cookie->iovad;

	/* Use the smallest supported page size for IOVA granularity */
	order = __ffs(domain->pgsize_bitmap);
	base_pfn = max_t(unsigned long, 1, base >> order);

	/* Check the domain allows at least some access to the device... */
	if (domain->geometry.force_aperture) {
		if (base > domain->geometry.aperture_end ||
		    base + size <= domain->geometry.aperture_start) {
			pr_warn("specified DMA range outside IOMMU capability\n");
			return -EFAULT;
		}
		/* ...then finally give it a kicking to make sure it fits */
		base_pfn = max_t(unsigned long, base_pfn,
				domain->geometry.aperture_start >> order);
	}

	/* start_pfn is always nonzero for an already-initialised domain */
	if (iovad->start_pfn) {
		if (1UL << order != iovad->granule ||
		    base_pfn != iovad->start_pfn) {
			pr_warn("Incompatible range for DMA domain\n");
			return -EFAULT;
		}

		return 0;
	}

	init_iova_domain(iovad, 1UL << order, base_pfn);

	if (!cookie->fq_domain && !iommu_domain_get_attr(domain,
			DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr) && attr) {
		cookie->fq_domain = domain;
		init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all, NULL);
	}

	if (!dev)
		return 0;

	return iova_reserve_iommu_regions(dev, domain);
}

/**
 * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
 *                    page flags.
 * @dir: Direction of DMA transfer
 * @coherent: Is the DMA master cache-coherent?
 * @attrs: DMA attributes for the mapping
 *
 * Return: corresponding IOMMU API page protection flags
 */
static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
		unsigned long attrs)
{
	int prot = coherent ? IOMMU_CACHE : 0;

	if (attrs & DMA_ATTR_PRIVILEGED)
		prot |= IOMMU_PRIV;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;
	default:
		return 0;
	}
}
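/*
 * Example (for illustration): a cache-coherent device doing a
 * DMA_TO_DEVICE transfer with DMA_ATTR_PRIVILEGED set gets
 * IOMMU_CACHE | IOMMU_PRIV | IOMMU_READ from dma_info_to_prot() above,
 * i.e. the device may only read the buffer, via a privileged, cacheable
 * mapping.
 */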
static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
		size_t size, dma_addr_t dma_limit, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long shift, iova_len, iova = 0;

	if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
		cookie->msi_iova += size;
		return cookie->msi_iova - size;
	}

	shift = iova_shift(iovad);
	iova_len = size >> shift;
	/*
	 * Freeing non-power-of-two-sized allocations back into the IOVA caches
	 * will come back to bite us badly, so we have to waste a bit of space
	 * rounding up anything cacheable to make sure that can't happen. The
	 * order of the unadjusted size will still match upon freeing.
	 */
	if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
		iova_len = roundup_pow_of_two(iova_len);

	if (dev->bus_dma_mask)
		dma_limit &= dev->bus_dma_mask;

	if (domain->geometry.force_aperture)
		dma_limit = min(dma_limit, domain->geometry.aperture_end);

	/* Try to get PCI devices a SAC address */
	if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
		iova = alloc_iova_fast(iovad, iova_len,
				       DMA_BIT_MASK(32) >> shift, false);

	if (!iova)
		iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
				       true);

	return (dma_addr_t)iova << shift;
}

static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
		dma_addr_t iova, size_t size)
{
	struct iova_domain *iovad = &cookie->iovad;

	/* The MSI case is only ever cleaning up its most recent allocation */
	if (cookie->type == IOMMU_DMA_MSI_COOKIE)
		cookie->msi_iova -= size;
	else if (cookie->fq_domain)	/* non-strict mode */
		queue_iova(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad), 0);
	else
		free_iova_fast(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad));
}

static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
		size_t size)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, dma_addr);
	struct iommu_iotlb_gather iotlb_gather;
	size_t unmapped;

	dma_addr -= iova_off;
	size = iova_align(iovad, size + iova_off);
	iommu_iotlb_gather_init(&iotlb_gather);

	unmapped = iommu_unmap_fast(domain, dma_addr, size, &iotlb_gather);
	WARN_ON(unmapped != size);

	if (!cookie->fq_domain)
		iommu_tlb_sync(domain, &iotlb_gather);
	iommu_dma_free_iova(cookie, dma_addr, size);
}

static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
		size_t size, int prot)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, phys);
	dma_addr_t iova;

	size = iova_align(iovad, size + iova_off);

	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
	if (!iova)
		return DMA_MAPPING_ERROR;

	if (iommu_map(domain, iova, phys - iova_off, size, prot)) {
		iommu_dma_free_iova(cookie, iova, size);
		return DMA_MAPPING_ERROR;
	}
	return iova + iova_off;
}

static void __iommu_dma_free_pages(struct page **pages, int count)
{
	while (count--)
		__free_page(pages[count]);
	kvfree(pages);
}
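/*
 * Worked example (for illustration, assuming a 4KiB IOVA granule): mapping
 * phys = 0x80001234 with size = 0x300 in __iommu_dma_map() above gives
 * iova_off = 0x234, the mapped size is rounded up to 0x1000, iommu_map()
 * maps the whole page at 0x80001000, and the returned DMA address is the
 * allocated IOVA plus 0x234, so the caller's sub-page offset is preserved.
 */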
static struct page **__iommu_dma_alloc_pages(struct device *dev,
		unsigned int count, unsigned long order_mask, gfp_t gfp)
{
	struct page **pages;
	unsigned int i = 0, nid = dev_to_node(dev);

	order_mask &= (2U << MAX_ORDER) - 1;
	if (!order_mask)
		return NULL;

	pages = kvzalloc(count * sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;

	/* IOMMU can map any pages, so highmem can also be used here */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	while (count) {
		struct page *page = NULL;
		unsigned int order_size;

		/*
		 * Higher-order allocations are a convenience rather
		 * than a necessity, hence using __GFP_NORETRY until
		 * falling back to minimum-order allocations.
		 */
		for (order_mask &= (2U << __fls(count)) - 1;
		     order_mask; order_mask &= ~order_size) {
			unsigned int order = __fls(order_mask);
			gfp_t alloc_flags = gfp;

			order_size = 1U << order;
			if (order_mask > order_size)
				alloc_flags |= __GFP_NORETRY;
			page = alloc_pages_node(nid, alloc_flags, order);
			if (!page)
				continue;
			if (!order)
				break;
			if (!PageCompound(page)) {
				split_page(page, order);
				break;
			} else if (!split_huge_page(page)) {
				break;
			}
			__free_pages(page, order);
		}
		if (!page) {
			__iommu_dma_free_pages(pages, i);
			return NULL;
		}
		count -= order_size;
		while (order_size--)
			pages[i++] = page++;
	}
	return pages;
}

/**
 * iommu_dma_alloc_remap - Allocate and map a buffer contiguous in IOVA space
 * @dev: Device to allocate memory for. Must be a real device
 *	 attached to an iommu_dma_domain
 * @size: Size of buffer in bytes
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 * @attrs: DMA attributes for this allocation
 *
 * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
 * but an IOMMU which supports smaller pages might not map the whole thing.
 *
 * Return: Mapped virtual address, or NULL on failure.
 */
static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	bool coherent = dev_is_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
	struct page **pages;
	struct sg_table sgt;
	dma_addr_t iova;
	void *vaddr;

	*dma_handle = DMA_MAPPING_ERROR;

	min_size = alloc_sizes & -alloc_sizes;
	if (min_size < PAGE_SIZE) {
		min_size = PAGE_SIZE;
		alloc_sizes |= PAGE_SIZE;
	} else {
		size = ALIGN(size, min_size);
	}
	if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
		alloc_sizes = min_size;

	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	pages = __iommu_dma_alloc_pages(dev, count, alloc_sizes >> PAGE_SHIFT,
					gfp);
	if (!pages)
		return NULL;

	size = iova_align(iovad, size);
	iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
	if (!iova)
		goto out_free_pages;

	if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
		goto out_free_iova;

	if (!(ioprot & IOMMU_CACHE)) {
		struct scatterlist *sg;
		int i;

		for_each_sg(sgt.sgl, sg, sgt.orig_nents, i)
			arch_dma_prep_coherent(sg_page(sg), sg->length);
	}

	if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, ioprot)
			< size)
		goto out_free_sg;

	vaddr = dma_common_pages_remap(pages, size, prot,
			__builtin_return_address(0));
	if (!vaddr)
		goto out_unmap;

	*dma_handle = iova;
	sg_free_table(&sgt);
	return vaddr;

out_unmap:
	__iommu_dma_unmap(dev, iova, size);
out_free_sg:
	sg_free_table(&sgt);
out_free_iova:
	iommu_dma_free_iova(cookie, iova, size);
out_free_pages:
	__iommu_dma_free_pages(pages, count);
	return NULL;
}
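/*
 * Worked example (for illustration): with permitted orders {3, 2, 0} and
 * count = 13, __iommu_dma_alloc_pages() above tries an order-3 block
 * (8 pages, with __GFP_NORETRY since smaller orders remain as fallback),
 * then an order-2 block (4 pages), then a single page, giving 13 pages in
 * total that are only physically contiguous within each block; the IOVA
 * mapping in iommu_dma_alloc_remap() makes them appear contiguous to the
 * device.
 */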
/**
 * __iommu_dma_mmap - Map a buffer into provided user VMA
 * @pages: Array representing buffer from iommu_dma_alloc_remap()
 * @size: Size of buffer in bytes
 * @vma: VMA describing requested userspace mapping
 *
 * Maps the pages of the buffer in @pages into @vma. The caller is responsible
 * for verifying the correct size and protection of @vma beforehand.
 */
static int __iommu_dma_mmap(struct page **pages, size_t size,
		struct vm_area_struct *vma)
{
	return vm_map_pages(vma, pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
}

static void iommu_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (dev_is_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
	arch_sync_dma_for_cpu(dev, phys, size, dir);
}

static void iommu_dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (dev_is_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
	arch_sync_dma_for_device(dev, phys, size, dir);
}

static void iommu_dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (dev_is_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
}

static void iommu_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (dev_is_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
}

static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	bool coherent = dev_is_dma_coherent(dev);
	int prot = dma_info_to_prot(dir, coherent, attrs);
	dma_addr_t dma_handle;

	dma_handle = __iommu_dma_map(dev, phys, size, prot);
	if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
	    dma_handle != DMA_MAPPING_ERROR)
		arch_sync_dma_for_device(dev, phys, size, dir);
	return dma_handle;
}

static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_single_for_cpu(dev, dma_handle, size, dir);
	__iommu_dma_unmap(dev, dma_handle, size);
}
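/*
 * Example (for illustration): a driver calling dma_map_single() or
 * dma_map_page() on a device using these ops ends up in
 * iommu_dma_map_page() above; for a non-coherent device the buffer is made
 * visible to the device with arch_sync_dma_for_device() unless the caller
 * passed DMA_ATTR_SKIP_CPU_SYNC to take over cache maintenance itself.
 */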
/*
 * Prepare a successfully-mapped scatterlist to give back to the caller.
 *
 * At this point the segments are already laid out by iommu_dma_map_sg() to
 * avoid individually crossing any boundaries, so we merely need to check a
 * segment's start address to avoid concatenating across one.
 */
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
		dma_addr_t dma_addr)
{
	struct scatterlist *s, *cur = sg;
	unsigned long seg_mask = dma_get_seg_boundary(dev);
	unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
	int i, count = 0;

	for_each_sg(sg, s, nents, i) {
		/* Restore this segment's original unaligned fields first */
		unsigned int s_iova_off = sg_dma_address(s);
		unsigned int s_length = sg_dma_len(s);
		unsigned int s_iova_len = s->length;

		s->offset += s_iova_off;
		s->length = s_length;
		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;

		/*
		 * Now fill in the real DMA data. If...
		 * - there is a valid output segment to append to
		 * - and this segment starts on an IOVA page boundary
		 * - but doesn't fall at a segment boundary
		 * - and wouldn't make the resulting output segment too long
		 */
		if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
		    (max_len - cur_len >= s_length)) {
			/* ...then concatenate it with the previous one */
			cur_len += s_length;
		} else {
			/* Otherwise start the next output segment */
			if (i > 0)
				cur = sg_next(cur);
			cur_len = s_length;
			count++;

			sg_dma_address(cur) = dma_addr + s_iova_off;
		}

		sg_dma_len(cur) = cur_len;
		dma_addr += s_iova_len;

		if (s_length + s_iova_off < s_iova_len)
			cur_len = 0;
	}
	return count;
}

/*
 * If mapping failed, then just restore the original list, while making sure
 * the DMA fields are invalidated.
 */
static void __invalidate_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_address(s) != DMA_MAPPING_ERROR)
			s->offset += sg_dma_address(s);
		if (sg_dma_len(s))
			s->length = sg_dma_len(s);
		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;
	}
}
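/*
 * Worked example (for illustration, assuming a 4KiB granule): for a segment
 * with offset = 0x234 and length = 0x1000, iommu_dma_map_sg() below stashes
 * 0x234 in sg_dma_address() and 0x1000 in sg_dma_len(), then presents a
 * page-aligned segment of length 0x2000 to the IOMMU driver. On success,
 * __finalise_sg() above restores the original offset and length and sets
 * sg_dma_address() to the segment's IOVA plus 0x234.
 */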
/*
 * The DMA API client is passing in a scatterlist which could describe
 * any old buffer layout, but the IOMMU API requires everything to be
 * aligned to IOMMU pages. Hence the need for this complicated bit of
 * impedance-matching, to be able to hand off a suitably-aligned list,
 * but still preserve the original offsets and sizes for the caller.
 */
static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct scatterlist *s, *prev = NULL;
	int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);
	dma_addr_t iova;
	size_t iova_len = 0;
	unsigned long mask = dma_get_seg_boundary(dev);
	int i;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_sg_for_device(dev, sg, nents, dir);

	/*
	 * Work out how much IOVA space we need, and align the segments to
	 * IOVA granules for the IOMMU driver to handle. With some clever
	 * trickery we can modify the list in-place, but reversibly, by
	 * stashing the unaligned parts in the as-yet-unused DMA fields.
	 */
	for_each_sg(sg, s, nents, i) {
		size_t s_iova_off = iova_offset(iovad, s->offset);
		size_t s_length = s->length;
		size_t pad_len = (mask - iova_len + 1) & mask;

		sg_dma_address(s) = s_iova_off;
		sg_dma_len(s) = s_length;
		s->offset -= s_iova_off;
		s_length = iova_align(iovad, s_length + s_iova_off);
		s->length = s_length;

		/*
		 * Due to the alignment of our single IOVA allocation, we can
		 * depend on these assumptions about the segment boundary mask:
		 * - If mask size >= IOVA size, then the IOVA range cannot
		 *   possibly fall across a boundary, so we don't care.
		 * - If mask size < IOVA size, then the IOVA range must start
		 *   exactly on a boundary, therefore we can lay things out
		 *   based purely on segment lengths without needing to know
		 *   the actual addresses beforehand.
		 * - The mask must be a power of 2, so pad_len == 0 if
		 *   iova_len == 0, thus we cannot dereference prev the first
		 *   time through here (i.e. before it has a meaningful value).
		 */
		if (pad_len && pad_len < s_length - 1) {
			prev->length += pad_len;
			iova_len += pad_len;
		}

		iova_len += s_length;
		prev = s;
	}

	iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
	if (!iova)
		goto out_restore_sg;

	/*
	 * We'll leave any physical concatenation to the IOMMU driver's
	 * implementation - it knows better than we do.
	 */
	if (iommu_map_sg(domain, iova, sg, nents, prot) < iova_len)
		goto out_free_iova;

	return __finalise_sg(dev, sg, nents, iova);

out_free_iova:
	iommu_dma_free_iova(cookie, iova, iova_len);
out_restore_sg:
	__invalidate_sg(sg, nents);
	return 0;
}

static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t start, end;
	struct scatterlist *tmp;
	int i;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir);

	/*
	 * The scatterlist segments are mapped into a single
	 * contiguous IOVA allocation, so this is incredibly easy.
	 */
	start = sg_dma_address(sg);
	for_each_sg(sg_next(sg), tmp, nents - 1, i) {
		if (sg_dma_len(tmp) == 0)
			break;
		sg = tmp;
	}
	end = sg_dma_address(sg) + sg_dma_len(sg);
	__iommu_dma_unmap(dev, start, end - start);
}

static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return __iommu_dma_map(dev, phys, size,
			dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO);
}

static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	__iommu_dma_unmap(dev, handle, size);
}
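/*
 * Example (for illustration): dma_map_resource() is the entry point that
 * leads to iommu_dma_map_resource() above, e.g. to give one device DMA
 * access to another device's MMIO region such as a doorbell register. The
 * physical address is mapped with IOMMU_MMIO and, since it is not backed
 * by kernel memory, no CPU cache maintenance is performed on it.
 */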
static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
{
	size_t alloc_size = PAGE_ALIGN(size);
	int count = alloc_size >> PAGE_SHIFT;
	struct page *page = NULL, **pages = NULL;

	/* Non-coherent atomic allocation? Easy */
	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    dma_free_from_pool(cpu_addr, alloc_size))
		return;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
		/*
		 * If the address is remapped, then it's either non-coherent
		 * or highmem CMA, or an iommu_dma_alloc_remap() construction.
		 */
		pages = dma_common_find_pages(cpu_addr);
		if (!pages)
			page = vmalloc_to_page(cpu_addr);
		dma_common_free_remap(cpu_addr, alloc_size);
	} else {
		/* Lowmem means a coherent atomic or CMA allocation */
		page = virt_to_page(cpu_addr);
	}

	if (pages)
		__iommu_dma_free_pages(pages, count);
	if (page)
		dma_free_contiguous(dev, page, alloc_size);
}

static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t handle, unsigned long attrs)
{
	__iommu_dma_unmap(dev, handle, size);
	__iommu_dma_free(dev, size, cpu_addr);
}

static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
		struct page **pagep, gfp_t gfp, unsigned long attrs)
{
	bool coherent = dev_is_dma_coherent(dev);
	size_t alloc_size = PAGE_ALIGN(size);
	int node = dev_to_node(dev);
	struct page *page = NULL;
	void *cpu_addr;

	page = dma_alloc_contiguous(dev, alloc_size, gfp);
	if (!page)
		page = alloc_pages_node(node, gfp, get_order(alloc_size));
	if (!page)
		return NULL;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) {
		pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);

		cpu_addr = dma_common_contiguous_remap(page, alloc_size,
				prot, __builtin_return_address(0));
		if (!cpu_addr)
			goto out_free_pages;

		if (!coherent)
			arch_dma_prep_coherent(page, size);
	} else {
		cpu_addr = page_address(page);
	}

	*pagep = page;
	memset(cpu_addr, 0, alloc_size);
	return cpu_addr;
out_free_pages:
	dma_free_contiguous(dev, page, alloc_size);
	return NULL;
}

static void *iommu_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
	bool coherent = dev_is_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	struct page *page = NULL;
	void *cpu_addr;

	gfp |= __GFP_ZERO;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && gfpflags_allow_blocking(gfp) &&
	    !(attrs & DMA_ATTR_FORCE_CONTIGUOUS))
		return iommu_dma_alloc_remap(dev, size, handle, gfp, attrs);

	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    !gfpflags_allow_blocking(gfp) && !coherent)
		cpu_addr = dma_alloc_from_pool(PAGE_ALIGN(size), &page, gfp);
	else
		cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs);
	if (!cpu_addr)
		return NULL;

	*handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot);
	if (*handle == DMA_MAPPING_ERROR) {
		__iommu_dma_free(dev, size, cpu_addr);
		return NULL;
	}

	return cpu_addr;
}
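/*
 * Example (for illustration): a dma_alloc_coherent() call from a driver
 * lands in iommu_dma_alloc() above. With a blocking GFP and
 * CONFIG_DMA_REMAP it takes the iommu_dma_alloc_remap() path (physically
 * scattered pages, contiguous only in IOVA space); a non-blocking,
 * non-coherent request is served from the atomic pool; otherwise a
 * physically contiguous allocation is used and mapped with
 * __iommu_dma_map().
 */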
static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn, off = vma->vm_pgoff;
	int ret;

	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off >= nr_pages || vma_pages(vma) > nr_pages - off)
		return -ENXIO;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
		struct page **pages = dma_common_find_pages(cpu_addr);

		if (pages)
			return __iommu_dma_mmap(pages, size, vma);
		pfn = vmalloc_to_pfn(cpu_addr);
	} else {
		pfn = page_to_pfn(virt_to_page(cpu_addr));
	}

	return remap_pfn_range(vma, vma->vm_start, pfn + off,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}

static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	struct page *page;
	int ret;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
		struct page **pages = dma_common_find_pages(cpu_addr);

		if (pages) {
			return sg_alloc_table_from_pages(sgt, pages,
					PAGE_ALIGN(size) >> PAGE_SHIFT,
					0, size, GFP_KERNEL);
		}

		page = vmalloc_to_page(cpu_addr);
	} else {
		page = virt_to_page(cpu_addr);
	}

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return ret;
}

static unsigned long iommu_dma_get_merge_boundary(struct device *dev)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);

	return (1UL << __ffs(domain->pgsize_bitmap)) - 1;
}

static const struct dma_map_ops iommu_dma_ops = {
	.alloc			= iommu_dma_alloc,
	.free			= iommu_dma_free,
	.mmap			= iommu_dma_mmap,
	.get_sgtable		= iommu_dma_get_sgtable,
	.map_page		= iommu_dma_map_page,
	.unmap_page		= iommu_dma_unmap_page,
	.map_sg			= iommu_dma_map_sg,
	.unmap_sg		= iommu_dma_unmap_sg,
	.sync_single_for_cpu	= iommu_dma_sync_single_for_cpu,
	.sync_single_for_device	= iommu_dma_sync_single_for_device,
	.sync_sg_for_cpu	= iommu_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= iommu_dma_sync_sg_for_device,
	.map_resource		= iommu_dma_map_resource,
	.unmap_resource		= iommu_dma_unmap_resource,
	.get_merge_boundary	= iommu_dma_get_merge_boundary,
};
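/*
 * Example (for illustration): with a smallest supported IOMMU page size of
 * 4KiB, iommu_dma_get_merge_boundary() above returns 0xfff. The DMA core
 * exposes this via dma_get_merge_boundary(), telling callers that buffers
 * need only be aligned at IOMMU-granule boundaries to be merged into a
 * single DMA segment, since the IOMMU can make them contiguous in IOVA
 * space.
 */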
void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

	if (!domain)
		goto out_err;

	/*
	 * The IOMMU core code allocates the default DMA domain, which the
	 * underlying IOMMU driver needs to support via the dma-iommu layer.
	 */
	if (domain->type == IOMMU_DOMAIN_DMA) {
		if (iommu_dma_init_domain(domain, dma_base, size, dev))
			goto out_err;
		dev->dma_ops = &iommu_dma_ops;
	}

	return;
out_err:
	pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
		dev_name(dev));
}

static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
		phys_addr_t msi_addr, struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi_page;
	dma_addr_t iova;
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
	size_t size = cookie_msi_granule(cookie);

	msi_addr &= ~(phys_addr_t)(size - 1);
	list_for_each_entry(msi_page, &cookie->msi_page_list, list)
		if (msi_page->phys == msi_addr)
			return msi_page;

	msi_page = kzalloc(sizeof(*msi_page), GFP_ATOMIC);
	if (!msi_page)
		return NULL;

	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
	if (!iova)
		goto out_free_page;

	if (iommu_map(domain, iova, msi_addr, size, prot))
		goto out_free_iova;

	INIT_LIST_HEAD(&msi_page->list);
	msi_page->phys = msi_addr;
	msi_page->iova = iova;
	list_add(&msi_page->list, &cookie->msi_page_list);
	return msi_page;

out_free_iova:
	iommu_dma_free_iova(cookie, iova, size);
out_free_page:
	kfree(msi_page);
	return NULL;
}

int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
{
	struct device *dev = msi_desc_to_dev(desc);
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_cookie *cookie;
	struct iommu_dma_msi_page *msi_page;
	unsigned long flags;

	if (!domain || !domain->iova_cookie) {
		desc->iommu_cookie = NULL;
		return 0;
	}

	cookie = domain->iova_cookie;

	/*
	 * We disable IRQs to rule out a possible inversion against
	 * irq_desc_lock if, say, someone tries to retarget the affinity
	 * of an MSI from within an IPI handler.
	 */
	spin_lock_irqsave(&cookie->msi_lock, flags);
	msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
	spin_unlock_irqrestore(&cookie->msi_lock, flags);

	msi_desc_set_iommu_cookie(desc, msi_page);

	if (!msi_page)
		return -ENOMEM;
	return 0;
}

void iommu_dma_compose_msi_msg(struct msi_desc *desc,
			       struct msi_msg *msg)
{
	struct device *dev = msi_desc_to_dev(desc);
	const struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	const struct iommu_dma_msi_page *msi_page;

	msi_page = msi_desc_get_iommu_cookie(desc);

	if (!domain || !domain->iova_cookie || WARN_ON(!msi_page))
		return;

	msg->address_hi = upper_32_bits(msi_page->iova);
	msg->address_lo &= cookie_msi_granule(domain->iova_cookie) - 1;
	msg->address_lo += lower_32_bits(msi_page->iova);
}

static int iommu_dma_init(void)
{
	return iova_cache_get();
}
arch_initcall(iommu_dma_init);
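/*
 * Example (for illustration): an MSI irqchip driver such as the GICv3 ITS
 * calls iommu_dma_prepare_msi() when MSIs are allocated for a device, and
 * iommu_dma_compose_msi_msg() when the MSI message is written. Assuming a
 * 4KiB MSI granule, a doorbell at physical address 0x20020040 whose page
 * is remapped at IOVA 0xf0000000 results in a message address of
 * 0xf0000040: the offset within the doorbell page is preserved and only
 * the page address is rewritten.
 */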