// SPDX-License-Identifier: GPL-2.0-only
/*
 * A fairly generic DMA-API to IOMMU-API glue layer.
 *
 * Copyright (C) 2014-2015 ARM Ltd.
 *
 * based in part on arch/arm/mm/dma-mapping.c:
 * Copyright (C) 2000-2004 Russell King
 */

#include <linux/acpi_iort.h>
#include <linux/atomic.h>
#include <linux/crash_dump.h>
#include <linux/device.h>
#include <linux/dma-direct.h>
#include <linux/dma-iommu.h>
#include <linux/dma-map-ops.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/swiotlb.h>
#include <linux/vmalloc.h>

struct iommu_dma_msi_page {
	struct list_head list;
	dma_addr_t iova;
	phys_addr_t phys;
};

enum iommu_dma_cookie_type {
	IOMMU_DMA_IOVA_COOKIE,
	IOMMU_DMA_MSI_COOKIE,
};

struct iommu_dma_cookie {
	enum iommu_dma_cookie_type type;
	union {
		/* Full allocator for IOMMU_DMA_IOVA_COOKIE */
		struct {
			struct iova_domain iovad;

			struct iova_fq __percpu *fq;	/* Flush queue */
			/* Number of TLB flushes that have been started */
			atomic64_t fq_flush_start_cnt;
			/* Number of TLB flushes that have been finished */
			atomic64_t fq_flush_finish_cnt;
			/* Timer to regularly empty the flush queues */
			struct timer_list fq_timer;
			/* 1 when timer is active, 0 when not */
			atomic_t fq_timer_on;
		};
		/* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
		dma_addr_t msi_iova;
	};
	struct list_head msi_page_list;

	/* Domain for flush queue callback; NULL if flush queue not in use */
	struct iommu_domain *fq_domain;
};

static DEFINE_STATIC_KEY_FALSE(iommu_deferred_attach_enabled);
bool iommu_dma_forcedac __read_mostly;

static int __init iommu_dma_forcedac_setup(char *str)
{
	int ret = kstrtobool(str, &iommu_dma_forcedac);

	if (!ret && iommu_dma_forcedac)
		pr_info("Forcing DAC for PCI devices\n");
	return ret;
}
early_param("iommu.forcedac", iommu_dma_forcedac_setup);

/* Number of entries per flush queue */
#define IOVA_FQ_SIZE	256

/* Timeout (in ms) after which entries are flushed from the queue */
#define IOVA_FQ_TIMEOUT	10

/* Flush queue entry for deferred flushing */
struct iova_fq_entry {
	unsigned long iova_pfn;
	unsigned long pages;
	struct list_head freelist;
	u64 counter; /* Flush counter when this entry was added */
};

/* Per-CPU flush queue structure */
struct iova_fq {
	struct iova_fq_entry entries[IOVA_FQ_SIZE];
	unsigned int head, tail;
	spinlock_t lock;
};

#define fq_ring_for_each(i, fq) \
	for ((i) = (fq)->head; (i) != (fq)->tail; (i) = ((i) + 1) % IOVA_FQ_SIZE)

static inline bool fq_full(struct iova_fq *fq)
{
	assert_spin_locked(&fq->lock);
	return (((fq->tail + 1) % IOVA_FQ_SIZE) == fq->head);
}

static inline unsigned int fq_ring_add(struct iova_fq *fq)
{
	unsigned int idx = fq->tail;

	assert_spin_locked(&fq->lock);

	fq->tail = (idx + 1) % IOVA_FQ_SIZE;

	return idx;
}

static void fq_ring_free(struct iommu_dma_cookie *cookie, struct iova_fq *fq)
{
	u64 counter = atomic64_read(&cookie->fq_flush_finish_cnt);
	unsigned int idx;

	assert_spin_locked(&fq->lock);

	fq_ring_for_each(idx, fq) {

		if (fq->entries[idx].counter >= counter)
			break;

		put_pages_list(&fq->entries[idx].freelist);
		free_iova_fast(&cookie->iovad,
			       fq->entries[idx].iova_pfn,
			       fq->entries[idx].pages);

		fq->head = (fq->head + 1) % IOVA_FQ_SIZE;
	}
}

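/*
 * Rough summary of the counter protocol used above: every queued entry
 * records fq_flush_start_cnt at the time it is added, and fq_ring_free()
 * only releases entries whose counter is below fq_flush_finish_cnt, i.e.
 * entries that were queued before the last completed IOTLB flush.
 */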
static void fq_flush_iotlb(struct iommu_dma_cookie *cookie)
{
	atomic64_inc(&cookie->fq_flush_start_cnt);
	cookie->fq_domain->ops->flush_iotlb_all(cookie->fq_domain);
	atomic64_inc(&cookie->fq_flush_finish_cnt);
}

static void fq_flush_timeout(struct timer_list *t)
{
	struct iommu_dma_cookie *cookie = from_timer(cookie, t, fq_timer);
	int cpu;

	atomic_set(&cookie->fq_timer_on, 0);
	fq_flush_iotlb(cookie);

	for_each_possible_cpu(cpu) {
		unsigned long flags;
		struct iova_fq *fq;

		fq = per_cpu_ptr(cookie->fq, cpu);
		spin_lock_irqsave(&fq->lock, flags);
		fq_ring_free(cookie, fq);
		spin_unlock_irqrestore(&fq->lock, flags);
	}
}

static void queue_iova(struct iommu_dma_cookie *cookie,
		unsigned long pfn, unsigned long pages,
		struct list_head *freelist)
{
	struct iova_fq *fq;
	unsigned long flags;
	unsigned int idx;

	/*
	 * Order against the IOMMU driver's pagetable update from unmapping
	 * @pte, to guarantee that fq_flush_iotlb() observes that if called
	 * from a different CPU before we release the lock below. Full barrier
	 * so it also pairs with iommu_dma_init_fq() to avoid seeing partially
	 * written fq state here.
	 */
	smp_mb();

	fq = raw_cpu_ptr(cookie->fq);
	spin_lock_irqsave(&fq->lock, flags);

	/*
	 * First remove all entries from the flush queue that have already been
	 * flushed out on another CPU. This makes the fq_full() check below
	 * less likely to be true.
	 */
	fq_ring_free(cookie, fq);

	if (fq_full(fq)) {
		fq_flush_iotlb(cookie);
		fq_ring_free(cookie, fq);
	}

	idx = fq_ring_add(fq);

	fq->entries[idx].iova_pfn = pfn;
	fq->entries[idx].pages = pages;
	fq->entries[idx].counter = atomic64_read(&cookie->fq_flush_start_cnt);
	list_splice(freelist, &fq->entries[idx].freelist);

	spin_unlock_irqrestore(&fq->lock, flags);

	/* Avoid false sharing as much as possible. */
	if (!atomic_read(&cookie->fq_timer_on) &&
	    !atomic_xchg(&cookie->fq_timer_on, 1))
		mod_timer(&cookie->fq_timer,
			  jiffies + msecs_to_jiffies(IOVA_FQ_TIMEOUT));
}

static void iommu_dma_free_fq(struct iommu_dma_cookie *cookie)
{
	int cpu, idx;

	if (!cookie->fq)
		return;

	del_timer_sync(&cookie->fq_timer);
	/* The IOVAs will be torn down separately, so just free our queued pages */
	for_each_possible_cpu(cpu) {
		struct iova_fq *fq = per_cpu_ptr(cookie->fq, cpu);

		fq_ring_for_each(idx, fq)
			put_pages_list(&fq->entries[idx].freelist);
	}

	free_percpu(cookie->fq);
}

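/*
 * Flush queues trade IOTLB invalidation overhead for a short window in
 * which stale translations remain valid: IOVAs freed via queue_iova()
 * only become reusable once the deferred flush has run. A domain
 * typically gains a flush queue either at init time (IOMMU_DOMAIN_DMA_FQ)
 * or later, when it is switched to lazy invalidation through sysfs.
 */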
/* sysfs updates are serialised by the mutex of the group owning @domain */
int iommu_dma_init_fq(struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_fq __percpu *queue;
	int i, cpu;

	if (cookie->fq_domain)
		return 0;

	atomic64_set(&cookie->fq_flush_start_cnt, 0);
	atomic64_set(&cookie->fq_flush_finish_cnt, 0);

	queue = alloc_percpu(struct iova_fq);
	if (!queue) {
		pr_warn("iova flush queue initialization failed\n");
		return -ENOMEM;
	}

	for_each_possible_cpu(cpu) {
		struct iova_fq *fq = per_cpu_ptr(queue, cpu);

		fq->head = 0;
		fq->tail = 0;

		spin_lock_init(&fq->lock);

		for (i = 0; i < IOVA_FQ_SIZE; i++)
			INIT_LIST_HEAD(&fq->entries[i].freelist);
	}

	cookie->fq = queue;

	timer_setup(&cookie->fq_timer, fq_flush_timeout, 0);
	atomic_set(&cookie->fq_timer_on, 0);
	/*
	 * Prevent incomplete fq state being observable. Pairs with path from
	 * __iommu_dma_unmap() through iommu_dma_free_iova() to queue_iova()
	 */
	smp_wmb();
	WRITE_ONCE(cookie->fq_domain, domain);
	return 0;
}

static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
{
	if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
		return cookie->iovad.granule;
	return PAGE_SIZE;
}

static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
{
	struct iommu_dma_cookie *cookie;

	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
	if (cookie) {
		INIT_LIST_HEAD(&cookie->msi_page_list);
		cookie->type = type;
	}
	return cookie;
}

/**
 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
 * @domain: IOMMU domain to prepare for DMA-API usage
 */
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
	if (domain->iova_cookie)
		return -EEXIST;

	domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
	if (!domain->iova_cookie)
		return -ENOMEM;

	return 0;
}

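/*
 * Cookie lifecycle, roughly: the IOMMU core acquires a cookie when a
 * DMA-API domain is allocated and releases it with iommu_put_dma_cookie()
 * when the domain is freed; arch/bus code then calls iommu_setup_dma_ops(),
 * which initialises the cookie's IOVA allocator via iommu_dma_init_domain().
 * iommu_get_msi_cookie() below is the lightweight alternative for callers
 * that only want MSI remapping.
 */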
/**
 * iommu_get_msi_cookie - Acquire just MSI remapping resources
 * @domain: IOMMU domain to prepare
 * @base: Start address of IOVA region for MSI mappings
 *
 * Users who manage their own IOVA allocation and do not want DMA API support,
 * but would still like to take advantage of automatic MSI remapping, can use
 * this to initialise their own domain appropriately. Users should reserve a
 * contiguous IOVA region, starting at @base, large enough to accommodate the
 * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
 * used by the devices attached to @domain.
 */
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
	struct iommu_dma_cookie *cookie;

	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;

	if (domain->iova_cookie)
		return -EEXIST;

	cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
	if (!cookie)
		return -ENOMEM;

	cookie->msi_iova = base;
	domain->iova_cookie = cookie;
	return 0;
}
EXPORT_SYMBOL(iommu_get_msi_cookie);

/**
 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
 *          iommu_get_msi_cookie()
 */
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi, *tmp;

	if (!cookie)
		return;

	if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule) {
		iommu_dma_free_fq(cookie);
		put_iova_domain(&cookie->iovad);
	}

	list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
		list_del(&msi->list);
		kfree(msi);
	}
	kfree(cookie);
	domain->iova_cookie = NULL;
}

/**
 * iommu_dma_get_resv_regions - Reserved region driver helper
 * @dev: Device from iommu_get_resv_regions()
 * @list: Reserved region list from iommu_get_resv_regions()
 *
 * IOMMU drivers can use this to implement their .get_resv_regions callback
 * for general non-IOMMU-specific reservations. Currently, this covers GICv3
 * ITS region reservation on ACPI based ARM platforms that may require HW MSI
 * reservation.
 */
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{

	if (!is_of_node(dev_iommu_fwspec_get(dev)->iommu_fwnode))
		iort_iommu_msi_get_resv_regions(dev, list);

}
EXPORT_SYMBOL(iommu_dma_get_resv_regions);

static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
		phys_addr_t start, phys_addr_t end)
{
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_dma_msi_page *msi_page;
	int i, num_pages;

	start -= iova_offset(iovad, start);
	num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);

	for (i = 0; i < num_pages; i++) {
		msi_page = kmalloc(sizeof(*msi_page), GFP_KERNEL);
		if (!msi_page)
			return -ENOMEM;

		msi_page->phys = start;
		msi_page->iova = start;
		INIT_LIST_HEAD(&msi_page->list);
		list_add(&msi_page->list, &cookie->msi_page_list);
		start += iovad->granule;
	}

	return 0;
}

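/*
 * For example, with a single bridge DMA range of 0x40000000..0x7fffffff
 * (bus addresses), the dma_ranges loop below reserves roughly
 * 0x0..0x3fffffff and 0x80000000..~0, so IOVAs are only handed out from
 * addresses the host bridge can actually forward. Outbound MMIO windows
 * are reserved as well, to keep IOVAs from aliasing peer-to-peer addresses.
 */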
static int iova_reserve_pci_windows(struct pci_dev *dev,
		struct iova_domain *iovad)
{
	struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
	struct resource_entry *window;
	unsigned long lo, hi;
	phys_addr_t start = 0, end;

	resource_list_for_each_entry(window, &bridge->windows) {
		if (resource_type(window->res) != IORESOURCE_MEM)
			continue;

		lo = iova_pfn(iovad, window->res->start - window->offset);
		hi = iova_pfn(iovad, window->res->end - window->offset);
		reserve_iova(iovad, lo, hi);
	}

	/* Get reserved DMA windows from host bridge */
	resource_list_for_each_entry(window, &bridge->dma_ranges) {
		end = window->res->start - window->offset;
resv_iova:
		if (end > start) {
			lo = iova_pfn(iovad, start);
			hi = iova_pfn(iovad, end);
			reserve_iova(iovad, lo, hi);
		} else if (end < start) {
			/* dma_ranges list should be sorted */
			dev_err(&dev->dev,
				"Failed to reserve IOVA [%pa-%pa]\n",
				&start, &end);
			return -EINVAL;
		}

		start = window->res->end - window->offset + 1;
		/* If window is last entry */
		if (window->node.next == &bridge->dma_ranges &&
		    end != ~(phys_addr_t)0) {
			end = ~(phys_addr_t)0;
			goto resv_iova;
		}
	}

	return 0;
}

static int iova_reserve_iommu_regions(struct device *dev,
		struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_resv_region *region;
	LIST_HEAD(resv_regions);
	int ret = 0;

	if (dev_is_pci(dev)) {
		ret = iova_reserve_pci_windows(to_pci_dev(dev), iovad);
		if (ret)
			return ret;
	}

	iommu_get_resv_regions(dev, &resv_regions);
	list_for_each_entry(region, &resv_regions, list) {
		unsigned long lo, hi;

		/* We ARE the software that manages these! */
		if (region->type == IOMMU_RESV_SW_MSI)
			continue;

		lo = iova_pfn(iovad, region->start);
		hi = iova_pfn(iovad, region->start + region->length - 1);
		reserve_iova(iovad, lo, hi);

		if (region->type == IOMMU_RESV_MSI)
			ret = cookie_init_hw_msi_region(cookie, region->start,
					region->start + region->length);
		if (ret)
			break;
	}
	iommu_put_resv_regions(dev, &resv_regions);

	return ret;
}

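/*
 * "Untrusted" covers externally-facing devices such as Thunderbolt-attached
 * PCI endpoints. For those, the mapping paths below bounce any buffer that
 * is not aligned to a full IOVA granule through swiotlb, so the device can
 * never see unrelated kernel data that happens to share an IOMMU page.
 */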
static bool dev_is_untrusted(struct device *dev)
{
	return dev_is_pci(dev) && to_pci_dev(dev)->untrusted;
}

static bool dev_use_swiotlb(struct device *dev)
{
	return IS_ENABLED(CONFIG_SWIOTLB) && dev_is_untrusted(dev);
}

/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 * @base: IOVA at which the mappable address space starts
 * @limit: Last address of the IOVA space
 * @dev: Device the domain is being initialised for
 *
 * @base and @limit + 1 should be exact multiples of IOMMU page granularity to
 * avoid rounding surprises. If necessary, we reserve the page at address 0
 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 * any change which could make prior IOVAs invalid will fail.
 */
static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
		dma_addr_t limit, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	unsigned long order, base_pfn;
	struct iova_domain *iovad;
	int ret;

	if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
		return -EINVAL;

	iovad = &cookie->iovad;

	/* Use the smallest supported page size for IOVA granularity */
	order = __ffs(domain->pgsize_bitmap);
	base_pfn = max_t(unsigned long, 1, base >> order);

	/* Check the domain allows at least some access to the device... */
	if (domain->geometry.force_aperture) {
		if (base > domain->geometry.aperture_end ||
		    limit < domain->geometry.aperture_start) {
			pr_warn("specified DMA range outside IOMMU capability\n");
			return -EFAULT;
		}
		/* ...then finally give it a kicking to make sure it fits */
		base_pfn = max_t(unsigned long, base_pfn,
				 domain->geometry.aperture_start >> order);
	}

	/* start_pfn is always nonzero for an already-initialised domain */
	if (iovad->start_pfn) {
		if (1UL << order != iovad->granule ||
		    base_pfn != iovad->start_pfn) {
			pr_warn("Incompatible range for DMA domain\n");
			return -EFAULT;
		}

		return 0;
	}

	init_iova_domain(iovad, 1UL << order, base_pfn);
	ret = iova_domain_init_rcaches(iovad);
	if (ret)
		return ret;

	/* If the FQ fails we can simply fall back to strict mode */
	if (domain->type == IOMMU_DOMAIN_DMA_FQ && iommu_dma_init_fq(domain))
		domain->type = IOMMU_DOMAIN_DMA;

	return iova_reserve_iommu_regions(dev, domain);
}

/**
 * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
 *                    page flags.
 * @dir: Direction of DMA transfer
 * @coherent: Is the DMA master cache-coherent?
 * @attrs: DMA attributes for the mapping
 *
 * Return: corresponding IOMMU API page protection flags
 */
static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
		unsigned long attrs)
{
	int prot = coherent ? IOMMU_CACHE : 0;

	if (attrs & DMA_ATTR_PRIVILEGED)
		prot |= IOMMU_PRIV;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;
	default:
		return 0;
	}
}

static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
		size_t size, u64 dma_limit, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long shift, iova_len, iova = 0;

	if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
		cookie->msi_iova += size;
		return cookie->msi_iova - size;
	}

	shift = iova_shift(iovad);
	iova_len = size >> shift;

	dma_limit = min_not_zero(dma_limit, dev->bus_dma_limit);

	if (domain->geometry.force_aperture)
		dma_limit = min(dma_limit, (u64)domain->geometry.aperture_end);

	/* Try to get PCI devices a SAC address */
	if (dma_limit > DMA_BIT_MASK(32) && !iommu_dma_forcedac && dev_is_pci(dev))
		iova = alloc_iova_fast(iovad, iova_len,
				       DMA_BIT_MASK(32) >> shift, false);

	if (!iova)
		iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
				       true);

	return (dma_addr_t)iova << shift;
}

static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
		dma_addr_t iova, size_t size, struct iommu_iotlb_gather *gather)
{
	struct iova_domain *iovad = &cookie->iovad;

	/* The MSI case is only ever cleaning up its most recent allocation */
	if (cookie->type == IOMMU_DMA_MSI_COOKIE)
		cookie->msi_iova -= size;
	else if (gather && gather->queued)
		queue_iova(cookie, iova_pfn(iovad, iova),
				size >> iova_shift(iovad),
				&gather->freelist);
	else
		free_iova_fast(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad));
}

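/*
 * Note on the unmap path below: if a flush queue is in use, the gather is
 * marked as queued, in which case the IOTLB invalidation, the reuse of the
 * IOVA and the freeing of any IOMMU pagetable pages are all deferred to
 * queue_iova()/fq_flush_timeout() rather than done synchronously here.
 */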
static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
		size_t size)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, dma_addr);
	struct iommu_iotlb_gather iotlb_gather;
	size_t unmapped;

	dma_addr -= iova_off;
	size = iova_align(iovad, size + iova_off);
	iommu_iotlb_gather_init(&iotlb_gather);
	iotlb_gather.queued = READ_ONCE(cookie->fq_domain);

	unmapped = iommu_unmap_fast(domain, dma_addr, size, &iotlb_gather);
	WARN_ON(unmapped != size);

	if (!iotlb_gather.queued)
		iommu_iotlb_sync(domain, &iotlb_gather);
	iommu_dma_free_iova(cookie, dma_addr, size, &iotlb_gather);
}

static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
		size_t size, int prot, u64 dma_mask)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, phys);
	dma_addr_t iova;

	if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
	    iommu_deferred_attach(dev, domain))
		return DMA_MAPPING_ERROR;

	size = iova_align(iovad, size + iova_off);

	iova = iommu_dma_alloc_iova(domain, size, dma_mask, dev);
	if (!iova)
		return DMA_MAPPING_ERROR;

	if (iommu_map_atomic(domain, iova, phys - iova_off, size, prot)) {
		iommu_dma_free_iova(cookie, iova, size, NULL);
		return DMA_MAPPING_ERROR;
	}
	return iova + iova_off;
}

static void __iommu_dma_free_pages(struct page **pages, int count)
{
	while (count--)
		__free_page(pages[count]);
	kvfree(pages);
}

static struct page **__iommu_dma_alloc_pages(struct device *dev,
		unsigned int count, unsigned long order_mask, gfp_t gfp)
{
	struct page **pages;
	unsigned int i = 0, nid = dev_to_node(dev);

	order_mask &= (2U << MAX_ORDER) - 1;
	if (!order_mask)
		return NULL;

	pages = kvcalloc(count, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;

	/* IOMMU can map any pages, so highmem can also be used here */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	/* It makes no sense to muck about with huge pages */
	gfp &= ~__GFP_COMP;

	while (count) {
		struct page *page = NULL;
		unsigned int order_size;

		/*
		 * Higher-order allocations are a convenience rather
		 * than a necessity, hence using __GFP_NORETRY until
		 * falling back to minimum-order allocations.
		 */
		for (order_mask &= (2U << __fls(count)) - 1;
		     order_mask; order_mask &= ~order_size) {
			unsigned int order = __fls(order_mask);
			gfp_t alloc_flags = gfp;

			order_size = 1U << order;
			if (order_mask > order_size)
				alloc_flags |= __GFP_NORETRY;
			page = alloc_pages_node(nid, alloc_flags, order);
			if (!page)
				continue;
			if (order)
				split_page(page, order);
			break;
		}
		if (!page) {
			__iommu_dma_free_pages(pages, i);
			return NULL;
		}
		count -= order_size;
		while (order_size--)
			pages[i++] = page++;
	}
	return pages;
}

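/*
 * For example, with pgsize_bitmap = 4K | 2M | 1G and a 5MB request, the
 * allocator below will typically try 2MB chunks with __GFP_NORETRY and
 * fall back to single 4K pages, while the IOVA region and the IOMMU
 * mapping still cover the full iova_align()ed size in one go.
 */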
/*
 * If size is less than PAGE_SIZE, then a full CPU page will be allocated,
 * but an IOMMU which supports smaller pages might not map the whole thing.
 */
static struct page **__iommu_dma_alloc_noncontiguous(struct device *dev,
		size_t size, struct sg_table *sgt, gfp_t gfp, pgprot_t prot,
		unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	bool coherent = dev_is_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
	struct page **pages;
	dma_addr_t iova;

	if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
	    iommu_deferred_attach(dev, domain))
		return NULL;

	min_size = alloc_sizes & -alloc_sizes;
	if (min_size < PAGE_SIZE) {
		min_size = PAGE_SIZE;
		alloc_sizes |= PAGE_SIZE;
	} else {
		size = ALIGN(size, min_size);
	}
	if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
		alloc_sizes = min_size;

	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	pages = __iommu_dma_alloc_pages(dev, count, alloc_sizes >> PAGE_SHIFT,
					gfp);
	if (!pages)
		return NULL;

	size = iova_align(iovad, size);
	iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
	if (!iova)
		goto out_free_pages;

	if (sg_alloc_table_from_pages(sgt, pages, count, 0, size, GFP_KERNEL))
		goto out_free_iova;

	if (!(ioprot & IOMMU_CACHE)) {
		struct scatterlist *sg;
		int i;

		for_each_sg(sgt->sgl, sg, sgt->orig_nents, i)
			arch_dma_prep_coherent(sg_page(sg), sg->length);
	}

	if (iommu_map_sg_atomic(domain, iova, sgt->sgl, sgt->orig_nents, ioprot)
			< size)
		goto out_free_sg;

	sgt->sgl->dma_address = iova;
	sgt->sgl->dma_length = size;
	return pages;

out_free_sg:
	sg_free_table(sgt);
out_free_iova:
	iommu_dma_free_iova(cookie, iova, size, NULL);
out_free_pages:
	__iommu_dma_free_pages(pages, count);
	return NULL;
}

static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, pgprot_t prot,
		unsigned long attrs)
{
	struct page **pages;
	struct sg_table sgt;
	void *vaddr;

	pages = __iommu_dma_alloc_noncontiguous(dev, size, &sgt, gfp, prot,
						attrs);
	if (!pages)
		return NULL;
	*dma_handle = sgt.sgl->dma_address;
	sg_free_table(&sgt);
	vaddr = dma_common_pages_remap(pages, size, prot,
			__builtin_return_address(0));
	if (!vaddr)
		goto out_unmap;
	return vaddr;

out_unmap:
	__iommu_dma_unmap(dev, *dma_handle, size);
	__iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
	return NULL;
}

#ifdef CONFIG_DMA_REMAP
static struct sg_table *iommu_dma_alloc_noncontiguous(struct device *dev,
		size_t size, enum dma_data_direction dir, gfp_t gfp,
		unsigned long attrs)
{
	struct dma_sgt_handle *sh;

	sh = kmalloc(sizeof(*sh), gfp);
	if (!sh)
		return NULL;

	sh->pages = __iommu_dma_alloc_noncontiguous(dev, size, &sh->sgt, gfp,
						    PAGE_KERNEL, attrs);
	if (!sh->pages) {
		kfree(sh);
		return NULL;
	}
	return &sh->sgt;
}

static void iommu_dma_free_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	struct dma_sgt_handle *sh = sgt_handle(sgt);

	__iommu_dma_unmap(dev, sgt->sgl->dma_address, size);
	__iommu_dma_free_pages(sh->pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
	sg_free_table(&sh->sgt);
	kfree(sh);
}
#endif /* CONFIG_DMA_REMAP */

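/*
 * The ordering in the sync helpers below is deliberate: for_device copies
 * data into the swiotlb bounce slot before the cache maintenance, while
 * for_cpu performs the cache maintenance first and only then copies the
 * bounced data back to the original buffer.
 */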
static void iommu_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (dev_is_dma_coherent(dev) && !dev_use_swiotlb(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_cpu(phys, size, dir);

	if (is_swiotlb_buffer(dev, phys))
		swiotlb_sync_single_for_cpu(dev, phys, size, dir);
}

static void iommu_dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (dev_is_dma_coherent(dev) && !dev_use_swiotlb(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
	if (is_swiotlb_buffer(dev, phys))
		swiotlb_sync_single_for_device(dev, phys, size, dir);

	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_device(phys, size, dir);
}

static void iommu_dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (dev_use_swiotlb(dev))
		for_each_sg(sgl, sg, nelems, i)
			iommu_dma_sync_single_for_cpu(dev, sg_dma_address(sg),
						      sg->length, dir);
	else if (!dev_is_dma_coherent(dev))
		for_each_sg(sgl, sg, nelems, i)
			arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
}

static void iommu_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (dev_use_swiotlb(dev))
		for_each_sg(sgl, sg, nelems, i)
			iommu_dma_sync_single_for_device(dev,
							 sg_dma_address(sg),
							 sg->length, dir);
	else if (!dev_is_dma_coherent(dev))
		for_each_sg(sgl, sg, nelems, i)
			arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
}

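/*
 * Bounce-buffer example: with a 4K IOVA granule, a 0x600-byte buffer at
 * offset 0x100 of its page gets copied into an iova_align()ed swiotlb slot,
 * and the unused remainder of that slot is zeroed below so an untrusted
 * device can never read stale data left by a previous user of the bounce
 * buffer.
 */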
static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	bool coherent = dev_is_dma_coherent(dev);
	int prot = dma_info_to_prot(dir, coherent, attrs);
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	dma_addr_t iova, dma_mask = dma_get_mask(dev);

	/*
	 * If both the physical buffer start address and size are
	 * page aligned, we don't need to use a bounce page.
	 */
	if (dev_use_swiotlb(dev) && iova_offset(iovad, phys | size)) {
		void *padding_start;
		size_t padding_size, aligned_size;

		aligned_size = iova_align(iovad, size);
		phys = swiotlb_tbl_map_single(dev, phys, size, aligned_size,
					      iova_mask(iovad), dir, attrs);

		if (phys == DMA_MAPPING_ERROR)
			return DMA_MAPPING_ERROR;

		/* Cleanup the padding area. */
		padding_start = phys_to_virt(phys);
		padding_size = aligned_size;

		if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
		    (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)) {
			padding_start += size;
			padding_size -= size;
		}

		memset(padding_start, 0, padding_size);
	}

	if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		arch_sync_dma_for_device(phys, size, dir);

	iova = __iommu_dma_map(dev, phys, size, prot, dma_mask);
	if (iova == DMA_MAPPING_ERROR && is_swiotlb_buffer(dev, phys))
		swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
	return iova;
}

static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	phys_addr_t phys;

	phys = iommu_iova_to_phys(domain, dma_handle);
	if (WARN_ON(!phys))
		return;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && !dev_is_dma_coherent(dev))
		arch_sync_dma_for_cpu(phys, size, dir);

	__iommu_dma_unmap(dev, dma_handle, size);

	if (unlikely(is_swiotlb_buffer(dev, phys)))
		swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
}

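/*
 * For example, two granule-aligned 0x1000-byte segments that were laid out
 * back to back by iommu_dma_map_sg() are merged by __finalise_sg() below
 * into a single 0x2000-byte DMA segment, as long as the device's maximum
 * segment size and segment boundary mask allow it.
 */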
/*
 * Prepare a successfully-mapped scatterlist to give back to the caller.
 *
 * At this point the segments are already laid out by iommu_dma_map_sg() to
 * avoid individually crossing any boundaries, so we merely need to check a
 * segment's start address to avoid concatenating across one.
 */
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
		dma_addr_t dma_addr)
{
	struct scatterlist *s, *cur = sg;
	unsigned long seg_mask = dma_get_seg_boundary(dev);
	unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
	int i, count = 0;

	for_each_sg(sg, s, nents, i) {
		/* Restore this segment's original unaligned fields first */
		unsigned int s_iova_off = sg_dma_address(s);
		unsigned int s_length = sg_dma_len(s);
		unsigned int s_iova_len = s->length;

		s->offset += s_iova_off;
		s->length = s_length;
		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;

		/*
		 * Now fill in the real DMA data. If...
		 * - there is a valid output segment to append to
		 * - and this segment starts on an IOVA page boundary
		 * - but doesn't fall at a segment boundary
		 * - and wouldn't make the resulting output segment too long
		 */
		if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
		    (max_len - cur_len >= s_length)) {
			/* ...then concatenate it with the previous one */
			cur_len += s_length;
		} else {
			/* Otherwise start the next output segment */
			if (i > 0)
				cur = sg_next(cur);
			cur_len = s_length;
			count++;

			sg_dma_address(cur) = dma_addr + s_iova_off;
		}

		sg_dma_len(cur) = cur_len;
		dma_addr += s_iova_len;

		if (s_length + s_iova_off < s_iova_len)
			cur_len = 0;
	}
	return count;
}

/*
 * If mapping failed, then just restore the original list,
 * but making sure the DMA fields are invalidated.
 */
static void __invalidate_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_address(s) != DMA_MAPPING_ERROR)
			s->offset += sg_dma_address(s);
		if (sg_dma_len(s))
			s->length = sg_dma_len(s);
		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;
	}
}

static void iommu_dma_unmap_sg_swiotlb(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		iommu_dma_unmap_page(dev, sg_dma_address(s),
				sg_dma_len(s), dir, attrs);
}

static int iommu_dma_map_sg_swiotlb(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		sg_dma_address(s) = iommu_dma_map_page(dev, sg_page(s),
				s->offset, s->length, dir, attrs);
		if (sg_dma_address(s) == DMA_MAPPING_ERROR)
			goto out_unmap;
		sg_dma_len(s) = s->length;
	}

	return nents;

out_unmap:
	iommu_dma_unmap_sg_swiotlb(dev, sg, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
	return -EIO;
}

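/*
 * For example, a 0x1200-byte segment starting at offset 0x300 of its page
 * is temporarily rewritten by iommu_dma_map_sg() below as a granule-aligned
 * 0x2000-byte segment at offset 0, with the original offset and length
 * stashed in the (otherwise unused) DMA address and length fields, to be
 * restored by __finalise_sg() or, on failure, __invalidate_sg().
 */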
/*
 * The DMA API client is passing in a scatterlist which could describe
 * any old buffer layout, but the IOMMU API requires everything to be
 * aligned to IOMMU pages. Hence the need for this complicated bit of
 * impedance-matching, to be able to hand off a suitably-aligned list,
 * but still preserve the original offsets and sizes for the caller.
 */
static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct scatterlist *s, *prev = NULL;
	int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);
	dma_addr_t iova;
	size_t iova_len = 0;
	unsigned long mask = dma_get_seg_boundary(dev);
	ssize_t ret;
	int i;

	if (static_branch_unlikely(&iommu_deferred_attach_enabled)) {
		ret = iommu_deferred_attach(dev, domain);
		if (ret)
			goto out;
	}

	if (dev_use_swiotlb(dev))
		return iommu_dma_map_sg_swiotlb(dev, sg, nents, dir, attrs);

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_sg_for_device(dev, sg, nents, dir);

	/*
	 * Work out how much IOVA space we need, and align the segments to
	 * IOVA granules for the IOMMU driver to handle. With some clever
	 * trickery we can modify the list in-place, but reversibly, by
	 * stashing the unaligned parts in the as-yet-unused DMA fields.
	 */
	for_each_sg(sg, s, nents, i) {
		size_t s_iova_off = iova_offset(iovad, s->offset);
		size_t s_length = s->length;
		size_t pad_len = (mask - iova_len + 1) & mask;

		sg_dma_address(s) = s_iova_off;
		sg_dma_len(s) = s_length;
		s->offset -= s_iova_off;
		s_length = iova_align(iovad, s_length + s_iova_off);
		s->length = s_length;

		/*
		 * Due to the alignment of our single IOVA allocation, we can
		 * depend on these assumptions about the segment boundary mask:
		 * - If mask size >= IOVA size, then the IOVA range cannot
		 *   possibly fall across a boundary, so we don't care.
		 * - If mask size < IOVA size, then the IOVA range must start
		 *   exactly on a boundary, therefore we can lay things out
		 *   based purely on segment lengths without needing to know
		 *   the actual addresses beforehand.
		 * - The mask must be a power of 2, so pad_len == 0 if
		 *   iova_len == 0, thus we cannot dereference prev the first
		 *   time through here (i.e. before it has a meaningful value).
		 */
		if (pad_len && pad_len < s_length - 1) {
			prev->length += pad_len;
			iova_len += pad_len;
		}

		iova_len += s_length;
		prev = s;
	}

	iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
	if (!iova) {
		ret = -ENOMEM;
		goto out_restore_sg;
	}

	/*
	 * We'll leave any physical concatenation to the IOMMU driver's
	 * implementation - it knows better than we do.
	 */
	ret = iommu_map_sg_atomic(domain, iova, sg, nents, prot);
	if (ret < iova_len)
		goto out_free_iova;

	return __finalise_sg(dev, sg, nents, iova);

out_free_iova:
	iommu_dma_free_iova(cookie, iova, iova_len, NULL);
out_restore_sg:
	__invalidate_sg(sg, nents);
out:
	if (ret != -ENOMEM)
		return -EINVAL;
	return ret;
}

static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t start, end;
	struct scatterlist *tmp;
	int i;

	if (dev_use_swiotlb(dev)) {
		iommu_dma_unmap_sg_swiotlb(dev, sg, nents, dir, attrs);
		return;
	}

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir);

	/*
	 * The scatterlist segments are mapped into a single
	 * contiguous IOVA allocation, so this is incredibly easy.
	 */
	start = sg_dma_address(sg);
	for_each_sg(sg_next(sg), tmp, nents - 1, i) {
		if (sg_dma_len(tmp) == 0)
			break;
		sg = tmp;
	}
	end = sg_dma_address(sg) + sg_dma_len(sg);
	__iommu_dma_unmap(dev, start, end - start);
}

static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return __iommu_dma_map(dev, phys, size,
			dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO,
			dma_get_mask(dev));
}

static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	__iommu_dma_unmap(dev, handle, size);
}

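/*
 * iommu_dma_alloc() can return three kinds of buffer: a vmalloc'ed remap
 * of discontiguous pages, an atomic-pool allocation for non-blocking
 * non-coherent callers, or a contiguous lowmem/CMA allocation. Given only
 * the kernel address and size, __iommu_dma_free() below has to work out
 * which of these it is tearing down.
 */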
static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
{
	size_t alloc_size = PAGE_ALIGN(size);
	int count = alloc_size >> PAGE_SHIFT;
	struct page *page = NULL, **pages = NULL;

	/* Non-coherent atomic allocation? Easy */
	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    dma_free_from_pool(dev, cpu_addr, alloc_size))
		return;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
		/*
		 * If the address is remapped, then it's either non-coherent
		 * or highmem CMA, or an iommu_dma_alloc_remap() construction.
		 */
		pages = dma_common_find_pages(cpu_addr);
		if (!pages)
			page = vmalloc_to_page(cpu_addr);
		dma_common_free_remap(cpu_addr, alloc_size);
	} else {
		/* Lowmem means a coherent atomic or CMA allocation */
		page = virt_to_page(cpu_addr);
	}

	if (pages)
		__iommu_dma_free_pages(pages, count);
	if (page)
		dma_free_contiguous(dev, page, alloc_size);
}

static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t handle, unsigned long attrs)
{
	__iommu_dma_unmap(dev, handle, size);
	__iommu_dma_free(dev, size, cpu_addr);
}

static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
		struct page **pagep, gfp_t gfp, unsigned long attrs)
{
	bool coherent = dev_is_dma_coherent(dev);
	size_t alloc_size = PAGE_ALIGN(size);
	int node = dev_to_node(dev);
	struct page *page = NULL;
	void *cpu_addr;

	page = dma_alloc_contiguous(dev, alloc_size, gfp);
	if (!page)
		page = alloc_pages_node(node, gfp, get_order(alloc_size));
	if (!page)
		return NULL;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) {
		pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);

		cpu_addr = dma_common_contiguous_remap(page, alloc_size,
				prot, __builtin_return_address(0));
		if (!cpu_addr)
			goto out_free_pages;

		if (!coherent)
			arch_dma_prep_coherent(page, size);
	} else {
		cpu_addr = page_address(page);
	}

	*pagep = page;
	memset(cpu_addr, 0, alloc_size);
	return cpu_addr;
out_free_pages:
	dma_free_contiguous(dev, page, alloc_size);
	return NULL;
}

static void *iommu_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
	bool coherent = dev_is_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	struct page *page = NULL;
	void *cpu_addr;

	gfp |= __GFP_ZERO;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && gfpflags_allow_blocking(gfp) &&
	    !(attrs & DMA_ATTR_FORCE_CONTIGUOUS)) {
		return iommu_dma_alloc_remap(dev, size, handle, gfp,
				dma_pgprot(dev, PAGE_KERNEL, attrs), attrs);
	}

	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    !gfpflags_allow_blocking(gfp) && !coherent)
		page = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &cpu_addr,
					   gfp, NULL);
	else
		cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs);
	if (!cpu_addr)
		return NULL;

	*handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot,
			dev->coherent_dma_mask);
	if (*handle == DMA_MAPPING_ERROR) {
		__iommu_dma_free(dev, size, cpu_addr);
		return NULL;
	}

	return cpu_addr;
}

static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn, off = vma->vm_pgoff;
	int ret;

	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off >= nr_pages || vma_pages(vma) > nr_pages - off)
		return -ENXIO;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
		struct page **pages = dma_common_find_pages(cpu_addr);

		if (pages)
			return vm_map_pages(vma, pages, nr_pages);
		pfn = vmalloc_to_pfn(cpu_addr);
	} else {
		pfn = page_to_pfn(virt_to_page(cpu_addr));
	}

	return remap_pfn_range(vma, vma->vm_start, pfn + off,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}

static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	struct page *page;
	int ret;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
		struct page **pages = dma_common_find_pages(cpu_addr);

		if (pages) {
			return sg_alloc_table_from_pages(sgt, pages,
					PAGE_ALIGN(size) >> PAGE_SHIFT,
					0, size, GFP_KERNEL);
		}

		page = vmalloc_to_page(cpu_addr);
	} else {
		page = virt_to_page(cpu_addr);
	}

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return ret;
}

static unsigned long iommu_dma_get_merge_boundary(struct device *dev)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);

	return (1UL << __ffs(domain->pgsize_bitmap)) - 1;
}

static const struct dma_map_ops iommu_dma_ops = {
	.alloc = iommu_dma_alloc,
	.free = iommu_dma_free,
	.alloc_pages = dma_common_alloc_pages,
	.free_pages = dma_common_free_pages,
#ifdef CONFIG_DMA_REMAP
	.alloc_noncontiguous = iommu_dma_alloc_noncontiguous,
	.free_noncontiguous = iommu_dma_free_noncontiguous,
#endif
	.mmap = iommu_dma_mmap,
	.get_sgtable = iommu_dma_get_sgtable,
	.map_page = iommu_dma_map_page,
	.unmap_page = iommu_dma_unmap_page,
	.map_sg = iommu_dma_map_sg,
	.unmap_sg = iommu_dma_unmap_sg,
	.sync_single_for_cpu = iommu_dma_sync_single_for_cpu,
	.sync_single_for_device = iommu_dma_sync_single_for_device,
	.sync_sg_for_cpu = iommu_dma_sync_sg_for_cpu,
	.sync_sg_for_device = iommu_dma_sync_sg_for_device,
	.map_resource = iommu_dma_map_resource,
	.unmap_resource = iommu_dma_unmap_resource,
	.get_merge_boundary = iommu_dma_get_merge_boundary,
};

/*
 * The IOMMU core code allocates the default DMA domain, which the underlying
 * IOMMU driver needs to support via the dma-iommu layer.
 */
void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

	if (!domain)
		goto out_err;

	/*
	 * The IOMMU core code allocates the default DMA domain, which the
	 * underlying IOMMU driver needs to support via the dma-iommu layer.
	 */
	if (iommu_is_dma_domain(domain)) {
		if (iommu_dma_init_domain(domain, dma_base, dma_limit, dev))
			goto out_err;
		dev->dma_ops = &iommu_dma_ops;
	}

	return;
out_err:
	pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
		dev_name(dev));
}
EXPORT_SYMBOL_GPL(iommu_setup_dma_ops);

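/*
 * With DMA remapping in effect, a device's MSI write to its doorbell is
 * translated by the IOMMU like any other DMA, so the doorbell's physical
 * page must itself be mapped at some IOVA. The helpers below set up that
 * mapping and rewrite the MSI message to target the IOVA rather than the
 * physical address.
 */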
static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
		phys_addr_t msi_addr, struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi_page;
	dma_addr_t iova;
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
	size_t size = cookie_msi_granule(cookie);

	msi_addr &= ~(phys_addr_t)(size - 1);
	list_for_each_entry(msi_page, &cookie->msi_page_list, list)
		if (msi_page->phys == msi_addr)
			return msi_page;

	msi_page = kzalloc(sizeof(*msi_page), GFP_KERNEL);
	if (!msi_page)
		return NULL;

	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
	if (!iova)
		goto out_free_page;

	if (iommu_map(domain, iova, msi_addr, size, prot))
		goto out_free_iova;

	INIT_LIST_HEAD(&msi_page->list);
	msi_page->phys = msi_addr;
	msi_page->iova = iova;
	list_add(&msi_page->list, &cookie->msi_page_list);
	return msi_page;

out_free_iova:
	iommu_dma_free_iova(cookie, iova, size, NULL);
out_free_page:
	kfree(msi_page);
	return NULL;
}

int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
{
	struct device *dev = msi_desc_to_dev(desc);
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_msi_page *msi_page;
	static DEFINE_MUTEX(msi_prepare_lock); /* see below */

	if (!domain || !domain->iova_cookie) {
		desc->iommu_cookie = NULL;
		return 0;
	}

	/*
	 * In fact the whole prepare operation should already be serialised by
	 * irq_domain_mutex further up the callchain, but that's pretty subtle
	 * on its own, so consider this locking as failsafe documentation...
	 */
	mutex_lock(&msi_prepare_lock);
	msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
	mutex_unlock(&msi_prepare_lock);

	msi_desc_set_iommu_cookie(desc, msi_page);

	if (!msi_page)
		return -ENOMEM;
	return 0;
}

void iommu_dma_compose_msi_msg(struct msi_desc *desc,
			       struct msi_msg *msg)
{
	struct device *dev = msi_desc_to_dev(desc);
	const struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	const struct iommu_dma_msi_page *msi_page;

	msi_page = msi_desc_get_iommu_cookie(desc);

	if (!domain || !domain->iova_cookie || WARN_ON(!msi_page))
		return;

	msg->address_hi = upper_32_bits(msi_page->iova);
	msg->address_lo &= cookie_msi_granule(domain->iova_cookie) - 1;
	msg->address_lo += lower_32_bits(msi_page->iova);
}

static int iommu_dma_init(void)
{
	if (is_kdump_kernel())
		static_branch_enable(&iommu_deferred_attach_enabled);

	return iova_cache_get();
}
arch_initcall(iommu_dma_init);