// SPDX-License-Identifier: GPL-2.0-only
/*
 * A fairly generic DMA-API to IOMMU-API glue layer.
 *
 * Copyright (C) 2014-2015 ARM Ltd.
 *
 * based in part on arch/arm/mm/dma-mapping.c:
 * Copyright (C) 2000-2004 Russell King
 */

#include <linux/acpi_iort.h>
#include <linux/device.h>
#include <linux/dma-contiguous.h>
#include <linux/dma-iommu.h>
#include <linux/dma-noncoherent.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
#include <linux/crash_dump.h>

struct iommu_dma_msi_page {
	struct list_head	list;
	dma_addr_t		iova;
	phys_addr_t		phys;
};

enum iommu_dma_cookie_type {
	IOMMU_DMA_IOVA_COOKIE,
	IOMMU_DMA_MSI_COOKIE,
};

struct iommu_dma_cookie {
	enum iommu_dma_cookie_type	type;
	union {
		/* Full allocator for IOMMU_DMA_IOVA_COOKIE */
		struct iova_domain	iovad;
		/* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
		dma_addr_t		msi_iova;
	};
	struct list_head		msi_page_list;
	spinlock_t			msi_lock;

	/* Domain for flush queue callback; NULL if flush queue not in use */
	struct iommu_domain		*fq_domain;
};

static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
{
	if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
		return cookie->iovad.granule;
	return PAGE_SIZE;
}

static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
{
	struct iommu_dma_cookie *cookie;

	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
	if (cookie) {
		spin_lock_init(&cookie->msi_lock);
		INIT_LIST_HEAD(&cookie->msi_page_list);
		cookie->type = type;
	}
	return cookie;
}

/**
 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
 * @domain: IOMMU domain to prepare for DMA-API usage
 *
 * IOMMU drivers should normally call this from their domain_alloc
 * callback when domain->type == IOMMU_DOMAIN_DMA.
 */
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
	if (domain->iova_cookie)
		return -EEXIST;

	domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
	if (!domain->iova_cookie)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(iommu_get_dma_cookie);
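/*
 * Example (illustrative sketch, not from mainline): an IOMMU driver would
 * typically pair this helper with its domain_alloc callback, roughly as
 * below; "my_domain" and its container layout are hypothetical placeholders.
 *
 *	static struct iommu_domain *my_domain_alloc(unsigned int type)
 *	{
 *		struct my_domain *md;
 *
 *		md = kzalloc(sizeof(*md), GFP_KERNEL);
 *		if (!md)
 *			return NULL;
 *		if (type == IOMMU_DOMAIN_DMA &&
 *		    iommu_get_dma_cookie(&md->domain)) {
 *			kfree(md);
 *			return NULL;
 *		}
 *		return &md->domain;
 *	}
 *
 * The matching domain_free callback calls iommu_put_dma_cookie() before
 * freeing the domain itself.
 */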
/**
 * iommu_get_msi_cookie - Acquire just MSI remapping resources
 * @domain: IOMMU domain to prepare
 * @base: Start address of IOVA region for MSI mappings
 *
 * Users who manage their own IOVA allocation and do not want DMA API support,
 * but would still like to take advantage of automatic MSI remapping, can use
 * this to initialise their own domain appropriately. Users should reserve a
 * contiguous IOVA region, starting at @base, large enough to accommodate the
 * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
 * used by the devices attached to @domain.
 */
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
	struct iommu_dma_cookie *cookie;

	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;

	if (domain->iova_cookie)
		return -EEXIST;

	cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
	if (!cookie)
		return -ENOMEM;

	cookie->msi_iova = base;
	domain->iova_cookie = cookie;
	return 0;
}
EXPORT_SYMBOL(iommu_get_msi_cookie);

/**
 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
 *          iommu_get_msi_cookie()
 *
 * IOMMU drivers should normally call this from their domain_free callback.
 */
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi, *tmp;

	if (!cookie)
		return;

	if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule)
		put_iova_domain(&cookie->iovad);

	list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
		list_del(&msi->list);
		kfree(msi);
	}
	kfree(cookie);
	domain->iova_cookie = NULL;
}
EXPORT_SYMBOL(iommu_put_dma_cookie);

/**
 * iommu_dma_get_resv_regions - Reserved region driver helper
 * @dev: Device from iommu_get_resv_regions()
 * @list: Reserved region list from iommu_get_resv_regions()
 *
 * IOMMU drivers can use this to implement their .get_resv_regions callback
 * for general non-IOMMU-specific reservations. Currently, this covers GICv3
 * ITS region reservation on ACPI based ARM platforms that may require HW MSI
 * reservation.
 */
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{
	if (!is_of_node(dev_iommu_fwspec_get(dev)->iommu_fwnode))
		iort_iommu_msi_get_resv_regions(dev, list);
}
EXPORT_SYMBOL(iommu_dma_get_resv_regions);
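/*
 * Example (illustrative sketch, not from mainline): a driver with no
 * device-specific reservations can plug the helper above straight into its
 * ops table; "my_iommu_ops" is a hypothetical placeholder.
 *
 *	static const struct iommu_ops my_iommu_ops = {
 *		...
 *		.get_resv_regions	= iommu_dma_get_resv_regions,
 *		...
 *	};
 *
 * A driver that does have regions of its own would add them to the list
 * first and then call iommu_dma_get_resv_regions() for the common ones.
 */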
static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
		phys_addr_t start, phys_addr_t end)
{
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_dma_msi_page *msi_page;
	int i, num_pages;

	start -= iova_offset(iovad, start);
	num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);

	msi_page = kcalloc(num_pages, sizeof(*msi_page), GFP_KERNEL);
	if (!msi_page)
		return -ENOMEM;

	for (i = 0; i < num_pages; i++) {
		msi_page[i].phys = start;
		msi_page[i].iova = start;
		INIT_LIST_HEAD(&msi_page[i].list);
		list_add(&msi_page[i].list, &cookie->msi_page_list);
		start += iovad->granule;
	}

	return 0;
}

static int iova_reserve_pci_windows(struct pci_dev *dev,
		struct iova_domain *iovad)
{
	struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
	struct resource_entry *window;
	unsigned long lo, hi;
	phys_addr_t start = 0, end;

	resource_list_for_each_entry(window, &bridge->windows) {
		if (resource_type(window->res) != IORESOURCE_MEM)
			continue;

		lo = iova_pfn(iovad, window->res->start - window->offset);
		hi = iova_pfn(iovad, window->res->end - window->offset);
		reserve_iova(iovad, lo, hi);
	}

	/* Get reserved DMA windows from host bridge */
	resource_list_for_each_entry(window, &bridge->dma_ranges) {
		end = window->res->start - window->offset;
resv_iova:
		if (end > start) {
			lo = iova_pfn(iovad, start);
			hi = iova_pfn(iovad, end);
			reserve_iova(iovad, lo, hi);
		} else {
			/* dma_ranges list should be sorted */
			dev_err(&dev->dev, "Failed to reserve IOVA\n");
			return -EINVAL;
		}

		start = window->res->end - window->offset + 1;
		/* If window is last entry */
		if (window->node.next == &bridge->dma_ranges &&
		    end != ~(phys_addr_t)0) {
			end = ~(phys_addr_t)0;
			goto resv_iova;
		}
	}

	return 0;
}

static int iova_reserve_iommu_regions(struct device *dev,
		struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_resv_region *region;
	LIST_HEAD(resv_regions);
	int ret = 0;

	if (dev_is_pci(dev)) {
		ret = iova_reserve_pci_windows(to_pci_dev(dev), iovad);
		if (ret)
			return ret;
	}

	iommu_get_resv_regions(dev, &resv_regions);
	list_for_each_entry(region, &resv_regions, list) {
		unsigned long lo, hi;

		/* We ARE the software that manages these! */
		if (region->type == IOMMU_RESV_SW_MSI)
			continue;

		lo = iova_pfn(iovad, region->start);
		hi = iova_pfn(iovad, region->start + region->length - 1);
		reserve_iova(iovad, lo, hi);

		if (region->type == IOMMU_RESV_MSI)
			ret = cookie_init_hw_msi_region(cookie, region->start,
					region->start + region->length);
		if (ret)
			break;
	}
	iommu_put_resv_regions(dev, &resv_regions);

	return ret;
}

static void iommu_dma_flush_iotlb_all(struct iova_domain *iovad)
{
	struct iommu_dma_cookie *cookie;
	struct iommu_domain *domain;

	cookie = container_of(iovad, struct iommu_dma_cookie, iovad);
	domain = cookie->fq_domain;
	/*
	 * The IOMMU driver supporting DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE
	 * implies that ops->flush_iotlb_all must be non-NULL.
	 */
	domain->ops->flush_iotlb_all(domain);
}

/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 * @base: IOVA at which the mappable address space starts
 * @size: Size of IOVA space
 * @dev: Device the domain is being initialised for
 *
 * @base and @size should be exact multiples of IOMMU page granularity to
 * avoid rounding surprises. If necessary, we reserve the page at address 0
 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 * any change which could make prior IOVAs invalid will fail.
 */
static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
		u64 size, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	unsigned long order, base_pfn;
	struct iova_domain *iovad;
	int attr;

	if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
		return -EINVAL;

	iovad = &cookie->iovad;

	/* Use the smallest supported page size for IOVA granularity */
	order = __ffs(domain->pgsize_bitmap);
	base_pfn = max_t(unsigned long, 1, base >> order);

	/* Check the domain allows at least some access to the device... */
	if (domain->geometry.force_aperture) {
		if (base > domain->geometry.aperture_end ||
		    base + size <= domain->geometry.aperture_start) {
			pr_warn("specified DMA range outside IOMMU capability\n");
			return -EFAULT;
		}
		/* ...then finally give it a kicking to make sure it fits */
		base_pfn = max_t(unsigned long, base_pfn,
				domain->geometry.aperture_start >> order);
	}

	/* start_pfn is always nonzero for an already-initialised domain */
	if (iovad->start_pfn) {
		if (1UL << order != iovad->granule ||
		    base_pfn != iovad->start_pfn) {
			pr_warn("Incompatible range for DMA domain\n");
			return -EFAULT;
		}

		return 0;
	}

	init_iova_domain(iovad, 1UL << order, base_pfn);

	if (!cookie->fq_domain && !iommu_domain_get_attr(domain,
			DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr) && attr) {
		cookie->fq_domain = domain;
		init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all, NULL);
	}

	if (!dev)
		return 0;

	return iova_reserve_iommu_regions(dev, domain);
}

static int iommu_dma_deferred_attach(struct device *dev,
		struct iommu_domain *domain)
{
	const struct iommu_ops *ops = domain->ops;

	if (!is_kdump_kernel())
		return 0;

	if (unlikely(ops->is_attach_deferred &&
			ops->is_attach_deferred(domain, dev)))
		return iommu_attach_device(domain, dev);

	return 0;
}

/**
 * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
 *                    page flags.
 * @dir: Direction of DMA transfer
 * @coherent: Is the DMA master cache-coherent?
 * @attrs: DMA attributes for the mapping
 *
 * Return: corresponding IOMMU API page protection flags
 */
static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
		unsigned long attrs)
{
	int prot = coherent ? IOMMU_CACHE : 0;

	if (attrs & DMA_ATTR_PRIVILEGED)
		prot |= IOMMU_PRIV;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;
	default:
		return 0;
	}
}
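/*
 * Worked example (for illustration): a non-coherent master doing a
 * DMA_TO_DEVICE transfer with DMA_ATTR_PRIVILEGED set gets
 * prot = IOMMU_PRIV | IOMMU_READ, i.e. a privileged, read-only mapping
 * without IOMMU_CACHE; the same transfer for a coherent master would
 * additionally carry IOMMU_CACHE.
 */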
static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
		size_t size, u64 dma_limit, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long shift, iova_len, iova = 0;

	if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
		cookie->msi_iova += size;
		return cookie->msi_iova - size;
	}

	shift = iova_shift(iovad);
	iova_len = size >> shift;
	/*
	 * Freeing non-power-of-two-sized allocations back into the IOVA caches
	 * will come back to bite us badly, so we have to waste a bit of space
	 * rounding up anything cacheable to make sure that can't happen. The
	 * order of the unadjusted size will still match upon freeing.
	 */
	if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
		iova_len = roundup_pow_of_two(iova_len);

	dma_limit = min_not_zero(dma_limit, dev->bus_dma_limit);

	if (domain->geometry.force_aperture)
		dma_limit = min(dma_limit, (u64)domain->geometry.aperture_end);

	/* Try to get PCI devices a SAC address */
	if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
		iova = alloc_iova_fast(iovad, iova_len,
				       DMA_BIT_MASK(32) >> shift, false);

	if (!iova)
		iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
				       true);

	return (dma_addr_t)iova << shift;
}

static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
		dma_addr_t iova, size_t size)
{
	struct iova_domain *iovad = &cookie->iovad;

	/* The MSI case is only ever cleaning up its most recent allocation */
	if (cookie->type == IOMMU_DMA_MSI_COOKIE)
		cookie->msi_iova -= size;
	else if (cookie->fq_domain)	/* non-strict mode */
		queue_iova(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad), 0);
	else
		free_iova_fast(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad));
}

static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
		size_t size)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, dma_addr);
	struct iommu_iotlb_gather iotlb_gather;
	size_t unmapped;

	dma_addr -= iova_off;
	size = iova_align(iovad, size + iova_off);
	iommu_iotlb_gather_init(&iotlb_gather);

	unmapped = iommu_unmap_fast(domain, dma_addr, size, &iotlb_gather);
	WARN_ON(unmapped != size);

	if (!cookie->fq_domain)
		iommu_tlb_sync(domain, &iotlb_gather);
	iommu_dma_free_iova(cookie, dma_addr, size);
}

static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
		size_t size, int prot, u64 dma_mask)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, phys);
	dma_addr_t iova;

	if (unlikely(iommu_dma_deferred_attach(dev, domain)))
		return DMA_MAPPING_ERROR;

	size = iova_align(iovad, size + iova_off);

	iova = iommu_dma_alloc_iova(domain, size, dma_mask, dev);
	if (!iova)
		return DMA_MAPPING_ERROR;

	if (iommu_map_atomic(domain, iova, phys - iova_off, size, prot)) {
		iommu_dma_free_iova(cookie, iova, size);
		return DMA_MAPPING_ERROR;
	}
	return iova + iova_off;
}
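/*
 * Worked example (for illustration, assuming a 4K IOVA granule):
 * __iommu_dma_map() of phys = 0x80001234, size = 0x100 computes
 * iova_off = 0x234, maps one full granule (0x1000 bytes) starting at
 * phys 0x80001000, and returns iova + 0x234. __iommu_dma_unmap()
 * recovers the same alignment from the handle, so callers never see it.
 */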
static void __iommu_dma_free_pages(struct page **pages, int count)
{
	while (count--)
		__free_page(pages[count]);
	kvfree(pages);
}

static struct page **__iommu_dma_alloc_pages(struct device *dev,
		unsigned int count, unsigned long order_mask, gfp_t gfp)
{
	struct page **pages;
	unsigned int i = 0, nid = dev_to_node(dev);

	order_mask &= (2U << MAX_ORDER) - 1;
	if (!order_mask)
		return NULL;

	pages = kvzalloc(count * sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;

	/* IOMMU can map any pages, so highmem can also be used here */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	while (count) {
		struct page *page = NULL;
		unsigned int order_size;

		/*
		 * Higher-order allocations are a convenience rather
		 * than a necessity, hence using __GFP_NORETRY until
		 * falling back to minimum-order allocations.
		 */
		for (order_mask &= (2U << __fls(count)) - 1;
		     order_mask; order_mask &= ~order_size) {
			unsigned int order = __fls(order_mask);
			gfp_t alloc_flags = gfp;

			order_size = 1U << order;
			if (order_mask > order_size)
				alloc_flags |= __GFP_NORETRY;
			page = alloc_pages_node(nid, alloc_flags, order);
			if (!page)
				continue;
			if (!order)
				break;
			if (!PageCompound(page)) {
				split_page(page, order);
				break;
			} else if (!split_huge_page(page)) {
				break;
			}
			__free_pages(page, order);
		}
		if (!page) {
			__iommu_dma_free_pages(pages, i);
			return NULL;
		}
		count -= order_size;
		while (order_size--)
			pages[i++] = page++;
	}
	return pages;
}

/**
 * iommu_dma_alloc_remap - Allocate and map a buffer contiguous in IOVA space
 * @dev: Device to allocate memory for. Must be a real device
 *	 attached to an iommu_dma_domain
 * @size: Size of buffer in bytes
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 * @attrs: DMA attributes for this allocation
 *
 * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
 * but an IOMMU which supports smaller pages might not map the whole thing.
 *
 * Return: Mapped virtual address, or NULL on failure.
 */
static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	bool coherent = dev_is_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
	struct page **pages;
	struct sg_table sgt;
	dma_addr_t iova;
	void *vaddr;

	*dma_handle = DMA_MAPPING_ERROR;

	if (unlikely(iommu_dma_deferred_attach(dev, domain)))
		return NULL;

	min_size = alloc_sizes & -alloc_sizes;
	if (min_size < PAGE_SIZE) {
		min_size = PAGE_SIZE;
		alloc_sizes |= PAGE_SIZE;
	} else {
		size = ALIGN(size, min_size);
	}
	if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
		alloc_sizes = min_size;

	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	pages = __iommu_dma_alloc_pages(dev, count, alloc_sizes >> PAGE_SHIFT,
					gfp);
	if (!pages)
		return NULL;

	size = iova_align(iovad, size);
	iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
	if (!iova)
		goto out_free_pages;

	if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
		goto out_free_iova;

	if (!(ioprot & IOMMU_CACHE)) {
		struct scatterlist *sg;
		int i;

		for_each_sg(sgt.sgl, sg, sgt.orig_nents, i)
			arch_dma_prep_coherent(sg_page(sg), sg->length);
	}

	if (iommu_map_sg_atomic(domain, iova, sgt.sgl, sgt.orig_nents, ioprot)
			< size)
		goto out_free_sg;

	vaddr = dma_common_pages_remap(pages, size, prot,
			__builtin_return_address(0));
	if (!vaddr)
		goto out_unmap;

	*dma_handle = iova;
	sg_free_table(&sgt);
	return vaddr;

out_unmap:
	__iommu_dma_unmap(dev, iova, size);
out_free_sg:
	sg_free_table(&sgt);
out_free_iova:
	iommu_dma_free_iova(cookie, iova, size);
out_free_pages:
	__iommu_dma_free_pages(pages, count);
	return NULL;
}

/**
 * __iommu_dma_mmap - Map a buffer into provided user VMA
 * @pages: Array representing buffer from __iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @vma: VMA describing requested userspace mapping
 *
 * Maps the pages of the buffer in @pages into @vma. The caller is responsible
 * for verifying the correct size and protection of @vma beforehand.
 */
static int __iommu_dma_mmap(struct page **pages, size_t size,
		struct vm_area_struct *vma)
{
	return vm_map_pages(vma, pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
}

static void iommu_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (dev_is_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
	arch_sync_dma_for_cpu(phys, size, dir);
}

static void iommu_dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (dev_is_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
	arch_sync_dma_for_device(phys, size, dir);
}

static void iommu_dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (dev_is_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
}

static void iommu_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (dev_is_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
}
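/*
 * Example (illustrative sketch, not from mainline): these hooks back the
 * generic DMA API rather than being called directly. A driver touching a
 * streaming buffer between device accesses would write something like the
 * following, where "dev", "handle", "buf" and "len" are hypothetical
 * placeholders:
 *
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	parse(buf, len);	// the CPU may now safely read buf
 *	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 *
 * For a coherent device each call degenerates to the early return above.
 */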
static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	bool coherent = dev_is_dma_coherent(dev);
	int prot = dma_info_to_prot(dir, coherent, attrs);
	dma_addr_t dma_handle;

	dma_handle = __iommu_dma_map(dev, phys, size, prot, dma_get_mask(dev));
	if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
	    dma_handle != DMA_MAPPING_ERROR)
		arch_sync_dma_for_device(phys, size, dir);
	return dma_handle;
}

static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_single_for_cpu(dev, dma_handle, size, dir);
	__iommu_dma_unmap(dev, dma_handle, size);
}

/*
 * Prepare a successfully-mapped scatterlist to give back to the caller.
 *
 * At this point the segments are already laid out by iommu_dma_map_sg() to
 * avoid individually crossing any boundaries, so we merely need to check a
 * segment's start address to avoid concatenating across one.
 */
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
		dma_addr_t dma_addr)
{
	struct scatterlist *s, *cur = sg;
	unsigned long seg_mask = dma_get_seg_boundary(dev);
	unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
	int i, count = 0;

	for_each_sg(sg, s, nents, i) {
		/* Restore this segment's original unaligned fields first */
		unsigned int s_iova_off = sg_dma_address(s);
		unsigned int s_length = sg_dma_len(s);
		unsigned int s_iova_len = s->length;

		s->offset += s_iova_off;
		s->length = s_length;
		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;

		/*
		 * Now fill in the real DMA data. If...
		 * - there is a valid output segment to append to
		 * - and this segment starts on an IOVA page boundary
		 * - but doesn't fall at a segment boundary
		 * - and wouldn't make the resulting output segment too long
		 */
		if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
		    (max_len - cur_len >= s_length)) {
			/* ...then concatenate it with the previous one */
			cur_len += s_length;
		} else {
			/* Otherwise start the next output segment */
			if (i > 0)
				cur = sg_next(cur);
			cur_len = s_length;
			count++;

			sg_dma_address(cur) = dma_addr + s_iova_off;
		}

		sg_dma_len(cur) = cur_len;
		dma_addr += s_iova_len;

		if (s_length + s_iova_off < s_iova_len)
			cur_len = 0;
	}
	return count;
}
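/*
 * Worked example (for illustration, 4K granule): two 4K-aligned 0x1000-byte
 * segments mapped back-to-back in IOVA space come out of __finalise_sg() as
 * a single 0x2000-byte DMA segment, provided neither the segment boundary
 * mask nor max_seg_size intervenes; a segment starting at a nonzero IOVA
 * offset always opens a new output segment instead.
 */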
/*
 * If mapping failed, then just restore the original list,
 * but making sure the DMA fields are invalidated.
 */
static void __invalidate_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_address(s) != DMA_MAPPING_ERROR)
			s->offset += sg_dma_address(s);
		if (sg_dma_len(s))
			s->length = sg_dma_len(s);
		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;
	}
}

/*
 * The DMA API client is passing in a scatterlist which could describe
 * any old buffer layout, but the IOMMU API requires everything to be
 * aligned to IOMMU pages. Hence the need for this complicated bit of
 * impedance-matching, to be able to hand off a suitably-aligned list,
 * but still preserve the original offsets and sizes for the caller.
 */
static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct scatterlist *s, *prev = NULL;
	int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);
	dma_addr_t iova;
	size_t iova_len = 0;
	unsigned long mask = dma_get_seg_boundary(dev);
	int i;

	if (unlikely(iommu_dma_deferred_attach(dev, domain)))
		return 0;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_sg_for_device(dev, sg, nents, dir);

	/*
	 * Work out how much IOVA space we need, and align the segments to
	 * IOVA granules for the IOMMU driver to handle. With some clever
	 * trickery we can modify the list in-place, but reversibly, by
	 * stashing the unaligned parts in the as-yet-unused DMA fields.
	 */
	for_each_sg(sg, s, nents, i) {
		size_t s_iova_off = iova_offset(iovad, s->offset);
		size_t s_length = s->length;
		size_t pad_len = (mask - iova_len + 1) & mask;

		sg_dma_address(s) = s_iova_off;
		sg_dma_len(s) = s_length;
		s->offset -= s_iova_off;
		s_length = iova_align(iovad, s_length + s_iova_off);
		s->length = s_length;

		/*
		 * Due to the alignment of our single IOVA allocation, we can
		 * depend on these assumptions about the segment boundary mask:
		 * - If mask size >= IOVA size, then the IOVA range cannot
		 *   possibly fall across a boundary, so we don't care.
		 * - If mask size < IOVA size, then the IOVA range must start
		 *   exactly on a boundary, therefore we can lay things out
		 *   based purely on segment lengths without needing to know
		 *   the actual addresses beforehand.
		 * - The mask must be a power of 2, so pad_len == 0 if
		 *   iova_len == 0, thus we cannot dereference prev the first
		 *   time through here (i.e. before it has a meaningful value).
		 */
		if (pad_len && pad_len < s_length - 1) {
			prev->length += pad_len;
			iova_len += pad_len;
		}

		iova_len += s_length;
		prev = s;
	}

	iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
	if (!iova)
		goto out_restore_sg;

	/*
	 * We'll leave any physical concatenation to the IOMMU driver's
	 * implementation - it knows better than we do.
	 */
	if (iommu_map_sg_atomic(domain, iova, sg, nents, prot) < iova_len)
		goto out_free_iova;

	return __finalise_sg(dev, sg, nents, iova);

out_free_iova:
	iommu_dma_free_iova(cookie, iova, iova_len);
out_restore_sg:
	__invalidate_sg(sg, nents);
	return 0;
}

static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t start, end;
	struct scatterlist *tmp;
	int i;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir);

	/*
	 * The scatterlist segments are mapped into a single
	 * contiguous IOVA allocation, so this is incredibly easy.
	 */
	start = sg_dma_address(sg);
	for_each_sg(sg_next(sg), tmp, nents - 1, i) {
		if (sg_dma_len(tmp) == 0)
			break;
		sg = tmp;
	}
	end = sg_dma_address(sg) + sg_dma_len(sg);
	__iommu_dma_unmap(dev, start, end - start);
}

static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return __iommu_dma_map(dev, phys, size,
			dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO,
			dma_get_mask(dev));
}

static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	__iommu_dma_unmap(dev, handle, size);
}
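/*
 * Example (illustrative sketch, not from mainline): the impedance-matching
 * above is invisible to callers, who just use the generic API; "dev" and
 * "sgt" are hypothetical placeholders.
 *
 *	int nents = dma_map_sg(dev, sgt->sgl, sgt->orig_nents,
 *			       DMA_TO_DEVICE);
 *	if (!nents)
 *		return -EIO;
 *	// program the device from sg_dma_address()/sg_dma_len() of the
 *	// first nents segments, then later:
 *	dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, DMA_TO_DEVICE);
 *
 * Note that dma_unmap_sg() takes the original nent count, not the value
 * returned by dma_map_sg().
 */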
static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
{
	size_t alloc_size = PAGE_ALIGN(size);
	int count = alloc_size >> PAGE_SHIFT;
	struct page *page = NULL, **pages = NULL;

	/* Non-coherent atomic allocation? Easy */
	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    dma_free_from_pool(cpu_addr, alloc_size))
		return;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
		/*
		 * If the address is remapped, then it's either non-coherent
		 * or highmem CMA, or an iommu_dma_alloc_remap() construction.
		 */
		pages = dma_common_find_pages(cpu_addr);
		if (!pages)
			page = vmalloc_to_page(cpu_addr);
		dma_common_free_remap(cpu_addr, alloc_size);
	} else {
		/* Lowmem means a coherent atomic or CMA allocation */
		page = virt_to_page(cpu_addr);
	}

	if (pages)
		__iommu_dma_free_pages(pages, count);
	if (page)
		dma_free_contiguous(dev, page, alloc_size);
}

static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t handle, unsigned long attrs)
{
	__iommu_dma_unmap(dev, handle, size);
	__iommu_dma_free(dev, size, cpu_addr);
}

static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
		struct page **pagep, gfp_t gfp, unsigned long attrs)
{
	bool coherent = dev_is_dma_coherent(dev);
	size_t alloc_size = PAGE_ALIGN(size);
	int node = dev_to_node(dev);
	struct page *page = NULL;
	void *cpu_addr;

	page = dma_alloc_contiguous(dev, alloc_size, gfp);
	if (!page)
		page = alloc_pages_node(node, gfp, get_order(alloc_size));
	if (!page)
		return NULL;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) {
		pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);

		cpu_addr = dma_common_contiguous_remap(page, alloc_size,
				prot, __builtin_return_address(0));
		if (!cpu_addr)
			goto out_free_pages;

		if (!coherent)
			arch_dma_prep_coherent(page, size);
	} else {
		cpu_addr = page_address(page);
	}

	*pagep = page;
	memset(cpu_addr, 0, alloc_size);
	return cpu_addr;
out_free_pages:
	dma_free_contiguous(dev, page, alloc_size);
	return NULL;
}

static void *iommu_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
	bool coherent = dev_is_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	struct page *page = NULL;
	void *cpu_addr;

	gfp |= __GFP_ZERO;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && gfpflags_allow_blocking(gfp) &&
	    !(attrs & DMA_ATTR_FORCE_CONTIGUOUS))
		return iommu_dma_alloc_remap(dev, size, handle, gfp, attrs);

	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    !gfpflags_allow_blocking(gfp) && !coherent)
		cpu_addr = dma_alloc_from_pool(PAGE_ALIGN(size), &page, gfp);
	else
		cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs);
	if (!cpu_addr)
		return NULL;

	*handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot,
			dev->coherent_dma_mask);
	if (*handle == DMA_MAPPING_ERROR) {
		__iommu_dma_free(dev, size, cpu_addr);
		return NULL;
	}

	return cpu_addr;
}
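/*
 * Summary of the paths through iommu_dma_alloc() above (for illustration):
 * - blocking gfp with CONFIG_DMA_REMAP and no DMA_ATTR_FORCE_CONTIGUOUS:
 *   iommu_dma_alloc_remap(), physically scattered but IOVA-contiguous;
 * - non-blocking gfp on a non-coherent device with CONFIG_DMA_DIRECT_REMAP:
 *   the pre-remapped atomic pool;
 * - otherwise: iommu_dma_alloc_pages(), i.e. CMA or a plain physically
 *   contiguous page allocation.
 * The latter two then publish the buffer with a single __iommu_dma_map().
 */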
108606d60728SChristoph Hellwig static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
108706d60728SChristoph Hellwig void *cpu_addr, dma_addr_t dma_addr, size_t size,
108806d60728SChristoph Hellwig unsigned long attrs)
108906d60728SChristoph Hellwig {
10903fb3378bSChristoph Hellwig struct page *page;
10913fb3378bSChristoph Hellwig int ret;
109206d60728SChristoph Hellwig
1093e6475eb0SChristoph Hellwig if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
10945cf45379SChristoph Hellwig struct page **pages = dma_common_find_pages(cpu_addr);
10953fb3378bSChristoph Hellwig
10963fb3378bSChristoph Hellwig if (pages) {
10973fb3378bSChristoph Hellwig return sg_alloc_table_from_pages(sgt, pages,
10983fb3378bSChristoph Hellwig PAGE_ALIGN(size) >> PAGE_SHIFT,
10993fb3378bSChristoph Hellwig 0, size, GFP_KERNEL);
110006d60728SChristoph Hellwig }
110106d60728SChristoph Hellwig
11023fb3378bSChristoph Hellwig page = vmalloc_to_page(cpu_addr);
11033fb3378bSChristoph Hellwig } else {
11043fb3378bSChristoph Hellwig page = virt_to_page(cpu_addr);
110506d60728SChristoph Hellwig }
110606d60728SChristoph Hellwig
11073fb3378bSChristoph Hellwig ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
11083fb3378bSChristoph Hellwig if (!ret)
11093fb3378bSChristoph Hellwig sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
11103fb3378bSChristoph Hellwig return ret;
111106d60728SChristoph Hellwig }
111206d60728SChristoph Hellwig
1113158a6d3cSYoshihiro Shimoda static unsigned long iommu_dma_get_merge_boundary(struct device *dev)
1114158a6d3cSYoshihiro Shimoda {
1115158a6d3cSYoshihiro Shimoda struct iommu_domain *domain = iommu_get_dma_domain(dev);
1116158a6d3cSYoshihiro Shimoda
1117158a6d3cSYoshihiro Shimoda return (1UL << __ffs(domain->pgsize_bitmap)) - 1;
1118158a6d3cSYoshihiro Shimoda }
1119158a6d3cSYoshihiro Shimoda
112006d60728SChristoph Hellwig static const struct dma_map_ops iommu_dma_ops = {
112106d60728SChristoph Hellwig .alloc = iommu_dma_alloc,
112206d60728SChristoph Hellwig .free = iommu_dma_free,
112306d60728SChristoph Hellwig .mmap = iommu_dma_mmap,
112406d60728SChristoph Hellwig .get_sgtable = iommu_dma_get_sgtable,
112506d60728SChristoph Hellwig .map_page = iommu_dma_map_page,
112606d60728SChristoph Hellwig .unmap_page = iommu_dma_unmap_page,
112706d60728SChristoph Hellwig .map_sg = iommu_dma_map_sg,
112806d60728SChristoph Hellwig .unmap_sg = iommu_dma_unmap_sg,
112906d60728SChristoph Hellwig .sync_single_for_cpu = iommu_dma_sync_single_for_cpu,
113006d60728SChristoph Hellwig .sync_single_for_device = iommu_dma_sync_single_for_device,
113106d60728SChristoph Hellwig .sync_sg_for_cpu = iommu_dma_sync_sg_for_cpu,
113206d60728SChristoph Hellwig .sync_sg_for_device = iommu_dma_sync_sg_for_device,
113306d60728SChristoph Hellwig .map_resource = iommu_dma_map_resource,
113406d60728SChristoph Hellwig .unmap_resource = iommu_dma_unmap_resource,
1135158a6d3cSYoshihiro Shimoda .get_merge_boundary = iommu_dma_get_merge_boundary,
113606d60728SChristoph Hellwig };
113706d60728SChristoph Hellwig
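/*
 * Worked example for iommu_dma_get_merge_boundary() above, assuming an IOMMU
 * whose smallest supported page size is 4KiB (the bitmap value is
 * hypothetical):
 *
 *	domain->pgsize_bitmap = 0x40201000	(4K | 2M | 1G pages)
 *	__ffs(0x40201000) = 12
 *	(1UL << 12) - 1 = 0xfff
 *
 * Callers of dma_get_merge_boundary() may therefore merge scatterlist
 * segments that abut at 4KiB boundaries, since the IOMMU can stitch such
 * segments into a contiguous IOVA range.
 */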
113806d60728SChristoph Hellwig /*
113906d60728SChristoph Hellwig * The IOMMU core code allocates the default DMA domain, which the underlying
114006d60728SChristoph Hellwig * IOMMU driver needs to support via the dma-iommu layer.
114106d60728SChristoph Hellwig */
114206d60728SChristoph Hellwig void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size)
114306d60728SChristoph Hellwig {
114406d60728SChristoph Hellwig struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
114506d60728SChristoph Hellwig
114606d60728SChristoph Hellwig if (!domain)
114706d60728SChristoph Hellwig goto out_err;
114806d60728SChristoph Hellwig
114906d60728SChristoph Hellwig /*
115006d60728SChristoph Hellwig * The IOMMU core code allocates the default DMA domain, which the
115106d60728SChristoph Hellwig * underlying IOMMU driver needs to support via the dma-iommu layer.
115206d60728SChristoph Hellwig */
115306d60728SChristoph Hellwig if (domain->type == IOMMU_DOMAIN_DMA) {
115406d60728SChristoph Hellwig if (iommu_dma_init_domain(domain, dma_base, size, dev))
115506d60728SChristoph Hellwig goto out_err;
115606d60728SChristoph Hellwig dev->dma_ops = &iommu_dma_ops;
115706d60728SChristoph Hellwig }
115806d60728SChristoph Hellwig
115906d60728SChristoph Hellwig return;
116006d60728SChristoph Hellwig out_err:
116106d60728SChristoph Hellwig pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
116206d60728SChristoph Hellwig dev_name(dev));
116344bb7e24SRobin Murphy }
116444bb7e24SRobin Murphy
116544bb7e24SRobin Murphy static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
116644bb7e24SRobin Murphy phys_addr_t msi_addr, struct iommu_domain *domain)
116744bb7e24SRobin Murphy {
116844bb7e24SRobin Murphy struct iommu_dma_cookie *cookie = domain->iova_cookie;
116944bb7e24SRobin Murphy struct iommu_dma_msi_page *msi_page;
1170842fe519SRobin Murphy dma_addr_t iova;
117144bb7e24SRobin Murphy int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
1172fdbe574eSRobin Murphy size_t size = cookie_msi_granule(cookie);
117344bb7e24SRobin Murphy
1174fdbe574eSRobin Murphy msi_addr &= ~(phys_addr_t)(size - 1);
117544bb7e24SRobin Murphy list_for_each_entry(msi_page, &cookie->msi_page_list, list)
117644bb7e24SRobin Murphy if (msi_page->phys == msi_addr)
117744bb7e24SRobin Murphy return msi_page;
117844bb7e24SRobin Murphy
117944bb7e24SRobin Murphy msi_page = kzalloc(sizeof(*msi_page), GFP_ATOMIC);
118044bb7e24SRobin Murphy if (!msi_page)
118144bb7e24SRobin Murphy return NULL;
118244bb7e24SRobin Murphy
11838af23fadSRobin Murphy iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
11848af23fadSRobin Murphy if (!iova)
118544bb7e24SRobin Murphy goto out_free_page;
118644bb7e24SRobin Murphy
11878af23fadSRobin Murphy if (iommu_map(domain, iova, msi_addr, size, prot))
11888af23fadSRobin Murphy goto out_free_iova;
11898af23fadSRobin Murphy
119044bb7e24SRobin Murphy INIT_LIST_HEAD(&msi_page->list);
1191a44e6657SRobin Murphy msi_page->phys = msi_addr;
1192a44e6657SRobin Murphy msi_page->iova = iova;
119344bb7e24SRobin Murphy list_add(&msi_page->list, &cookie->msi_page_list);
119444bb7e24SRobin Murphy return msi_page;
119544bb7e24SRobin Murphy
11968af23fadSRobin Murphy out_free_iova:
11978af23fadSRobin Murphy iommu_dma_free_iova(cookie, iova, size);
119844bb7e24SRobin Murphy out_free_page:
119944bb7e24SRobin Murphy kfree(msi_page);
120044bb7e24SRobin Murphy return NULL;
120144bb7e24SRobin Murphy }
120244bb7e24SRobin Murphy
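/*
 * Illustration of the doorbell caching above, assuming a 4KiB cookie granule
 * and a made-up doorbell address: a doorbell at physical 0x08020040 is
 * rounded down by "msi_addr &= ~(phys_addr_t)(size - 1)" to 0x08020000, so
 * every MSI targeting that page shares one cached msi_page entry and one
 * granule-sized IOMMU mapping. The kzalloc() uses GFP_ATOMIC because
 * iommu_dma_prepare_msi() below calls in here under cookie->msi_lock with
 * interrupts disabled.
 */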
1203ece6e6f0SJulien Grall int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
120444bb7e24SRobin Murphy {
1205ece6e6f0SJulien Grall struct device *dev = msi_desc_to_dev(desc);
120644bb7e24SRobin Murphy struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
120744bb7e24SRobin Murphy struct iommu_dma_cookie *cookie;
120844bb7e24SRobin Murphy struct iommu_dma_msi_page *msi_page;
120944bb7e24SRobin Murphy unsigned long flags;
121044bb7e24SRobin Murphy
1211ece6e6f0SJulien Grall if (!domain || !domain->iova_cookie) {
1212ece6e6f0SJulien Grall desc->iommu_cookie = NULL;
1213ece6e6f0SJulien Grall return 0;
1214ece6e6f0SJulien Grall }
121544bb7e24SRobin Murphy
121644bb7e24SRobin Murphy cookie = domain->iova_cookie;
121744bb7e24SRobin Murphy
121844bb7e24SRobin Murphy /*
121944bb7e24SRobin Murphy * We disable IRQs to rule out a possible inversion against
122044bb7e24SRobin Murphy * irq_desc_lock if, say, someone tries to retarget the affinity
122144bb7e24SRobin Murphy * of an MSI from within an IPI handler.
122244bb7e24SRobin Murphy */
122344bb7e24SRobin Murphy spin_lock_irqsave(&cookie->msi_lock, flags);
122444bb7e24SRobin Murphy msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
122544bb7e24SRobin Murphy spin_unlock_irqrestore(&cookie->msi_lock, flags);
122644bb7e24SRobin Murphy
1227ece6e6f0SJulien Grall msi_desc_set_iommu_cookie(desc, msi_page);
1228ece6e6f0SJulien Grall
1229ece6e6f0SJulien Grall if (!msi_page)
1230ece6e6f0SJulien Grall return -ENOMEM;
1231ece6e6f0SJulien Grall return 0;
123244bb7e24SRobin Murphy }
1233ece6e6f0SJulien Grall
1234ece6e6f0SJulien Grall void iommu_dma_compose_msi_msg(struct msi_desc *desc,
1235ece6e6f0SJulien Grall struct msi_msg *msg)
1236ece6e6f0SJulien Grall {
1237ece6e6f0SJulien Grall struct device *dev = msi_desc_to_dev(desc);
1238ece6e6f0SJulien Grall const struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
1239ece6e6f0SJulien Grall const struct iommu_dma_msi_page *msi_page;
1240ece6e6f0SJulien Grall
1241ece6e6f0SJulien Grall msi_page = msi_desc_get_iommu_cookie(desc);
1242ece6e6f0SJulien Grall
1243ece6e6f0SJulien Grall if (!domain || !domain->iova_cookie || WARN_ON(!msi_page))
1244ece6e6f0SJulien Grall return;
1245ece6e6f0SJulien Grall
1246ece6e6f0SJulien Grall msg->address_hi = upper_32_bits(msi_page->iova);
1247ece6e6f0SJulien Grall msg->address_lo &= cookie_msi_granule(domain->iova_cookie) - 1;
1248ece6e6f0SJulien Grall msg->address_lo += lower_32_bits(msi_page->iova);
124944bb7e24SRobin Murphy }
125006d60728SChristoph Hellwig
125106d60728SChristoph Hellwig static int iommu_dma_init(void)
125206d60728SChristoph Hellwig {
125306d60728SChristoph Hellwig return iova_cache_get();
12540db2e5d1SRobin Murphy }
125506d60728SChristoph Hellwig arch_initcall(iommu_dma_init);
1256
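/*
 * Worked example (with made-up addresses) of the address rewrite performed
 * by iommu_dma_compose_msi_msg() above, assuming a 4KiB MSI granule:
 *
 *	doorbell phys 0x08020040 -> msi_page->phys = 0x08020000
 *	msi_page->iova = 0xfffff000
 *	incoming msg: address_hi = 0x0, address_lo = 0x08020040
 *	address_lo &= 0xfff	 -> 0x040 (offset within the doorbell page)
 *	address_lo += 0xfffff000 -> 0xfffff040
 *	address_hi = upper_32_bits(0xfffff000) = 0x0
 *
 * The device then writes its MSI to the IOVA, which the IOMMU translates
 * back to the physical doorbell page.
 */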