// SPDX-License-Identifier: GPL-2.0-only
/*
 * A fairly generic DMA-API to IOMMU-API glue layer.
 *
 * Copyright (C) 2014-2015 ARM Ltd.
 *
 * based in part on arch/arm/mm/dma-mapping.c:
 * Copyright (C) 2000-2004 Russell King
 */

#include <linux/acpi_iort.h>
#include <linux/atomic.h>
#include <linux/crash_dump.h>
#include <linux/device.h>
#include <linux/dma-direct.h>
#include <linux/dma-iommu.h>
#include <linux/dma-map-ops.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/list_sort.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/swiotlb.h>
#include <linux/vmalloc.h>

struct iommu_dma_msi_page {
	struct list_head	list;
	dma_addr_t		iova;
	phys_addr_t		phys;
};

enum iommu_dma_cookie_type {
	IOMMU_DMA_IOVA_COOKIE,
	IOMMU_DMA_MSI_COOKIE,
};

struct iommu_dma_cookie {
	enum iommu_dma_cookie_type	type;
	union {
		/* Full allocator for IOMMU_DMA_IOVA_COOKIE */
		struct {
			struct iova_domain	iovad;

			struct iova_fq __percpu *fq;	/* Flush queue */
			/* Number of TLB flushes that have been started */
			atomic64_t		fq_flush_start_cnt;
			/* Number of TLB flushes that have been finished */
			atomic64_t		fq_flush_finish_cnt;
			/* Timer to regularly empty the flush queues */
			struct timer_list	fq_timer;
			/* 1 when timer is active, 0 when not */
			atomic_t		fq_timer_on;
		};
		/* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
		dma_addr_t		msi_iova;
	};
	struct list_head	msi_page_list;

	/* Domain for flush queue callback; NULL if flush queue not in use */
	struct iommu_domain	*fq_domain;
};

static DEFINE_STATIC_KEY_FALSE(iommu_deferred_attach_enabled);
bool iommu_dma_forcedac __read_mostly;

static int __init iommu_dma_forcedac_setup(char *str)
{
	int ret = kstrtobool(str, &iommu_dma_forcedac);

	if (!ret && iommu_dma_forcedac)
		pr_info("Forcing DAC for PCI devices\n");
	return ret;
}
early_param("iommu.forcedac", iommu_dma_forcedac_setup);

/* Number of entries per flush queue */
#define IOVA_FQ_SIZE	256

/* Timeout (in ms) after which entries are flushed from the queue */
#define IOVA_FQ_TIMEOUT	10

/* Flush queue entry for deferred flushing */
struct iova_fq_entry {
	unsigned long iova_pfn;
	unsigned long pages;
	struct list_head freelist;
	u64 counter; /* Flush counter when this entry was added */
};

/* Per-CPU flush queue structure */
struct iova_fq {
	struct iova_fq_entry entries[IOVA_FQ_SIZE];
	unsigned int head, tail;
	spinlock_t lock;
};

#define fq_ring_for_each(i, fq) \
	for ((i) = (fq)->head; (i) != (fq)->tail; (i) = ((i) + 1) % IOVA_FQ_SIZE)

static inline bool fq_full(struct iova_fq *fq)
{
	assert_spin_locked(&fq->lock);
	return (((fq->tail + 1) % IOVA_FQ_SIZE) == fq->head);
}

static inline unsigned int fq_ring_add(struct iova_fq *fq)
{
	unsigned int idx = fq->tail;

	assert_spin_locked(&fq->lock);

	fq->tail = (idx + 1) % IOVA_FQ_SIZE;

	return idx;
}

static void fq_ring_free(struct iommu_dma_cookie *cookie, struct iova_fq *fq)
{
	u64 counter = atomic64_read(&cookie->fq_flush_finish_cnt);
	unsigned int idx;

	assert_spin_locked(&fq->lock);

	fq_ring_for_each(idx, fq) {

		if (fq->entries[idx].counter >= counter)
			break;

		put_pages_list(&fq->entries[idx].freelist);
		free_iova_fast(&cookie->iovad,
			       fq->entries[idx].iova_pfn,
			       fq->entries[idx].pages);

		fq->head = (fq->head + 1) % IOVA_FQ_SIZE;
	}
}

static void fq_flush_iotlb(struct iommu_dma_cookie *cookie)
{
	atomic64_inc(&cookie->fq_flush_start_cnt);
	cookie->fq_domain->ops->flush_iotlb_all(cookie->fq_domain);
	atomic64_inc(&cookie->fq_flush_finish_cnt);
}
static void fq_flush_timeout(struct timer_list *t)
{
	struct iommu_dma_cookie *cookie = from_timer(cookie, t, fq_timer);
	int cpu;

	atomic_set(&cookie->fq_timer_on, 0);
	fq_flush_iotlb(cookie);

	for_each_possible_cpu(cpu) {
		unsigned long flags;
		struct iova_fq *fq;

		fq = per_cpu_ptr(cookie->fq, cpu);
		spin_lock_irqsave(&fq->lock, flags);
		fq_ring_free(cookie, fq);
		spin_unlock_irqrestore(&fq->lock, flags);
	}
}
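/*
 * Defer freeing an IOVA until the IOTLB entries covering it are known to be
 * invalid: each queued entry below is stamped with the current
 * fq_flush_start_cnt, and fq_ring_free() only releases entries whose stamp
 * is below fq_flush_finish_cnt, i.e. a full flush has completed since they
 * were queued.
 */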
static void queue_iova(struct iommu_dma_cookie *cookie,
		unsigned long pfn, unsigned long pages,
		struct list_head *freelist)
{
	struct iova_fq *fq;
	unsigned long flags;
	unsigned int idx;

	/*
	 * Order against the IOMMU driver's pagetable update from unmapping
	 * @pte, to guarantee that fq_flush_iotlb() observes that if called
	 * from a different CPU before we release the lock below. Full barrier
	 * so it also pairs with iommu_dma_init_fq() to avoid seeing partially
	 * written fq state here.
	 */
	smp_mb();

	fq = raw_cpu_ptr(cookie->fq);
	spin_lock_irqsave(&fq->lock, flags);

	/*
	 * First remove all entries from the flush queue that have already been
	 * flushed out on another CPU. This makes the fq_full() check below less
	 * likely to be true.
	 */
	fq_ring_free(cookie, fq);

	if (fq_full(fq)) {
		fq_flush_iotlb(cookie);
		fq_ring_free(cookie, fq);
	}

	idx = fq_ring_add(fq);

	fq->entries[idx].iova_pfn = pfn;
	fq->entries[idx].pages    = pages;
	fq->entries[idx].counter  = atomic64_read(&cookie->fq_flush_start_cnt);
	list_splice(freelist, &fq->entries[idx].freelist);

	spin_unlock_irqrestore(&fq->lock, flags);

	/* Avoid false sharing as much as possible. */
	if (!atomic_read(&cookie->fq_timer_on) &&
	    !atomic_xchg(&cookie->fq_timer_on, 1))
		mod_timer(&cookie->fq_timer,
			  jiffies + msecs_to_jiffies(IOVA_FQ_TIMEOUT));
}

static void iommu_dma_free_fq(struct iommu_dma_cookie *cookie)
{
	int cpu, idx;

	if (!cookie->fq)
		return;

	del_timer_sync(&cookie->fq_timer);
	/* The IOVAs will be torn down separately, so just free our queued pages */
	for_each_possible_cpu(cpu) {
		struct iova_fq *fq = per_cpu_ptr(cookie->fq, cpu);

		fq_ring_for_each(idx, fq)
			put_pages_list(&fq->entries[idx].freelist);
	}

	free_percpu(cookie->fq);
}

/* sysfs updates are serialised by the mutex of the group owning @domain */
int iommu_dma_init_fq(struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_fq __percpu *queue;
	int i, cpu;

	if (cookie->fq_domain)
		return 0;

	atomic64_set(&cookie->fq_flush_start_cnt,  0);
	atomic64_set(&cookie->fq_flush_finish_cnt, 0);

	queue = alloc_percpu(struct iova_fq);
	if (!queue) {
		pr_warn("iova flush queue initialization failed\n");
		return -ENOMEM;
	}

	for_each_possible_cpu(cpu) {
		struct iova_fq *fq = per_cpu_ptr(queue, cpu);

		fq->head = 0;
		fq->tail = 0;

		spin_lock_init(&fq->lock);

		for (i = 0; i < IOVA_FQ_SIZE; i++)
			INIT_LIST_HEAD(&fq->entries[i].freelist);
	}

	cookie->fq = queue;

	timer_setup(&cookie->fq_timer, fq_flush_timeout, 0);
	atomic_set(&cookie->fq_timer_on, 0);
	/*
	 * Prevent incomplete fq state being observable. Pairs with path from
	 * __iommu_dma_unmap() through iommu_dma_free_iova() to queue_iova()
	 */
	smp_wmb();
	WRITE_ONCE(cookie->fq_domain, domain);
	return 0;
}

static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
{
	if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
		return cookie->iovad.granule;
	return PAGE_SIZE;
}

static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
{
	struct iommu_dma_cookie *cookie;

	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
	if (cookie) {
		INIT_LIST_HEAD(&cookie->msi_page_list);
		cookie->type = type;
	}
	return cookie;
}

/**
 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
 * @domain: IOMMU domain to prepare for DMA-API usage
 */
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
	if (domain->iova_cookie)
		return -EEXIST;

	domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
	if (!domain->iova_cookie)
		return -ENOMEM;

	return 0;
}

/**
 * iommu_get_msi_cookie - Acquire just MSI remapping resources
 * @domain: IOMMU domain to prepare
 * @base: Start address of IOVA region for MSI mappings
 *
 * Users who manage their own IOVA allocation and do not want DMA API support,
 * but would still like to take advantage of automatic MSI remapping, can use
 * this to initialise their own domain appropriately. Users should reserve a
 * contiguous IOVA region, starting at @base, large enough to accommodate the
 * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
 * used by the devices attached to @domain.
 */
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
	struct iommu_dma_cookie *cookie;

	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;

	if (domain->iova_cookie)
		return -EEXIST;

	cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
	if (!cookie)
		return -ENOMEM;

	cookie->msi_iova = base;
	domain->iova_cookie = cookie;
	return 0;
}
EXPORT_SYMBOL(iommu_get_msi_cookie);
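/*
 * A minimal sketch of the expected caller pattern (hypothetical user of an
 * unmanaged domain, not taken from this file): pick an otherwise-unused IOVA
 * base for MSI doorbells, then let the MSI layer populate it via the cookie.
 *
 *	dom = iommu_domain_alloc(bus);	// gives IOMMU_DOMAIN_UNMANAGED
 *	if (iommu_get_msi_cookie(dom, MY_MSI_IOVA_BASE))
 *		goto err;
 *	// ...attach devices; MSI doorbell pages are then mapped linearly
 *	// upwards from MY_MSI_IOVA_BASE as interrupts are set up.
 *
 * MY_MSI_IOVA_BASE is a caller-chosen constant, invented for illustration.
 */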
/**
 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
 *          iommu_get_msi_cookie()
 */
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi, *tmp;

	if (!cookie)
		return;

	if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule) {
		iommu_dma_free_fq(cookie);
		put_iova_domain(&cookie->iovad);
	}

	list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
		list_del(&msi->list);
		kfree(msi);
	}
	kfree(cookie);
	domain->iova_cookie = NULL;
}

/**
 * iommu_dma_get_resv_regions - Reserved region driver helper
 * @dev: Device from iommu_get_resv_regions()
 * @list: Reserved region list from iommu_get_resv_regions()
 *
 * IOMMU drivers can use this to implement their .get_resv_regions callback
 * for general non-IOMMU-specific reservations. Currently, this covers GICv3
 * ITS region reservation on ACPI based ARM platforms that may require HW MSI
 * reservation.
 */
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{
	if (!is_of_node(dev_iommu_fwspec_get(dev)->iommu_fwnode))
		iort_iommu_msi_get_resv_regions(dev, list);
}
EXPORT_SYMBOL(iommu_dma_get_resv_regions);
static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
		phys_addr_t start, phys_addr_t end)
{
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_dma_msi_page *msi_page;
	int i, num_pages;

	start -= iova_offset(iovad, start);
	num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);

	for (i = 0; i < num_pages; i++) {
		msi_page = kmalloc(sizeof(*msi_page), GFP_KERNEL);
		if (!msi_page)
			return -ENOMEM;

		msi_page->phys = start;
		msi_page->iova = start;
		INIT_LIST_HEAD(&msi_page->list);
		list_add(&msi_page->list, &cookie->msi_page_list);
		start += iovad->granule;
	}

	return 0;
}

static int iommu_dma_ranges_sort(void *priv, const struct list_head *a,
		const struct list_head *b)
{
	struct resource_entry *res_a = list_entry(a, typeof(*res_a), node);
	struct resource_entry *res_b = list_entry(b, typeof(*res_b), node);

	return res_a->res->start > res_b->res->start;
}
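/*
 * Reserve IOVA space that a PCI device cannot actually use: the bridge
 * windows themselves, plus the gaps between the host bridge's dma_ranges.
 * For example (hypothetical ranges, for illustration only): with sorted
 * dma_ranges of [0x0 - 0x7fffffff] and [0xc0000000 - 0xffffffff], the
 * second loop below reserves the hole starting at 0x80000000 and
 * everything above the last range.
 */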
static int iova_reserve_pci_windows(struct pci_dev *dev,
		struct iova_domain *iovad)
{
	struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
	struct resource_entry *window;
	unsigned long lo, hi;
	phys_addr_t start = 0, end;

	resource_list_for_each_entry(window, &bridge->windows) {
		if (resource_type(window->res) != IORESOURCE_MEM)
			continue;

		lo = iova_pfn(iovad, window->res->start - window->offset);
		hi = iova_pfn(iovad, window->res->end - window->offset);
		reserve_iova(iovad, lo, hi);
	}

	/* Get reserved DMA windows from host bridge */
	list_sort(NULL, &bridge->dma_ranges, iommu_dma_ranges_sort);
	resource_list_for_each_entry(window, &bridge->dma_ranges) {
		end = window->res->start - window->offset;
resv_iova:
		if (end > start) {
			lo = iova_pfn(iovad, start);
			hi = iova_pfn(iovad, end);
			reserve_iova(iovad, lo, hi);
		} else if (end < start) {
			/* DMA ranges should be non-overlapping */
			dev_err(&dev->dev,
				"Failed to reserve IOVA [%pa-%pa]\n",
				&start, &end);
			return -EINVAL;
		}

		start = window->res->end - window->offset + 1;
		/* If window is last entry */
		if (window->node.next == &bridge->dma_ranges &&
		    end != ~(phys_addr_t)0) {
			end = ~(phys_addr_t)0;
			goto resv_iova;
		}
	}

	return 0;
}

static int iova_reserve_iommu_regions(struct device *dev,
		struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_resv_region *region;
	LIST_HEAD(resv_regions);
	int ret = 0;

	if (dev_is_pci(dev)) {
		ret = iova_reserve_pci_windows(to_pci_dev(dev), iovad);
		if (ret)
			return ret;
	}

	iommu_get_resv_regions(dev, &resv_regions);
	list_for_each_entry(region, &resv_regions, list) {
		unsigned long lo, hi;

		/* We ARE the software that manages these! */
		if (region->type == IOMMU_RESV_SW_MSI)
			continue;

		lo = iova_pfn(iovad, region->start);
		hi = iova_pfn(iovad, region->start + region->length - 1);
		reserve_iova(iovad, lo, hi);

		if (region->type == IOMMU_RESV_MSI)
			ret = cookie_init_hw_msi_region(cookie, region->start,
					region->start + region->length);
		if (ret)
			break;
	}
	iommu_put_resv_regions(dev, &resv_regions);

	return ret;
}
static bool dev_is_untrusted(struct device *dev)
{
	return dev_is_pci(dev) && to_pci_dev(dev)->untrusted;
}

static bool dev_use_swiotlb(struct device *dev)
{
	return IS_ENABLED(CONFIG_SWIOTLB) && dev_is_untrusted(dev);
}

/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 * @base: IOVA at which the mappable address space starts
 * @limit: Last address of the IOVA space
 * @dev: Device the domain is being initialised for
 *
 * @base and @limit + 1 should be exact multiples of IOMMU page granularity to
 * avoid rounding surprises. If necessary, we reserve the page at address 0
 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 * any change which could make prior IOVAs invalid will fail.
 */
static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
				 dma_addr_t limit, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	unsigned long order, base_pfn;
	struct iova_domain *iovad;
	int ret;

	if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
		return -EINVAL;

	iovad = &cookie->iovad;

	/* Use the smallest supported page size for IOVA granularity */
	order = __ffs(domain->pgsize_bitmap);
	base_pfn = max_t(unsigned long, 1, base >> order);

	/* Check the domain allows at least some access to the device... */
	if (domain->geometry.force_aperture) {
		if (base > domain->geometry.aperture_end ||
		    limit < domain->geometry.aperture_start) {
			pr_warn("specified DMA range outside IOMMU capability\n");
			return -EFAULT;
		}
		/* ...then finally give it a kicking to make sure it fits */
		base_pfn = max_t(unsigned long, base_pfn,
				 domain->geometry.aperture_start >> order);
	}

	/* start_pfn is always nonzero for an already-initialised domain */
	if (iovad->start_pfn) {
		if (1UL << order != iovad->granule ||
		    base_pfn != iovad->start_pfn) {
			pr_warn("Incompatible range for DMA domain\n");
			return -EFAULT;
		}

		return 0;
	}

	init_iova_domain(iovad, 1UL << order, base_pfn);
	ret = iova_domain_init_rcaches(iovad);
	if (ret)
		return ret;

	/* If the FQ fails we can simply fall back to strict mode */
	if (domain->type == IOMMU_DOMAIN_DMA_FQ && iommu_dma_init_fq(domain))
		domain->type = IOMMU_DOMAIN_DMA;

	return iova_reserve_iommu_regions(dev, domain);
}
/**
 * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
 *                    page flags.
 * @dir: Direction of DMA transfer
 * @coherent: Is the DMA master cache-coherent?
 * @attrs: DMA attributes for the mapping
 *
 * Return: corresponding IOMMU API page protection flags
 */
static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
		     unsigned long attrs)
{
	int prot = coherent ? IOMMU_CACHE : 0;

	if (attrs & DMA_ATTR_PRIVILEGED)
		prot |= IOMMU_PRIV;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;
	default:
		return 0;
	}
}
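/*
 * For instance, a coherent DMA_TO_DEVICE mapping with DMA_ATTR_PRIVILEGED
 * yields IOMMU_READ | IOMMU_CACHE | IOMMU_PRIV: the device may only read
 * the buffer, snooping the CPU caches, via privileged transactions.
 */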
static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
		size_t size, u64 dma_limit, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long shift, iova_len, iova = 0;

	if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
		cookie->msi_iova += size;
		return cookie->msi_iova - size;
	}

	shift = iova_shift(iovad);
	iova_len = size >> shift;

	dma_limit = min_not_zero(dma_limit, dev->bus_dma_limit);

	if (domain->geometry.force_aperture)
		dma_limit = min(dma_limit, (u64)domain->geometry.aperture_end);

	/* Try to get PCI devices a SAC address */
	if (dma_limit > DMA_BIT_MASK(32) && !iommu_dma_forcedac && dev_is_pci(dev))
		iova = alloc_iova_fast(iovad, iova_len,
				       DMA_BIT_MASK(32) >> shift, false);

	if (!iova)
		iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
				       true);

	return (dma_addr_t)iova << shift;
}

static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
		dma_addr_t iova, size_t size, struct iommu_iotlb_gather *gather)
{
	struct iova_domain *iovad = &cookie->iovad;

	/* The MSI case is only ever cleaning up its most recent allocation */
	if (cookie->type == IOMMU_DMA_MSI_COOKIE)
		cookie->msi_iova -= size;
	else if (gather && gather->queued)
		queue_iova(cookie, iova_pfn(iovad, iova),
				size >> iova_shift(iovad),
				&gather->freelist);
	else
		free_iova_fast(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad));
}
static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
		size_t size)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, dma_addr);
	struct iommu_iotlb_gather iotlb_gather;
	size_t unmapped;

	dma_addr -= iova_off;
	size = iova_align(iovad, size + iova_off);
	iommu_iotlb_gather_init(&iotlb_gather);
	iotlb_gather.queued = READ_ONCE(cookie->fq_domain);

	unmapped = iommu_unmap_fast(domain, dma_addr, size, &iotlb_gather);
	WARN_ON(unmapped != size);

	if (!iotlb_gather.queued)
		iommu_iotlb_sync(domain, &iotlb_gather);
	iommu_dma_free_iova(cookie, dma_addr, size, &iotlb_gather);
}

static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
		size_t size, int prot, u64 dma_mask)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, phys);
	dma_addr_t iova;

	if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
	    iommu_deferred_attach(dev, domain))
		return DMA_MAPPING_ERROR;

	size = iova_align(iovad, size + iova_off);

	iova = iommu_dma_alloc_iova(domain, size, dma_mask, dev);
	if (!iova)
		return DMA_MAPPING_ERROR;

	if (iommu_map_atomic(domain, iova, phys - iova_off, size, prot)) {
		iommu_dma_free_iova(cookie, iova, size, NULL);
		return DMA_MAPPING_ERROR;
	}
	return iova + iova_off;
}
static void __iommu_dma_free_pages(struct page **pages, int count)
{
	while (count--)
		__free_page(pages[count]);
	kvfree(pages);
}

static struct page **__iommu_dma_alloc_pages(struct device *dev,
		unsigned int count, unsigned long order_mask, gfp_t gfp)
{
	struct page **pages;
	unsigned int i = 0, nid = dev_to_node(dev);

	order_mask &= (2U << MAX_ORDER) - 1;
	if (!order_mask)
		return NULL;

	pages = kvcalloc(count, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;

	/* IOMMU can map any pages, so highmem can also be used here */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	/* It makes no sense to muck about with huge pages */
	gfp &= ~__GFP_COMP;

	while (count) {
		struct page *page = NULL;
		unsigned int order_size;

		/*
		 * Higher-order allocations are a convenience rather
		 * than a necessity, hence using __GFP_NORETRY until
		 * falling back to minimum-order allocations.
		 */
		for (order_mask &= (2U << __fls(count)) - 1;
		     order_mask; order_mask &= ~order_size) {
			unsigned int order = __fls(order_mask);
			gfp_t alloc_flags = gfp;

			order_size = 1U << order;
			if (order_mask > order_size)
				alloc_flags |= __GFP_NORETRY;
			page = alloc_pages_node(nid, alloc_flags, order);
			if (!page)
				continue;
			if (order)
				split_page(page, order);
			break;
		}
		if (!page) {
			__iommu_dma_free_pages(pages, i);
			return NULL;
		}
		count -= order_size;
		while (order_size--)
			pages[i++] = page++;
	}
	return pages;
}
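/*
 * A worked example of the order_mask fallback above (hypothetical numbers):
 * with count = 20 pages and order_mask permitting orders 4 and 0 (e.g. an
 * IOMMU supporting 64K and 4K pages with 4K CPU pages), the first pass tries
 * one order-4 (16-page) block with __GFP_NORETRY; if that succeeds, the
 * remaining 4 pages can only use order 0, so they come as single pages.
 */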
/*
 * If size is less than PAGE_SIZE, then a full CPU page will be allocated,
 * but an IOMMU which supports smaller pages might not map the whole thing.
 */
static struct page **__iommu_dma_alloc_noncontiguous(struct device *dev,
		size_t size, struct sg_table *sgt, gfp_t gfp, pgprot_t prot,
		unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	bool coherent = dev_is_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
	struct page **pages;
	dma_addr_t iova;
	ssize_t ret;

	if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
	    iommu_deferred_attach(dev, domain))
		return NULL;

	min_size = alloc_sizes & -alloc_sizes;
	if (min_size < PAGE_SIZE) {
		min_size = PAGE_SIZE;
		alloc_sizes |= PAGE_SIZE;
	} else {
		size = ALIGN(size, min_size);
	}
	if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
		alloc_sizes = min_size;

	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	pages = __iommu_dma_alloc_pages(dev, count, alloc_sizes >> PAGE_SHIFT,
					gfp);
	if (!pages)
		return NULL;

	size = iova_align(iovad, size);
	iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
	if (!iova)
		goto out_free_pages;

	if (sg_alloc_table_from_pages(sgt, pages, count, 0, size, GFP_KERNEL))
		goto out_free_iova;

	if (!(ioprot & IOMMU_CACHE)) {
		struct scatterlist *sg;
		int i;

		for_each_sg(sgt->sgl, sg, sgt->orig_nents, i)
			arch_dma_prep_coherent(sg_page(sg), sg->length);
	}

	ret = iommu_map_sg_atomic(domain, iova, sgt->sgl, sgt->orig_nents, ioprot);
	if (ret < 0 || ret < size)
		goto out_free_sg;

	sgt->sgl->dma_address = iova;
	sgt->sgl->dma_length = size;
	return pages;

out_free_sg:
	sg_free_table(sgt);
out_free_iova:
	iommu_dma_free_iova(cookie, iova, size, NULL);
out_free_pages:
	__iommu_dma_free_pages(pages, count);
	return NULL;
}
static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, pgprot_t prot,
		unsigned long attrs)
{
	struct page **pages;
	struct sg_table sgt;
	void *vaddr;

	pages = __iommu_dma_alloc_noncontiguous(dev, size, &sgt, gfp, prot,
						attrs);
	if (!pages)
		return NULL;
	*dma_handle = sgt.sgl->dma_address;
	sg_free_table(&sgt);
	vaddr = dma_common_pages_remap(pages, size, prot,
			__builtin_return_address(0));
	if (!vaddr)
		goto out_unmap;
	return vaddr;

out_unmap:
	__iommu_dma_unmap(dev, *dma_handle, size);
	__iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
	return NULL;
}

static struct sg_table *iommu_dma_alloc_noncontiguous(struct device *dev,
		size_t size, enum dma_data_direction dir, gfp_t gfp,
		unsigned long attrs)
{
	struct dma_sgt_handle *sh;

	sh = kmalloc(sizeof(*sh), gfp);
	if (!sh)
		return NULL;

	sh->pages = __iommu_dma_alloc_noncontiguous(dev, size, &sh->sgt, gfp,
						    PAGE_KERNEL, attrs);
	if (!sh->pages) {
		kfree(sh);
		return NULL;
	}
	return &sh->sgt;
}

static void iommu_dma_free_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	struct dma_sgt_handle *sh = sgt_handle(sgt);

	__iommu_dma_unmap(dev, sgt->sgl->dma_address, size);
	__iommu_dma_free_pages(sh->pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
	sg_free_table(&sh->sgt);
	kfree(sh);
}
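/*
 * The two functions above back the dma_alloc_noncontiguous() API. A sketch
 * of a hypothetical driver using it (caller code invented for illustration):
 *
 *	sgt = dma_alloc_noncontiguous(dev, size, DMA_BIDIRECTIONAL,
 *				      GFP_KERNEL, 0);
 *	if (!sgt)
 *		return -ENOMEM;
 *	vaddr = dma_vmap_noncontiguous(dev, size, sgt);	// CPU view, if needed
 *	...
 *	dma_vunmap_noncontiguous(dev, vaddr);
 *	dma_free_noncontiguous(dev, size, sgt, DMA_BIDIRECTIONAL);
 *
 * The device sees a single contiguous IOVA range (sgt->sgl->dma_address)
 * even though the backing pages need not be physically contiguous.
 */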
static void iommu_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (dev_is_dma_coherent(dev) && !dev_use_swiotlb(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_cpu(phys, size, dir);

	if (is_swiotlb_buffer(dev, phys))
		swiotlb_sync_single_for_cpu(dev, phys, size, dir);
}

static void iommu_dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (dev_is_dma_coherent(dev) && !dev_use_swiotlb(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
	if (is_swiotlb_buffer(dev, phys))
		swiotlb_sync_single_for_device(dev, phys, size, dir);

	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_device(phys, size, dir);
}

static void iommu_dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (dev_use_swiotlb(dev))
		for_each_sg(sgl, sg, nelems, i)
			iommu_dma_sync_single_for_cpu(dev, sg_dma_address(sg),
						      sg->length, dir);
	else if (!dev_is_dma_coherent(dev))
		for_each_sg(sgl, sg, nelems, i)
			arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
}

static void iommu_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (dev_use_swiotlb(dev))
		for_each_sg(sgl, sg, nelems, i)
			iommu_dma_sync_single_for_device(dev,
							 sg_dma_address(sg),
							 sg->length, dir);
	else if (!dev_is_dma_coherent(dev))
		for_each_sg(sgl, sg, nelems, i)
			arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
}
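/*
 * For untrusted devices, buffers that are not strictly IOVA-granule aligned
 * are bounced through swiotlb below, so that unrelated kernel data sharing
 * the same IOMMU granule is never exposed to the device.
 */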
static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	bool coherent = dev_is_dma_coherent(dev);
	int prot = dma_info_to_prot(dir, coherent, attrs);
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	dma_addr_t iova, dma_mask = dma_get_mask(dev);

	/*
	 * If both the physical buffer start address and size are
	 * page aligned, we don't need to use a bounce page.
	 */
	if (dev_use_swiotlb(dev) && iova_offset(iovad, phys | size)) {
		void *padding_start;
		size_t padding_size, aligned_size;

		if (!is_swiotlb_active(dev)) {
			dev_warn_once(dev, "DMA bounce buffers are inactive, unable to map unaligned transaction.\n");
			return DMA_MAPPING_ERROR;
		}

		aligned_size = iova_align(iovad, size);
		phys = swiotlb_tbl_map_single(dev, phys, size, aligned_size,
					      iova_mask(iovad), dir, attrs);

		if (phys == DMA_MAPPING_ERROR)
			return DMA_MAPPING_ERROR;

		/* Cleanup the padding area. */
		padding_start = phys_to_virt(phys);
		padding_size = aligned_size;

		if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
		    (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)) {
			padding_start += size;
			padding_size -= size;
		}

		memset(padding_start, 0, padding_size);
	}

	if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		arch_sync_dma_for_device(phys, size, dir);

	iova = __iommu_dma_map(dev, phys, size, prot, dma_mask);
	if (iova == DMA_MAPPING_ERROR && is_swiotlb_buffer(dev, phys))
		swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
	return iova;
}
static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	phys_addr_t phys;

	phys = iommu_iova_to_phys(domain, dma_handle);
	if (WARN_ON(!phys))
		return;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && !dev_is_dma_coherent(dev))
		arch_sync_dma_for_cpu(phys, size, dir);

	__iommu_dma_unmap(dev, dma_handle, size);

	if (unlikely(is_swiotlb_buffer(dev, phys)))
		swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
}

/*
 * Prepare a successfully-mapped scatterlist to give back to the caller.
 *
 * At this point the segments are already laid out by iommu_dma_map_sg() to
 * avoid individually crossing any boundaries, so we merely need to check a
 * segment's start address to avoid concatenating across one.
 */
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
		dma_addr_t dma_addr)
{
	struct scatterlist *s, *cur = sg;
	unsigned long seg_mask = dma_get_seg_boundary(dev);
	unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
	int i, count = 0;

	for_each_sg(sg, s, nents, i) {
		/* Restore this segment's original unaligned fields first */
		unsigned int s_iova_off = sg_dma_address(s);
		unsigned int s_length = sg_dma_len(s);
		unsigned int s_iova_len = s->length;

		s->offset += s_iova_off;
		s->length = s_length;
		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;

		/*
		 * Now fill in the real DMA data. If...
		 * - there is a valid output segment to append to
		 * - and this segment starts on an IOVA page boundary
		 * - but doesn't fall at a segment boundary
		 * - and wouldn't make the resulting output segment too long
		 */
		if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
		    (max_len - cur_len >= s_length)) {
			/* ...then concatenate it with the previous one */
			cur_len += s_length;
		} else {
			/* Otherwise start the next output segment */
			if (i > 0)
				cur = sg_next(cur);
			cur_len = s_length;
			count++;

			sg_dma_address(cur) = dma_addr + s_iova_off;
		}

		sg_dma_len(cur) = cur_len;
		dma_addr += s_iova_len;

		if (s_length + s_iova_off < s_iova_len)
			cur_len = 0;
	}
	return count;
}
10950db2e5d1SRobin Murphy /*
10960db2e5d1SRobin Murphy  * If mapping failed, then just restore the original list,
10970db2e5d1SRobin Murphy  * while making sure the DMA fields are invalidated.
10980db2e5d1SRobin Murphy  */
10990db2e5d1SRobin Murphy static void __invalidate_sg(struct scatterlist *sg, int nents)
11000db2e5d1SRobin Murphy {
11010db2e5d1SRobin Murphy 	struct scatterlist *s;
11020db2e5d1SRobin Murphy 	int i;
11030db2e5d1SRobin Murphy 
11040db2e5d1SRobin Murphy 	for_each_sg(sg, s, nents, i) {
1105cad34be7SChristoph Hellwig 		if (sg_dma_address(s) != DMA_MAPPING_ERROR)
110607b48ac4SRobin Murphy 			s->offset += sg_dma_address(s);
11070db2e5d1SRobin Murphy 		if (sg_dma_len(s))
11080db2e5d1SRobin Murphy 			s->length = sg_dma_len(s);
1109cad34be7SChristoph Hellwig 		sg_dma_address(s) = DMA_MAPPING_ERROR;
11100db2e5d1SRobin Murphy 		sg_dma_len(s) = 0;
11110db2e5d1SRobin Murphy 	}
11120db2e5d1SRobin Murphy }
11130db2e5d1SRobin Murphy 
111482612d66STom Murphy static void iommu_dma_unmap_sg_swiotlb(struct device *dev, struct scatterlist *sg,
111582612d66STom Murphy 		int nents, enum dma_data_direction dir, unsigned long attrs)
111682612d66STom Murphy {
111782612d66STom Murphy 	struct scatterlist *s;
111882612d66STom Murphy 	int i;
111982612d66STom Murphy 
112082612d66STom Murphy 	for_each_sg(sg, s, nents, i)
11219b49bbc2SDavid Stevens 		iommu_dma_unmap_page(dev, sg_dma_address(s),
112282612d66STom Murphy 				sg_dma_len(s), dir, attrs);
112382612d66STom Murphy }
112482612d66STom Murphy 
112582612d66STom Murphy static int iommu_dma_map_sg_swiotlb(struct device *dev, struct scatterlist *sg,
112682612d66STom Murphy 		int nents, enum dma_data_direction dir, unsigned long attrs)
112782612d66STom Murphy {
112882612d66STom Murphy 	struct scatterlist *s;
112982612d66STom Murphy 	int i;
113082612d66STom Murphy 
113182612d66STom Murphy 	for_each_sg(sg, s, nents, i) {
11329b49bbc2SDavid Stevens 		sg_dma_address(s) = iommu_dma_map_page(dev, sg_page(s),
11339b49bbc2SDavid Stevens 				s->offset, s->length, dir, attrs);
113482612d66STom Murphy 		if (sg_dma_address(s) == DMA_MAPPING_ERROR)
113582612d66STom Murphy 			goto out_unmap;
113682612d66STom Murphy 		sg_dma_len(s) = s->length;
113782612d66STom Murphy 	}
113882612d66STom Murphy 
113982612d66STom Murphy 	return nents;
114082612d66STom Murphy 
114182612d66STom Murphy out_unmap:
114282612d66STom Murphy 	iommu_dma_unmap_sg_swiotlb(dev, sg, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
1143dabb16f6SLogan Gunthorpe 	return -EIO;
114482612d66STom Murphy }
114582612d66STom Murphy 
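/*
 * Note the contrast with the merging path below: the swiotlb fallback
 * above maps every entry through iommu_dma_map_page() individually, so
 * each input entry yields exactly one DMA segment. A hypothetical
 * invariant check (illustrative only):
 */
static bool example_sg_mapped_per_segment(struct scatterlist *sgl, int nents)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sgl, s, nents, i)
		if (sg_dma_len(s) != s->length)
			return false;
	return true;
}
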
11460db2e5d1SRobin Murphy /*
11470db2e5d1SRobin Murphy  * The DMA API client is passing in a scatterlist which could describe
11480db2e5d1SRobin Murphy  * any old buffer layout, but the IOMMU API requires everything to be
11490db2e5d1SRobin Murphy  * aligned to IOMMU pages. Hence the need for this complicated bit of
11500db2e5d1SRobin Murphy  * impedance-matching, to be able to hand off a suitably-aligned list,
11510db2e5d1SRobin Murphy  * but still preserve the original offsets and sizes for the caller.
11520db2e5d1SRobin Murphy  */
115306d60728SChristoph Hellwig static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
115406d60728SChristoph Hellwig 		int nents, enum dma_data_direction dir, unsigned long attrs)
11550db2e5d1SRobin Murphy {
115643c5bf11SRobin Murphy 	struct iommu_domain *domain = iommu_get_dma_domain(dev);
1157842fe519SRobin Murphy 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
1158842fe519SRobin Murphy 	struct iova_domain *iovad = &cookie->iovad;
11590db2e5d1SRobin Murphy 	struct scatterlist *s, *prev = NULL;
116006d60728SChristoph Hellwig 	int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);
1161842fe519SRobin Murphy 	dma_addr_t iova;
11620db2e5d1SRobin Murphy 	size_t iova_len = 0;
1163809eac54SRobin Murphy 	unsigned long mask = dma_get_seg_boundary(dev);
1164dabb16f6SLogan Gunthorpe 	ssize_t ret;
11650db2e5d1SRobin Murphy 	int i;
11660db2e5d1SRobin Murphy 
1167dabb16f6SLogan Gunthorpe 	if (static_branch_unlikely(&iommu_deferred_attach_enabled)) {
1168dabb16f6SLogan Gunthorpe 		ret = iommu_deferred_attach(dev, domain);
1169ac315f96SLogan Gunthorpe 		if (ret)
1170dabb16f6SLogan Gunthorpe 			goto out;
1171dabb16f6SLogan Gunthorpe 	}
1172795bbbb9STom Murphy 
11732e727bffSDavid Stevens 	if (dev_use_swiotlb(dev))
117482612d66STom Murphy 		return iommu_dma_map_sg_swiotlb(dev, sg, nents, dir, attrs);
117582612d66STom Murphy 
11760db2e5d1SRobin Murphy 	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
11770db2e5d1SRobin Murphy 		iommu_dma_sync_sg_for_device(dev, sg, nents, dir);
11780db2e5d1SRobin Murphy 
11790db2e5d1SRobin Murphy 	/*
11800db2e5d1SRobin Murphy 	 * Work out how much IOVA space we need, and align the segments to
11810db2e5d1SRobin Murphy 	 * IOVA granules for the IOMMU driver to handle. With some clever
11820db2e5d1SRobin Murphy 	 * trickery we can modify the list in-place, but reversibly, by
1183809eac54SRobin Murphy 	 * stashing the unaligned parts in the as-yet-unused DMA fields.
11840db2e5d1SRobin Murphy 	 */
11850db2e5d1SRobin Murphy 	for_each_sg(sg, s, nents, i) {
1186809eac54SRobin Murphy 		size_t s_iova_off = iova_offset(iovad, s->offset);
11870db2e5d1SRobin Murphy 		size_t s_length = s->length;
1188809eac54SRobin Murphy 		size_t pad_len = (mask - iova_len + 1) & mask;
11890db2e5d1SRobin Murphy 
1190809eac54SRobin Murphy 		sg_dma_address(s) = s_iova_off;
11910db2e5d1SRobin Murphy 		sg_dma_len(s) = s_length;
1192809eac54SRobin Murphy 		s->offset -= s_iova_off;
1193809eac54SRobin Murphy 		s_length = iova_align(iovad, s_length + s_iova_off);
11940db2e5d1SRobin Murphy 		s->length = s_length;
11950db2e5d1SRobin Murphy 
11960db2e5d1SRobin Murphy 		/*
1197809eac54SRobin Murphy 		 * Due to the alignment of our single IOVA allocation, we can
1198809eac54SRobin Murphy 		 * depend on these assumptions about the segment boundary mask:
1199809eac54SRobin Murphy 		 * - If mask size >= IOVA size, then the IOVA range cannot
1200809eac54SRobin Murphy 		 *   possibly fall across a boundary, so we don't care.
1201809eac54SRobin Murphy 		 * - If mask size < IOVA size, then the IOVA range must start
1202809eac54SRobin Murphy 		 *   exactly on a boundary, therefore we can lay things out
1203809eac54SRobin Murphy 		 *   based purely on segment lengths without needing to know
1204809eac54SRobin Murphy 		 *   the actual addresses beforehand.
1205809eac54SRobin Murphy 		 * - The mask must be a power of 2, so pad_len == 0 if
1206809eac54SRobin Murphy 		 *   iova_len == 0, thus we cannot dereference prev the first
1207809eac54SRobin Murphy 		 *   time through here (i.e. before it has a meaningful value).
12080db2e5d1SRobin Murphy 		 */
1209809eac54SRobin Murphy 		if (pad_len && pad_len < s_length - 1) {
12100db2e5d1SRobin Murphy 			prev->length += pad_len;
12110db2e5d1SRobin Murphy 			iova_len += pad_len;
12120db2e5d1SRobin Murphy 		}
12130db2e5d1SRobin Murphy 
12140db2e5d1SRobin Murphy 		iova_len += s_length;
12150db2e5d1SRobin Murphy 		prev = s;
12160db2e5d1SRobin Murphy 	}
12170db2e5d1SRobin Murphy 
1218842fe519SRobin Murphy 	iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
1219dabb16f6SLogan Gunthorpe 	if (!iova) {
1220dabb16f6SLogan Gunthorpe 		ret = -ENOMEM;
12210db2e5d1SRobin Murphy 		goto out_restore_sg;
1222dabb16f6SLogan Gunthorpe 	}
12230db2e5d1SRobin Murphy 
12240db2e5d1SRobin Murphy 	/*
12250db2e5d1SRobin Murphy 	 * We'll leave any physical concatenation to the IOMMU driver's
12260db2e5d1SRobin Murphy 	 * implementation - it knows better than we do.
12270db2e5d1SRobin Murphy 	 */
1228dabb16f6SLogan Gunthorpe 	ret = iommu_map_sg_atomic(domain, iova, sg, nents, prot);
1229a3884774SYunfei Wang 	if (ret < 0 || ret < iova_len)
12300db2e5d1SRobin Murphy 		goto out_free_iova;
12310db2e5d1SRobin Murphy 
1232842fe519SRobin Murphy 	return __finalise_sg(dev, sg, nents, iova);
12330db2e5d1SRobin Murphy 
12340db2e5d1SRobin Murphy out_free_iova:
12352a2b8eaaSTom Murphy 	iommu_dma_free_iova(cookie, iova, iova_len, NULL);
12360db2e5d1SRobin Murphy out_restore_sg:
12370db2e5d1SRobin Murphy 	__invalidate_sg(sg, nents);
1238dabb16f6SLogan Gunthorpe out:
1239dabb16f6SLogan Gunthorpe 	if (ret != -ENOMEM)
1240dabb16f6SLogan Gunthorpe 		return -EINVAL;
1241dabb16f6SLogan Gunthorpe 	return ret;
12420db2e5d1SRobin Murphy }
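
/*
 * Caller-side sketch (hypothetical driver code): dma_map_sgtable() is the
 * wrapper that propagates the negative errno returned above, whereas the
 * older dma_map_sg() collapses any failure to 0.
 */
static int example_map_sgtable(struct device *dev, struct sg_table *sgt)
{
	int ret = dma_map_sgtable(dev, sgt, DMA_TO_DEVICE, 0);

	if (ret)		/* -EINVAL or -ENOMEM, as above */
		return ret;

	/* ... program the hardware with sgt->sgl, then eventually ... */
	dma_unmap_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
	return 0;
}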
12430db2e5d1SRobin Murphy 
124406d60728SChristoph Hellwig static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
124506d60728SChristoph Hellwig 		int nents, enum dma_data_direction dir, unsigned long attrs)
12460db2e5d1SRobin Murphy {
1247842fe519SRobin Murphy 	dma_addr_t start, end;
1248842fe519SRobin Murphy 	struct scatterlist *tmp;
1249842fe519SRobin Murphy 	int i;
125006d60728SChristoph Hellwig 
12512e727bffSDavid Stevens 	if (dev_use_swiotlb(dev)) {
125282612d66STom Murphy 		iommu_dma_unmap_sg_swiotlb(dev, sg, nents, dir, attrs);
125382612d66STom Murphy 		return;
125482612d66STom Murphy 	}
125582612d66STom Murphy 
1256ee9d4097SDavid Stevens 	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
1257ee9d4097SDavid Stevens 		iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir);
1258ee9d4097SDavid Stevens 
12590db2e5d1SRobin Murphy 	/*
12600db2e5d1SRobin Murphy 	 * The scatterlist segments are mapped into a single
12610db2e5d1SRobin Murphy 	 * contiguous IOVA allocation, so this is incredibly easy.
12620db2e5d1SRobin Murphy 	 */
1263842fe519SRobin Murphy 	start = sg_dma_address(sg);
1264842fe519SRobin Murphy 	for_each_sg(sg_next(sg), tmp, nents - 1, i) {
1265842fe519SRobin Murphy 		if (sg_dma_len(tmp) == 0)
1266842fe519SRobin Murphy 			break;
1267842fe519SRobin Murphy 		sg = tmp;
1268842fe519SRobin Murphy 	}
1269842fe519SRobin Murphy 	end = sg_dma_address(sg) + sg_dma_len(sg);
1270b61d271eSRobin Murphy 	__iommu_dma_unmap(dev, start, end - start);
12710db2e5d1SRobin Murphy }
12720db2e5d1SRobin Murphy 
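/*
 * Worked example for the walk above (hypothetical numbers): a three-entry
 * list finalised at IOVA 0xfff00000 with DMA segment lengths 0x1000,
 * 0x2000 and 0x1000 gives start = 0xfff00000 and end = 0xfff04000, so a
 * single 16KiB __iommu_dma_unmap() covers the whole mapping.
 */
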
127306d60728SChristoph Hellwig static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
127451f8cc9eSRobin Murphy 		size_t size, enum dma_data_direction dir, unsigned long attrs)
127551f8cc9eSRobin Murphy {
127651f8cc9eSRobin Murphy 	return __iommu_dma_map(dev, phys, size,
12776e235020STom Murphy 			dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO,
12786e235020STom Murphy 			dma_get_mask(dev));
127951f8cc9eSRobin Murphy }
128051f8cc9eSRobin Murphy 
128106d60728SChristoph Hellwig static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
128251f8cc9eSRobin Murphy 		size_t size, enum dma_data_direction dir, unsigned long attrs)
128351f8cc9eSRobin Murphy {
1284b61d271eSRobin Murphy 	__iommu_dma_unmap(dev, handle, size);
128551f8cc9eSRobin Murphy }
128651f8cc9eSRobin Murphy 
12878553f6e6SRobin Murphy static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
1288bcf4b9c4SRobin Murphy {
1289bcf4b9c4SRobin Murphy 	size_t alloc_size = PAGE_ALIGN(size);
1290bcf4b9c4SRobin Murphy 	int count = alloc_size >> PAGE_SHIFT;
1291bcf4b9c4SRobin Murphy 	struct page *page = NULL, **pages = NULL;
1292bcf4b9c4SRobin Murphy 
1293bcf4b9c4SRobin Murphy 	/* Non-coherent atomic allocation? Easy */
1294e6475eb0SChristoph Hellwig 	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
1295c84dc6e6SDavid Rientjes 	    dma_free_from_pool(dev, cpu_addr, alloc_size))
1296bcf4b9c4SRobin Murphy 		return;
1297bcf4b9c4SRobin Murphy 
1298f5ff79fdSChristoph Hellwig 	if (is_vmalloc_addr(cpu_addr)) {
1299bcf4b9c4SRobin Murphy 		/*
1300bcf4b9c4SRobin Murphy 		 * If the address is remapped, then it's either non-coherent
1301bcf4b9c4SRobin Murphy 		 * or highmem CMA, or an iommu_dma_alloc_remap() construction.
1302bcf4b9c4SRobin Murphy 		 */
13035cf45379SChristoph Hellwig 		pages = dma_common_find_pages(cpu_addr);
1304bcf4b9c4SRobin Murphy 		if (!pages)
1305bcf4b9c4SRobin Murphy 			page = vmalloc_to_page(cpu_addr);
130651231740SChristoph Hellwig 		dma_common_free_remap(cpu_addr, alloc_size);
1307bcf4b9c4SRobin Murphy 	} else {
1308bcf4b9c4SRobin Murphy 		/* Lowmem means a coherent atomic or CMA allocation */
1309bcf4b9c4SRobin Murphy 		page = virt_to_page(cpu_addr);
1310bcf4b9c4SRobin Murphy 	}
1311bcf4b9c4SRobin Murphy 
1312bcf4b9c4SRobin Murphy 	if (pages)
1313bcf4b9c4SRobin Murphy 		__iommu_dma_free_pages(pages, count);
1314591fcf3bSNicolin Chen 	if (page)
1315591fcf3bSNicolin Chen 		dma_free_contiguous(dev, page, alloc_size);
1316bcf4b9c4SRobin Murphy }
1317bcf4b9c4SRobin Murphy 
13188553f6e6SRobin Murphy static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
13198553f6e6SRobin Murphy 		dma_addr_t handle, unsigned long attrs)
13208553f6e6SRobin Murphy {
13218553f6e6SRobin Murphy 	__iommu_dma_unmap(dev, handle, size);
13228553f6e6SRobin Murphy 	__iommu_dma_free(dev, size, cpu_addr);
13238553f6e6SRobin Murphy }
13248553f6e6SRobin Murphy 
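/*
 * Caller-side sketch (hypothetical driver code): dma_map_resource() is
 * meant for physical ranges without struct page backing, such as a peer
 * device's MMIO window; IOMMU_MMIO above makes the IOMMU treat the
 * mapping as device memory.
 */
static dma_addr_t example_map_peer_bar(struct device *dev,
				       phys_addr_t bar_phys, size_t len)
{
	dma_addr_t dma = dma_map_resource(dev, bar_phys, len,
					  DMA_BIDIRECTIONAL, 0);

	if (dma_mapping_error(dev, dma))
		return DMA_MAPPING_ERROR;
	return dma;
}
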
1325ee1ef05dSChristoph Hellwig static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
1326ee1ef05dSChristoph Hellwig 		struct page **pagep, gfp_t gfp, unsigned long attrs)
132706d60728SChristoph Hellwig {
132806d60728SChristoph Hellwig 	bool coherent = dev_is_dma_coherent(dev);
13299ad5d6edSRobin Murphy 	size_t alloc_size = PAGE_ALIGN(size);
133090ae409fSChristoph Hellwig 	int node = dev_to_node(dev);
13319a4ab94aSChristoph Hellwig 	struct page *page = NULL;
13329ad5d6edSRobin Murphy 	void *cpu_addr;
133306d60728SChristoph Hellwig 
1334591fcf3bSNicolin Chen 	page = dma_alloc_contiguous(dev, alloc_size, gfp);
133506d60728SChristoph Hellwig 	if (!page)
133690ae409fSChristoph Hellwig 		page = alloc_pages_node(node, gfp, get_order(alloc_size));
133790ae409fSChristoph Hellwig 	if (!page)
133806d60728SChristoph Hellwig 		return NULL;
133906d60728SChristoph Hellwig 
1340f5ff79fdSChristoph Hellwig 	if (!coherent || PageHighMem(page)) {
134133dcb37cSChristoph Hellwig 		pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
13428680aa5aSRobin Murphy 
13439ad5d6edSRobin Murphy 		cpu_addr = dma_common_contiguous_remap(page, alloc_size,
134451231740SChristoph Hellwig 				prot, __builtin_return_address(0));
13459ad5d6edSRobin Murphy 		if (!cpu_addr)
1346ee1ef05dSChristoph Hellwig 			goto out_free_pages;
1347072bebc0SRobin Murphy 
134806d60728SChristoph Hellwig 		if (!coherent)
13499ad5d6edSRobin Murphy 			arch_dma_prep_coherent(page, size);
13508680aa5aSRobin Murphy 	} else {
13519ad5d6edSRobin Murphy 		cpu_addr = page_address(page);
13528680aa5aSRobin Murphy 	}
1353ee1ef05dSChristoph Hellwig 
1354ee1ef05dSChristoph Hellwig 	*pagep = page;
13559ad5d6edSRobin Murphy 	memset(cpu_addr, 0, alloc_size);
13569ad5d6edSRobin Murphy 	return cpu_addr;
1357072bebc0SRobin Murphy out_free_pages:
1358591fcf3bSNicolin Chen 	dma_free_contiguous(dev, page, alloc_size);
1359072bebc0SRobin Murphy 	return NULL;
136006d60728SChristoph Hellwig }
136106d60728SChristoph Hellwig 
1362ee1ef05dSChristoph Hellwig static void *iommu_dma_alloc(struct device *dev, size_t size,
1363ee1ef05dSChristoph Hellwig 		dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
1364ee1ef05dSChristoph Hellwig {
1365ee1ef05dSChristoph Hellwig 	bool coherent = dev_is_dma_coherent(dev);
1366ee1ef05dSChristoph Hellwig 	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
1367ee1ef05dSChristoph Hellwig 	struct page *page = NULL;
1368ee1ef05dSChristoph Hellwig 	void *cpu_addr;
1369ee1ef05dSChristoph Hellwig 
1370ee1ef05dSChristoph Hellwig 	gfp |= __GFP_ZERO;
1371ee1ef05dSChristoph Hellwig 
1372f5ff79fdSChristoph Hellwig 	if (gfpflags_allow_blocking(gfp) &&
1373e8d39a90SChristoph Hellwig 	    !(attrs & DMA_ATTR_FORCE_CONTIGUOUS)) {
1374e8d39a90SChristoph Hellwig 		return iommu_dma_alloc_remap(dev, size, handle, gfp,
1375e8d39a90SChristoph Hellwig 				dma_pgprot(dev, PAGE_KERNEL, attrs), attrs);
1376e8d39a90SChristoph Hellwig 	}
1377ee1ef05dSChristoph Hellwig 
1378e6475eb0SChristoph Hellwig 	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
1379e6475eb0SChristoph Hellwig 	    !gfpflags_allow_blocking(gfp) && !coherent)
13809420139fSChristoph Hellwig 		page = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &cpu_addr,
13819420139fSChristoph Hellwig 					   gfp, NULL);
1382ee1ef05dSChristoph Hellwig 	else
1383ee1ef05dSChristoph Hellwig 		cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs);
1384ee1ef05dSChristoph Hellwig 	if (!cpu_addr)
1385ee1ef05dSChristoph Hellwig 		return NULL;
1386ee1ef05dSChristoph Hellwig 
13876e235020STom Murphy 	*handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot,
13886e235020STom Murphy 			dev->coherent_dma_mask);
1389ee1ef05dSChristoph Hellwig 	if (*handle == DMA_MAPPING_ERROR) {
1390ee1ef05dSChristoph Hellwig 		__iommu_dma_free(dev, size, cpu_addr);
1391ee1ef05dSChristoph Hellwig 		return NULL;
1392ee1ef05dSChristoph Hellwig 	}
1393ee1ef05dSChristoph Hellwig 
1394ee1ef05dSChristoph Hellwig 	return cpu_addr;
1395ee1ef05dSChristoph Hellwig }
1396ee1ef05dSChristoph Hellwig 
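/*
 * Caller-side sketch (hypothetical driver code): dma_alloc_coherent()
 * lands in iommu_dma_alloc() above; a blocking GFP_KERNEL request may
 * take the remapped route, while atomic requests fall back to the pool
 * or contiguous routes.
 */
static void *example_alloc_ring(struct device *dev, size_t size,
				dma_addr_t *dma_handle)
{
	void *ring = dma_alloc_coherent(dev, size, dma_handle, GFP_KERNEL);

	/* Pair with dma_free_coherent(dev, size, ring, *dma_handle) */
	return ring;
}
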
139706d60728SChristoph Hellwig static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
139806d60728SChristoph Hellwig 		void *cpu_addr, dma_addr_t dma_addr, size_t size,
139906d60728SChristoph Hellwig 		unsigned long attrs)
140006d60728SChristoph Hellwig {
140106d60728SChristoph Hellwig 	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
1402efd9f10bSChristoph Hellwig 	unsigned long pfn, off = vma->vm_pgoff;
140306d60728SChristoph Hellwig 	int ret;
140406d60728SChristoph Hellwig 
140533dcb37cSChristoph Hellwig 	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
140606d60728SChristoph Hellwig 
140706d60728SChristoph Hellwig 	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
140806d60728SChristoph Hellwig 		return ret;
140906d60728SChristoph Hellwig 
141006d60728SChristoph Hellwig 	if (off >= nr_pages || vma_pages(vma) > nr_pages - off)
141106d60728SChristoph Hellwig 		return -ENXIO;
141206d60728SChristoph Hellwig 
1413f5ff79fdSChristoph Hellwig 	if (is_vmalloc_addr(cpu_addr)) {
14145cf45379SChristoph Hellwig 		struct page **pages = dma_common_find_pages(cpu_addr);
141506d60728SChristoph Hellwig 
1416efd9f10bSChristoph Hellwig 		if (pages)
141771fe89ceSChristoph Hellwig 			return vm_map_pages(vma, pages, nr_pages);
1418efd9f10bSChristoph Hellwig 		pfn = vmalloc_to_pfn(cpu_addr);
1419efd9f10bSChristoph Hellwig 	} else {
1420efd9f10bSChristoph Hellwig 		pfn = page_to_pfn(virt_to_page(cpu_addr));
1421efd9f10bSChristoph Hellwig 	}
1422efd9f10bSChristoph Hellwig 
1423efd9f10bSChristoph Hellwig 	return remap_pfn_range(vma, vma->vm_start, pfn + off,
1424efd9f10bSChristoph Hellwig 			vma->vm_end - vma->vm_start,
1425efd9f10bSChristoph Hellwig 			vma->vm_page_prot);
142606d60728SChristoph Hellwig }
142706d60728SChristoph Hellwig 
142806d60728SChristoph Hellwig static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
142906d60728SChristoph Hellwig 		void *cpu_addr, dma_addr_t dma_addr, size_t size,
143006d60728SChristoph Hellwig 		unsigned long attrs)
143106d60728SChristoph Hellwig {
14323fb3378bSChristoph Hellwig 	struct page *page;
14333fb3378bSChristoph Hellwig 	int ret;
143406d60728SChristoph Hellwig 
1435f5ff79fdSChristoph Hellwig 	if (is_vmalloc_addr(cpu_addr)) {
14365cf45379SChristoph Hellwig 		struct page **pages = dma_common_find_pages(cpu_addr);
14373fb3378bSChristoph Hellwig 
14383fb3378bSChristoph Hellwig 		if (pages) {
14393fb3378bSChristoph Hellwig 			return sg_alloc_table_from_pages(sgt, pages,
14403fb3378bSChristoph Hellwig 					PAGE_ALIGN(size) >> PAGE_SHIFT,
14413fb3378bSChristoph Hellwig 					0, size, GFP_KERNEL);
144206d60728SChristoph Hellwig 		}
144306d60728SChristoph Hellwig 
14443fb3378bSChristoph Hellwig 		page = vmalloc_to_page(cpu_addr);
14453fb3378bSChristoph Hellwig 	} else {
14463fb3378bSChristoph Hellwig 		page = virt_to_page(cpu_addr);
144706d60728SChristoph Hellwig 	}
144806d60728SChristoph Hellwig 
14493fb3378bSChristoph Hellwig 	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
14503fb3378bSChristoph Hellwig 	if (!ret)
14513fb3378bSChristoph Hellwig 		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
14523fb3378bSChristoph Hellwig 	return ret;
145306d60728SChristoph Hellwig }
145406d60728SChristoph Hellwig 
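/*
 * Caller-side sketch (hypothetical driver code): a driver's mmap fop can
 * simply forward to dma_mmap_coherent(), which reaches iommu_dma_mmap()
 * above for devices using these ops.
 */
static int example_mmap_ring(struct device *dev, struct vm_area_struct *vma,
			     void *cpu_addr, dma_addr_t dma, size_t size)
{
	return dma_mmap_coherent(dev, vma, cpu_addr, dma, size);
}
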
1455158a6d3cSYoshihiro Shimoda static unsigned long iommu_dma_get_merge_boundary(struct device *dev)
1456158a6d3cSYoshihiro Shimoda {
1457158a6d3cSYoshihiro Shimoda 	struct iommu_domain *domain = iommu_get_dma_domain(dev);
1458158a6d3cSYoshihiro Shimoda 
1459158a6d3cSYoshihiro Shimoda 	return (1UL << __ffs(domain->pgsize_bitmap)) - 1;
1460158a6d3cSYoshihiro Shimoda }
1461158a6d3cSYoshihiro Shimoda 
146206d60728SChristoph Hellwig static const struct dma_map_ops iommu_dma_ops = {
146306d60728SChristoph Hellwig 	.alloc			= iommu_dma_alloc,
146406d60728SChristoph Hellwig 	.free			= iommu_dma_free,
1465efa70f2fSChristoph Hellwig 	.alloc_pages		= dma_common_alloc_pages,
1466efa70f2fSChristoph Hellwig 	.free_pages		= dma_common_free_pages,
1467e817ee5fSChristoph Hellwig 	.alloc_noncontiguous	= iommu_dma_alloc_noncontiguous,
1468e817ee5fSChristoph Hellwig 	.free_noncontiguous	= iommu_dma_free_noncontiguous,
146906d60728SChristoph Hellwig 	.mmap			= iommu_dma_mmap,
147006d60728SChristoph Hellwig 	.get_sgtable		= iommu_dma_get_sgtable,
147106d60728SChristoph Hellwig 	.map_page		= iommu_dma_map_page,
147206d60728SChristoph Hellwig 	.unmap_page		= iommu_dma_unmap_page,
147306d60728SChristoph Hellwig 	.map_sg			= iommu_dma_map_sg,
147406d60728SChristoph Hellwig 	.unmap_sg		= iommu_dma_unmap_sg,
147506d60728SChristoph Hellwig 	.sync_single_for_cpu	= iommu_dma_sync_single_for_cpu,
147606d60728SChristoph Hellwig 	.sync_single_for_device	= iommu_dma_sync_single_for_device,
147706d60728SChristoph Hellwig 	.sync_sg_for_cpu	= iommu_dma_sync_sg_for_cpu,
147806d60728SChristoph Hellwig 	.sync_sg_for_device	= iommu_dma_sync_sg_for_device,
147906d60728SChristoph Hellwig 	.map_resource		= iommu_dma_map_resource,
148006d60728SChristoph Hellwig 	.unmap_resource		= iommu_dma_unmap_resource,
1481158a6d3cSYoshihiro Shimoda 	.get_merge_boundary	= iommu_dma_get_merge_boundary,
148206d60728SChristoph Hellwig };
148306d60728SChristoph Hellwig 
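/*
 * Worked example for iommu_dma_get_merge_boundary() above (hypothetical
 * value): with pgsize_bitmap = SZ_4K | SZ_2M | SZ_1G, __ffs() selects the
 * 4K bit, so the returned boundary is 0xfff and the block layer may merge
 * requests at 4K granularity, since the IOMMU can remap at that size.
 */
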
148406d60728SChristoph Hellwig /*
148506d60728SChristoph Hellwig  * The IOMMU core code allocates the default DMA domain, which the underlying
148606d60728SChristoph Hellwig  * IOMMU driver needs to support via the dma-iommu layer.
148706d60728SChristoph Hellwig  */
1488ac6d7046SJean-Philippe Brucker void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit)
148906d60728SChristoph Hellwig {
149006d60728SChristoph Hellwig 	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
149106d60728SChristoph Hellwig 
149206d60728SChristoph Hellwig 	if (!domain)
149306d60728SChristoph Hellwig 		goto out_err;
149406d60728SChristoph Hellwig 
149506d60728SChristoph Hellwig 	/*
149606d60728SChristoph Hellwig 	 * The IOMMU core code allocates the default DMA domain, which the
149706d60728SChristoph Hellwig 	 * underlying IOMMU driver needs to support via the dma-iommu layer.
149806d60728SChristoph Hellwig 	 */
1499bf3aed46SRobin Murphy 	if (iommu_is_dma_domain(domain)) {
1500ac6d7046SJean-Philippe Brucker 		if (iommu_dma_init_domain(domain, dma_base, dma_limit, dev))
150106d60728SChristoph Hellwig 			goto out_err;
150206d60728SChristoph Hellwig 		dev->dma_ops = &iommu_dma_ops;
150306d60728SChristoph Hellwig 	}
150406d60728SChristoph Hellwig 
150506d60728SChristoph Hellwig 	return;
150606d60728SChristoph Hellwig out_err:
150706d60728SChristoph Hellwig 	pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
150806d60728SChristoph Hellwig 		dev_name(dev));
150944bb7e24SRobin Murphy }
15108ce4904bSJean-Philippe Brucker EXPORT_SYMBOL_GPL(iommu_setup_dma_ops);
151144bb7e24SRobin Murphy 
151244bb7e24SRobin Murphy static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
151344bb7e24SRobin Murphy 		phys_addr_t msi_addr, struct iommu_domain *domain)
151444bb7e24SRobin Murphy {
151544bb7e24SRobin Murphy 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
151644bb7e24SRobin Murphy 	struct iommu_dma_msi_page *msi_page;
1517842fe519SRobin Murphy 	dma_addr_t iova;
151844bb7e24SRobin Murphy 	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
1519fdbe574eSRobin Murphy 	size_t size = cookie_msi_granule(cookie);
152044bb7e24SRobin Murphy 
1521fdbe574eSRobin Murphy 	msi_addr &= ~(phys_addr_t)(size - 1);
152244bb7e24SRobin Murphy 	list_for_each_entry(msi_page, &cookie->msi_page_list, list)
152344bb7e24SRobin Murphy 		if (msi_page->phys == msi_addr)
152444bb7e24SRobin Murphy 			return msi_page;
152544bb7e24SRobin Murphy 
1526c1864790SRobin Murphy 	msi_page = kzalloc(sizeof(*msi_page), GFP_KERNEL);
152744bb7e24SRobin Murphy 	if (!msi_page)
152844bb7e24SRobin Murphy 		return NULL;
152944bb7e24SRobin Murphy 
15308af23fadSRobin Murphy 	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
15318af23fadSRobin Murphy 	if (!iova)
153244bb7e24SRobin Murphy 		goto out_free_page;
153344bb7e24SRobin Murphy 
15348af23fadSRobin Murphy 	if (iommu_map(domain, iova, msi_addr, size, prot))
15358af23fadSRobin Murphy 		goto out_free_iova;
15368af23fadSRobin Murphy 
153744bb7e24SRobin Murphy 	INIT_LIST_HEAD(&msi_page->list);
1538a44e6657SRobin Murphy 	msi_page->phys = msi_addr;
1539a44e6657SRobin Murphy 	msi_page->iova = iova;
154044bb7e24SRobin Murphy 	list_add(&msi_page->list, &cookie->msi_page_list);
154144bb7e24SRobin Murphy 	return msi_page;
154244bb7e24SRobin Murphy 
15438af23fadSRobin Murphy out_free_iova:
15442a2b8eaaSTom Murphy 	iommu_dma_free_iova(cookie, iova, size, NULL);
154544bb7e24SRobin Murphy out_free_page:
154644bb7e24SRobin Murphy 	kfree(msi_page);
154744bb7e24SRobin Murphy 	return NULL;
154844bb7e24SRobin Murphy }
154944bb7e24SRobin Murphy 
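/*
 * Caller-side sketch (hypothetical irqchip code): an MSI irq_domain's
 * prepare path hands each descriptor's doorbell address to
 * iommu_dma_prepare_msi(); the cached-page lookup above means that all
 * MSIs aimed at the same doorbell granule share one IOVA mapping.
 */
static int example_prepare_doorbells(struct device *dev, phys_addr_t doorbell)
{
	struct msi_desc *desc;
	int ret;

	for_each_msi_entry(desc, dev) {
		ret = iommu_dma_prepare_msi(desc, doorbell);
		if (ret)
			return ret;
	}
	return 0;
}
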
1550ece6e6f0SJulien Grall int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
155144bb7e24SRobin Murphy {
1552ece6e6f0SJulien Grall 	struct device *dev = msi_desc_to_dev(desc);
155344bb7e24SRobin Murphy 	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
155444bb7e24SRobin Murphy 	struct iommu_dma_msi_page *msi_page;
1555c1864790SRobin Murphy 	static DEFINE_MUTEX(msi_prepare_lock); /* see below */
155644bb7e24SRobin Murphy 
1557ece6e6f0SJulien Grall 	if (!domain || !domain->iova_cookie) {
1558ece6e6f0SJulien Grall 		desc->iommu_cookie = NULL;
1559ece6e6f0SJulien Grall 		return 0;
1560ece6e6f0SJulien Grall 	}
156144bb7e24SRobin Murphy 
156244bb7e24SRobin Murphy 	/*
1563c1864790SRobin Murphy 	 * In fact the whole prepare operation should already be serialised by
1564c1864790SRobin Murphy 	 * irq_domain_mutex further up the callchain, but that's pretty subtle
1565c1864790SRobin Murphy 	 * on its own, so consider this locking as failsafe documentation...
156644bb7e24SRobin Murphy 	 */
1567c1864790SRobin Murphy 	mutex_lock(&msi_prepare_lock);
156844bb7e24SRobin Murphy 	msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
1569c1864790SRobin Murphy 	mutex_unlock(&msi_prepare_lock);
157044bb7e24SRobin Murphy 
1571ece6e6f0SJulien Grall 	msi_desc_set_iommu_cookie(desc, msi_page);
1572ece6e6f0SJulien Grall 
1573ece6e6f0SJulien Grall 	if (!msi_page)
1574ece6e6f0SJulien Grall 		return -ENOMEM;
1575ece6e6f0SJulien Grall 	return 0;
157644bb7e24SRobin Murphy }
1577ece6e6f0SJulien Grall 
1578ece6e6f0SJulien Grall void iommu_dma_compose_msi_msg(struct msi_desc *desc,
1579ece6e6f0SJulien Grall 			       struct msi_msg *msg)
1580ece6e6f0SJulien Grall {
1581ece6e6f0SJulien Grall 	struct device *dev = msi_desc_to_dev(desc);
1582ece6e6f0SJulien Grall 	const struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
1583ece6e6f0SJulien Grall 	const struct iommu_dma_msi_page *msi_page;
1584ece6e6f0SJulien Grall 
1585ece6e6f0SJulien Grall 	msi_page = msi_desc_get_iommu_cookie(desc);
1586ece6e6f0SJulien Grall 
1587ece6e6f0SJulien Grall 	if (!domain || !domain->iova_cookie || WARN_ON(!msi_page))
1588ece6e6f0SJulien Grall 		return;
1589ece6e6f0SJulien Grall 
1590ece6e6f0SJulien Grall 	msg->address_hi = upper_32_bits(msi_page->iova);
1591ece6e6f0SJulien Grall 	msg->address_lo &= cookie_msi_granule(domain->iova_cookie) - 1;
1592ece6e6f0SJulien Grall 	msg->address_lo += lower_32_bits(msi_page->iova);
159344bb7e24SRobin Murphy }
159406d60728SChristoph Hellwig 
159506d60728SChristoph Hellwig static int iommu_dma_init(void)
159606d60728SChristoph Hellwig {
1597a8e8af35SLianbo Jiang 	if (is_kdump_kernel())
1598a8e8af35SLianbo Jiang 		static_branch_enable(&iommu_deferred_attach_enabled);
1599a8e8af35SLianbo Jiang 
160006d60728SChristoph Hellwig 	return iova_cache_get();
16010db2e5d1SRobin Murphy }
160206d60728SChristoph Hellwig arch_initcall(iommu_dma_init);
1603